prompt (string, 15–655k chars) | completion (string, 3–32.4k chars) | api (string, 8–52 chars)
---|---|---|
import pytest
import torch as th
import numpy as np
from syft.frameworks.torch.mpc.fss import DPF, DIF, n
@pytest.mark.parametrize("op", ["eq", "le"])
def test_fss_class(op):
class_ = {"eq": DPF, "le": DIF}[op]
th_op = {"eq": np.equal, "le": np.less_equal}[op]
gather_op = {"eq": "__add__", "le": "__add__"}[op]
# single value
primitive = class_.keygen(n_values=1)
alpha, s_00, s_01, *CW = primitive
mask = np.random.randint(0, 2 ** n, alpha.shape, dtype=alpha.dtype) # IID in int32
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x = np.array([0])
x_masked = x + k0[0] + k1[0]
y0 = class_.eval(0, x_masked, *k0[1:])
y1 = class_.eval(1, x_masked, *k1[1:])
assert (getattr(y0, gather_op)(y1) == th_op(x, 0)).all()
# 1D tensor
primitive = class_.keygen(n_values=3)
alpha, s_00, s_01, *CW = primitive
mask = np.random.randint(0, 2 ** n, alpha.shape, dtype=alpha.dtype)
k0, k1 = [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]
x =
|
np.array([0, 2, -2])
|
numpy.array
|
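The completion fills in `x =` with np.array([0, 2, -2]), the 1D test input for the function secret sharing check. A minimal standalone sketch of the modular masking arithmetic used around it, independent of PySyft and with the bit width n assumed to be 32:
import numpy as np
n = 32  # assumed here; in the test it is imported from syft.frameworks.torch.mpc.fss
x = np.array([0, 2, -2])  # the completion: a small 1D test input
mask = np.random.randint(0, 2 ** n, x.shape, dtype=np.int64)
x_masked = (x + mask) % 2 ** n            # additive masking modulo 2**n
x_recovered = (x_masked - mask) % 2 ** n  # unmasking recovers x modulo 2**n (negatives wrap around)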
from copy import deepcopy
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.decomposition import PCA as skPCA
from sklearn.model_selection import BaseCrossValidator, KFold
from sklearn.model_selection._split import BaseShuffleSplit
from .ChemometricsScaler import ChemometricsScaler
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import matplotlib as mpl
import scipy.stats as st
import matplotlib.cm as cm
__author__ = 'gscorreia89'
from copy import deepcopy
class ChemometricsPCA(BaseEstimator):
"""
ChemometricsPCA object - Wrapper for sklearn.decomposition PCA algorithms, with tailored methods
for Chemometric Data analysis.
:param ncomps: Number of PCA components desired.
:type ncomps: int
:param sklearn.decomposition._BasePCA pca_algorithm: scikit-learn PCA algorithm to use (inheriting from _BasePCA).
:param scaler: The object which will handle data scaling.
:type scaler: ChemometricsScaler object, scaling/preprocessing objects from scikit-learn or None
:param kwargs pca_type_kwargs: Keyword arguments to be passed during initialization of pca_algorithm.
:raise TypeError: If the pca_algorithm or scaler objects are not of the right class.
"""
# Constant usage of kwargs might look excessive but ensures that most things from scikit-learn can be used directly
# no matter what PCA algorithm is used
def __init__(self, ncomps=2, pca_algorithm=skPCA, scaler=ChemometricsScaler(), **pca_type_kwargs):
try:
# Perform the check with isinstance but avoid abstract base class runs. PCA needs the number of components anyway!
init_pca_algorithm = pca_algorithm(n_components=ncomps, **pca_type_kwargs)
if not isinstance(init_pca_algorithm, (BaseEstimator, TransformerMixin)):
raise TypeError("Use a valid scikit-learn PCA model please")
if not (isinstance(scaler, TransformerMixin) or scaler is None):
raise TypeError("Scikit-learn Transformer-like object or None")
if scaler is None:
scaler = ChemometricsScaler(0, with_std=False)
self.pca_algorithm = init_pca_algorithm
# Most initialized as None, before object is fitted.
self.scores = None
self.loadings = None
self._ncomps = ncomps
self._scaler = scaler
self.cvParameters = None
self.modelParameters = None
self._isfitted = False
except TypeError as terp:
print(terp.args[0])
raise terp
def fit(self, x, **fit_params):
"""
Perform model fitting on the provided x data matrix and calculate basic goodness-of-fit metrics.
Equivalent to scikit-learn's default BaseEstimator method.
:param x: Data matrix to fit the PCA model.
:type x: numpy.ndarray, shape [n_samples, n_features].
:param kwargs fit_params: Keyword arguments to be passed to the .fit() method of the core sklearn model.
:raise ValueError: If any problem occurs during fitting.
"""
try:
# This scaling check is always performed to ensure that running the model with scaling or with scaling == None
# always gives consistent results (the same type of data scale is expected for fitting,
# returned by inverse_transform, etc.)
if self.scaler is not None:
xscaled = self.scaler.fit_transform(x)
self.pca_algorithm.fit(xscaled, **fit_params)
self.scores = self.pca_algorithm.transform(xscaled)
ss = np.sum((xscaled - np.mean(xscaled, 0)) ** 2)
predicted = self.pca_algorithm.inverse_transform(self.scores)
rss =
|
np.sum((xscaled - predicted) ** 2)
|
numpy.sum
|
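The completion is the residual sum of squares of the PCA reconstruction, which together with `ss` gives the model's R². A minimal sketch of the same bookkeeping using scikit-learn's PCA directly, with random data and a plain standardisation standing in for ChemometricsScaler:
import numpy as np
from sklearn.decomposition import PCA
x = np.random.randn(50, 10)
xscaled = (x - x.mean(0)) / x.std(0)               # stand-in for scaler.fit_transform
pca = PCA(n_components=2).fit(xscaled)
scores = pca.transform(xscaled)
predicted = pca.inverse_transform(scores)
ss = np.sum((xscaled - np.mean(xscaled, 0)) ** 2)  # total sum of squares
rss = np.sum((xscaled - predicted) ** 2)           # the completion: residual sum of squares
r2 = 1 - rss / ss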
from matplotlib import pyplot as plt
from numpy import cos as cos
from numpy import sin as sin
import numpy as np
from numpy import diff
import matplotlib.pyplot as plt
from mujoco_py import (functions)
import ImpFuncs as IF
import globals
L1=globals.L1
L2=globals.L2
L3=globals.L3
SimStep=globals.SimStep
waitingTime=globals.waitingTime
sim=globals.sim
viewer=globals.viewer
#___# General navigation functions:
#full path navigation function
def Navigate(K,M,B,P,D,wantedPath, wantedTimes, waitingTime,method,methodCntrl,methodParams):
#initializing:
dofs=np.shape(P[0])
elbow=0 #can be 0/1 - choose the path of the inverse kinematics of the manipulator.
q = invkin(wantedPath[0][0], wantedPath[0][1],wantedPath[0][2],elbow)
sim.data.qpos[0] = q[0]
sim.data.qpos[1] = q[1]
sim.data.qpos[2] = q[2]
#Impedance initital condition (for IMP only):
xm=np.array([wantedPath[0][0],wantedPath[0][1],wantedPath[0][2]]).reshape(-1,1)
xmDot=np.array([0,0,0]).reshape(-1,1) #specific for Robot at rest
#Generalized data storage vectors:
accumulatedForce = []
accumulatedControl = []
accumulatedTravel = []
accumulatedPlannedPath = []
accumulatedJoints = []
accumulatedJointsVel = []
waitSim2(waitingTime)
forcebool=0 #boolean variable that can be used to check if the endeffector is being touched.
#General navigation loop:
for i in range(len(wantedPath) - 1):
nSteps =
|
np.int32(wantedTimes[i] / SimStep)
|
numpy.int32
|
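The completion converts a desired segment duration into an integer number of simulation steps. A small self-contained sketch with assumed values for SimStep and wantedTimes (in the prompt these come from globals):
import numpy as np
SimStep = 0.002                  # assumed simulation timestep in seconds
wantedTimes = [1.0, 0.5, 2.0]    # assumed per-segment durations in seconds
nSteps = np.int32(wantedTimes[0] / SimStep)  # the completion: note np.int32 truncates toward zero rather than rounding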
import numpy as np
import matplotlib.pyplot as plt
plt.plot(np.arange(15),
|
np.arange(15)
|
numpy.arange
|
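Here the completion supplies the y-values of the plot; since they equal the x-values, the call draws the identity line. Equivalent minimal snippet:
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(15)            # integers 0..14
plt.plot(x, np.arange(15))   # the completion: y equals x
plt.show()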
# Zebrafish Analysis
import math
import numpy as np
import cv2
from scipy.signal import savgol_filter
class ModelConfig:
def __init__(self, num_segments, min_angle, max_angle, num_angles, tail_length, tail_detection, prune_retention, test_width, draw_width, auto_detect_tail_length):
self.min_angle = min_angle # Minimum angle for each segment (relative to previous segment)
self.max_angle = max_angle # Maximum angle for each segment
self.num_models_per_segment = num_angles # Number of angles to try for each segment
self.prune_freq = 1 # When constructing the model, segment interval at which the model pruning function is called. Lower numbers are faster
self.max_num_models_to_keep = prune_retention # Number of models retained after each round of pruning
segment_height = int(tail_length / num_segments)
self.segment_heights = [segment_height] * num_segments # Length must match num_segments
self.segment_widths = [test_width] * num_segments # Used for constructing and testing straight-line models. Length must match num_segments
self.num_segments = num_segments
self.smoothed_segment_width = draw_width # Used for drawing the final smoothed version of the tail curve
self.tail_offset = 30 # Distance in pixels from the head point to the tail model start point
self.rotated_img_size = 200 # Size of the internal image of the rotated fish
self.tail_point_detection_algorithm = tail_detection
# Auto-detect tail length settings
self.auto_detect_tail_length = auto_detect_tail_length
if auto_detect_tail_length:
# Override number of segments with a high number
# Tail end will be detected automatically before this is reached and model generation will be stopped at that point
self.num_segments = 20
self.segment_heights = [5] * self.num_segments
self.segment_widths = [2] * self.num_segments
self.segment_score_improvement_threshold = 250 # Amount by which a segment must improve the model score to be considered valid. If 'num_fail_segments' segments in a row are considered invalid, no further segments are added to that model
self.num_fail_segments = 2 # Number of continuous segments which fail to meet the threshold improvement score for model generation to stop
self.min_segments = 5 # Minimum number of segments in the model - There will be at least this many segments, even if the failing criteria is reached
class MaskCanvas:
def __init__(self, canvas_width, canvas_height):
self.canvas = np.zeros((canvas_width, canvas_height), np.uint8)
self.points = []
self.scores = [0]
self.angle_offset = 0
def add_point(self, point):
self.points.append(point)
def last_point(self):
return self.points[-1] if len(self.points) > 0 else None
def last_score(self):
return self.scores[-1]
def score_improvement(self, n):
if len(self.scores) < n + 1:
return 0
return self.scores[-1 * n] - self.scores[-1 * (n+1)]
def getOrientation(frame, config):
th_val = 1
ret, binary_img = cv2.threshold(frame, th_val, 255, cv2.THRESH_BINARY)
im, contours, hierarchy = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Sort contours by area
contours.sort(key=lambda ar: cv2.contourArea(ar))
largest_contour = contours[-1]
[vx, vy, x, y] = cv2.fitLine(largest_contour, cv2.DIST_L2, 0, 0.01, 0.01)
line_angle = math.atan2(vy, vx)
line_angle_degrees = math.degrees(line_angle)
angle = line_angle_degrees + 90
x, y, w, h = cv2.boundingRect(largest_contour)
img_cropped = frame[y:y+h, x:x+w]
rotated_img, actual_angle = rotateFishImage(img_cropped, angle, config)
return rotated_img, actual_angle
def rotateFishImage(img, angle_degrees, config):
input_image = np.zeros((config.rotated_img_size, config.rotated_img_size), np.uint8)
y_mid = config.rotated_img_size // 2
def _get_range(l):
bot = math.floor(y_mid - l/2)
top = math.floor(y_mid + l/2)
return bot, top
h_ran, w_ran = list(map(_get_range, img.shape))
input_image[h_ran[0]:h_ran[1], w_ran[0]:w_ran[1]] = img
rows, cols = input_image.shape
center = (rows//2, cols//2)
M = cv2.getRotationMatrix2D(center, angle_degrees, 1.0)
rotated_img = cv2.warpAffine(input_image, M, (cols, rows))
img_top = rotated_img[0:y_mid, 0:config.rotated_img_size]
img_bottom = rotated_img[y_mid:config.rotated_img_size, 0:config.rotated_img_size]
# Use valid pixel count to determine which half contains the head
top_area = len(img_top[img_top != 0])
bottom_area = len(img_bottom[img_bottom != 0])
head_half = img_top if bottom_area < top_area else img_bottom
# Find rotation angle again, this time only using the head half
im, contours, hierarchy = cv2.findContours(head_half.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours.sort(key=lambda ar: ar.size)
largest_contour = contours[-1]
[vx, vy, x, y] = cv2.fitLine(largest_contour, cv2.DIST_L2, 0, 0.01, 0.01)
line_angle = math.atan2(vy, vx)
line_angle_degrees = (math.degrees(line_angle) + 90) % 360
final_angle = (angle_degrees + line_angle_degrees) % 360
rows, cols = input_image.shape[:2]
center = (rows/2, cols/2)
M = cv2.getRotationMatrix2D(center, final_angle, 1.0)
rotated_output_img = cv2.warpAffine(input_image, M, (cols, rows))
# Check again whether the image is rotated 180 degrees, and correct if necessary
img_top = rotated_output_img[0:y_mid, 0:config.rotated_img_size]
img_bottom = rotated_output_img[y_mid:config.rotated_img_size, 0:config.rotated_img_size]
top_area = len(img_top[img_top != 0])
bottom_area = len(img_bottom[img_bottom != 0])
if bottom_area > top_area:
correction_angle = 180
M = cv2.getRotationMatrix2D(center, correction_angle, 1.0)
rotated_output_img = cv2.warpAffine(rotated_output_img, M, (cols, rows))
final_angle = (final_angle + correction_angle) % 360
return rotated_output_img, final_angle
def getHeadMask(frame):
th_val = 1
ret, img_thresh = cv2.threshold(frame, th_val, 255, cv2.THRESH_BINARY)
# Remove excess noise by drawing only the largest contour
head_mask = np.zeros((img_thresh.shape[0], img_thresh.shape[1]), np.uint8)
im, contours, hierarchy = cv2.findContours(img_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours.sort(key=lambda ar: ar.size)
head_contour = contours[-1]
cv2.drawContours(head_mask, [head_contour], 0, 255, cv2.FILLED)
return head_mask
def getHeadPoint(rotated_img, angle):
theta = math.radians(- angle)
img_binary = np.zeros(rotated_img.shape, np.uint8)
im, contours, hierarchy = cv2.findContours(rotated_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours.sort(key=lambda ar: ar.size)
largest_contour = contours[-1]
cv2.drawContours(img_binary, [largest_contour], 0, 255, cv2.FILLED)
valid_pixels = np.nonzero(img_binary)
x = valid_pixels[1]
y = valid_pixels[0]
head_x = x[np.argmin(y)]
head_y = np.min(y)
w, h = rotated_img.shape[:2]
center_x = w / 2
center_y = h / 2
hypot = math.hypot(center_x - head_x, center_y - head_y)
rotated_head_x = center_x - (hypot * math.sin(theta))
rotated_head_y = center_y - (hypot * math.cos(theta))
return rotated_head_x, rotated_head_y
def getTailStartPoint(head_mask, head_point, config):
# Calculate the angle from the head point to the contour center
# Then, 'walk' down the line from the head point to the contour center point a set length
im, contours, hierarchy = cv2.findContours(head_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contour = contours[-1]
# Get contour center
contour_moments = cv2.moments(contour)
contour_center_x = int(contour_moments['m10'] / contour_moments['m00'])
contour_center_y = int(contour_moments['m01'] / contour_moments['m00'])
head_x = head_point[0]
head_y = head_point[1]
head_contour_center_angle = math.atan2(contour_center_y - head_y, contour_center_x - head_x)
head_x = head_point[0]
head_y = head_point[1]
# Calculate tail start point
tail_start_x = head_x + config.tail_offset * math.cos(head_contour_center_angle)
tail_start_y = head_y + config.tail_offset * math.sin(head_contour_center_angle)
return (int(tail_start_x), int(tail_start_y))
def getModelMasks(head_mask, base_angle, frame, head_point, config):
# Generate the test image used for scoring models
test_image = getTestImage(frame, head_mask, head_point, base_angle, config)
# Create starting object
initial_canvas = MaskCanvas(head_mask.shape[0], head_mask.shape[1])
# Add tail starting point
initial_point = getTailStartPoint(head_mask, head_point, config)
initial_canvas.add_point(initial_point)
# Set base angle
initial_canvas.angle_offset = -base_angle
canvas_set = [initial_canvas]
output_canvas_set = drawModelSegments(canvas_set, config.num_segments, test_image, config)
return output_canvas_set
def getTestImage(frame, head_mask, head_point, angle, config):
# Remove the head from the test image using a triangular mask, so it doesn't interfere with tail model scoring
center_x, center_y = getTailStartPoint(head_mask, head_point, config)
triangle_length = 100
t0_angle_radians = math.radians(angle)
t0_x = center_x + triangle_length * math.sin(t0_angle_radians)
t0_y = center_y - triangle_length * math.cos(t0_angle_radians)
t1_angle_radians = math.radians(angle - 90)
t1_x = center_x + triangle_length * math.sin(t1_angle_radians)
t1_y = center_y - triangle_length * math.cos(t1_angle_radians)
t2_angle_radians = math.radians(angle + 90)
t2_x = center_x + triangle_length * math.sin(t2_angle_radians)
t2_y = center_y - triangle_length * math.cos(t2_angle_radians)
triangle_points =
|
np.array([(t0_x, t0_y), (t1_x, t1_y), (t2_x, t2_y)])
|
numpy.array
|
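The completion packs the three computed triangle vertices into a single (3, 2) array. A short sketch of how such a float vertex array is typically rasterised into a mask; the centre, angle, image size and the use of cv2.fillConvexPoly are assumptions for illustration, not taken from the script above:
import math
import numpy as np
import cv2
center_x, center_y = 100, 100    # assumed tail start point
angle = 30.0                     # assumed base angle in degrees
triangle_length = 100
pts = []
for offset in (0, -90, 90):      # apex direction plus the two base directions
    rad = math.radians(angle + offset)
    pts.append((center_x + triangle_length * math.sin(rad),
                center_y - triangle_length * math.cos(rad)))
triangle_points = np.array(pts)                                  # the completion: a (3, 2) float array
mask = np.zeros((200, 200), np.uint8)
cv2.fillConvexPoly(mask, triangle_points.astype(np.int32), 255)  # blank out the triangular head region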
"""
This is the main script for predicting a segmentation of an input MRA image. Segmentations can be predicted for multiple
models either on a rough grid (the parameters are then read out from the Unet/models/tuned_params.cvs file) or on a fine
grid.
"""
import os
from scipy.ndimage.filters import convolve
import numpy as np
import helper
import time
class Predictor():
def __init__(self, model, train_metadata, prob_dir, error_dir, patients, patients_dir, label_filename, threshold=0.5):
self.model = model
self.train_metadata = train_metadata
self.PROB_DIR = prob_dir
self.ERROR_DIR = error_dir
self.patients = patients
self.PATIENTS_DIR = patients_dir
self.threshold = threshold
self.label_filename = label_filename
return
# where to save probability map from validation as nifti
def get_probs_filepath(self, patient):
return os.path.join(self.PROB_DIR, 'probs_' + patient + '_.nii')
# where to save error mask
def get_errormasks_filepath(self, patient):
return os.path.join(self.ERROR_DIR, 'error_mask_' + patient + '_.nii')
def predict(self, patch_size, data_dir, patch_size_z=None):
print('________________________________________________________________________________')
print('patient dir:', data_dir)
# -----------------------------------------------------------
# LOADING MODEL, IMAGE AND MASK
# -----------------------------------------------------------
print('> Loading image...')
img_mat = helper.load_nifti_mat_from_file(
os.path.join(data_dir, '001.nii')).astype(np.float32)
print('> Loading mask...')
if not os.path.exists(os.path.join(data_dir, 'mask.nii')):
avg_mat = convolve(img_mat.astype(dtype=float), np.ones((16,16,16), dtype=float)/4096, mode='constant', cval=0)
mask_mat = np.where(avg_mat > 10.0, 1, 0)
helper.create_and_save_nifti(mask_mat, os.path.join(data_dir, 'mask.nii'))
else:
mask_mat = helper.load_nifti_mat_from_file(
os.path.join(data_dir, 'mask.nii'))
# -----------------------------------------------------------
# PREDICTION
# -----------------------------------------------------------
# the segmentation is going to be saved in this probability matrix
prob_mat = np.zeros(img_mat.shape, dtype=np.float32)
x_dim, y_dim, z_dim = prob_mat.shape
# get the x, y and z coordinates where there is brain
x, y, z =
|
np.where(mask_mat > 0)
|
numpy.where
|
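With a single boolean condition, np.where returns one index array per dimension, which is how the prompt obtains the x, y, z coordinates of the brain voxels. Toy example:
import numpy as np
mask_mat = np.zeros((4, 4, 4), dtype=np.uint8)  # toy stand-in for the loaded brain mask
mask_mat[1:3, 1:3, 1:3] = 1
x, y, z = np.where(mask_mat > 0)  # the completion: coordinate arrays of all non-zero voxels
print(x.size)                     # 8 voxels in this toy mask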
import numpy as np
import os
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd))
def concatenate_data(num_list: list = [], is_o:bool=False, is_s:bool=False,istest:bool=False):
if len(num_list) != 0:
if not is_o and not is_s:
for i, number in enumerate(num_list):
data_path = os.path.abspath(father_path + os.path.sep + "t"+str(number) + "_data.txt")
label_path = os.path.abspath(father_path + os.path.sep + "t"+str(number) + "_label.txt")
data = np.loadtxt(data_path)
label = np.loadtxt(label_path)
if i == 0:
final_data = data
final_label = label
else:
final_data = np.concatenate([data, final_data], 0)
final_label = np.concatenate([label, final_label], 0)
print(final_data.shape)
if istest:
final_data_path = os.path.abspath(father_path + os.path.sep + "test_t_data.txt")
final_label_path = os.path.abspath(father_path + os.path.sep + "test_t_label.txt")
else:
final_data_path = os.path.abspath(father_path + os.path.sep + "t_data.txt")
final_label_path = os.path.abspath(father_path + os.path.sep + "t_label.txt")
"""Calculate the number/proportion of different movements"""
number = np.zeros((2, 7))
for i in range(final_label.shape[0]):
number[0, int(final_label[i])] += 1
data_num = sum(number[0, :])
for i in range(7):
number[1, i] = number[0, i] / data_num * 100
print("Still: %d, Forward: %d, Left: %d, Right: %d, Left_Still: %d, Right_Still: %d, Backward: %d"
% (number[0, 0], number[0, 1], number[0, 2], number[0, 3], number[0, 4], number[0, 5], number[0, 6]))
print(
"Still: %.2f, Forward: %.2f, Left: %.2f, Right: %.2f, Left_Still: %.2f, Right_Still: %.2f, Backward: %.2f"
% (number[1, 0], number[1, 1], number[1, 2], number[1, 3], number[1, 4], number[1, 5], number[1, 6]))
elif is_o:
for i, number in enumerate(num_list):
data_path = os.path.abspath(father_path + os.path.sep + "o"+str(number) + "_data.txt")
label_path = os.path.abspath(father_path + os.path.sep + "o"+str(number) + "_label.txt")
data = np.loadtxt(data_path)
label = np.loadtxt(label_path)
if i == 0:
final_data = data
final_label = label
else:
final_data = np.concatenate([data, final_data], 0)
final_label = np.concatenate([label, final_label], 0)
print(final_data.shape)
if istest:
final_data_path = os.path.abspath(father_path + os.path.sep + "test_o_data.txt")
final_label_path = os.path.abspath(father_path + os.path.sep + "test_o_label.txt")
else:
final_data_path = os.path.abspath(father_path + os.path.sep + "o_data.txt")
final_label_path = os.path.abspath(father_path + os.path.sep + "o_label.txt")
elif is_s:
for i, number in enumerate(num_list):
data_path = os.path.abspath(father_path + os.path.sep + "s"+str(number) + "_data.txt")
label_path = os.path.abspath(father_path + os.path.sep + "s"+str(number) + "_label.txt")
data = np.loadtxt(data_path)
label = np.loadtxt(label_path)
if i == 0:
final_data = data
final_label = label
else:
final_data = np.concatenate([data, final_data], 0)
final_label = np.concatenate([label, final_label], 0)
print(final_data.shape)
final_data_path = os.path.abspath(father_path + os.path.sep + "s_data.txt")
final_label_path = os.path.abspath(father_path + os.path.sep + "s_label.txt")
# print(final_data)
np.savetxt(final_data_path, final_data, fmt="%.3f")
np.savetxt(final_label_path, final_label, fmt="%d")
return final_data,final_label
def concatenate_o_s(o_data,o_label,s_data,s_label):
final_data = np.concatenate([o_data, s_data], 0)
final_label = np.concatenate([o_label, s_label], 0)
final_data_path = os.path.abspath(father_path + os.path.sep + "os_data.txt")
final_label_path = os.path.abspath(father_path + os.path.sep + "os_label.txt")
np.savetxt(final_data_path, final_data, fmt="%.3f")
|
np.savetxt(final_label_path, final_label, fmt="%d")
|
numpy.savetxt
|
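The completion writes the concatenated labels to disk as integers, matching the fmt="%d" used for the other label files in the prompt. A self-contained round-trip sketch; the file names here are placeholders, not the paths built from father_path above:
import numpy as np
final_data = np.random.rand(5, 3)
final_label = np.array([0, 1, 2, 1, 0])
np.savetxt("demo_data.txt", final_data, fmt="%.3f")  # floats with three decimals, as for the data files
np.savetxt("demo_label.txt", final_label, fmt="%d")  # the completion: integer labels
reloaded = np.loadtxt("demo_label.txt")              # loadtxt returns floats unless a dtype is given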
""" Searches using MCMC-based methods """
import sys
import os
import copy
import logging
from collections import OrderedDict
import subprocess
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from ptemcee import Sampler as PTSampler
import corner
import dill as pickle
import pyfstat.core as core
from pyfstat.core import tqdm, args, read_par
import pyfstat.optimal_setup_functions as optimal_setup_functions
import pyfstat.helper_functions as helper_functions
class MCMCSearch(core.BaseSearchClass):
"""MCMC search using ComputeFstat
Parameters
----------
theta_prior: dict
Dictionary of priors and fixed values for the search parameters.
For each parameter (key of the dict), if it is to be held fixed
the value should be the constant float; if it is to be searched, the
value should be a dictionary of the prior.
tref, minStartTime, maxStartTime: int
GPS seconds of the reference time, start time and end time. While tref
is required, minStartTime and maxStartTime default to None, in which
case all available data is used.
label, outdir: str
A label and output directory (optional, defaults is `'data'`) to
name files
sftfilepattern: str, optional
Pattern to match SFTs using wildcards (*?) and ranges [0-9];
multiple patterns can be given separated by colons.
detectors: str, optional
Two-character reference to the detectors to use, specify None for no
constraint and comma-separate for multiple references.
nsteps: list (2,), optional
Number of burn-in and production steps to take, [nburn, nprod]. See
`pyfstat.MCMCSearch.setup_initialisation()` for details on adding
initialisation steps.
nwalkers, ntemps: int, optional
The number of walkers and temperatures to use in the parallel
tempered PTSampler.
log10beta_min: float < 0, optional
The log_10(beta) value, if given the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to ptemcee).
theta_initial: dict, array, optional
A dictionary of distributions about which to distribute the
initial walkers
rhohatmax: float, optional
Upper bound for the SNR scale parameter (required to normalise the
Bayes factor) - this needs to be carefully set when using the
evidence.
binary: bool, optional
If true, search over binary parameters
BSGL: bool, optional
If true, use the BSGL statistic
SSBPrec: int, optional
SSBPrec (SSB precision) to use when calling ComputeFstat
minCoverFreq, maxCoverFreq: float, optional
Minimum and maximum instantaneous frequency which will be covered
over the SFT time span as passed to CreateFstatInput
injectSources: dict, optional
If given, inject these properties into the SFT files before running
the search
assumeSqrtSX: float, optional
Don't estimate noise-floors, but assume (stationary) per-IFO sqrt{SX}
transientWindowType: str
If 'rect' or 'exp',
compute atoms so that a transient (t0,tau) map can later be computed.
('none' instead of None explicitly calls the transient-window function,
but with the full range, for debugging)
Currently only supported for nsegs=1.
tCWFstatMapVersion: str
Choose between standard 'lal' implementation,
'pycuda' for gpu, and some others for devel/debug.
Attributes
----------
symbol_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), to Latex math
symbols for plots
unit_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), and the
units (i.e. `Hz`)
transform_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), where the key is
itself a dictionary which can item `multiplier`, `subtractor`, or
`unit` by which to transform by and update the units.
"""
symbol_dictionary = dict(
F0="$f$",
F1="$\dot{f}$",
F2="$\ddot{f}$",
Alpha=r"$\alpha$",
Delta="$\delta$",
asini="asini",
period="P",
ecc="ecc",
tp="tp",
argp="argp",
)
unit_dictionary = dict(
F0="Hz",
F1="Hz/s",
F2="Hz/s$^2$",
Alpha=r"rad",
Delta="rad",
asini="",
period="s",
ecc="",
tp="",
argp="",
)
transform_dictionary = {}
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
transientWindowType=None,
tCWFstatMapVersion="lal",
):
self.theta_prior = theta_prior
self.tref = tref
self.label = label
self.outdir = outdir
self.minStartTime = minStartTime
self.maxStartTime = maxStartTime
self.sftfilepattern = sftfilepattern
self.detectors = detectors
self.nsteps = nsteps
self.nwalkers = nwalkers
self.ntemps = ntemps
self.log10beta_min = log10beta_min
self.theta_initial = theta_initial
self.rhohatmax = rhohatmax
self.binary = binary
self.BSGL = BSGL
self.SSBprec = SSBprec
self.minCoverFreq = minCoverFreq
self.maxCoverFreq = maxCoverFreq
self.injectSources = injectSources
self.assumeSqrtSX = assumeSqrtSX
self.transientWindowType = transientWindowType
self.tCWFstatMapVersion = tCWFstatMapVersion
if os.path.isdir(outdir) is False:
os.mkdir(outdir)
self._add_log_file()
logging.info("Set-up MCMC search for model {}".format(self.label))
if sftfilepattern:
logging.info("Using data {}".format(self.sftfilepattern))
else:
logging.info("No sftfilepattern given")
if injectSources:
logging.info("Inject sources: {}".format(injectSources))
self.pickle_path = "{}/{}_saved_data.p".format(self.outdir, self.label)
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self._set_likelihoodcoef()
self._log_input()
def _set_likelihoodcoef(self):
self.likelihoodcoef = np.log(70.0 / self.rhohatmax ** 4)
def _log_input(self):
logging.info("theta_prior = {}".format(self.theta_prior))
logging.info("nwalkers={}".format(self.nwalkers))
logging.info("nsteps = {}".format(self.nsteps))
logging.info("ntemps = {}".format(self.ntemps))
logging.info("log10beta_min = {}".format(self.log10beta_min))
def _initiate_search_object(self):
logging.info("Setting up search object")
self.search = core.ComputeFstat(
tref=self.tref,
sftfilepattern=self.sftfilepattern,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
detectors=self.detectors,
BSGL=self.BSGL,
transientWindowType=self.transientWindowType,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
binary=self.binary,
injectSources=self.injectSources,
assumeSqrtSX=self.assumeSqrtSX,
SSBprec=self.SSBprec,
tCWFstatMapVersion=self.tCWFstatMapVersion,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def logp(self, theta_vals, theta_prior, theta_keys, search):
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def logl(self, theta, search):
for j, theta_i in enumerate(self.theta_idxs):
self.fixed_theta[theta_i] = theta[j]
twoF = search.get_fullycoherent_twoF(
self.minStartTime, self.maxStartTime, *self.fixed_theta
)
return twoF / 2.0 + self.likelihoodcoef
def _unpack_input_theta(self):
full_theta_keys = ["F0", "F1", "F2", "Alpha", "Delta"]
if self.binary:
full_theta_keys += ["asini", "period", "ecc", "tp", "argp"]
full_theta_keys_copy = copy.copy(full_theta_keys)
full_theta_symbols = [
"$f$",
"$\dot{f}$",
"$\ddot{f}$",
r"$\alpha$",
r"$\delta$",
]
if self.binary:
full_theta_symbols += ["asini", "period", "ecc", "tp", "argp"]
self.theta_keys = []
fixed_theta_dict = {}
for key, val in self.theta_prior.items():
if type(val) is dict:
fixed_theta_dict[key] = 0
self.theta_keys.append(key)
elif type(val) in [float, int, np.float64]:
fixed_theta_dict[key] = val
else:
raise ValueError(
"Type {} of {} in theta not recognised".format(type(val), key)
)
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
if len(full_theta_keys_copy) > 0:
raise ValueError(
("Input dictionary `theta` is missing the" "following keys: {}").format(
full_theta_keys_copy
)
)
self.fixed_theta = [fixed_theta_dict[key] for key in full_theta_keys]
self.theta_idxs = [full_theta_keys.index(k) for k in self.theta_keys]
self.theta_symbols = [full_theta_symbols[i] for i in self.theta_idxs]
idxs = np.argsort(self.theta_idxs)
self.theta_idxs = [self.theta_idxs[i] for i in idxs]
self.theta_symbols = [self.theta_symbols[i] for i in idxs]
self.theta_keys = [self.theta_keys[i] for i in idxs]
def _evaluate_logpost(self, p0vec):
init_logp = np.array(
[
self.logp(p, self.theta_prior, self.theta_keys, self.search)
for p in p0vec
]
)
init_logl = np.array([self.logl(p, self.search) for p in p0vec])
return init_logl + init_logp
def _check_initial_points(self, p0):
for nt in range(self.ntemps):
logging.info("Checking temperature {} chains".format(nt))
num = sum(self._evaluate_logpost(p0[nt]) == -np.inf)
if num > 0:
logging.warning(
"Of {} initial values, {} are -np.inf due to the prior".format(
len(p0[0]), num
)
)
p0 = self._generate_new_p0_to_fix_initial_points(p0, nt)
def _generate_new_p0_to_fix_initial_points(self, p0, nt):
logging.info("Attempting to correct intial values")
init_logpost = self._evaluate_logpost(p0[nt])
idxs = np.arange(self.nwalkers)[init_logpost == -np.inf]
count = 0
while sum(init_logpost == -np.inf) > 0 and count < 100:
for j in idxs:
p0[nt][j] = p0[nt][np.random.randint(0, self.nwalkers)] * (
1 + np.random.normal(0, 1e-10, self.ndim)
)
init_logpost = self._evaluate_logpost(p0[nt])
count += 1
if sum(init_logpost == -np.inf) > 0:
logging.info("Failed to fix initial priors")
else:
logging.info("Suceeded to fix initial priors")
return p0
def setup_initialisation(self, nburn0, scatter_val=1e-10):
""" Add an initialisation step to the MCMC run
If called prior to `run()`, adds an initial step in which the MCMC
simulation is run for `nburn0` steps. After this, the MCMC simulation
continues in the usual manner (i.e. for nburn and nprod steps), but the
walkers are reset scattered around the maximum likelihood position
of the initialisation step.
Parameters
----------
nburn0: int
Number of initialisation steps to take
scatter_val: float
Relative number to scatter walkers around the maximum likelihood
position after the initialisation step
"""
logging.info(
"Setting up initialisation with nburn0={}, scatter_val={}".format(
nburn0, scatter_val
)
)
self.nsteps = [nburn0] + self.nsteps
self.scatter_val = scatter_val
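# Illustration with assumed values (not part of the original source): with the default
# nsteps=[100, 100], calling setup_initialisation(nburn0=50) turns self.nsteps into
# [50, 100, 100]; run() then loops over nsteps[:-2] as initialisation stages and uses
# nsteps[-2] as burn-in and nsteps[-1] as production steps.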
# def setup_burnin_convergence_testing(
# self, n=10, test_type='autocorr', windowed=False, **kwargs):
# """ Set up convergence testing during the MCMC simulation
#
# Parameters
# ----------
# n: int
# Number of steps after which to test convergence
# test_type: str ['autocorr', 'GR']
# If 'autocorr' use the exponential autocorrelation time (kwargs
# passed to `get_autocorr_convergence`). If 'GR' use the Gelman-Rubin
# statistic (kwargs passed to `get_GR_convergence`)
# windowed: bool
# If True, only calculate the convergence test in a window of length
# `n`
# **kwargs:
# Passed to either `_test_autocorr_convergence()` or
# `_test_GR_convergence()` depending on `test_type`.
#
# """
# logging.info('Setting up convergence testing')
# self.convergence_n = n
# self.convergence_windowed = windowed
# self.convergence_test_type = test_type
# self.convergence_kwargs = kwargs
# self.convergence_diagnostic = []
# self.convergence_diagnosticx = []
# if test_type in ['autocorr']:
# self._get_convergence_test = self._test_autocorr_convergence
# elif test_type in ['GR']:
# self._get_convergence_test = self._test_GR_convergence
# else:
# raise ValueError('test_type {} not understood'.format(test_type))
#
#
# def _test_autocorr_convergence(self, i, sampler, test=True, n_cut=5):
# try:
# acors = np.zeros((self.ntemps, self.ndim))
# for temp in range(self.ntemps):
# if self.convergence_windowed:
# j = i-self.convergence_n
# else:
# j = 0
# x = np.mean(sampler.chain[temp, :, j:i, :], axis=0)
# acors[temp, :] = emcee.autocorr.exponential_time(x)
# c = np.max(acors, axis=0)
# except emcee.autocorr.AutocorrError:
# logging.info('Failed to calculate exponential autocorrelation')
# c = np.zeros(self.ndim) + np.nan
# except AttributeError:
# logging.info('Unable to calculate exponential autocorrelation')
# c = np.zeros(self.ndim) + np.nan
#
# self.convergence_diagnosticx.append(i - self.convergence_n/2.)
# self.convergence_diagnostic.append(list(c))
#
# if test:
# return i > n_cut * np.max(c)
#
# def _test_GR_convergence(self, i, sampler, test=True, R=1.1):
# if self.convergence_windowed:
# s = sampler.chain[0, :, i-self.convergence_n+1:i+1, :]
# else:
# s = sampler.chain[0, :, :i+1, :]
# N = float(self.convergence_n)
# M = float(self.nwalkers)
# W = np.mean(np.var(s, axis=1), axis=0)
# per_walker_mean = np.mean(s, axis=1)
# mean = np.mean(per_walker_mean, axis=0)
# B = N / (M-1.) * np.sum((per_walker_mean-mean)**2, axis=0)
# Vhat = (N-1)/N * W + (M+1)/(M*N) * B
# c = np.sqrt(Vhat/W)
# self.convergence_diagnostic.append(c)
# self.convergence_diagnosticx.append(i - self.convergence_n/2.)
#
# if test and np.max(c) < R:
# return True
# else:
# return False
#
# def _test_convergence(self, i, sampler, **kwargs):
# if np.mod(i+1, self.convergence_n) == 0:
# return self._get_convergence_test(i, sampler, **kwargs)
# else:
# return False
#
# def _run_sampler_with_conv_test(self, sampler, p0, nprod=0, nburn=0):
# logging.info('Running {} burn-in steps with convergence testing'
# .format(nburn))
# iterator = tqdm(sampler.sample(p0, iterations=nburn), total=nburn)
# for i, output in enumerate(iterator):
# if self._test_convergence(i, sampler, test=True,
# **self.convergence_kwargs):
# logging.info(
# 'Converged at {} before max number {} of steps reached'
# .format(i, nburn))
# self.convergence_idx = i
# break
# iterator.close()
# logging.info('Running {} production steps'.format(nprod))
# j = nburn
# iterator = tqdm(sampler.sample(output[0], iterations=nprod),
# total=nprod)
# for result in iterator:
# self._test_convergence(j, sampler, test=False,
# **self.convergence_kwargs)
# j += 1
# return sampler
def _run_sampler(self, sampler, p0, nprod=0, nburn=0, window=50):
for result in tqdm(
sampler.sample(p0, iterations=nburn + nprod), total=nburn + nprod
):
pass
self.mean_acceptance_fraction = np.mean(sampler.acceptance_fraction, axis=1)
logging.info(
"Mean acceptance fraction: {}".format(self.mean_acceptance_fraction)
)
if self.ntemps > 1:
self.tswap_acceptance_fraction = sampler.tswap_acceptance_fraction
logging.info(
"Tswap acceptance fraction: {}".format(
sampler.tswap_acceptance_fraction
)
)
self.autocorr_time = sampler.get_autocorr_time(window=window)
logging.info("Autocorrelation length: {}".format(self.autocorr_time))
return sampler
def _estimate_run_time(self):
""" Print the estimated run time
Uses timing coefficients based on a Lenovo T460p Intel(R)
Core(TM) i5-6300HQ CPU @ 2.30GHz.
"""
# Todo: add option to time on a machine, and move coefficients to
# ~/.pyfstat.conf
if (
type(self.theta_prior["Alpha"]) == dict
or type(self.theta_prior["Delta"]) == dict
):
tau0LD = 5.2e-7
tau0T = 1.5e-8
tau0S = 1.2e-4
tau0C = 5.8e-6
else:
tau0LD = 1.3e-7
tau0T = 1.5e-8
tau0S = 9.1e-5
tau0C = 5.5e-6
Nsfts = (self.maxStartTime - self.minStartTime) / 1800.0
if hasattr(self, "run_setup"):
ts = []
for row in self.run_setup:
nsteps = row[0]
nsegs = row[1]
numb_evals = np.sum(nsteps) * self.nwalkers * self.ntemps
t = (tau0S + tau0LD * Nsfts) * numb_evals
if nsegs > 1:
t += (tau0C + tau0T * Nsfts) * nsegs * numb_evals
ts.append(t)
time = np.sum(ts)
else:
numb_evals = np.sum(self.nsteps) * self.nwalkers * self.ntemps
time = (tau0S + tau0LD * Nsfts) * numb_evals
if getattr(self, "nsegs", 1) > 1:
time += (tau0C + tau0T * Nsfts) * self.nsegs * numb_evals
logging.info(
"Estimated run-time = {} s = {:1.0f}:{:1.0f} m".format(
time, *divmod(time, 60)
)
)
def run(self, proposal_scale_factor=2, create_plots=True, window=50, **kwargs):
""" Run the MCMC simulatation
Parameters
----------
proposal_scale_factor: float
The proposal scale factor used by the sampler, see Goodman & Weare
(2010). If the acceptance fraction is too low, you can raise it by
decreasing the a parameter; and if it is too high, you can reduce
it by increasing the a parameter [Foreman-Mackey (2013)].
create_plots: bool
If true, save trace plots of the walkers
window: int
The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see
ptemcee.Sampler.get_autocorr_time for further details).
**kwargs:
Passed to _plot_walkers to control the figures
Returns
-------
sampler: ptemcee.Sampler
The ptemcee ptsampler object
"""
self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
if self.old_data_is_okay_to_use is True:
logging.warning("Using saved data from {}".format(self.pickle_path))
d = self.get_saved_data_dictionary()
self.samples = d["samples"]
self.lnprobs = d["lnprobs"]
self.lnlikes = d["lnlikes"]
self.all_lnlikelihood = d["all_lnlikelihood"]
self.chain = d["chain"]
return
self._initiate_search_object()
self._estimate_run_time()
sampler = PTSampler(
ntemps=self.ntemps,
nwalkers=self.nwalkers,
dim=self.ndim,
logl=self.logl,
logp=self.logp,
logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,),
betas=self.betas,
a=proposal_scale_factor,
)
p0 = self._generate_initial_p0()
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
# Run initialisation steps if required
ninit_steps = len(self.nsteps) - 2
for j, n in enumerate(self.nsteps[:-2]):
logging.info(
"Running {}/{} initialisation with {} steps".format(j, ninit_steps, n)
)
sampler = self._run_sampler(sampler, p0, nburn=n, window=window)
if create_plots:
fig, axes = self._plot_walkers(sampler, **kwargs)
fig.tight_layout()
fig.savefig(
"{}/{}_init_{}_walkers.png".format(self.outdir, self.label, j)
)
p0 = self._get_new_p0(sampler)
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
sampler.reset()
if len(self.nsteps) > 1:
nburn = self.nsteps[-2]
else:
nburn = 0
nprod = self.nsteps[-1]
logging.info("Running final burn and prod with {} steps".format(nburn + nprod))
sampler = self._run_sampler(sampler, p0, nburn=nburn, nprod=nprod)
if create_plots:
try:
fig, axes = self._plot_walkers(sampler, nprod=nprod, **kwargs)
fig.tight_layout()
fig.savefig("{}/{}_walkers.png".format(self.outdir, self.label))
except RuntimeError as e:
logging.warning("Failed to save walker plots due to Erro {}".format(e))
samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = sampler.loglikelihood[:, :, nburn:]
self.samples = samples
self.chain = sampler.chain
self.lnprobs = lnprobs
self.lnlikes = lnlikes
self.all_lnlikelihood = all_lnlikelihood
self._save_data(
sampler, samples, lnprobs, lnlikes, all_lnlikelihood, sampler.chain
)
return sampler
def _get_rescale_multiplier_for_key(self, key):
""" Get the rescale multiplier from the transform_dictionary
Can either be a float, a string (in which case it is interpreted as
an attribute of the MCMCSearch class, e.g. minStartTime), or non-existent,
in which case 1 is returned
"""
if key not in self.transform_dictionary:
return 1
if "multiplier" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["multiplier"]
if type(val) == str:
if hasattr(self, val):
multiplier = getattr(
self, self.transform_dictionary[key]["multiplier"]
)
else:
raise ValueError("multiplier {} not a class attribute".format(val))
else:
multiplier = val
else:
multiplier = 1
return multiplier
def _get_rescale_subtractor_for_key(self, key):
""" Get the rescale subtractor from the transform_dictionary
Can either be a float, a string (in which case it is interpreted as
an attribute of the MCMCSearch class, e.g. minStartTime), or non-existent,
in which case 0 is returned
"""
if key not in self.transform_dictionary:
return 0
if "subtractor" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["subtractor"]
if type(val) == str:
if hasattr(self, val):
subtractor = getattr(
self, self.transform_dictionary[key]["subtractor"]
)
else:
raise ValueError("subtractor {} not a class attribute".format(val))
else:
subtractor = val
else:
subtractor = 0
return subtractor
def _scale_samples(self, samples, theta_keys):
""" Scale the samples using the transform_dictionary """
for key in theta_keys:
if key in self.transform_dictionary:
idx = theta_keys.index(key)
s = samples[:, idx]
subtractor = self._get_rescale_subtractor_for_key(key)
s = s - subtractor
multiplier = self._get_rescale_multiplier_for_key(key)
s *= multiplier
samples[:, idx] = s
return samples
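# Illustrative (assumed) transform_dictionary entry, using only the keys this class reads
# ("multiplier", "subtractor", "symbol", "label", "unit"). Plotting F0 as an offset in
# micro-Hz from a hypothetical class attribute F0_centre would look like:
#   transform_dictionary = dict(
#       F0=dict(subtractor="F0_centre", multiplier=1e6, symbol="$f - f_c$", unit="$\mu$Hz")
#   )
# A string value such as "F0_centre" is resolved via getattr in
# _get_rescale_subtractor_for_key; samples are first shifted by the subtractor and then
# scaled by the multiplier in _scale_samples.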
def _get_labels(self, newline_units=False):
""" Combine the units, symbols and rescaling to give labels """
labels = []
for key in self.theta_keys:
label = None
s = self.symbol_dictionary[key]
s.replace("_{glitch}", r"_\textrm{glitch}")
u = self.unit_dictionary[key]
if key in self.transform_dictionary:
if "symbol" in self.transform_dictionary[key]:
s = self.transform_dictionary[key]["symbol"]
if "label" in self.transform_dictionary[key]:
label = self.transform_dictionary[key]["label"]
if "unit" in self.transform_dictionary[key]:
u = self.transform_dictionary[key]["unit"]
if label is None:
if newline_units:
label = "{} \n [{}]".format(s, u)
else:
label = "{} [{}]".format(s, u)
labels.append(label)
return labels
def plot_corner(
self,
figsize=(7, 7),
add_prior=False,
nstds=None,
label_offset=0.4,
dpi=300,
rc_context={},
tglitch_ratio=False,
fig_and_axes=None,
save_fig=True,
**kwargs
):
""" Generate a corner plot of the posterior
Using the `corner` package (https://pypi.python.org/pypi/corner/),
generate estimates of the posterior from the production samples.
Parameters
----------
figsize: tuple (7, 7)
Figure size in inches (passed to plt.subplots)
add_prior: bool, str
If true, plot the prior as a red line. If 'full' then for uniform
priors plot the full extent of the prior.
nstds: float
The number of standard deviations to plot centered on the mean
label_offset: float
Offset the labels from the plot: useful to prevent overlapping the
tick labels with the axis labels
dpi: int
Passed to plt.savefig
rc_context: dict
Dictionary of rc values to set while generating the figure (see
matplotlib rc for more details)
tglitch_ratio: bool
If true, and tglitch is a parameter, plot posteriors as the
fractional time at which the glitch occurs instead of the actual
time
fig_and_axes: tuple
fig and axes to plot on, the axes must be of the right shape,
namely (ndim, ndim)
save_fig: bool
If true, save the figure, else return the fig, axes
**kwargs:
Passed to corner.corner
Returns
-------
fig, axes:
The matplotlib figure and axes, only returned if save_fig = False
"""
if "truths" in kwargs and len(kwargs["truths"]) != self.ndim:
logging.warning("len(Truths) != ndim, Truths will be ignored")
kwargs["truths"] = None
if self.ndim < 2:
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig, ax = fig_and_axes
ax.hist(self.samples, bins=50, histtype="stepfilled")
ax.set_xlabel(self.theta_symbols[0])
fig.savefig("{}/{}_corner.png".format(self.outdir, self.label), dpi=dpi)
return
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, axes = plt.subplots(self.ndim, self.ndim, figsize=figsize)
else:
fig, axes = fig_and_axes
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=True)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
if tglitch_ratio:
for j, k in enumerate(self.theta_keys):
if k == "tglitch":
s = samples_plt[:, j]
samples_plt[:, j] = (s - self.minStartTime) / (
self.maxStartTime - self.minStartTime
)
labels[j] = r"$R_{\textrm{glitch}}$"
if type(nstds) is int and "range" not in kwargs:
_range = []
for j, s in enumerate(samples_plt.T):
median = np.median(s)
std = np.std(s)
_range.append((median - nstds * std, median + nstds * std))
elif "range" in kwargs:
_range = kwargs.pop("range")
else:
_range = None
hist_kwargs = kwargs.pop("hist_kwargs", dict())
if "normed" not in hist_kwargs:
hist_kwargs["normed"] = True
fig_triangle = corner.corner(
samples_plt,
labels=labels,
fig=fig,
bins=50,
max_n_ticks=4,
plot_contours=True,
plot_datapoints=True,
# label_kwargs={'fontsize': 12},
data_kwargs={"alpha": 0.1, "ms": 0.5},
range=_range,
hist_kwargs=hist_kwargs,
**kwargs
)
axes_list = fig_triangle.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
for tick in ax.xaxis.get_major_ticks():
# tick.label.set_fontsize(8)
tick.label.set_rotation("horizontal")
for tick in ax.yaxis.get_major_ticks():
# tick.label.set_fontsize(8)
tick.label.set_rotation("vertical")
plt.tight_layout(h_pad=0.0, w_pad=0.0)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
if add_prior:
self._add_prior_to_corner(axes, self.samples, add_prior)
if save_fig:
fig_triangle.savefig(
"{}/{}_corner.png".format(self.outdir, self.label), dpi=dpi
)
else:
return fig, axes
def plot_chainconsumer(self, save_fig=True, label_offset=0.25, dpi=300, **kwargs):
""" Generate a corner plot of the posterior using chainconsumer
Parameters
----------
dpi: int
Passed to plt.savefig
**kwargs:
Passed to chainconsumer.plotter.plot
"""
if "truths" in kwargs and len(kwargs["truths"]) != self.ndim:
logging.warning("len(Truths) != ndim, Truths will be ignored")
kwargs["truths"] = None
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=True)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
import chainconsumer
c = chainconsumer.ChainConsumer()
c.add_chain(samples_plt, parameters=labels)
c.configure(smooth=0, summary=False, sigma2d=True)
fig = c.plotter.plot(**kwargs)
axes_list = fig.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
# for tick in ax.xaxis.get_major_ticks():
# #tick.label.set_fontsize(8)
# tick.label.set_rotation('horizontal')
# for tick in ax.yaxis.get_major_ticks():
# #tick.label.set_fontsize(8)
# tick.label.set_rotation('vertical')
plt.tight_layout(h_pad=0.0, w_pad=0.0)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
if save_fig:
fig.savefig("{}/{}_corner.png".format(self.outdir, self.label), dpi=dpi)
else:
return fig
def _add_prior_to_corner(self, axes, samples, add_prior):
for i, key in enumerate(self.theta_keys):
ax = axes[i][i]
s = samples[:, i]
lnprior = self._generic_lnprior(**self.theta_prior[key])
if add_prior == "full" and self.theta_prior[key]["type"] == "unif":
lower = self.theta_prior[key]["lower"]
upper = self.theta_prior[key]["upper"]
r = upper - lower
xlim = [lower - 0.05 * r, upper + 0.05 * r]
x = np.linspace(xlim[0], xlim[1], 1000)
else:
xlim = ax.get_xlim()
x = np.linspace(s.min(), s.max(), 1000)
multiplier = self._get_rescale_multiplier_for_key(key)
subtractor = self._get_rescale_subtractor_for_key(key)
ax.plot(
(x - subtractor) * multiplier,
[np.exp(lnprior(xi)) for xi in x],
"-C3",
label="prior",
)
for j in range(i, self.ndim):
axes[j][i].set_xlim(xlim[0], xlim[1])
for k in range(0, i):
axes[i][k].set_ylim(xlim[0], xlim[1])
def plot_prior_posterior(self, normal_stds=2):
""" Plot the posterior in the context of the prior """
fig, axes = plt.subplots(nrows=self.ndim, figsize=(8, 4 * self.ndim))
N = 1000
from scipy.stats import gaussian_kde
for i, (ax, key) in enumerate(zip(axes, self.theta_keys)):
prior_dict = self.theta_prior[key]
prior_func = self._generic_lnprior(**prior_dict)
if prior_dict["type"] == "unif":
x = np.linspace(prior_dict["lower"], prior_dict["upper"], N)
prior = prior_func(x)
prior[0] = 0
prior[-1] = 0
elif prior_dict["type"] == "log10unif":
upper = prior_dict["log10upper"]
lower = prior_dict["log10lower"]
x = np.linspace(lower, upper, N)
prior = [prior_func(xi) for xi in x]
elif prior_dict["type"] == "norm":
lower = prior_dict["loc"] - normal_stds * prior_dict["scale"]
upper = prior_dict["loc"] + normal_stds * prior_dict["scale"]
x = np.linspace(lower, upper, N)
prior = prior_func(x)
elif prior_dict["type"] == "halfnorm":
lower = prior_dict["loc"]
upper = prior_dict["loc"] + normal_stds * prior_dict["scale"]
x = np.linspace(lower, upper, N)
prior = [prior_func(xi) for xi in x]
elif prior_dict["type"] == "neghalfnorm":
upper = prior_dict["loc"]
lower = prior_dict["loc"] - normal_stds * prior_dict["scale"]
x = np.linspace(lower, upper, N)
prior = [prior_func(xi) for xi in x]
else:
raise ValueError(
"Not implemented for prior type {}".format(prior_dict["type"])
)
priorln = ax.plot(x, prior, "C3", label="prior")
ax.set_xlabel(self.theta_symbols[i])
s = self.samples[:, i]
while len(s) > 10 ** 4:
# random downsample to avoid slow calculation of kde
s = np.random.choice(s, size=int(len(s) / 2.0))
kde = gaussian_kde(s)
ax2 = ax.twinx()
postln = ax2.plot(x, kde.pdf(x), "k", label="posterior")
ax2.set_yticklabels([])
ax.set_yticklabels([])
lns = priorln + postln
labs = [l.get_label() for l in lns]
axes[0].legend(lns, labs, loc=1, framealpha=0.8)
fig.savefig("{}/{}_prior_posterior.png".format(self.outdir, self.label))
def plot_cumulative_max(self, **kwargs):
""" Plot the cumulative twoF for the maximum posterior estimate
See the pyfstat.core.plot_twoF_cumulative function for further details
"""
d, maxtwoF = self.get_max_twoF()
for key, val in self.theta_prior.items():
if key not in d:
d[key] = val
if "add_pfs" in kwargs:
self.generate_loudest()
if hasattr(self, "search") is False:
self._initiate_search_object()
if self.binary is False:
self.search.plot_twoF_cumulative(
self.label,
self.outdir,
F0=d["F0"],
F1=d["F1"],
F2=d["F2"],
Alpha=d["Alpha"],
Delta=d["Delta"],
tstart=self.minStartTime,
tend=self.maxStartTime,
**kwargs
)
else:
self.search.plot_twoF_cumulative(
self.label,
self.outdir,
F0=d["F0"],
F1=d["F1"],
F2=d["F2"],
Alpha=d["Alpha"],
Delta=d["Delta"],
asini=d["asini"],
period=d["period"],
ecc=d["ecc"],
argp=d["argp"],
tp=d["argp"],
tstart=self.minStartTime,
tend=self.maxStartTime,
**kwargs
)
def _generic_lnprior(self, **kwargs):
""" Return a lambda function of the pdf
Parameters
----------
**kwargs:
A dictionary containing 'type' of pdf and shape parameters
"""
def log_of_unif(x, a, b):
above = x < b
below = x > a
if type(above) is not np.ndarray:
if above and below:
return -np.log(b - a)
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(b - a)
return p
def log_of_log10unif(x, log10lower, log10upper):
log10x = np.log10(x)
above = log10x < log10upper
below = log10x > log10lower
if type(above) is not np.ndarray:
if above and below:
return -np.log(x * np.log(10) * (log10upper - log10lower))
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(x * np.log(10) * (log10upper - log10lower))
return p
def log_of_halfnorm(x, loc, scale):
if x < loc:
return -np.inf
else:
return -0.5 * (
(x - loc) ** 2 / scale ** 2 + np.log(0.5 * np.pi * scale ** 2)
)
def cauchy(x, x0, gamma):
return 1.0 / (np.pi * gamma * (1 + ((x - x0) / gamma) ** 2))
def exp(x, x0, gamma):
if x > x0:
return np.log(gamma) - gamma * (x - x0)
else:
return -np.inf
if kwargs["type"] == "unif":
return lambda x: log_of_unif(x, kwargs["lower"], kwargs["upper"])
if kwargs["type"] == "log10unif":
return lambda x: log_of_log10unif(
x, kwargs["log10lower"], kwargs["log10upper"]
)
elif kwargs["type"] == "halfnorm":
return lambda x: log_of_halfnorm(x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "neghalfnorm":
return lambda x: log_of_halfnorm(-x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "norm":
return lambda x: -0.5 * (
(x - kwargs["loc"]) ** 2 / kwargs["scale"] ** 2
+ np.log(2 * np.pi * kwargs["scale"] ** 2)
)
else:
logging.info("kwargs:", kwargs)
raise ValueError("Print unrecognise distribution")
def _generate_rv(self, **kwargs):
dist_type = kwargs.pop("type")
if dist_type == "unif":
return np.random.uniform(low=kwargs["lower"], high=kwargs["upper"])
if dist_type == "log10unif":
return 10 ** (
np.random.uniform(low=kwargs["log10lower"], high=kwargs["log10upper"])
)
if dist_type == "norm":
return np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
if dist_type == "halfnorm":
return np.abs(np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"]))
if dist_type == "neghalfnorm":
return -1 * np.abs(
np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
)
if dist_type == "lognorm":
return np.random.lognormal(mean=kwargs["loc"], sigma=kwargs["scale"])
else:
raise ValueError("dist_type {} unknown".format(dist_type))
def _plot_walkers(
self,
sampler,
symbols=None,
alpha=0.8,
color="k",
temp=0,
lw=0.1,
nprod=0,
add_det_stat_burnin=False,
fig=None,
axes=None,
xoffset=0,
plot_det_stat=False,
context="ggplot",
labelpad=5,
):
""" Plot all the chains from a sampler """
if symbols is None:
symbols = self._get_labels()
if context not in plt.style.available:
raise ValueError(
(
"The requested context {} is not available; please select a"
" context from `plt.style.available`"
).format(context)
)
if np.ndim(axes) > 1:
axes = axes.flatten()
shape = sampler.chain.shape
if len(shape) == 3:
nwalkers, nsteps, ndim = shape
chain = sampler.chain[:, :, :].copy()
if len(shape) == 4:
ntemps, nwalkers, nsteps, ndim = shape
if temp < ntemps:
logging.info("Plotting temperature {} chains".format(temp))
else:
raise ValueError(
("Requested temperature {} outside of" "available range").format(
temp
)
)
chain = sampler.chain[temp, :, :, :].copy()
samples = chain.reshape((nwalkers * nsteps, ndim))
samples = self._scale_samples(samples, self.theta_keys)
chain = chain.reshape((nwalkers, nsteps, ndim))
if plot_det_stat:
extra_subplots = 1
else:
extra_subplots = 0
with plt.style.context((context)):
plt.rcParams["text.usetex"] = True
if fig is None and axes is None:
fig = plt.figure(figsize=(4, 3.0 * ndim))
ax = fig.add_subplot(ndim + extra_subplots, 1, 1)
axes = [ax] + [
fig.add_subplot(ndim + extra_subplots, 1, i)
for i in range(2, ndim + 1)
]
idxs = np.arange(chain.shape[1])
burnin_idx = chain.shape[1] - nprod
# if hasattr(self, 'convergence_idx'):
# last_idx = self.convergence_idx
# else:
last_idx = burnin_idx
if ndim > 1:
for i in range(ndim):
axes[i].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, i].T
if burnin_idx > 0:
axes[i].plot(
xoffset + idxs[: last_idx + 1],
cs[: last_idx + 1],
color="C3",
alpha=alpha,
lw=lw,
)
axes[i].axvline(xoffset + last_idx, color="k", ls="--", lw=0.5)
axes[i].plot(
xoffset + idxs[burnin_idx:],
cs[burnin_idx:],
color="k",
alpha=alpha,
lw=lw,
)
axes[i].set_xlim(0, xoffset + idxs[-1])
if symbols:
axes[i].set_ylabel(symbols[i], labelpad=labelpad)
# if subtractions[i] == 0:
# axes[i].set_ylabel(symbols[i], labelpad=labelpad)
# else:
# axes[i].set_ylabel(
# symbols[i]+'$-$'+symbols[i]+'$^\mathrm{s}$',
# labelpad=labelpad)
# if hasattr(self, 'convergence_diagnostic'):
# ax = axes[i].twinx()
# axes[i].set_zorder(ax.get_zorder()+1)
# axes[i].patch.set_visible(False)
# c_x = np.array(self.convergence_diagnosticx)
# c_y = np.array(self.convergence_diagnostic)
# break_idx = np.argmin(np.abs(c_x - burnin_idx))
# ax.plot(c_x[:break_idx], c_y[:break_idx, i], '-C0',
# zorder=-10)
# ax.plot(c_x[break_idx:], c_y[break_idx:, i], '-C0',
# zorder=-10)
# if self.convergence_test_type == 'autocorr':
# ax.set_ylabel(r'$\tau_\mathrm{exp}$')
# elif self.convergence_test_type == 'GR':
# ax.set_ylabel('PSRF')
# ax.ticklabel_format(useOffset=False)
else:
axes[0].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, temp].T
if burnin_idx:
axes[0].plot(
idxs[:burnin_idx],
cs[:burnin_idx],
color="C3",
alpha=alpha,
lw=lw,
)
axes[0].plot(
idxs[burnin_idx:], cs[burnin_idx:], color="k", alpha=alpha, lw=lw
)
if symbols:
axes[0].set_ylabel(symbols[0], labelpad=labelpad)
axes[-1].set_xlabel(r"$\textrm{Number of steps}$", labelpad=0.2)
if plot_det_stat:
if len(axes) == ndim:
axes.append(fig.add_subplot(ndim + 1, 1, ndim + 1))
lnl = sampler.loglikelihood[temp, :, :]
if burnin_idx and add_det_stat_burnin:
burn_in_vals = lnl[:, :burnin_idx].flatten()
try:
twoF_burnin = (
burn_in_vals[~np.isnan(burn_in_vals)] - self.likelihoodcoef
)
axes[-1].hist(twoF_burnin, bins=50, histtype="step", color="C3")
except ValueError:
logging.info(
"Det. Stat. hist failed, most likely all "
"values where the same"
)
pass
else:
twoF_burnin = []
prod_vals = lnl[:, burnin_idx:].flatten()
try:
twoF = prod_vals[~np.isnan(prod_vals)] - self.likelihoodcoef
axes[-1].hist(twoF, bins=50, histtype="step", color="k")
except ValueError:
logging.info(
"Det. Stat. hist failed, most likely all "
"values where the same"
)
pass
if self.BSGL:
axes[-1].set_xlabel(r"$\mathcal{B}_\mathrm{S/GL}$")
else:
axes[-1].set_xlabel(r"$\widetilde{2\mathcal{F}}$")
axes[-1].set_ylabel(r"$\textrm{Counts}$")
combined_vals = np.append(twoF_burnin, twoF)
if len(combined_vals) > 0:
minv = np.min(combined_vals)
maxv = np.max(combined_vals)
Range = abs(maxv - minv)
axes[-1].set_xlim(minv - 0.1 * Range, maxv + 0.1 * Range)
xfmt = matplotlib.ticker.ScalarFormatter()
xfmt.set_powerlimits((-4, 4))
axes[-1].xaxis.set_major_formatter(xfmt)
return fig, axes
def _apply_corrections_to_p0(self, p0):
""" Apply any correction to the initial p0 values """
return p0
def _generate_scattered_p0(self, p):
""" Generate a set of p0s scattered about p """
p0 = [
[
p + self.scatter_val * p * np.random.randn(self.ndim)
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
return p0
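    # Added descriptive note: the nested list built above has shape
    # (ntemps, nwalkers, ndim), i.e. one perturbed copy of `p` per walker and per
    # temperature, each offset by a zero-mean Gaussian of scale scatter_val * p.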
def _generate_initial_p0(self):
""" Generate a set of init vals for the walkers """
if type(self.theta_initial) == dict:
logging.info("Generate initial values from initial dictionary")
if hasattr(self, "nglitch") and self.nglitch > 1:
raise ValueError("Initial dict not implemented for nglitch>1")
p0 = [
[
[
self._generate_rv(**self.theta_initial[key])
for key in self.theta_keys
]
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
elif self.theta_initial is None:
logging.info("Generate initial values from prior dictionary")
p0 = [
[
[
self._generate_rv(**self.theta_prior[key])
for key in self.theta_keys
]
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
else:
raise ValueError("theta_initial not understood")
return p0
def _get_new_p0(self, sampler):
""" Returns new initial positions for walkers are burn0 stage
This returns new positions for all walkers by scattering points about
the maximum posterior with scale `scatter_val`.
"""
temp_idx = 0
pF = sampler.chain[temp_idx, :, :, :]
lnl = sampler.loglikelihood[temp_idx, :, :]
lnp = sampler.logprobability[temp_idx, :, :]
# General warnings about the state of lnp
if np.any(np.isnan(lnp)):
logging.warning(
"Of {} lnprobs {} are nan".format(np.shape(lnp), np.sum(np.isnan(lnp)))
)
if np.any(np.isposinf(lnp)):
logging.warning(
"Of {} lnprobs {} are +np.inf".format(
np.shape(lnp), np.sum(np.isposinf(lnp))
)
)
if np.any(np.isneginf(lnp)):
logging.warning(
"Of {} lnprobs {} are -np.inf".format(
np.shape(lnp), np.sum(
|
np.isneginf(lnp)
|
numpy.isneginf
|
import paddlehub as hub
import argparse
import cv2
from PIL import Image, ImageDraw, ImageFont
import moviepy.editor as mpe
from moviepy.editor import VideoFileClip
import numpy as np
import random
import copy
from tqdm import tqdm
class segUtils():
def __init__(self):
super(segUtils, self).__init__()
self.module = hub.Module(name="deeplabv3p_xception65_humanseg")
def do_seg(self, frame):
res = self.module.segmentation(images=[frame], use_gpu=True)
return res[0]['data']
class detUtils():
def __init__(self):
super(detUtils, self).__init__()
self.module = hub.Module(name="yolov3_resnet50_vd_coco2017")
def do_det(self, frame):
res = self.module.object_detection(images=[frame], use_gpu=True)
for r in res[0]['data']:
if r['label'] == 'person':
return r
def cv2ImgAddText(img, text, left, top, textColor=(255, 0, 0), textSize=50):
    if isinstance(img, np.ndarray):  # check whether the input is an OpenCV image (numpy array)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontStyle = ImageFont.truetype(
"font/simsun.ttc", textSize, encoding="utf-8")
draw.text((left+1, top+1), text, (0, 0, 0), font=fontStyle)
draw.text((left, top), text, textColor, font=fontStyle)
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
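# Illustrative usage sketch (added, not part of the original script; the image
# path is hypothetical and "font/simsun.ttc" is assumed to exist relative to the
# working directory):
#     frame = cv2.imread("frame.jpg")
#     frame = cv2ImgAddText(frame, "hello", 50, 50, textColor=(255, 255, 255), textSize=40)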
su = segUtils()
du = detUtils()
class Position():
def __init__(self, x, y, ranrange):
super(Position, self).__init__()
self.x = x
self.y = y
self.th = min(x, y) / 2
self.index = 0
self.speedx = 0
self.speedy = 0
self.ranrange = ranrange
def getdirection(self):
speed_x = random.randint(-self.ranrange, self.ranrange)
speed_y = random.randint(-self.ranrange, self.ranrange)
return speed_x, speed_y
def getPos(self):
if self.index % 7 == 0:
self.speedx, self.speedy = self.getdirection()
if self.index > 4:
self.speedx *= 1.1
self.speedy *= 1.1
self.index += 1
newx = self.x + self.speedx
newy = self.y + self.speedy
if newx < self.x - self.th:
self.speedx = -self.speedx
newx = self.x - self.th
elif newx > self.x + self.th:
self.speedx = -self.speedx
newx = self.x + self.th
if newy < self.y - self.th:
self.speedy = -self.speedy
newy = self.y - self.th
elif newy > self.y + self.th:
self.speedy = -self.speedy
newy = self.y + self.th
return newx if newx > 0 else 0, newy
def crop(frame, bbox, margin):
h, w = frame.shape[:2]
left = int(bbox['left'])
right = int(bbox['right'])
top = int(bbox['top'])
bottom = int(bbox['bottom'])
left = left - margin if left - margin > 0 else 0
right = right + margin if right + margin < w else w - 1
top = top - margin if top - margin > 0 else 0
bottom = bottom + margin if bottom + margin < h else h - 1
return frame[top:bottom, left:right,:]
def compose(humanimg, backimg, left):
leftimg = cv2.imread(humanimg)
leftback = cv2.imread(backimg)
bbox = du.do_det(leftimg)
leftimg = crop(leftimg, bbox, 20)
height, width = leftback.shape[:2]
h, w = leftimg.shape[:2]
newheight = int(height * 3 / 5)
newwidth = int(newheight * w / h)
leftimg = cv2.resize(leftimg, (newwidth, newheight))
leftmask = np.around(su.do_seg(leftimg) / 255)
leftmask3 =
|
np.repeat(leftmask[:,:,np.newaxis], 3, axis=2)
|
numpy.repeat
|
# -*- coding: utf-8 -*-
import numpy as np
from .base import Tracker
from ..base import Property
from ..dataassociator import DataAssociator
from ..deleter import Deleter
from ..reader import DetectionReader
from ..initiator import Initiator
from ..updater import Updater
from ..types.prediction import GaussianStatePrediction
from ..types.update import GaussianStateUpdate
from ..functions import gm_reduce_single
class SingleTargetTracker(Tracker):
"""A simple single target tracker.
Track a single object using Stone Soup components. The tracker works by
first calling the :attr:`data_associator` with the active track, and then
either updating the track state with the result of the :attr:`updater` if
a detection is associated, or with the prediction if no detection is
associated to the track. The track is then checked for deletion by the
:attr:`deleter`, and if deleted the :attr:`initiator` is called to generate
a new track. Similarly if no track is present (i.e. tracker is initialised
or deleted in previous iteration), only the :attr:`initiator` is called.
Parameters
----------
Attributes
----------
track : :class:`~.Track`
Current track being maintained. Also accessible as the sole item in
:attr:`tracks`
"""
initiator = Property(
Initiator,
doc="Initiator used to initialise the track.")
deleter = Property(
Deleter,
doc="Deleter used to delete the track")
detector = Property(
DetectionReader,
doc="Detector used to generate detection objects.")
data_associator = Property(
DataAssociator,
doc="Association algorithm to pair predictions to detections")
updater = Property(
Updater,
doc="Updater used to update the track object to the new state.")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.track = None
@property
def tracks(self):
if self.track is not None:
return {self.track}
else:
return set()
def tracks_gen(self):
self.track = None
for time, detections in self.detector.detections_gen():
if self.track is not None:
associations = self.data_associator.associate(
self.tracks, detections, time)
if associations[self.track]:
state_post = self.updater.update(associations[self.track])
self.track.append(state_post)
else:
self.track.append(
associations[self.track].prediction)
if self.track is None or self.deleter.delete_tracks(self.tracks):
new_tracks = self.initiator.initiate(detections)
if new_tracks:
track = next(iter(new_tracks))
self.track = track
else:
self.track = None
yield time, self.tracks
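# Added usage sketch (illustrative only, component objects are placeholders and
# keyword-argument construction from the declared Properties is assumed):
#     tracker = SingleTargetTracker(
#         initiator=my_initiator, deleter=my_deleter, detector=my_detector,
#         data_associator=my_associator, updater=my_updater)
#     for timestamp, tracks in tracker.tracks_gen():
#         ...  # consume the current track estimate at each detection time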
class MultiTargetTracker(Tracker):
"""A simple multi target tracker.
Track multiple objects using Stone Soup components. The tracker works by
first calling the :attr:`data_associator` with the active tracks, and then
either updating the track state with the result of the :attr:`updater` if
a detection is associated, or with the prediction if no detection is
associated to the track. Tracks are then checked for deletion by the
:attr:`deleter`, and remaining unassociated detections are passed to the
:attr:`initiator` to generate new tracks.
Parameters
----------
"""
initiator = Property(
Initiator,
doc="Initiator used to initialise the track.")
deleter = Property(
Deleter,
doc="Initiator used to initialise the track.")
detector = Property(
DetectionReader,
doc="Detector used to generate detection objects.")
data_associator = Property(
DataAssociator,
doc="Association algorithm to pair predictions to detections")
updater = Property(
Updater,
doc="Updater used to update the track object to the new state.")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tracks = set()
@property
def tracks(self):
return self._tracks.copy()
def tracks_gen(self):
self._tracks = set()
for time, detections in self.detector.detections_gen():
associations = self.data_associator.associate(
self._tracks, detections, time)
associated_detections = set()
for track, hypothesis in associations.items():
if hypothesis:
state_post = self.updater.update(hypothesis)
track.append(state_post)
associated_detections.add(hypothesis.measurement)
else:
track.append(hypothesis.prediction)
self._tracks -= self.deleter.delete_tracks(self._tracks)
self._tracks |= self.initiator.initiate(
detections - associated_detections)
yield time, self.tracks
class MultiTargetMixtureTracker(Tracker):
"""A simple multi target tracker that receives associations from a
    (Gaussian) Mixture associator.
Track multiple objects using Stone Soup components. The tracker works by
first calling the :attr:`data_associator` with the active tracks, and then
either updating the track state with the result of the
:attr:`data_associator` that reduces the (Gaussian) Mixture of all
possible track-detection associations, or with the prediction if no
detection is associated to the track. Tracks are then checked for deletion
by the :attr:`deleter`, and remaining unassociated detections are passed
to the :attr:`initiator` to generate new tracks.
Parameters
----------
"""
initiator = Property(
Initiator,
doc="Initiator used to initialise the track.")
deleter = Property(
Deleter,
doc="Initiator used to initialise the track.")
detector = Property(
DetectionReader,
doc="Detector used to generate detection objects.")
data_associator = Property(
DataAssociator,
doc="Association algorithm to pair predictions to detections")
updater = Property(
Updater,
doc="Updater used to update the track object to the new state.")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tracks = set()
@property
def tracks(self):
return self._tracks.copy()
def tracks_gen(self):
self._tracks = set()
for time, detections in self.detector.detections_gen():
associations = self.data_associator.associate(
self._tracks, detections, time)
unassociated_detections = set(detections)
for track, multihypothesis in associations.items():
# calculate each Track's state as a Gaussian Mixture of
# its possible associations with each detection, then
# reduce the Mixture to a single Gaussian State
posterior_states = []
posterior_state_weights = []
for hypothesis in multihypothesis:
if not hypothesis:
posterior_states.append(hypothesis.prediction)
else:
posterior_states.append(
self.updater.update(hypothesis))
posterior_state_weights.append(
hypothesis.probability)
means = np.array([state.state_vector for state
in posterior_states])
means = np.reshape(means, np.shape(means)[:-1])
covars = np.array([state.covar for state
in posterior_states])
covars = np.reshape(covars, (
|
np.shape(covars)
|
numpy.shape
|
import numpy as np
from funcGeneral import max2,argmax2, max3,argmax3, deriv1,deriv1_step2,enhance,argmin3
from scipy import interpolate
import matplotlib.pyplot as plt
### PEAK DETECTION ###
def peakListAll(dProject,keys):
for key in keys:
key1=str('dPeak'+key)
dProject[key1]=fPeakList(dProject['dData'][key],False,False)
return dProject
def fPeakList(dataIn,isDel=False,isAdd=False,repType=None):
    ## RepTypes: "Cubic", "Poly2"
peakX,peakY=peakDetection(dataIn)
if peakX[0]<3:
peakX=np.delete(peakX,0)
peakY=np.delete(peakY,0)
if peakX[-1]>len(dataIn)-4:
peakX=np.delete(peakX,-1)
peakY=np.delete(peakY,-1)
dPeakList=DPeakList()
dPeakList['NPeak']=len(peakX)
dPeakList['pos']=peakX
dPeakList['amp']=peakY
dPeakList['score']=np.ones(dPeakList['NPeak'])
dPeakList['averW'],dPeakList['stdW'],dPeakList['minW'],dPeakList['maxW']=findAverPeakW(peakX,rate=0.33,minR=0.4,maxR=1.8)
if isDel:
dPeakList=delPeaks(dPeakList, dataIn)
if isAdd:
dPeakList=addPeaks(dPeakList, dataIn)
if repType!=None:
dPeakList['X']=[]
dPeakList['Y']=[]
for i in range(dPeakList['NPeak']):
if repType=="Cubic":
newX,newY,newPos,newAmp=fitSplineToPeak(dataIn,dPeakList['pos'][i],wid=3)
elif repType=="Poly2":
newX,newY,newPos,newAmp=fitPolyToPeak(dataIn,dPeakList['pos'][i],wid=3)
elif repType=="Amp":
newX,newY,newPos,newAmp=peakX[i],peakY[i],dPeakList['pos'][i],peakY[i]
dPeakList['pos'][i]=newPos
dPeakList['amp'][i]=newAmp
dPeakList['X'].append(newX)
dPeakList['Y'].append(newY)
return dPeakList
def DPeakList():
dPeakList={}
dPeakList['NPeak']=0
dPeakList['pos']=np.array([],dtype='i4')
dPeakList['amp']=np.array([],dtype='f4')
dPeakList['wid']=np.array([],dtype='f4')
dPeakList['area']=np.array([],dtype='f4')
dPeakList['averW']=np.array([],dtype='f4')
dPeakList['stdW']=np.array([],dtype='f4')
dPeakList['minW']=np.array([],dtype='f4')
dPeakList['maxW']=np.array([],dtype='f4')
return dPeakList
def peakDetection(dataIn,isY=True):
if len(dataIn)<3:
peakX=np.array([])
return peakX
derivData=deriv1(dataIn)
peakX=findPeakX(derivData,dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
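# Added note on the detection rule used above: findPeakX flags a candidate peak
# wherever the sign of the first derivative decreases from one sample to the
# next (positive/zero to negative), refines the location with argmax3 over the
# three neighbouring samples, and discards peaks in the first few samples via
# the `peak > 2` guard.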
def peakDetection_v3(dataIn,isY=True):
if len(dataIn)<3:
peakX=np.array([])
return peakX
derivData1=deriv1(dataIn)
derivData2=deriv1_step2(dataIn)
av_deriv=np.add(derivData1,derivData2)/2
peakX=findPeakX_v5(av_deriv,dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def troughDetection(dataIn,isY=True):
if len(dataIn)<3:
peakX=np.array([])
return peakX
derivData=deriv1(dataIn)
peakX=findtroughX(derivData,dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def peakDetection_v2(dataIn,isY=True):
if len(dataIn)<2:
peakX=np.array([])
return peakX
derivData=deriv1(dataIn)
peakX=findPeakX(derivData,dataIn)
peakX=findPeakX(derivData,dataIn)
#peakX=findPeakX_v3(dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def findPeakX(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
while i<NData-1:
if np.sign(derivData[i]) > np.sign(derivData[i+1]):
peak=argmax3(dataIn[i-1],dataIn[i],dataIn[i+1])
peak=i-1+peak
i=i+1
if peak>2:
peakX=np.append(peakX,peak)
i+=1
return peakX
def findPeakX_v5(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
window_size=10
window_number=NData/window_size
for j in range(window_number):
derivData_w=derivData[j*(window_size):((j+1)*window_size)]
dataIn_w=dataIn[j*(window_size):((j+1)*window_size)]
#print len(derivData_w)
mean_ddw=np.mean(derivData_w)
derivData_w=derivData_w-mean_ddw
#plt.plot(np.arange(window_size),derivData_w)
#plt.plot(np.arange(window_size),dataIn_w)
#plt.show()
for i in range(window_size-1):
if np.sign(derivData_w[i]) > np.sign(derivData_w[i+1]):
peak=argmax3(dataIn_w[i-1],dataIn_w[i],dataIn_w[i+1])
peak=i-1+peak+j*window_size
if peak>2 and peak<NData-1:
peakX=
|
np.append(peakX,peak)
|
numpy.append
|
import glob
import numpy as np
import matplotlib
# matplotlib.use('Agg')
import matplotlib; matplotlib.use('PDF')
matplotlib.rc('font', family='serif', size=25)
matplotlib.rc('text', usetex=True)
from matplotlib import pyplot as plt
algo_to_alg = {
"rbpo_ent_100_alpha_1": ["E100-A1",'c'],
# "rbpo_ent_100_alpha_0.1": ["E100-A0.1",'b'],
# "rbpo_ent_100_alpha_0.5": ["E100-A0.5",[0.8,0.7,0.7]],
# "rbpo_ent10_alpha_1": ["E10-A1",'r'],
# "rbpo_ent10_alpha_0.1": ["E10-A0.5",'y'],
"rbpo_ent10_alpha_0.25": ["E10-A0.25",'m'],
"rbpo_ent10_alpha_0.5": ["E10-A0.5",'k'],
"rbpo_ent1_alpha_1": ["E1-A1",[1.0,0.0,0.8]],
"rbpo_noent_alpha_1": ["E0-A1",'g'],
}
# ent weights
# algo_to_alg = {
# "rbpo_ent_100_alpha_1": ["100",'#FA1900'],
# "rbpo_ent10_alpha_1": ["10",'#BFBFBF'],
# "rbpo_noent_alpha_1": ["0",'#8C7F70'],
# }
# name = "ent"
# algnames = ['rbpo_noent_alpha_1', 'rbpo_ent10_alpha_1',
# 'rbpo_ent_100_alpha_1']
# ent input
algo_to_alg = {
"rbpo_ent_100_alpha_1": ["Belief",'#FA1900'],
# "rbpo_ent_100_alpha_0.1": ["E100-A0.1",'b'],
# "rbpo_ent_100_alpha_0.5": ["E100-A0.5",[0.8,0.7,0.7]],
# "rbpo_ent10_alpha_1": ["E10-A1",'r'],
# "rbpo_ent10_alpha_0.1": ["E10-A0.5",'y'],
"rbpo_ent_hidden_ent100_alpha_1": ["None",'#504E75'],
"rbpo_ent_input_ent100_alpha_1": ["Entropy",'#698F6E']
}
name = "ent_input"
algnames = ['rbpo_ent_hidden_ent100_alpha_1',
'rbpo_ent_input_ent100_alpha_1',
'rbpo_ent_100_alpha_1']
# baselines
# algo_to_alg = {
# "bpo_ent100": ["BPO",'#8C7F70'],
# "upmle_ent_100": ["UPMLE",'#F2D39B'],
# "rbpo_ent_100_alpha_1": [r'\bf{RBPO}','#FA1900']
# }
# name = "baseline"
# algnames = ['bpo_ent100', 'upmle_ent_100',
# 'rbpo_ent_100_alpha_1']
stat = dict()
fig, ax = plt.subplots(figsize=(8,6))
env = "maze10"
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_step = 7000
we = [80.73, 23.68*1.96]
random = -500 * 0.9 + 500 * 0.1
maximum = 500
plt.plot([0, max_step], [500, 500], 'k--', lw=4)
# plt.text(max_step + 40, maximum - 10, r'Optimal$^*$', color='k')
plt.fill_between([0,max_step], y1=[we[0]-we[1],we[0]-we[1]],
y2=[we[0]+we[1],we[0]+we[1]], alpha=0.3, color="#597223")
plt.plot([0, max_step], [we[0],we[0]], color='#597223', lw=4)
# plt.text(max_step + 40, we[0] - 10, r'Expert', color='#597223')
if name == "baseline":
# plt.ylim(random, 500)
plt.yticks([random, 0, we[0], maximum])
plt.plot([0, max_step], [random, random], '--', color="#878787", lw=4)
# plt.text(max_step + 40, random - 10, r'Random', color='#878787')
else:
# plt.ylim(we[0]-we[1], 500)
plt.ylim(0, 500)
plt.xlim(0, max_step)
plt.xticks([6000])
for i, pr in enumerate(algnames):
files = glob.glob("/home/gilwoo/output/{}/{}/*.txt".format(env, pr))
files.sort()
print(pr, len(files))
if len(files) == 0:
continue
rewards = []
for f in files:
try:
data = np.genfromtxt(f, delimiter='\t', skip_header=1)
except:
continue
print(f)
print(data.shape)
if data.shape[0] < 5:
continue
timestep = int(f.split("/")[-1].split(".")[0])
if timestep > max_step:
continue
rewards += [(timestep, np.mean(data[:, 1]), 1.96*np.std(data[:,1])/np.sqrt(data.shape[0]))]
rewards =
|
np.array(rewards)
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_MinVarFacRep [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_MinVarFacRep&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-optim-pseudo-inv-lo).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, array, ones, zeros
from numpy.linalg import solve
from numpy.random import rand
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, legend, ylabel, \
xlabel, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
# input parameters
n_ = 100 # max market dimension
nstep = arange(5,n_+1) # grid of market dimensions
s2_Z_ = array([[1]]) # variance of factor
stepsize = len(nstep)
s2_P_Z_MV = zeros((stepsize, 1))
s2_P_Z = zeros((stepsize, 1))
for n in range(stepsize): # set covariance of the residuals
d =
|
rand(nstep[n], 1)
|
numpy.random.rand
|
import numpy as np
import utils_parent as utils_parent
from VGMM import VGMM
from visualization import T_SNE_Plot
from config_manager import ConfigManager
from mdataset_class import InputDataset
import json
import os
def cluster_save2disk_label(result_path, phi, num_clusters):
print("clustering given variational parameter phi(mu/sigma) data from arguments")
vgmm = VGMM(num_clusters)
mdict, X_prediction_vgmm = vgmm.cluster(phi)
# save the result of clustering
path = result_path + ConfigManager.cluster_index_json_name # path = result_path + "/" + "cluster_dict.json"
vgmm.save_dict(path, mdict)
path = result_path + ConfigManager.cluster_predict_tsv_name # path = result_path + "/" + "cluster_predict.tsv"
vgmm.save_predict(path, X_prediction_vgmm)
path = result_path + ConfigManager.cluster_predict_npy_name # path = result_path + "/" + "cluster_predict.npy"
np.save(path, X_prediction_vgmm)
print("cluster results saved to labeled path")
def generate_metadata(m, mdict, num_clusters):
for i in range(num_clusters):
d = mdict[str(i)]
m[d] = i
return m
def concatenate_index_array(d, num_labels, num_clusters):
pos = {}
# pos['ij']: label i, cluster j
for j in range(num_clusters):
# init with data of label 0 in cluster j
p = d[str(0) + str(j)]
# concatenate p with label i in cluster j
for i in np.arange(1, num_labels):
p = np.concatenate((p, d[str(i) + str(j)]), axis=None)
pos[str(j)] = p.tolist()
return pos # pos[j]: represent cluster j with multi-label
def cluster_common_embeding_labelwise(y, data_path, num_labels, num_clusters):
z = np.load(data_path + ConfigManager.z_name)
# global ground truth
# y = np.load(data_path + ConfigManager.y_name)[:z.shape[0]]
d_label = InputDataset.split_data_according_to_label(y, num_labels)
# cluster data of each label
vgmm = VGMM(num_clusters)
pos = {}
for i in range(num_labels):
print("cluster label "+str(i))
# extract data of label i
data = z[d_label[str(i)]]
# extract global index of data with label i
data_pos = d_label[str(i)]
_, data_pred = vgmm.cluster(data)
for j in range(num_clusters):
# store the index of cluster j into dictionary ij, i represent label i , cluster j
pos[str(i) + str(j)] = data_pos[np.where(data_pred == j)[0]]
pos_index_cluster = concatenate_index_array(pos,num_labels=num_labels, num_clusters=num_clusters)
vgmm.save_dict(data_path + ConfigManager.cluster_index_json_name, pos_index_cluster)
#generate metadata for visualization
m =
|
np.zeros(y.shape)
|
numpy.zeros
|
import numpy as np
from numpy.fft import fft
import os
import scipy.ndimage.filters as nd_filters
import cv2
def matlab_factorial(l):
res = []
for elt in l:
if not len(res):
if elt == 0:
res.append(1)
else:
res.append(elt)
else:
            res.append(elt * res[-1])  # cumulative product, so the list of factorials builds up
return res
class OFilter:
def __init__(self, order, mask_size, mode='symmetric'):
self.order = order
self.mask_size = mask_size
self.mode = mode
def local_filter(self, x):
x.sort()
return x[self.order]
def ordfilt2(self, A):
return nd_filters.generic_filter(np.pad(A, 1, self.mode), self.local_filter, size=(self.mask_size, self.mask_size))
def bf_filter(x,bfdata):
# filtering bank
#
# x is a 2d matrix
# bfdata is the filter bank, it is a structure with the following elements:
# .number number of filters,
# .bf a 3d matrix with the filters,
# .factor factor of the filters.
#
n_filters, bf, factor = bfdata['number'], bfdata['bf'], bfdata['factor']
filtered = np.zeros((x.shape[0], x.shape[1], n_filters))
for idx in range(n_filters):
filtered[:,:,idx] = factor[idx] * cv2.filter2D(x, -1, np.conj(bf[:,:,idx]))
return filtered
def const_bf(SZ,ORDER):
#Compute the simple CHT functions
# bfdata = const_bf(SZ,ORDER)
#
# SZ is the size of patch
# ORDER is the number of basis functions
#
# bfdata is a structure with the following elements:
# .number number of basis functions,
# .orders a matrix with 2 columns, where the first column indicates n and the second column m,
# .bf a 3d matrix with the basis functions,
# .factor factor for ortonormal basis functions.
#
    NM = np.concatenate((np.zeros((ORDER, 1)), np.arange(ORDER)[:, np.newaxis]), axis=1)
BF =
|
np.zeros((SZ, SZ, NM.shape[0]))
|
numpy.zeros
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Sigmoid activation function
def sigmoid(x):
return 1/(1+np.exp(-x))
# Derivative of the sigmoid function
def sigmoid_prime(sigmoid_):
return sigmoid_ * (1 - sigmoid_)
def calc_combinacao_linear (i, w):
inputs = np.array(i)
weights = np.array(w)
return np.dot(inputs, weights)
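# Illustrative forward pass using the helpers above (added sketch; x_i stands
# for one input row, and the weight matrices pesos_ent_camada_oculta and
# pesos_camada_saida are defined further down in this script):
#     hidden_out = sigmoid(calc_combinacao_linear(x_i, pesos_ent_camada_oculta))
#     output = sigmoid(calc_combinacao_linear(hidden_out, pesos_camada_saida))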
DataSet=pd.read_csv('arruela_.csv')
DataSet.head()
DataSet.drop(['Hora','Tamanho'], axis=1, inplace=True)
DataSet.head()
DataSet.describe()
DataSet.columns
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
DataScaled = scaler.fit_transform(DataSet)
DataSetScaled = pd.DataFrame(np.array(DataScaled), columns = ['Referencia','NumAmostra', 'Area', 'Delta', 'Output1','Output2'])
DataSetScaled.head()
x = DataSetScaled.drop(['Output1', 'Output2'], axis=1)
y = DataSet[['Output1','Output2']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=101)
# Size of the training dataset
n_records, n_features = X_train.shape
# MLP architecture
N_input = 4
N_hidden = 3
N_output = 2
taxa_aprendizagem = 0.3
# Hidden layer weights (random initialisation)
pesos_ent_camada_oculta = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))
# Output layer weights (random initialisation)
pesos_camada_saida = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))
epocas = 3000
last_loss = None
EvolucaoError = []
IndiceError = []
for e in range(epocas):
delta_pesos_camada_oculta = np.zeros(pesos_ent_camada_oculta.shape)
delta_pesos_camada_saida =
|
np.zeros(pesos_camada_saida.shape)
|
numpy.zeros
|
#!/usr/bin/env python3
"""
Python functions that implement the core MAMA processing
"""
import logging
from typing import Any, Dict, List, Tuple, Union
import warnings
import numpy as np
import pandas as pd
from scipy.stats import norm
from core_mama import (create_omega_matrix, create_sigma_matrix, run_mama_method, qc_omega,
qc_sigma)
from reg_mama import (REG_INT_OPT_NAME, REG_LD_OPT_NAME, REG_LD_SCALE_FACTOR_NAME, REG_SE_OPT_NAME,
MAMA_REG_OPT_ALL_FREE, run_ldscore_regressions)
from util.df import Filter, intersect_indices
from util.sumstats import (SNP_COL, BP_COL, CHR_COL, BETA_COL, FREQ_COL, SE_COL, A1_COL,
A2_COL, P_COL, INFO_COL, N_COL, Z_COL, COMPLEMENT, BASES,
MAX_RSID_LOGGING, create_freq_filter, create_chr_filter,
standardize_all_sumstats, process_sumstats)
# Constants / Parameters / Types #############
# Pylint upper-case errors disabled here to adhere to Python typing module conventions
AncestryId = Any # pylint: disable=invalid-name
PhenotypeId = Any # pylint: disable=invalid-name
PopulationId = Tuple[AncestryId, PhenotypeId]
# Columns that MAMA requires
MAMA_REQ_STD_COLS = {SNP_COL, CHR_COL, BETA_COL, FREQ_COL, SE_COL, A1_COL, A2_COL, BP_COL, P_COL}
# Map of default regular expressions used to convert summary stat column names to standardized names
MAMA_RE_EXPR_MAP = {
SNP_COL : '.*SNP.*|.*RS.*',
BP_COL : '.*BP.*|.*POS.*',
CHR_COL : '.*CHR.*',
BETA_COL : '.*BETA.*',
FREQ_COL : '.*FREQ.*|.*FRQ.*|.*AF',
SE_COL : '.*SE.*',
A1_COL : '.*A1.*|.*MAJOR.*|.*EFFECT.*ALL.*|REF.*',
A2_COL : '.*A2.*|.*MINOR.*|.*OTHER.*ALL.*|ALT.*',
P_COL : 'P|P.*VAL.*',
INFO_COL : 'INFO',
N_COL : 'N',
}
# Frequency filtering defaults
DEFAULT_MAF_MIN = 0.01
DEFAULT_MAF_MAX = 0.99
# Chromosome filtering defaults
DEFAULT_CHR_LIST = [str(cnum) for cnum in range(1, 23)] + ['X', 'Y']
# Filter-related materials
NAN_FILTER = 'NO NAN'
FREQ_FILTER = 'FREQ BOUNDS'
SE_FILTER = 'SE BOUNDS'
CHR_FILTER = 'CHR VALUES'
SNP_DUP_ALL_FILTER = 'DUPLICATE ALLELE SNPS'
SNP_PALIN_FILT = 'PALINDROMIC SNPS'
SNP_INVALID_ALLELES_FILTER = 'INVALID ALLELES'
SNP_NEGATIVE_P_FILTER = 'NEGATIVE GWAS P'
MAMA_STD_FILTER_FUNCS = {
NAN_FILTER :
{
'func' : lambda df: df[list(MAMA_REQ_STD_COLS)].isnull().any(axis=1),
'description' : "Filters out SNPs with any NaN values in required "
"columns %s" % MAMA_REQ_STD_COLS
},
FREQ_FILTER :
{
'func' : create_freq_filter(DEFAULT_MAF_MIN, DEFAULT_MAF_MAX),
'description' : "Filters out SNPs with FREQ values outside of "
"[%s, %s]" % (DEFAULT_MAF_MIN, DEFAULT_MAF_MAX)
},
SE_FILTER :
{
'func' : lambda df: df[SE_COL].le(0.0),
'description' : "Filters out SNPs with non-positive SE values"
},
CHR_FILTER :
{
'func' : create_chr_filter(DEFAULT_CHR_LIST),
'description' : "Filters out SNPs with listed chromosomes not in %s" % DEFAULT_CHR_LIST
},
SNP_DUP_ALL_FILTER :
{
'func' : lambda df: df[A1_COL] == df[A2_COL],
'description' : "Filters out SNPs with major allele = minor allele"
},
SNP_PALIN_FILT :
{
'func' : lambda df: df[A1_COL].replace(COMPLEMENT) == df[A2_COL],
'description' : "Filters out SNPs where major / minor alleles are a base pair"
},
SNP_INVALID_ALLELES_FILTER :
{
'func' : lambda df: ~df[A1_COL].isin(BASES) | ~df[A2_COL].isin(BASES),
'description' : "Filters out SNPs with alleles not in %s" % BASES
},
SNP_NEGATIVE_P_FILTER :
{
'func' : lambda df: df[P_COL].lt(0.0),
'description' : "Filters out SNPs with negative GWAS P values"
},
}
# Column name to rename N column to if it exists (to help resolve ambiguity since there should be
# an effective N column added)
ORIGINAL_N_COL_RENAME = "N_ORIG"
# Column name of the effective N column that is added to the summary statistics
N_EFF_COL = "N_EFF"
# Derived Constants###########################
# Filter function dictionaries (name to function mapping or description) for MAMA
MAMA_STD_FILTERS = {fname : (finfo['func'], finfo['description'])
for fname, finfo in MAMA_STD_FILTER_FUNCS.items()}
# Regular expression indicating columns to keep in harmonized summary statistics
MAMA_HARM_COLS_RE = '|'.join(MAMA_REQ_STD_COLS)
# Calculate constants used in determination of P values for MAMA
ln = np.log # pylint: disable=invalid-name
LN_2 = ln(2.0)
RECIP_LN_10 = np.reciprocal(ln(10.0))
# Functions ##################################
#################################
def obtain_df(possible_df: Union[str, pd.DataFrame], id_val: Any, sep_arg: Union[None, str] = None
) -> pd.DataFrame:
"""
Small helper function that handles functionality related to reading in a DataFrame
:param possible_df: Should either be a string indicating the full path to a file to be
read into a DataFrame or the DataFrame itself. All other possibilities will
result in this function raising an error
:param id_val: Used for logging / error-reporting to identify the data being read / checked
:raises RuntimeError: If possible_df is not a string or pd.DataFrame
:return pd.DataFrame: Returns a DataFrame
"""
# If this is (presumably) a filename, read in the file
if isinstance(possible_df, str):
logging.info("Reading in %s file: %s", id_val, possible_df)
# Catch ParserWarning that warns of switch to Python engine if that happens
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=pd.errors.ParserWarning,
message="Falling back to the \'python\' engine")
possible_df = pd.read_csv(possible_df, sep=sep_arg, comment='#')
# If neither a string (presumed to be a filename) nor DataFrame are passed in, throw error
elif not isinstance(possible_df, pd.DataFrame):
raise RuntimeError("ERROR: Either pass in filename or DataFrame for %s rather than [%s]" %
(id_val, type(possible_df)))
return possible_df
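# Illustrative call (added; the file path is hypothetical): either a path or an
# already-loaded DataFrame may be passed in.
#     df = obtain_df("/path/to/pop1_sumstats.tsv", id_val="POP1 sumstats", sep_arg="\t")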
#################################
def qc_ldscores(ldscores_df: pd.DataFrame):
"""
Runs QC steps on LD scores. This will be much lighter-weight than what is done on summary
statistics, as it assumes that the LD score file was generated using this software.
:param ldscores_df: Dataframe holding ldscores
:return pd.DataFrame: DataFrame containing the QC'ed LD scores
"""
# Make copy of the dataframe (this copy will be modified)
df = ldscores_df.copy()
# Drop any lines with NaN
nan_drops = df.isnull().any(axis=1)
df.drop(df.index[nan_drops], inplace=True)
# Make sure SNP IDs are lower case ("rs..." rather than "RS...")
df[SNP_COL] = df[SNP_COL].str.lower()
# Set SNP column to be the index and sort
df.set_index(SNP_COL, inplace=True)
df.sort_index(inplace=True)
return df
#################################
def harmonize_all(sumstats: Dict[PopulationId, pd.DataFrame], ldscores: pd.DataFrame,
snp_list: pd.Index = None):
"""
Does the harmonization between the QC'ed input summary statistics and the LD scores. The
DataFrames are all modified in place (SNPs/rows dropped and reference alleles transformed
as needed), and all inputs are expected to have indices = SNP ID (beginning with "rs")
:param sumstats: Dictionary mapping a population id to a DataFrame holding the summary
stat information. The DFs should all have been QCed already.
:param ldscores: DataFrame of LD score information
:param snp_list: If specified, a Series containing rsIDs to which to restrict analysis
"""
# Intersect all the SNP lists to get the SNPs all data sources have in common
snp_intersection = intersect_indices(sumstats.values(), ldscores)
if snp_list is not None:
logging.info("Restricting to user-supplied SNP list (%s SNPs)...", len(snp_list))
snp_intersection = snp_intersection.intersection(snp_list)
logging.info("\n\nNumber of SNPS in initial intersection of all sources: %s",
len(snp_intersection))
# Reduce each DF down to the SNP intersection (and drop extraneous columns, too)
for pop_df in sumstats.values():
snps_to_drop = pop_df.index.difference(snp_intersection)
pop_df.drop(snps_to_drop, inplace=True)
pop_df.drop(pop_df.columns.difference(list(MAMA_RE_EXPR_MAP.keys())), axis=1, inplace=True)
snps_to_drop = ldscores.index.difference(snp_intersection)
ldscores.drop(snps_to_drop, inplace=True)
# Standardize alleles in the summary statistics
logging.info("\nStandardizing reference alleles in summary statistics.")
ref_popid, tot_drop_indices, drop_dict, ref_flip_dict = standardize_all_sumstats(sumstats) # pylint: disable=unused-variable,line-too-long
logging.info("Standardized to population: %s", ref_popid)
logging.info("Dropped %s SNPs during reference allele standardization.", tot_drop_indices.sum())
if logging.root.level <= logging.DEBUG:
logging.debug("RS IDs of drops during standardization: %s",
sumstats[ref_popid].index[tot_drop_indices].to_list())
# Drop SNPs as a result of standardization of reference alleles
for pop_df in sumstats.values():
pop_df.drop(pop_df.index[tot_drop_indices], inplace=True)
ldscores.drop(ldscores.index[tot_drop_indices], inplace=True)
#################################
def write_sumstats_to_file(filename: str, df: pd.DataFrame):
"""
Helper function that writes a summary statistics DataFrame to disk
:param filename: Full path to output file
:param df: DataFrame holding the summary statistics
"""
df.to_csv(filename, sep="\t", index_label=SNP_COL, na_rep="NaN")
#################################
def collate_df_values(sumstats: Dict[PopulationId, pd.DataFrame], ldscores: pd.DataFrame,
ordering: List[PopulationId] = None) -> Tuple[np.ndarray, np.ndarray,
np.ndarray]:
"""
Function that gathers data from DataFrames (betas, ses, etc.) into ndarrays for use in
vectorized processing
:param sumstats: Dictionary of population identifier -> DataFrame
:param ldscores: DataFrame for the LD scores
:param ordering: Optional parameter indicating the order in which populations should be arranged
(if not specified, the ordering of the sumstats dictionary keys will be used)
:return: Betas (MxP), SEs (MxP), and LD scores (MxPxP)
"""
# Make sure ordering is specified
if not ordering:
ordering = list(sumstats.keys())
# Move summary statistic data into arrays to allow for vectorized operations
# 1) Gather important numbers to use for shapes and dimensions
num_pops = len(sumstats)
num_snps = len(ldscores)
# 2) Create empty arrays so the memory can be allocated all at once
beta_arr = np.zeros((num_snps, num_pops))
se_arr = np.zeros((num_snps, num_pops))
ldscore_arr =
|
np.zeros((num_snps, num_pops, num_pops))
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 18:41:54 2020
@author: Vicky
Neural PDE - Tensorflow 2.X
Testing with Advection Equation
PDE: u_t + 1.0*u_x
IC: u(0, x) = exp^(-200(x-0.25)^2),
BC: Periodic
Domain: t ∈ [0,1.5], x ∈ [0,1]
"""
import os
import numpy as np
import tfpde
# %%
#Neural Network Hyperparameters
NN_parameters = {'Network_Type': 'Regular',
'input_neurons' : 2,
'output_neurons' : 1,
'num_layers' : 3,
'num_neurons' : 100
}
#Neural PDE Hyperparameters
NPDE_parameters = {'Sampling_Method': '',
'N_initial' : 100, #Number of Randomly sampled Data points from the IC vector
'N_boundary' : 300, #Number of Boundary Points
'N_domain' : 5000 #Number of Domain points generated
}
#PDE
PDE_parameters = {'Inputs': 't, x',
'Outputs': 'u',
'Equation': 'D(u, t) + 1.0*D(u, x)',
'lower_range': [0.0, 0.0], #Float
'upper_range': [1.5, 1.0], #Float
'Boundary_Condition': "Dirichlet",
'Boundary_Vals' : None,
'Initial_Condition': lambda x: np.exp(-200*(x-0.25)**2),
'Initial_Vals': None
}
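# Added note: with unit advection speed and periodic boundaries the exact
# solution is the initial profile translated in time,
#     u(t, x) = exp(-200 * (((x - t) mod 1) - 0.25)**2),
# which the simulation data loaded below should approximate.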
# %%
#Using Simulation Data at the Initial and Boundary Values (BC would be Dirichlet under that case)
N_f = NPDE_parameters['N_domain']
N_i = NPDE_parameters['N_initial']
N_b = NPDE_parameters['N_boundary']
# Data Location
data_loc = os.path.abspath('..') + '/Data/'
data = np.load(data_loc + 'Advection.npz')
t = data['t'].flatten()[:,None]
x = data['x'].flatten()[:,None]
Exact =
|
np.real(data['U'])
|
numpy.real
|
import copy
import matplotlib.pyplot as plt
import numpy as np
import rmsd
from matplotlib.ticker import NullFormatter
from scipy.stats import gaussian_kde
def save_figure(filename, fig=None):
if fig is None:
plt.savefig(filename + ".png", bbox_inches="tight")
plt.savefig(filename + ".pdf", bbox_inches="tight")
else:
fig.savefig(filename + ".png", bbox_inches="tight")
fig.savefig(filename + ".pdf", bbox_inches="tight")
return
def get_ratio(inertia):
inertia.sort()
ratio = np.zeros(2)
ratio[0] = inertia[0]/inertia[2]
ratio[1] = inertia[1]/inertia[2]
return ratio
def get_gaussian_kernel(xvalues):
bins = np.linspace(0.0,1.0, 200)
gaussian_kernel = gaussian_kde(xvalues)
values = gaussian_kernel(bins)
return bins, values
def rotation_matrix(sigma):
"""
https://en.wikipedia.org/wiki/Rotation_matrix
"""
radians = sigma * np.pi / 180.0
r11 = np.cos(radians)
r12 = -np.sin(radians)
r21 = np.sin(radians)
r22 = np.cos(radians)
R = np.array([[r11, r12], [r21, r22]])
return R
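# Worked example (added comment): rotation_matrix(90) returns approximately
#     [[0, -1],
#      [1,  0]]
# i.e. a counter-clockwise rotation by 90 degrees.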
def scale_triangle_with_kde(xvalues, yvalues, filename="_istwk"):
fig_kde, axes_kde = plt.subplots(3, sharex=True, sharey=True)
fig_his, ax_his = plt.subplots(1)
# define edges
sphere = np.array([1, 1])
rod = np.array([0, 1])
disc = np.array([0.5, scale_func(0.5)])
sphere = sphere[np.newaxis]
rod = rod[np.newaxis]
disc = disc[np.newaxis]
# define and scale coord for distances to sphere
yvalues_scale = scale_func(copy.deepcopy(yvalues))
coord = np.array([xvalues, yvalues_scale])
linewidth=0.9
dots = [sphere, rod, disc]
names = ["Sphere", "Rod", "Disc"]
for ax, dot, name in zip(axes_kde, dots, names):
dist = distance(coord, dot.T)
bins, values = get_gaussian_kernel(dist)
ax.plot(bins, values, "k", linewidth=linewidth, label=name)
ax.text(0.045,0.75,name,
horizontalalignment='left',
transform=ax.transAxes)
ax.get_yaxis().set_visible(False)
ax = axes_kde[-1]
ax.set_xticks([0, 1])
ax.set_xticklabels(["is shape", "not shape"])
# prettify
fig_kde.subplots_adjust(hspace=0)
# save
save_figure(filename+"_kde", fig=fig_kde)
# His
nbins = 100
H, xedges, yedges = np.histogram2d(xvalues, yvalues, bins=nbins)
H = np.rot90(H)
H = np.flipud(H)
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
pc = ax_his.pcolormesh(xedges,yedges,Hmasked, cmap="PuRd")
ax_his.set_aspect('equal')
ax_his.get_yaxis().set_visible(False)
ax_his.get_xaxis().set_visible(False)
ax_his.spines['top'].set_visible(False)
ax_his.spines['right'].set_visible(False)
ax_his.spines['bottom'].set_visible(False)
ax_his.spines['left'].set_visible(False)
ax_his.set_ylim([0.5-0.05, 1.05])
ax_his.set_xlim([0.0-0.05, 1.0+0.05])
max_count = np.max(H)
max_count /= 10
max_count = np.floor(max_count)
max_count *= 10
cb_ticks = np.linspace(1, max_count, 3, dtype=int)
# cb_ticks = [1, max_count]
cb = fig_his.colorbar(pc, orientation="horizontal", ax=ax_his, ticks=cb_ticks, pad=0.05)
cb.outline.set_edgecolor('white')
# prettify
ax_his.text(1.0, 1.02, "Sphere (1,1)",
horizontalalignment='center')
ax_his.text(0.0, 1.02, "Rod (0,1)",
horizontalalignment='center')
ax_his.text(0.5, 0.5- 0.04, "Disc (0.5,0.5)",
horizontalalignment='center')
save_figure(filename + "_his", fig=fig_his)
return
def scale_func(Y):
"""
scale
1.0 - 0.5
too
1.0 - 0.8660254037844386
"""
target_y = 0.133975
factor_y = 1.0 + ( -target_y)/0.5
diff = Y - 1.0
add = diff*factor_y
Y += add
return Y
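# Worked example (added comment): with target_y = 0.133975 the scaling factor is
# 1 + (-0.133975)/0.5 = 0.73205, so scale_func(1.0) = 1.0 (unchanged) and
# scale_func(0.5) = 0.5 + (0.5 - 1.0) * 0.73205 ≈ 0.1340, pushing the disc
# vertex down so the rod/disc/sphere triangle is equilateral.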
def distance(coord, dot):
dist = coord - dot
dist = dist**2
dist = np.sum(dist, axis=0)
dist = np.sqrt(dist)
return dist
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', type=str, help="csv")
args = parser.parse_args()
# get name
name = args.filename.split(".")
name = ".".join(name[:-1])
# Read csvfile
f = open(args.filename)
X = []
Y = []
for i, line in enumerate(f):
line = line.split()
if len(line) > 3:
smi = line[0]
line = line[1:]
line = [float(x) for x in line]
line = np.array(line)
ratio = get_ratio(line)
if sum(line) == 0:
print("zero sum", i+1)
continue
X.append(ratio[0])
Y.append(ratio[1])
X = np.array(X)
Y = np.array(Y)
# what to dooo
scale_triangle_with_kde(X, Y, filename=name)
return
def test():
name = args.filename.split(".")
name = ".".join(name[:-1])
print(name)
# Triangle
X = [0, 0.5, 1, 0]
Y = [1, 0.5, 1, 1]
tri = [X, Y]
tri = np.array(tri)
# plt.plot(X, Y, linewidth=0.5, color="grey")
X = [0, 0.5, 1]
Y = [1.0, 0.5, 1.0]
R = np.array([X, Y]).T
cent_tri = rmsd.centroid(R)
print(cent_tri)
X = np.array(X)
Y = np.array(Y)
Y = scale_func(Y)
print("scale")
print(Y)
coord = np.array([X, Y]).T
plt.plot(X, Y, "rx")
cent_tri = rmsd.centroid(coord)
print("center:", cent_tri)
plt.plot(*cent_tri, "ko")
plt.plot(X, Y)
sphere = np.array([1, 1])
rod =
|
np.array([0, 1])
|
numpy.array
|
# Copyright 2013 <NAME>
from __future__ import division
import numpy as np
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from .base import BaseSequenceClassifier
from ._decode import viterbi
from ._utils import atleast2d_or_csr, check_random_state, safe_sparse_dot
class StructuredPerceptron(BaseSequenceClassifier):
"""Structured perceptron for sequence classification.
This implements the averaged structured perceptron algorithm of Collins,
with the addition of a learning rate.
References
----------
<NAME> (2002). Discriminative training methods for hidden Markov
    models: Theory and experiments with perceptron algorithms. EMNLP.
"""
def __init__(self, decode="viterbi", learning_rate=.1, max_iter=10,
random_state=None, verbose=0):
self.decode = decode
self.learning_rate = learning_rate
self.max_iter = max_iter
self.random_state = random_state
self.verbose = verbose
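    # Illustrative usage (added sketch; X, y and lengths are placeholders):
    #     clf = StructuredPerceptron(max_iter=10, learning_rate=0.1)
    #     clf.fit(X, y, lengths)  # X: (n_samples, n_features), y: per-sample labels,
    #                             # lengths: size of each contiguous sequence in X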
def fit(self, X, y, lengths):
X = atleast2d_or_csr(X)
y, Y_true = _one_hot(y)
lengths = np.asarray(lengths)
n_samples, n_features = X.shape
n_classes = Y_true.shape[1]
start = np.cumsum(lengths) - lengths
end = start + lengths
t_trans, t_init, t_final = _count_trans(y, start, end, n_classes)
w = np.zeros((n_classes, n_features))
b =
|
np.zeros(n_classes)
|
numpy.zeros
|
from functools import partial
import numpy as np
import pandas as pd
import logging
logging.basicConfig(level=logging.INFO)
n = 100
m = 200
|
np.random.seed(1)
|
numpy.random.seed
|
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from sunpy.coordinates import sun
import os
import glob
import h5py
import matplotlib.pyplot as plt
import matplotlib as mpl
import moviepy.editor as mpy
from moviepy.video.io.bindings import mplfig_to_npimage
from skimage import measure
import scipy.ndimage as ndi
from numba import jit
mpl.rc("axes", labelsize=16)
mpl.rc("ytick", labelsize=16)
mpl.rc("xtick", labelsize=16)
mpl.rc("legend", fontsize=16)
class Observer:
"""
A class returning the HEEQ and Carrington coordinates of a specified Planet or spacecraft, for a given set of times.
The positions are linearly interpolated from a 2-hour resolution ephemeris that spans 1974-01-01 until 2020-01-01.
Allowed bodies are Earth, Venus, Mercury, STEREO-A and STEREO-B.
Attributes:
body: String name of the planet or spacecraft.
lat: HEEQ latitude of body at all values of time.
lat_c: Carrington latitude of body at all values of time.
lon: HEEQ longitude of body at all values of time.
lon_c: Carrington longitude of body at all values of time.
r: HEEQ radius of body at all values of time.
r_c: Carrington radius of body at all values of time.
time: Array of Astropy Times
"""
def __init__(self, body, times):
"""
:param body: String indicating which body to look up the positions of .
:param times: A list/array of Astropy Times to interpolate the coordinate of the selected body.
"""
bodies = ["EARTH", "VENUS", "MERCURY", "STA", "STB"]
if body.upper() in bodies:
self.body = body.upper()
else:
print("Warning, body {} not recognised.".format(body))
print("Only {} are valid.".format(bodies))
print("Defaulting to Earth")
self.body = "EARTH"
# Get path to ephemeris file and open
dirs = _setup_dirs_()
ephem = h5py.File(dirs['ephemeris'], 'r')
# Now get observers coordinates
all_time = Time(ephem[self.body]['HEEQ']['time'], format='jd')
# Pad out the window to account for single values being passed.
dt = TimeDelta(2 * 60 * 60, format='sec')
id_epoch = (all_time >= (times.min() - dt)) & (all_time <= (times.max() + dt))
epoch_time = all_time[id_epoch]
self.time = times
r = ephem[self.body]['HEEQ']['radius'][id_epoch]
self.r = np.interp(times.jd, epoch_time.jd, r)
self.r = (self.r * u.km).to(u.solRad)
lon = np.deg2rad(ephem[self.body]['HEEQ']['longitude'][id_epoch])
lon = np.unwrap(lon)
self.lon = np.interp(times.jd, epoch_time.jd, lon)
self.lon = _zerototwopi_(self.lon)
self.lon = self.lon * u.rad
lat = np.deg2rad(ephem[self.body]['HEEQ']['latitude'][id_epoch])
self.lat = np.interp(times.jd, epoch_time.jd, lat)
self.lat = self.lat * u.rad
r = ephem[self.body]['CARR']['radius'][id_epoch]
self.r_c = np.interp(times.jd, epoch_time.jd, r)
self.r_c = (self.r_c * u.km).to(u.solRad)
lon = np.deg2rad(ephem[self.body]['CARR']['longitude'][id_epoch])
lon = np.unwrap(lon)
self.lon_c = np.interp(times.jd, epoch_time.jd, lon)
self.lon_c = _zerototwopi_(self.lon_c)
self.lon_c = self.lon_c * u.rad
lat = np.deg2rad(ephem[self.body]['CARR']['latitude'][id_epoch])
self.lat_c = np.interp(times.jd, epoch_time.jd, lat)
self.lat_c = self.lat_c * u.rad
ephem.close()
return
class ConeCME:
"""
A class containing the parameters of a cone model cme.
Attributes:
t_launch: Time of Cone CME launch, in seconds after the start of the simulation.
longitude: Longitude of the CME launch direction, in radians.
v: CME nose speed in km/s.
width: Angular width of the CME, in radians.
initial_height: Initiation height of the CME, in km. Defaults to HUXt inner boundary at 30 solar radii.
radius: Initial radius of the CME, in km.
thickness: Thickness of the CME cone, in km.
        coords: Dictionary containing the radial and longitudinal (for HUXT2D) coordinates of the Cone CME for each
model time step.
"""
# Some decorators for checking the units of input arguments
@u.quantity_input(t_launch=u.s)
@u.quantity_input(longitude=u.deg)
@u.quantity_input(v=(u.km / u.s))
@u.quantity_input(width=u.deg)
@u.quantity_input(thickness=u.solRad)
def __init__(self, t_launch=0.0 * u.s, longitude=0.0 * u.deg, latitude=0.0 * u.deg, v=1000.0 * (u.km / u.s),
width=30.0 * u.deg,
thickness=5.0 * u.solRad):
"""
Set up a Cone CME with specified parameters.
:param t_launch: Time of Cone CME launch, in seconds after the start of the simulation.
:param longitude: HEEQ Longitude of the CME launch direction, in radians.
:param latitude: HEEQ latitude of the CME launch direction, in radians.
:param v: CME nose speed in km/s.
:param width: Angular width of the CME, in degrees.
:param thickness: Thickness of the CME cone, in solar radii
"""
self.t_launch = t_launch # Time of CME launch, after the start of the simulation
lon = _zerototwopi_(longitude.to(u.rad).value) * u.rad
self.longitude = lon # Longitudinal launch direction of the CME
self.latitude = latitude.to(u.rad) # Latitude launch direction of the CME
self.v = v # CME nose speed
self.width = width # Angular width
self.initial_height = 30.0 * u.solRad # Initial height of CME (should match inner boundary of HUXt)
self.radius = self.initial_height * np.tan(self.width / 2.0) # Initial radius of CME
self.thickness = thickness # Extra CME thickness
self.coords = {}
return
def parameter_array(self):
"""
Returns a numpy array of CME parameters. This is used in the numba optimised solvers that don't play nicely
with classes.
"""
cme_parameters = [self.t_launch.to('s').value, self.longitude.to('rad').value, self.latitude.to('rad').value,
self.width.to('rad').value, self.v.value, self.initial_height.to('km').value,
self.radius.to('km').value, self.thickness.to('km').value]
return cme_parameters
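    # Illustrative construction (added sketch, values are arbitrary): the unit
    # decorators above expect astropy quantities, e.g.
    #     cme = ConeCME(t_launch=0.5 * u.day, longitude=0.0 * u.deg,
    #                   latitude=0.0 * u.deg, v=1000 * (u.km / u.s),
    #                   width=30 * u.deg, thickness=5 * u.solRad)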
def _track_1d_(self, model):
"""
Tracks the length of each ConeCME through a 1D HUXt solution in model.
:param model: An instance of HUXt with one model longitude, with solutions for the CME and ambient fields.
:return: updates the ConeCME.coords dictionary of CME coordinates.
"""
# Owens definition of CME in HUXt:
diff = model.v_grid_cme - model.v_grid_amb
cme_bool = diff >= 20 * model.kms
# Workflow: Loop over each CME, track CME through each time step,
# find contours of boundary, save to dict.
self.coords = {j: {'lon_pix': np.array([]) * u.pix, 'r_pix': np.array([]) * u.pix,
'lon': np.array([]) * model.lon.unit, 'r': np.array([]) * model.r.unit} for j in
range(model.nt_out)}
first_frame = True
for j, t in enumerate(model.time_out):
if t < self.t_launch:
continue
cme_bool_t = cme_bool[j, :]
# Center the solution on the CME longitude to avoid edge effects
# measure separate CME regions.
cme_label, n_cme = measure.label(cme_bool_t.astype(int), connectivity=1, background=0, return_num=True)
cme_tags = [i for i in range(1, n_cme + 1)]
if n_cme != 0:
if first_frame:
# Find only the label in the origin region of this CME
# Use a binary mask over the source region of the CME.
target = np.zeros(cme_bool_t.shape)
target[0] = 1
first_frame = False
# Find the CME label that intersects this region
matches_id = []
matches_level = []
for label in cme_tags:
this_label = cme_label == label
overlap = np.sum(np.logical_and(target, this_label))
if overlap > 0:
matches_id.append(label)
matches_level.append(overlap)
if len(matches_id) != 0:
# Check only one match, if not find closest match.
if len(matches_id) == 1:
match_id = matches_id[0]
else:
print("Warning, multiple matches found, selecting match with greatest target overlap")
match_id = matches_id[np.argmax(matches_level)]
cme_id = cme_label == match_id
r_pix = np.argwhere(cme_id)
self.coords[j]['r_pix'] = r_pix * u.pix
self.coords[j]['r'] = np.interp(r_pix, np.arange(0, model.nr), model.r)
# Longitude is fixed, but adding it in here helps with saving and plotting routines.
self.coords[j]['lon_pix'] = np.zeros(r_pix.shape) * u.pix
self.coords[j]['lon'] = np.ones(r_pix.shape) * model.lon
# Update the target, so next iteration finds CME that overlaps with this frame.
target = cme_id.copy()
return
def _track_2d_(self, model):
"""
Tracks the perimeter of each ConeCME through the HUXt solution in model.
:param model: An HUXt instance, solving for multiple longitudes, with solutions for the CME and ambient fields.
:return: updates the ConeCME.coords dictionary of CME coordinates.
"""
# Owens definition of CME in HUXt:
diff = model.v_grid_cme - model.v_grid_amb
cme_bool = diff >= 20 * model.kms
# Find index of middle longitude for centering arrays on the CMEs
id_mid_lon = np.argmin(np.abs(model.lon - np.median(model.lon)))
# Workflow: Loop over each CME, center model solution on CME source lon,
# track CME through each time step, find contours of boundary, save to dict.
# Get index of CME longitude
id_cme_lon = np.argmin(np.abs(model.lon - self.longitude))
self.coords = {j: {'lon_pix': np.array([]) * u.pix, 'r_pix': np.array([]) * u.pix,
'lon': np.array([]) * model.lon.unit, 'r': np.array([]) * model.r.unit} for j in
range(model.nt_out)}
first_frame = True
for j, t in enumerate(model.time_out):
if t < self.t_launch:
continue
cme_bool_t = cme_bool[j, :, :]
# Center the solution on the CME longitude to avoid edge effects
center_shift = id_mid_lon - id_cme_lon
cme_bool_t = np.roll(cme_bool_t, center_shift, axis=1)
# measure separate CME regions.
cme_label, n_cme = measure.label(cme_bool_t.astype(int), connectivity=1, background=0, return_num=True)
cme_tags = [i for i in range(1, n_cme + 1)]
if n_cme != 0:
if first_frame:
# Find only the label in the origin region of this CME
# Use a binary mask over the source region of the CME.
target = np.zeros(cme_bool_t.shape)
half_width = self.width / (2 * model.dlon)
left_edge = np.int32(id_mid_lon - half_width)
right_edge = np.int32(id_mid_lon + half_width)
target[0, left_edge:right_edge] = 1
first_frame = False
# Find the CME label that intersects this region
matches_id = []
matches_level = []
for label in cme_tags:
this_label = cme_label == label
overlap = np.sum(np.logical_and(target, this_label))
if overlap > 0:
matches_id.append(label)
matches_level.append(overlap)
if len(matches_id) != 0:
# Check only one match, if not find closest match.
if len(matches_id) == 1:
match_id = matches_id[0]
else:
print("Warning, multiple matches found, selecting match with greatest target overlap")
match_id = matches_id[np.argmax(matches_level)]
# Find the coordinates of this region and store
cme_id = cme_label == match_id
# Fill holes in the labelled region
cme_id_filled = ndi.binary_fill_holes(cme_id)
coords = measure.find_contours(cme_id_filled, 0.5)
# Contour can be broken at inner and outer boundary, so stack broken contours
if len(coords) == 1:
coord_array = coords[0]
elif len(coords) > 1:
coord_array = np.vstack(coords)
r_pix = coord_array[:, 0]
# Remove centering and correct wraparound indices
lon_pix = coord_array[:, 1] - center_shift
lon_pix[lon_pix < 0] += model.nlon
lon_pix[lon_pix > model.nlon] -= model.nlon
self.coords[j]['lon_pix'] = lon_pix * u.pix
self.coords[j]['r_pix'] = r_pix * u.pix
self.coords[j]['r'] = np.interp(r_pix, np.arange(0, model.nr), model.r)
self.coords[j]['lon'] = np.interp(lon_pix, np.arange(0, model.nlon), model.lon)
# Update the target, so next iteration finds CME that overlaps with this frame.
target = cme_id.copy()
return
class HUXt:
"""
A class containing the HUXt model described in Owens et al. (2020, DOI: 10.1007/s11207-020-01605-3)
Users must specify the solar wind speed boundary condition through either the v_boundary, or cr_num keyword
arguments. Failure to do so defaults to a 400 km/s boundary. v_boundary takes precedence over cr_num, so specifying
both results in only v_boundary being used.
Model coordinate system is HEEQ radius and longitude.
Attributes:
cmes: A list of ConeCME instances used in the model solution.
cr_num: If provided, this gives the Carrington rotation number of the selected period, else 9999.
cr_lon_init: The initial Carrington longitude of Earth at the models initial timestep.
daysec: seconds in a day.
dlon: Longitudinal grid spacing (in radians)
dr: Radial grid spacing (in km).
dt: Model time step (in seconds), set by the CFL condition with v_max and dr.
dt_out: Output model time step (in seconds).
dt_scale: Integer scaling number to set the model output time step relative to the models CFL time step.
dtdr: Ratio of the model time step and radial grid step (in seconds/km).
kms: astropy.unit instance of km/s.
lon: Array of model longitudes (in radians).
lon_grid: Array of longitudinal coordinates meshed with the radial coordinates (in radians).
nlon: Number of longitudinal grid points.
nr: Number of radial grid points.
Nt: Total number of model time steps, including spin up.
nt_out: Number of output model time steps.
r_accel: Scale parameter determining the residual solar wind acceleration.
r: Radial grid (in km).
r_grid: Array of radial coordinates meshed with the longitudinal coordinates (in km).
rrel: Radial grid relative to first grid point (in km).
simtime: Simulation time (in seconds).
synodic_period: Solar Synodic rotation period from Earth (in seconds).
time: Array of model time steps, including spin up (in seconds).
time_init: The UTC time corresponding to the initial Carrington rotation number and longitude. Else, NaN.
time_out: Array of output model time steps (in seconds).
twopi: two pi radians
v_boundary: Inner boundary solar wind speed profile (in km/s).
v_grid_amb: Array of ambient model solution excluding ConeCMEs for each time, radius, and longitude (in km/s).
v_grid_cme: Array of model solution including ConeCMEs for each time, radius, and longitude (in km/s).
v_max: Maximum model speed (in km/s), used with the CFL condition to set the model time step.
"""
# Decorators to check units on input arguments
@u.quantity_input(v_boundary=(u.km / u.s))
@u.quantity_input(simtime=u.day)
@u.quantity_input(cr_lon_init=u.deg)
def __init__(self, v_boundary=np.NaN * (u.km / u.s), cr_num=np.NaN, cr_lon_init=360.0 * u.deg,
r_min=30 * u.solRad, r_max=240 * u.solRad,
lon_out=np.NaN * u.rad, lon_start=np.NaN * u.rad, lon_stop=np.NaN * u.rad,
simtime=5.0 * u.day, dt_scale=1.0, map_inwards=False):
"""
Initialise the HUXt instance.
:param v_boundary: Inner solar wind speed boundary condition. Must be an array of size 128 with units of km/s.
:param cr_num: Integer Carrington rotation number. Used to lookup the longitudinal solar wind speed profile
at the solar equator from HelioMAS. This is then used as the inner boundary condition.
:param cr_lon_init: Carrington longitude of Earth at model initialisation, in degrees.
:param lon_out: A specific single longitude to compute HUXt solution along.
:param lon_start: The first longitude (in a clockwise sense) of the longitude range to solve HUXt over.
:param lon_stop: The last longitude (in a clockwise sense) of the longitude range to solve HUXt over.
:param r_min: The radial inner boundary distance of HUXt.
:param r_max: The radial outer boundary distance of HUXt.
:param simtime: Duration of the simulation window, in days.
:param dt_scale: Integer scaling number to set the model output time step relative to the models CFL time.
:param map_inwards: Boolean, determines whether map_v_boundary_inwards is used to estimate boundary speed at
distances inwards of 30Rs
"""
# some constants and units
constants = huxt_constants()
self.twopi = constants['twopi']
self.daysec = constants['daysec']
self.kms = constants['kms']
self.alpha = constants['alpha'] # Scale parameter for residual SW acceleration
self.r_accel = constants['r_accel'] # Spatial scale parameter for residual SW acceleration
self.synodic_period = constants['synodic_period'] # Solar Synodic rotation period from Earth.
self.v_max = constants['v_max']
del constants
# Extract paths of figure and data directories
dirs = _setup_dirs_()
self._boundary_dir_ = dirs['boundary_conditions']
self._data_dir_ = dirs['HUXt_data']
self._figure_dir_ = dirs['HUXt_figures']
self._ephemeris_file = dirs['ephemeris']
# Setup radial coordinates - in solar radius
self.r, self.dr, self.rrel, self.nr = radial_grid(r_min=r_min, r_max=r_max)
self.buffertime = ((5.0 * u.day) / (210 * u.solRad)) * self.rrel[-1]
# Setup longitude coordinates - in radians.
self.lon, self.dlon, self.nlon = longitude_grid(lon_out=lon_out, lon_start=lon_start, lon_stop=lon_stop)
# Setup time coords - in seconds
self.simtime = simtime.to('s') # number of days to simulate (in seconds)
self.dt_scale = dt_scale * u.dimensionless_unscaled
time_grid_dict = time_grid(self.simtime, self.dt_scale)
self.dtdr = time_grid_dict['dtdr']
self.Nt = time_grid_dict['Nt']
self.dt = time_grid_dict['dt']
self.time = time_grid_dict['time']
self.nt_out = time_grid_dict['nt_out']
self.dt_out = time_grid_dict['dt_out']
self.time_out = time_grid_dict['time_out']
del time_grid_dict
# Check cr_lon_init, make sure in 0-2pi range.
self.cr_lon_init = cr_lon_init.to('rad')
if (self.cr_lon_init < 0.0 * u.rad) | (self.cr_lon_init > self.twopi * u.rad):
print("Warning: cr_lon_init={}, outside expected range. Rectifying to 0-2pi.".format(self.cr_lon_init))
self.cr_lon_init = _zerototwopi_(self.cr_lon_init.value) * u.rad
# Determine the boundary conditions from input v_boundary and cr_num, and cr_lon_init
if np.all(np.isnan(v_boundary)) & np.isnan(cr_num):
print("Warning: No boundary conditions supplied. Defaulting to 400 km/s boundary")
self.v_boundary = 400 * np.ones(128) * self.kms
self.cr_num = 9999 * u.dimensionless_unscaled
elif not np.all(np.isnan(v_boundary)):
assert v_boundary.size == 128
self.v_boundary = v_boundary
if np.isnan(cr_num):
# Set dummy number for cr_num
self.cr_num = 9999 * u.dimensionless_unscaled
else:
self.cr_num = cr_num * u.dimensionless_unscaled
elif not np.isnan(cr_num):
# Find and load in the boundary condition file
self.cr_num = cr_num * u.dimensionless_unscaled
cr_tag = "CR{:03d}.hdf5".format(np.int32(self.cr_num.value))
boundary_file = os.path.join(self._boundary_dir_, cr_tag)
if os.path.exists(boundary_file):
data = h5py.File(boundary_file, 'r')
self.v_boundary = data['v_boundary'] * u.Unit(data['v_boundary'].attrs['unit'])
data.close()
else:
print("Warning: {} not found. Defaulting to 400 km/s boundary".format(boundary_file))
self.v_boundary = 400 * np.ones(128) * self.kms
# Keep a protected version that isn't processed for use in saving/loading model runs
self._v_boundary_init_ = self.v_boundary.copy()
if map_inwards:
self._map_inwards_ = 1.0 * u.dimensionless_unscaled
else:
self._map_inwards_ = 0.0 * u.dimensionless_unscaled
if map_inwards:
# Assumes inner boundary was specified at 30 Rs, which is true for default Carrington maps.
r_outer = 30 * u.solRad
r_inner = self.r.min()
self.v_boundary = map_v_boundary_inwards(self.v_boundary, r_outer, r_inner)
# Rotate the boundary condition as required by cr_lon_init.
if self.cr_lon_init != 360 * u.rad:
lon_boundary, dlon, nlon = longitude_grid()
lon_shifted = _zerototwopi_((lon_boundary - self.cr_lon_init).value)
id_sort = np.argsort(lon_shifted)
lon_shifted = lon_shifted[id_sort]
v_b_shifted = self.v_boundary[id_sort]
self.v_boundary = np.interp(lon_boundary.value, lon_shifted, v_b_shifted, period=self.twopi)
# Compute model UTC initialisation time, if using Carrington map boundary.
if self.cr_num.value != 9999:
cr_frac = self.cr_num.value + ((self.twopi - self.cr_lon_init.value) / self.twopi)
self.time_init = sun.carrington_rotation_time(cr_frac)
else:
self.time_init = np.NaN
# Preallocate space for the output for the solar wind fields for the cme and ambient solution.
self.v_grid_cme = np.zeros((self.nt_out, self.nr, self.nlon)) * self.kms
self.v_grid_amb = np.zeros((self.nt_out, self.nr, self.nlon)) * self.kms
# Mesh the spatial coordinates.
self.lon_grid, self.r_grid = np.meshgrid(self.lon, self.r)
# Empty dictionary for storing the coordinates of CME boundaries.
self.cmes = []
# Numpy array of model parameters for parsing to external functions that use numba
self.model_params = np.array([self.dtdr.value, self.alpha.value, self.r_accel.value,
self.dt_scale.value, self.nt_out, self.nr, self.nlon,
self.r[0].to('km').value])
return
def solve(self, cme_list, save=False, tag=''):
"""
Solve HUXt for the provided boundary conditions and cme list
:param cme_list: A list of ConeCME instances to use in solving HUXt
:param save: Boolean, if True saves model output to HDF5 file
:param tag: String, appended to the filename of the saved solution.
Returns:
"""
# Check only cone cmes in cme list
cme_list_checked = []
for cme in cme_list:
if isinstance(cme, ConeCME):
cme_list_checked.append(cme)
else:
print("Warning: cme_list contained objects other than ConeCME instances. These will be excluded")
self.cmes = cme_list_checked
# If CMEs parsed, get an array of their parameters for using with the solver (which doesn't do classes)
if len(self.cmes) > 0:
cme_params = [cme.parameter_array() for cme in self.cmes]
cme_params = np.array(cme_params)
# Sort the CMEs in launch order.
id_sort = np.argsort(cme_params[:, 0])
cme_params = cme_params[id_sort]
do_cme = 1
else:
do_cme = 0
cme_params = np.NaN * np.zeros((1, 8))
buffersteps = np.fix(self.buffertime.to(u.s) / self.dt)
buffertime = buffersteps * self.dt
model_time = np.arange(-buffertime.value, (self.simtime.to('s') + self.dt).value, self.dt.value) * self.dt.unit
dlondt = self.twopi * self.dt / self.synodic_period
all_lons, dlon, nlon = longitude_grid()
# How many radians of Carrington rotation in this simulation length
simlon = self.twopi * self.simtime / self.synodic_period
# How many radians of Carrington rotation in the spin up period
bufferlon = self.twopi * buffertime / self.synodic_period
# Loop through model longitudes and solve each radial profile.
for i in range(self.lon.size):
if self.lon.size == 1:
lon_out = self.lon.value
else:
lon_out = self.lon[i].value
# Find the Carrington longitude range spanned by the spin up and simulation period,
# centered on simulation longitude
lon_start = (lon_out - simlon - dlondt)
lon_stop = (lon_out + bufferlon)
lonint = np.arange(lon_start, lon_stop, dlondt)
# Rectify so that it is between 0 - 2pi
loninit = _zerototwopi_(lonint)
# Interpolate the inner boundary speed to this higher resolution
vinit = np.interp(loninit, all_lons.value, self.v_boundary.value, period=2 * np.pi)
# convert from Carrington longitude to time by reversing the order of the series
vinput = np.flipud(vinit)
v_amb, v_cme = solve_radial(vinput, model_time, self.rrel.value, lon_out, self.model_params, do_cme,
cme_params)
self.v_grid_amb[:, :, i] = v_amb * self.kms
self.v_grid_cme[:, :, i] = v_cme * self.kms
# Update CMEs positions by tracking through the solution.
updated_cmes = []
for cme in self.cmes:
if self.lon.size == 1:
cme._track_1d_(self)
elif self.lon.size > 1:
cme._track_2d_(self)
updated_cmes.append(cme)
self.cmes = updated_cmes
if save:
if tag == '':
print("Warning, blank tag means file likely to be overwritten")
self.save(tag=tag)
return
def save(self, tag=''):
"""
Save model output to a HDF5 file.
:param tag: identifying string to append to the filename
:return out_filepath: Full path to the saved file.
"""
# Open up hdf5 data file for the HI flow stats
filename = "HUXt_CR{:03d}_{}.hdf5".format(np.int32(self.cr_num.value), tag)
out_filepath = os.path.join(self._data_dir_, filename)
if os.path.isfile(out_filepath):
# File exists, so delete and start new.
print("Warning: {} already exists. Overwriting".format(out_filepath))
os.remove(out_filepath)
out_file = h5py.File(out_filepath, 'w')
# Save the Cone CME parameters to a new group.
allcmes = out_file.create_group('ConeCMEs')
for i, cme in enumerate(self.cmes):
cme_name = "ConeCME_{:02d}".format(i)
cmegrp = allcmes.create_group(cme_name)
for k, v in cme.__dict__.items():
if k != "coords":
dset = cmegrp.create_dataset(k, data=v.value)
dset.attrs['unit'] = v.unit.to_string()
out_file.flush()
# Now handle the dictionary of CME boundary coordinates coords > time_out > position
if k == "coords":
coordgrp = cmegrp.create_group(k)
for time, position in v.items():
time_label = "t_out_{:03d}".format(time)
timegrp = coordgrp.create_group(time_label)
for pos_label, pos_data in position.items():
dset = timegrp.create_dataset(pos_label, data=pos_data.value)
dset.attrs['unit'] = pos_data.unit.to_string()
out_file.flush()
# Loop over the attributes of model instance and save select keys/attributes.
keys = ['cr_num', 'cr_lon_init', 'simtime', 'dt', 'v_max', 'r_accel', 'alpha',
'dt_scale', 'time_out', 'dt_out', 'r', 'dr', 'lon', 'dlon', 'r_grid', 'lon_grid',
'v_grid_cme', 'v_grid_amb', 'v_boundary', '_v_boundary_init_', '_map_inwards_']
for k, v in self.__dict__.items():
if k in keys:
dset = out_file.create_dataset(k, data=v.value)
dset.attrs['unit'] = v.unit.to_string()
# Add on the dimensions of the spatial grids
if k in ['r_grid', 'lon_grid']:
dset.dims[0].label = 'radius'
dset.dims[1].label = 'longitude'
# Add on the dimensions of the output speed fields.
if k in ['v_grid_cme', 'v_grid_amb']:
dset.dims[0].label = 'time'
dset.dims[1].label = 'radius'
dset.dims[2].label = 'longitude'
out_file.flush()
out_file.close()
return out_filepath
@u.quantity_input(time=u.day)
def plot(self, time, field='cme', save=False, tag=''):
"""
Make a contour plot on polar axis of the solar wind solution at a specific time.
:param time: Time to look up the closest model time to (with an astropy.unit of time).
:param field: String, either 'cme', or 'ambient', specifying which solution to plot.
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return fig: Figure handle.
:return ax: Axes handle.
"""
if field not in ['cme', 'ambient']:
print("Error, field must be either 'cme', or 'ambient'. Default to CME")
field = 'cme'
if (time < self.time_out.min()) | (time > (self.time_out.max())):
print("Error, input time outside span of model times. Defaulting to closest time")
id_t = np.argmin(np.abs(self.time_out - time))
# Get plotting data
lon_arr, dlon, nlon = longitude_grid()
lon, rad = np.meshgrid(lon_arr.value, self.r.value)
if field == 'cme':
v_sub = self.v_grid_cme.value[id_t, :, :].copy()
elif field == 'ambient':
v_sub = self.v_grid_amb.value[id_t, :, :].copy()
# Insert into full array
if lon_arr.size != self.lon.size:
v = np.zeros((self.nr, nlon)) * np.NaN
if self.lon.size != 1:
for i, lo in enumerate(self.lon):
id_match = np.argwhere(lon_arr == lo)[0][0]
v[:, id_match] = v_sub[:, i]
else:
print('Warning: trying to contour a single radial solution will fail.')
else:
v = v_sub
# Pad out to fill the full 2pi of contouring
pad = lon[:, 0].reshape((lon.shape[0], 1)) + self.twopi
lon = np.concatenate((lon, pad), axis=1)
pad = rad[:, 0].reshape((rad.shape[0], 1))
rad = np.concatenate((rad, pad), axis=1)
pad = v[:, 0].reshape((v.shape[0], 1))
v = np.concatenate((v, pad), axis=1)
mymap = mpl.cm.viridis
mymap.set_over('lightgrey')
mymap.set_under([0, 0, 0])
dv = 10
levels = np.arange(200, 800 + dv, dv)
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw={"projection": "polar"})
cnt = ax.contourf(lon, rad, v, levels=levels, cmap=mymap, extend='both')
# Add on CME boundaries
if field == 'cme':
cme_colors = ['r', 'c', 'm', 'y', 'deeppink', 'darkorange']
for j, cme in enumerate(self.cmes):
cid = np.mod(j, len(cme_colors))
ax.plot(cme.coords[id_t]['lon'], cme.coords[id_t]['r'], '-', color=cme_colors[cid], linewidth=3)
# Add on observers if looking at a Carrington rotation.
if self.cr_num.value != 9999:
for body, style in zip(['EARTH', 'VENUS', 'MERCURY', 'STA', 'STB'], ['co', 'mo', 'ko', 'rs', 'y^']):
obs = self.get_observer(body)
ax.plot(obs.lon[id_t], obs.r[id_t], style, markersize=16, label=body)
# Add on a legend.
fig.legend(ncol=5, loc='lower center', frameon=False, handletextpad=0.2, columnspacing=1.0)
ax.set_ylim(0, self.r.value.max())
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.patch.set_facecolor('slategrey')
fig.subplots_adjust(left=0.05, bottom=0.16, right=0.95, top=0.99)
# Add color bar
pos = ax.get_position()
dw = 0.005
dh = 0.045
left = pos.x0 + dw
bottom = pos.y0 - dh
wid = pos.width - 2 * dw
cbaxes = fig.add_axes([left, bottom, wid, 0.03])
cbar1 = fig.colorbar(cnt, cax=cbaxes, orientation='horizontal')
cbar1.set_label("Solar Wind Speed (km/s)")
cbar1.set_ticks(np.arange(200, 900, 100))
# Add label
label = "Time: {:3.2f} days".format(self.time_out[id_t].to(u.day).value)
fig.text(0.675, pos.y0, label, fontsize=16)
label = "HUXt2D"
fig.text(0.175, pos.y0, label, fontsize=16)
if save:
cr_num = np.int32(self.cr_num.value)
filename = "HUXt_CR{:03d}_{}_frame_{:03d}.png".format(cr_num, tag, id_t)
filepath = os.path.join(self._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
def animate(self, field, tag):
"""
Animate the model solution, and save as an MP4.
:param field: String, either 'cme', or 'ambient', specifying which solution to animate.
:param tag: String to append to the filename of the animation.
"""
if field not in ['cme', 'ambient']:
print("Error, field must be either 'cme', or 'ambient'. Default to CME")
field = 'cme'
# Set the duration of the movie
# Scaled so a 5 day simulation with dt_scale=4 is a 10 second movie.
duration = self.simtime.value * (10 / 432000)
def make_frame(t):
"""
Produce the frame required by MoviePy.VideoClip.
:param t: time through the movie
"""
# Get the time index closest to this fraction of movie duration
i = np.int32((self.nt_out - 1) * t / duration)
fig, ax = self.plot(self.time_out[i], field)
frame = mplfig_to_npimage(fig)
plt.close('all')
return frame
cr_num = np.int32(self.cr_num.value)
filename = "HUXt_CR{:03d}_{}_movie.mp4".format(cr_num, tag)
filepath = os.path.join(self._figure_dir_, filename)
animation = mpy.VideoClip(make_frame, duration=duration)
animation.write_videofile(filepath, fps=24, codec='libx264')
return
def plot_radial(self, time, lon, field='cme', save=False, tag=''):
"""
Plot the radial solar wind profile at model time closest to specified time.
:param time: Time (in seconds) to find the closest model time step to.
:param lon: The model longitude of the selected radial to plot.
:param field: String, either 'cme', 'ambient', or 'both' specifying which solution to plot.
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return: fig: Figure handle
:return: ax: Axes handle
"""
if field not in ['cme', 'ambient', 'both']:
print("Error, field must be either 'cme', or 'ambient'. Default to cme")
field = 'cme'
if (time < self.time_out.min()) | (time > (self.time_out.max())):
print("Error, input time outside span of model times. Defaulting to closest time")
id_t = np.argmin(np.abs(self.time_out - time))
time = self.time_out[id_t]
if self.lon.size != 1:
if (lon < self.lon.min()) | (lon > (self.lon.max())):
print("Error, input lon outside range of model longitudes. Defaulting to closest longitude")
id_lon = np.argmin(np.abs(self.lon - lon))
lon = self.lon[id_lon]
fig, ax = plt.subplots(figsize=(14, 7))
# Get plotting data
id_t = np.argmin(np.abs(self.time_out - time))
time_out = self.time_out[id_t].to(u.day).value
if self.lon.size == 1:
id_lon = 0
lon_out = self.lon.value
else:
id_lon = np.argmin(np.abs(self.lon - lon))
lon_out = self.lon[id_lon].to(u.deg).value
if field == 'cme':
label = 'Cone Run'
ax.plot(self.r, self.v_grid_cme[id_t, :, id_lon], 'k-', label=label)
elif field == 'ambient':
label = 'Ambient'
ax.plot(self.r, self.v_grid_amb[id_t, :, id_lon], '--', color='slategrey', label=label)
elif field == 'both':
label = 'Cone Run'
ax.plot(self.r, self.v_grid_cme[id_t, :, id_lon], 'k-', label=label)
label = 'Ambient'
ax.plot(self.r, self.v_grid_amb[id_t, :, id_lon], '--', color='slategrey', label=label)
# Plot the CME points on if needed
if field in ['cme', 'both']:
cme_colors = ['r', 'c', 'm', 'y', 'deeppink', 'darkorange']
for c, cme in enumerate(self.cmes):
cc = np.mod(c, len(cme_colors))
id_r = np.int32(cme.coords[id_t]['r_pix'].value)
label = "CME {:02d}".format(c)
ax.plot(self.r[id_r], self.v_grid_cme[id_t, id_r, id_lon], '.', color=cme_colors[cc], label=label)
ax.set_ylim(250, 1500)
ax.set_ylabel('Solar Wind Speed (km/s)')
ax.set_xlim(self.r.value.min(), self.r.value.max())
ax.set_xlabel('Radial distance ($R_{sun}$)')
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95)
# Add label
time_label = " Time: {:3.2f} days".format(time_out)
lon_label = " Lon: {:3.2f}$^\circ$".format(lon_out)
label = "HUXt" + time_label + lon_label
ax.set_title(label, fontsize=20)
ax.legend(loc=1)
if save:
cr_num = np.int32(self.cr_num.value)
lon_tag = "{}deg".format(lon.to(u.deg).value)
filename = "HUXt_CR{:03d}_{}_{}_radial_profile_lon_{}_frame_{:03d}.png".format(cr_num, tag, field, lon_tag,
id_t)
filepath = os.path.join(self._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
def plot_timeseries(self, radius, lon, field='cme', save=False, tag=''):
"""
Plot the solar wind model timeseries at model radius and longitude closest to those specified.
:param radius: Radius to find the closest model radius to.
:param lon: Longitude to find the closest model longitude to.
:param field: String, either 'cme', 'ambient', or 'both' specifying which solution to plot.
:param save: Boolean to determine if the figure is saved.
:param tag: String to append to the filename if saving the figure.
:return: fig: Figure handle
:return: ax: Axes handle
"""
if field not in ['cme', 'ambient', 'both']:
print("Error, field must be either 'cme', or 'ambient'. Default to cme")
field = 'cme'
if (radius < self.r.min()) | (radius > (self.r.max())):
print("Error, specified radius outside of model radial grid")
if self.lon.size != 1:
if (lon < self.lon.min()) | (lon > (self.lon.max())):
print("Error, input lon outside range of model longitudes. Defaulting to closest longitude")
id_lon = np.argmin(np.abs(self.lon - lon))
lon = self.lon[id_lon]
fig, ax = plt.subplots(figsize=(14, 7))
# Get plotting data
id_r = np.argmin(np.abs(self.r - radius))
r_out = self.r[id_r].value
if self.lon.size == 1:
id_lon = 0
lon_out = self.lon.value
else:
id_lon = np.argmin(np.abs(self.lon - lon))
lon_out = self.lon[id_lon].value
t_day = self.time_out.to(u.day)
if field == 'cme':
label = 'Cone Run'
ax.plot(t_day, self.v_grid_cme[:, id_r, id_lon], 'k-', label=label)
elif field == 'ambient':
label = 'Ambient'
ax.plot(t_day, self.v_grid_amb[:, id_r, id_lon], '--', color='slategrey', label=label)
elif field == 'both':
label = 'Cone Run'
ax.plot(t_day, self.v_grid_cme[:, id_r, id_lon], 'k-', label=label)
label = 'Ambient'
ax.plot(t_day, self.v_grid_amb[:, id_r, id_lon], '--', color='slategrey', label=label)
ax.set_ylim(250, 1500)
ax.set_ylabel('Solar Wind Speed (km/s)')
ax.set_xlim(t_day.value.min(), t_day.value.max())
ax.set_xlabel('Time (days)')
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.95)
# Add label
radius_label = " Radius: {:3.2f}".format(r_out) + "$R_{sun}$ "
lon_label = " Longitude: {:3.2f}".format(lon_out) + "$^\circ$"
label = "HUXt" + radius_label + lon_label
ax.set_title(label, fontsize=20)
ax.legend(loc=1)
if save:
cr_num = np.int32(self.cr_num.value)
r_tag = np.int32(r_out)
lon_tag = np.int32(lon_out)
template_string = "HUXt1D_CR{:03d}_{}_{}_time_series_radius_{:03d}_lon_{:03d}.png"
filename = template_string.format(cr_num, tag, field, r_tag, lon_tag)
filepath = os.path.join(self._figure_dir_, filename)
fig.savefig(filepath)
return fig, ax
def get_observer(self, body):
"""
Returns an instance of the Observer class, giving the HEEQ and Carrington coordinates at each model timestep.
This is only well defined if the model was initialised with a Carrington rotation number.
:param body: String specifying which body to look up. Valid bodies are Earth, Venus, Mercury, STA, and STB.
"""
times = self.time_init + self.time_out
obs = Observer(body, times)
return obs
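# A minimal end-to-end sketch of driving the model (assumptions: this module's imports of numpy
# as np and astropy.units as u, and a config.dat in the working directory; otherwise _setup_dirs_
# falls back to the current directory). It runs the ambient solution only; ConeCME instances
# would be passed in the cme_list argument of solve().
def _example_huxt_run():
    v_boundary = np.ones(128) * 400 * (u.km / u.s)  # uniform 400 km/s inner boundary
    model = HUXt(v_boundary=v_boundary, cr_lon_init=60.0 * u.deg,
                 simtime=5.0 * u.day, dt_scale=4.0)
    model.solve([])  # empty CME list: CME and ambient fields are identical
    fig, ax = model.plot(model.time_out[-1], field='ambient')
    return model, fig, ax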
def huxt_constants():
"""
Return some constants used in all HUXt model classes
"""
twopi = 2.0 * np.pi
daysec = 24 * 60 * 60 * u.s
kms = u.km / u.s
alpha = 0.15 * u.dimensionless_unscaled # Scale parameter for residual SW acceleration
r_accel = 50 * u.solRad # Spatial scale parameter for residual SW acceleration
synodic_period = 27.2753 * daysec # Solar Synodic rotation period from Earth.
v_max = 2000 * kms
dr = 1.5 * u.solRad # Radial grid step. With v_max, this sets the model time step.
constants = {'twopi': twopi, 'daysec': daysec, 'kms': kms, 'alpha': alpha,
'r_accel': r_accel, 'synodic_period': synodic_period, 'v_max': v_max,
'dr': dr}
return constants
@u.quantity_input(r_min=u.solRad)
@u.quantity_input(r_max=u.solRad)
def radial_grid(r_min=30.0 * u.solRad, r_max=240. * u.solRad):
"""
Define the radial grid of the HUXt model. Step size is fixed, but inner and outer boundary may be specified.
:param r_min: The heliocentric distance of the inner radial boundary
:param r_max: The heliocentric distance of the outer radial boundary
"""
if r_min >= r_max:
print("Warning, r_min cannot be less than r_max. Defaulting to r_min=30rs and r_max=240rs")
r_min = 30 * u.solRad
r_max = 240 * u.solRad
if r_min < 5.0 * u.solRad:
print("Warning, r_min should not be less than 5.0rs. Defaulting to 5.0rs")
r_min = 5.0 * u.solRad
if r_max > 400 * u.solRad:
print("Warning, r_max should not be more than 400rs. Defaulting to 400rs")
r_max = 400 * u.solRad
constants = huxt_constants()
dr = constants['dr']
r = np.arange(r_min.value, r_max.value + dr.value, dr.value)
r = r * dr.unit
nr = r.size
rrel = r - r[0]
return r, dr, rrel, nr
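# For example, the defaults r_min=30 solRad and r_max=240 solRad with dr=1.5 solRad give
# nr = 141 grid points, with the last point at exactly 240 solRad (the arange stop of
# r_max + dr makes the outer boundary inclusive).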
@u.quantity_input(lon_out=u.rad)
@u.quantity_input(lon_start=u.rad)
@u.quantity_input(lon_stop=u.rad)
def longitude_grid(lon_out=np.NaN * u.rad, lon_start=np.NaN * u.rad, lon_stop=np.NaN * u.rad):
"""
Define the longitude grid of the HUXt model.
:param lon_out: A single longitude to compute the HUXt solution along. Takes precedence over lon_start and lon_stop.
:param lon_start: The first longitude (in a clockwise sense) of a longitude range
:param lon_stop: The last longitude (in a clockwise sense) of a longitude range
"""
# Check the inputs.
twopi = 2.0 * np.pi
single_longitude = False
longitude_range = False
if np.isfinite(lon_out):
# Select single longitude only. Check in range
if (lon_out < 0 * u.rad) | (lon_out > twopi * u.rad):
lon_out = _zerototwopi_(lon_out.to('rad').value)
lon_out = lon_out * u.rad
single_longitude = True
elif np.isfinite(lon_start) & np.isfinite(lon_stop):
# Select a range of longitudes. Check limits in range.
if (lon_start < 0 * u.rad) | (lon_start > twopi * u.rad):
lon_start = _zerototwopi_(lon_start.to('rad').value)
lon_start = lon_start * u.rad
if (lon_stop < 0 * u.rad) | (lon_stop > twopi * u.rad):
lon_stop = _zerototwopi_(lon_stop.to('rad').value)
lon_stop = lon_stop * u.rad
longitude_range = True
# Form the full longitude grid.
nlon = 128
dlon = twopi / nlon
lon_min_full = dlon / 2.0
lon_max_full = twopi - (dlon / 2.0)
lon, dlon = np.linspace(lon_min_full, lon_max_full, nlon, retstep=True)
lon = lon * u.rad
dlon = dlon * u.rad
# Now get only the selected longitude or range of longitudes
if single_longitude:
# Lon out takes precedence over lon_min and lon_max
id_match = np.argmin(np.abs(lon - lon_out))
lon = lon[id_match]
nlon = lon.size
elif longitude_range:
# Select longitudes lying clockwise between lon_start and lon_stop, allowing the range
# to wrap through 0/2pi.
if lon_start < lon_stop:
id_match = (lon >= lon_start) & (lon <= lon_stop)
elif lon_start > lon_stop:
id_match = (lon >= lon_start) | (lon <= lon_stop)
lon = lon[id_match]
nlon = lon.size
return lon, dlon, nlon
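# Illustrative calls (a sketch): longitude_grid(lon_out=0.0 * u.rad) returns the single grid
# longitude nearest zero, while longitude_grid(lon_start=5.8 * u.rad, lon_stop=0.5 * u.rad)
# returns the subset of the 128-point grid running clockwise from lon_start and wrapping
# through 0/2pi to lon_stop.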
def time_grid(simtime, dt_scale):
"""
Define the model timestep and time grid based on CFL condition and specified simulation time.
:param simtime: The length of the simulation
:param dt_scale: An integer specifying how frequently model timesteps should be saved to output.
"""
constants = huxt_constants()
v_max = constants['v_max']
dr = constants['dr']
dr = dr.to('km')
dt = (dr / v_max).to('s')
dtdr = dt / dr
nt = np.int32(np.floor(simtime.to(dt.unit) / dt)) # number of time steps in the simulation
time = np.arange(0, nt) * dt # Model time steps
dt_out = dt_scale * dt # time step of the output
nt_out = np.int32(nt / dt_scale) # number of time steps in the output
time_out = np.arange(0, nt_out) * dt_out # Output time steps
time_grid_dict = {'dt': dt, 'dtdr': dtdr, 'Nt': nt, 'time': time,
'dt_out': dt_out, 'nt_out': nt_out, 'time_out': time_out}
return time_grid_dict
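# Worked numbers (assuming astropy's solar radius of ~6.957e5 km): dr = 1.5 solRad ~ 1.04e6 km
# and v_max = 2000 km/s give a CFL time step dt ~ 522 s (~8.7 min), so a 5 day simulation uses
# roughly 830 CFL steps and, with dt_scale = 4, an output cadence of ~35 minutes.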
def _setup_dirs_():
"""
Function to pull out the directories of boundary conditions, ephemeris, and to save figures and output data.
"""
# Find the config.dat file path
files = glob.glob('config.dat')
if len(files) != 1:
# If wrong number of config files, guess directories
print('Error: Cannot find correct config file with project directories. Check config.dat exists')
print('Defaulting to current directory')
dirs = {'root': os.getcwd()}
for rel_path in ['boundary_conditions', 'ephemeris', 'HUXt_data', 'HUXt_figures']:
if rel_path == 'ephemeris':
dirs[rel_path] = os.path.join(os.getcwd(), "ephemeris.hdf5")
else:
dirs[rel_path] = os.getcwd()
else:
# Extract data and figure directories from config.dat
with open(files[0], 'r') as file:
lines = file.read().splitlines()
root = lines[0].split(',')[1]
dirs = {line.split(',')[0]: os.path.join(root, line.split(',')[1]) for line in lines[1:]}
# Just check the directories exist.
for val in dirs.values():
if not os.path.exists(val):
print('Error, invalid path, check config.dat: ' + val)
return dirs
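# The config.dat layout implied by the parsing above: one comma-separated pair per line, with the
# first line giving the project root and later lines giving paths relative to it, e.g.
#   root,/path/to/project
#   boundary_conditions,boundary_conditions
#   ephemeris,ephemeris/ephemeris.hdf5
#   HUXt_data,data
#   HUXt_figures,figures
# (directory names here are only illustrative).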
@jit(nopython=True)
def _zerototwopi_(angles):
"""
Function to constrain angles to the 0 - 2pi domain.
:param angles: a numpy array of angles
:return: a numpy array of angles
"""
twopi = 2.0 * np.pi
angles_out = angles
a = -np.floor_divide(angles_out, twopi)
angles_out = angles_out + (a * twopi)
return angles_out
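# Quick check of the wrapping (assumes numpy imported as np): _zerototwopi_(np.array([-0.5, 7.0]))
# gives approximately [5.783, 0.717], i.e. -0.5 maps to 2*pi - 0.5 and 7.0 maps to 7.0 - 2*pi.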
@jit(nopython=True)
def solve_radial(vinput, model_time, rrel, lon, params, do_cme, cme_params):
"""
Solve the radial profile as a function of time (including spinup), and return radial profile at specified
output timesteps.
:param vinput: Timeseries of inner boundary solar wind speeds
:param model_time: Array of model timesteps
:param rrel: Array of model radial coordinates relative to inner boundary coordinate
:param lon: The longitude of this radial
:param params: Array of HUXt parameters
:param do_cme: Boolean, if True any provided ConeCMEs are included in the solution.
:param cme_params: Array of ConeCME parameters to include in the solution. 1 Row for each CME, with columns as
required by _cone_cme_boundary_
Returns:
"""
# Main model loop
# ----------------------------------------------------------------------------------------
dtdr = params[0]
alpha = params[1]
r_accel = params[2]
dt_scale = np.int32(params[3])
nt_out = np.int32(params[4])
nr = np.int32(params[5])
r_boundary = params[7]
lat = 0.0  # Latitude used in computing the ConeCME boundary conditions; this 2D model solves in the equatorial plane.
# Preallocate space for solutions
v_grid_amb = np.zeros((nt_out, nr))
v_grid_cme = np.zeros((nt_out, nr))
iter_count = 0
t_out = 0
for t, time in enumerate(model_time):
# Set the initial condition, which is updated in the loop,
# with snapshots saved to the output arrays at the required steps.
if t == 0:
v_cme = np.ones(nr) * 400
v_amb = np.ones(nr) * 400
# Update the inner boundary conditions
v_amb[0] = vinput[t]
v_cme[0] = vinput[t]
# Compute boundary speed of each CME at this time. Set boundary to the maximum CME speed at this time.
if time > 0:
if do_cme == 1:
n_cme = cme_params.shape[0]
v_update_cme = np.zeros(n_cme)
for i in range(n_cme):
cme = cme_params[i, :]
v_update_cme[i] = _cone_cme_boundary_(r_boundary, lon, lat, time, v_cme[0], cme)
v_cme[0] = v_update_cme.max()
# update cone cme v(r) for the given longitude
# =====================================
u_up = v_cme[1:].copy()
u_dn = v_cme[:-1].copy()
u_up_next = _upwind_step_(u_up, u_dn, dtdr, alpha, r_accel, rrel)
# Save the updated time step
v_cme[1:] = u_up_next.copy()
u_up = v_amb[1:].copy()
u_dn = v_amb[:-1].copy()
u_up_next = _upwind_step_(u_up, u_dn, dtdr, alpha, r_accel, rrel)
# Save the updated time step
v_amb[1:] = u_up_next.copy()
# Save this frame to the output arrays if this is an output time step
if time >= 0:
iter_count = iter_count + 1
if iter_count == dt_scale:
if t_out <= nt_out - 1:
v_grid_amb[t_out, :] = v_amb.copy()
v_grid_cme[t_out, :] = v_cme.copy()
t_out = t_out + 1
iter_count = 0
return v_grid_amb, v_grid_cme
@jit(nopython=True)
def _upwind_step_(v_up, v_dn, dtdr, alpha, r_accel, rrel):
"""
Compute the next step in the upwind scheme of Burgers equation with added acceleration of the solar wind.
:param v_up: A numpy array of the upwind radial values. Units of km/s.
:param v_dn: A numpy array of the downwind radial values. Units of km/s.
:param dtdr: Ratio of HUXts time step and radial grid step. Units of s/km.
:param alpha: Scale parameter for residual Solar wind acceleration.
:param r_accel: Spatial scale parameter of residual solar wind acceleration. Units of km.
:param rrel: The model radial grid relative to the radial inner boundary coordinate. Units of km.
:return: The upwind values at the next time step, numpy array with units of km/s.
"""
# Arguments for computing the acceleration factor
accel_arg = -rrel[:-1] / r_accel
accel_arg_p = -rrel[1:] / r_accel
# Get estimate of next time step
v_up_next = v_up - dtdr * v_up * (v_up - v_dn)
# Compute the probable speed at 30rS from the observed speed at r
v_source = v_dn / (1.0 + alpha * (1.0 - np.exp(accel_arg)))
# Then compute the speed gain between r and r+dr
v_diff = alpha * v_source * (np.exp(accel_arg) - np.exp(accel_arg_p))
# Add the residual acceleration over this grid cell
v_up_next = v_up_next + (v_dn * dtdr * v_diff)
return v_up_next
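# A minimal sketch (not part of the model) of calling _upwind_step_ directly, mirroring the
# argument units used inside solve_radial: speeds in km/s, dtdr in s/km, and r_accel and rrel
# both in solar radii so their ratio is dimensionless. The speed profile below is purely
# illustrative.
def _example_upwind_step():
    consts = huxt_constants()
    dr_km = consts['dr'].to('km')
    dt = (dr_km / consts['v_max']).to('s')           # CFL time step, as in time_grid()
    r, dr, rrel, nr = radial_grid()
    v = np.linspace(350.0, 450.0, nr)                # synthetic inner-to-outer speed ramp, km/s
    v_next = _upwind_step_(v[1:].copy(), v[:-1].copy(), (dt / dr_km).value,
                           consts['alpha'].value, consts['r_accel'].value, rrel.value)
    return v_next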
@jit(nopython=True)
def _cone_cme_boundary_(r_boundary, lon, lat, time, v_boundary, cme_params):
"""
Update inner speed boundary condition with the time dependent cone cme speed, for HUXt1D.
:param r_boundary: Height of model inner boundary.
:param lon: A HEEQ longitude, in radians.
:param lat: A HEEQ latitude, in radians.
:param time: Model time step, in seconds
:param v_boundary: Array of the ambient solar wind speed inner boundary condition, in km/s
:param cme_params: An array containing the cme parameters
:return: The updated solar wind speed at the inner boundary, in km/s.
"""
cme_t_launch = cme_params[0]
cme_lon = cme_params[1]
cme_lat = cme_params[2]
cme_width = cme_params[3]
cme_v = cme_params[4]
# cme_initial_height = cme_params[5]
cme_radius = cme_params[6]
cme_thickness = cme_params[7]
# Center the longitude array on CME nose, running from -pi to pi, to avoid dealing with any 0/2pi crossings
lon_cent = lon - cme_lon
if lon_cent > np.pi:
lon_cent = 2.0 * np.pi - lon_cent
if lon_cent < -np.pi:
lon_cent = lon_cent + 2.0 * np.pi
lat_cent = lat - cme_lat
if lat_cent > np.pi:
lat_cent = 2.0 * np.pi - lat_cent
if lat_cent < -np.pi:
lat_cent = lat_cent + 2.0 * np.pi
# Compute great circle distance from nose to input latitude
# sigma = np.arccos(np.sin(lon)*np.sin(cme_lon) + np.cos(lat)*np.cos(cme_lat)*np.cos(lon - cme_lon))
sigma = np.arccos(
|
np.cos(lat_cent)
|
numpy.cos
|
import logging
import numpy as np
import pandas as pd
from . import snpmatch
from . import parsers
from . import snp_genotype
log = logging.getLogger(__name__)
def simulateSNPs(g, AccID, numSNPs, outFile=None, err_rate=0.001):
assert type(AccID) is str, "provide Accession ID as a string"
assert AccID in g.g.accessions, "accession is not present in the matrix!"
AccToCheck = np.where(g.g.accessions == AccID)[0][0]
log.info("loading input files")
acc_snp = g.g_acc.snps[:,AccToCheck]
informative_snps = np.where(acc_snp >= 0)[0] ## Removing NAs for accession
input_df = pd.DataFrame(np.column_stack((np.array(g.g.chromosomes)[informative_snps], g.g.positions[informative_snps], acc_snp[informative_snps] )), columns = ["chr", 'pos', 'snp'])
## Input -- pandas dataframe with chr, position and genotype
#assert type(input_df) == pd.core.frame.DataFrame, "please provide a pandas dataframe"
#assert input_df.shape[1] >= 3, "first three columns are needed in dataframe: chr, pos, snp"
## default error rates = 0.001
log.info("sampling %s positions" % numSNPs)
sampleSNPs = np.sort(np.random.choice(
|
np.arange(input_df.shape[0])
|
numpy.arange
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
class NaiveBayesFromScratch():
def __init__(self, X, y):
self.num_examples, self.num_features = X.shape
self.num_classes = len(np.unique(y))
def fit(self, X, y):
self.classes_mean = {}
self.classes_variance = {}
self.classes_prior = {}
for c in range(self.num_classes):
X_c = X[y == c]
self.classes_mean[str(c)] = np.mean(X_c, axis=0)
self.classes_variance[str(c)] = np.var(X_c, axis=0)
self.classes_prior[str(c)] = X_c.shape[0] / X.shape[0]
def predict(self, X):
probs =
|
np.zeros((X.shape[0], self.num_classes))
|
numpy.zeros
|
"""
Class Statistics for basic statistical analysis of labeled (segmented) data.
# Author: <NAME> (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import object
#from past.utils import old_div
__version__ = "$Revision$"
import scipy
import scipy.ndimage as ndimage
import numpy
class Statistics(object):
"""
Basic statistical analysis of labeled (segmented) data.
Basic usage for calculating statistics:
st = Statistics()
st.calculate(data=data_array, labels=labels_array, ids=[1,3,7])
The results (mean, std, min, max, minPos, maxPos) are stored in arrays
with the same names (mean, std, ...). The individual values can be obtained
as: st.mean[id], st.std[id], ... and the total values (all segments taken
together) are in st.mean[0], st.std[0], ... .
Slightly more complicated usage:
st = Statistics(data=data_array, labels=labels_array, ids=[1,3,7])
st.calculate(centers=array_of_positions)
In addition to the above results, positions of min/max in respect to
centers are calculated in cartesian (minVec, maxVec) and spherical
coordinates if appropriate (minPhi, maxPhi in 2-3d, and minTheta,
maxTheta in 3d).
Even more complicated:
st = Statistics(data=data_array, labels=labels_array, ids=[1,3,7],
sliceCoord=array_of_positions, axis=1)
st.calculate(centers=array_of_positions)
The same results are calculated as above, but instead of segments as
specified in labels, each segment is restricted to a ndim-1 dimensional
slice defined by the position given as the corresponding element of
sliceCoord array and axis.
"""
##################################################################
#
# Initialization of data structures and related attributes
#
##################################################################
def __init__(self, data=None, labels=None, ids=None, sliceCoord=None,
axis=0):
"""
Sets self.data, self.labels and related attributes.
Attributes data and labels can be changed using setData method. If
ids are not given, all ids present in labels are considered.
Arrays data and labels are not modified by methods of this class.
If sliceCoord is given, the statistics are not calculated on the
segments (labels), but on (ndim-1 dimensional) slices of labels.
The slice used for a given label is defined by the corresponding
position given in sliceCoord and by axis. sliceCoord and axis
can't be changed.
If ids is a single number, the flag self._numberId is set to True. If ids
is an array (even if it has 0 or 1 elements), self._numberId is set to False.
Arguments:
- data: (ndarray) image to be analyzed
- labels: ndarray that defines segments
- ids: array of ids, or a single int
- sliceCoord: array where each element specifies coordinates of
- axis:
"""
# initial values
self.calculated = None
self.data = None
self.labels = None
self._ids = None
# declare results data structures
self.mean = None
self.std = None
self.min = None
self.max = None
self.minPos = None
self.maxPos = None
self.minVec = None
self.maxVec = None
self.minPhi = None
self.maxPhi = None
self.minTheta = None
self.maxTheta = None
# parse arguments
self.setData(data=data, labels=labels, ids=ids,
sliceCoord=sliceCoord, axis=axis)
def setData(self, data=None, labels=None, ids=None,
sliceCoord=None, axis=0):
"""
Sets self.data, self.labels and related attributes (_maxId, ids,
calculated) and initializes arrays that hold results.
However, inconsistencies may arise if the dimensions of data and labels
are changed. Also, it does not reset the results data structures,
so the results may contain values for both previous and current
data and labels for different ids.
If sliceCoord is given, each segment (from labels) is restricted to
a ndim-1 subarray defined by sliceCoord element corresponding to the
segment and axis. Attribute self.labels is changed to contain only
the ndim-1 dimensional segments.
Arguments:
- data: array to be analyzed
- labels: labels (segmentation) array, default all 1's
- ids: array (or other iterable) of ids. Can be a single int for
1d data only
- sliceCoord: array of positions that (together with axes) define
the ndim-1 dimensional slices of labels
- axis: axis perpendicular to the ndim-1 dimensional slices
"""
# set self.data and self.ndim
if data is not None: self.data = data
try:
self.ndim = self.data.ndim
except AttributeError:
pass
# set self.labels to labels, or ones (if self.data exists)
try:
if labels is not None:
self.labels = labels
elif self.labels is None:
self.labels = numpy.ones(shape=self.data.shape, dtype=int)
except AttributeError:
pass
# set ids, _maxId and calculated
self._setIds(ids)
# set self.labels to a slice through self.labels if needed
if sliceCoord is not None: self.setSlicedLabels(sliceCoord, axis)
def _setIds(self, ids=None):
"""
Sets self._ids (type ndarray) either to ids if ids given, or to
the array of all ids present in self.labels. self._maxId is then set
to the max id of self._ids.
Also sets self._numberId to True if ids is a single int.
Arguments:
- ids: list of ids, or a single int
"""
# set self._ids, self._maxId
if ids is not None:
# from ids
if isinstance(ids, int):
self._numberId = True
ids = [ids]
else:
self._numberId = False
try:
self._ids = numpy.array(ids)
self._maxId = self._ids.max()
except ValueError:
# ids is []
self._ids = numpy.array(ids, dtype='int_')
self._maxId = 0
elif self._ids is None and self.labels is not None:
# from self.labels
try:
all = numpy.unique(self.labels)
self._ids = all.compress(all>0)
self._maxId = self._ids.max()
except (AttributeError, ValueError):
self._ids = numpy.array([], dtype='int_')
self._maxId = 0
# create or enlarge self.calculated
if self._ids is not None:
self._prepareArrays(arrays=('calculated',), dtypes=(bool,))
def reorder(self, order, data=None):
"""
Reorders elements of data array(s).
If data (1d numpy array) is given, its elements are reordered according to
the dictionary order, where keys are old array indices (segment ids) and
values are new array indices (segment ids).
If data is not given, arrays self.volume, self.surface,
self.surfaceData, self.center are reordered in the same way.
Arguments:
- order: dictionary with old (keys) and new ids (values)
- data: array to be reordered
Sets all data attributes (self.mean, self.std, self.min, ...) if data
is None.
Returns (new) reordered array if data is not None.
"""
if data is None:
# reorders all data of this instance
vars = ['mean', 'std', 'min', 'max', 'minPos', 'maxPos', 'minVec',
'maxVec', 'minPhi', 'maxPhi', 'minTheta', 'maxTheta']
for var in vars:
if self.__dict__[var] is not None:
self.__dict__[var] = self.reorder(order=order,
data=self.__dict__[var])
else:
# reorders data array
reordered = data.copy()
reordered[list(order.values())] = data[list(order.keys())]
return reordered
def _prepareArrays(self, arrays, dtypes, widths=0):
"""
Creates or extends 1D and 2D arrays along axis 0.
For each array, if self.array is None, a new array of dimension
self._maxId+1 along axis 0 is created. If an array already exists,
it is extended along axis 0 (the new dimension is a new value
of self._maxId+1).
If an array is created, its data type is taken from dtypes. The new
array is 1d if the corresponding width <= 1, and 2d (dimension along
axis 1 is given by the width) otherwise.
An extended array keeps all the elements of the old one. The new
elements are set to 0. It also keeps the dtype and the shape from the
old array (arguments dtypes and widths are not used).
Arguments:
- arrays: list of attribute names (strings) of the arrays to be
initialized or extended
- dtypes: list of dtypes of arrays (used only for initialization)
- widths: list of (or single int) dimensions of the array along axis 1
For a width <= 1, a 1d array is created, otherwise a 2d array. Used
only for initialization.
"""
# parse widths
if isinstance(widths, int): widths = [widths] * len(arrays)
for [attr, dtp, wid] in zip(arrays, dtypes, widths):
arr = self.__dict__[attr]
if wid <= 1:
# make 1D arrays
if arr is None:
self.__dict__[attr] = numpy.zeros(self._maxId+1, dtype=dtp)
elif self._maxId >= arr.shape[0]:
new =
|
numpy.zeros(self._maxId+1, dtype=arr.dtype)
|
numpy.zeros
|
"""
* MIT License
*
* Copyright (c) 2019 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without
* limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to
* whom the Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
"""
# header files
import numpy as np
import cv2
# function for preprocessing of image
def preprocess_image(frame, camera_matrix, dist_matrix):
# undistort the frame
frame = cv2.undistort(frame, camera_matrix, dist_matrix)
# average blurring
frame = cv2.blur(frame, (3, 3))
# Convert to HLS color space and apply masks
hls = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS).astype(np.float)
lower_white =
|
np.array([0, 200, 0], dtype=np.uint8)
|
numpy.array
|
# mAP and topk recall rate for image retrieval
import numpy as np
import torch
from torch.autograd import Variable
import pdb
def main():
x_query = Variable(torch.rand(3,100))
x_gallery = Variable(torch.rand(9,100))
y_query = Variable(torch.LongTensor([0,1,2]))
y_gallery = Variable(torch.LongTensor([0,0,1,1,1,1,2,2,2]))
test=ImageRetrieval()
result1=test(x_query,x_gallery,y_query,y_gallery)
result2=test.getby_numpy(x_query.data.numpy(),x_gallery.data.numpy(),
y_query.data.numpy(),y_gallery.data.numpy())
print('p={},r={}'.format(result1[0],result1[1]))
print('p={},r={}'.format(result2[0],result2[1]))
class ImageRetrieval:
def __init__(self, topk=10, cuda=False):
self.topk = topk
self.cuda = cuda
def normalize(self, x, tool, axis=None, epsilon=10e-12):
''' Divide the vectors in x by their norms.'''
if axis is None:
axis = len(x.shape) - 1
if tool == 'numpy':
norm = np.linalg.norm(x, axis=axis, keepdims=True)
elif tool == 'torch':
norm = torch.mul(x,x).sum(dim=axis, keepdim=True).sqrt()
x = x / (norm + epsilon)
return x
def __call__(self, x_query, x_gallery, y_query, y_gallery):
x_query = self.normalize(x_query, 'torch')
x_gallery = self.normalize(x_gallery, 'torch')
score_mat = torch.mm(x_query, x_gallery.transpose(1,0))
temp1 = torch.eye(x_query.size(0))
temp2 = torch.ones(x_query.size(0))
score_mask = temp2 - temp1
if self.cuda:
score_mask = score_mask.cuda()
if x_query.size(0) == x_gallery.size(0):
score_mat = torch.mul(score_mask, score_mat)
# compute label matrix
y_query = y_query[:,None]
y_gallery = y_gallery[:,None]
label_mat = y_query==y_gallery.transpose(1,0)
label_mat=label_mat.type(torch.FloatTensor)
# sort scores and labels
_,idx_sorted = torch.sort(-score_mat, dim=1)
tmp_list = [(label_mat[x, idx_sorted[x]])[None,:] for x in range(label_mat.shape[0])]
label_sorted = torch.zeros(label_mat.size())
torch.cat(tmp_list, out=label_sorted)
if self.cuda:
label_sorted = label_sorted.cuda()
if x_query.size(0) == x_gallery.size(0):
label_sorted = torch.mul(score_mask, label_sorted)
label_sorted = Variable(label_sorted, requires_grad=False)
# check the number of matching images
num_positive = torch.sum(label_sorted, dim=1)
idx = num_positive.nonzero()
# compute precision of top positives
if idx.numel() != 0:
precision = torch.zeros(idx.size(0))
precision = Variable(precision, requires_grad=False)
if self.cuda:
precision = precision.cuda()
for i,j in enumerate(idx):
num = float(num_positive[j])
temp = label_sorted[j].nonzero()
den = float(temp[-1][-1])
if den+1 == 0:
pdb.set_trace()
precision[i] = num/(den+1)
precision = torch.mean(precision).item()
else:
precision = 0.0
# compute top k recall
if idx.numel() != 0:
if label_sorted.size(-1) < self.topk:
topk = label_sorted.size(-1)
else:
topk = self.topk
total = torch.sum(label_sorted[idx,:topk].view(-1,topk), dim=1)
num = float(total.nonzero().size(0))
den = float(idx.size(0))
recall = num/den
else:
recall = 0.0
return precision,recall
def getby_numpy(self, x_query, x_gallery, y_query, y_gallery):
x_query = self.normalize(x_query,'numpy')
x_gallery = self.normalize(x_gallery,'numpy')
score_mat = np.dot(x_query,x_gallery.T)
# compute label matrix
y_query = y_query[:,None]
y_gallery = y_gallery[:,None]
label_mat = y_query==y_gallery.T
idx_sorted = np.argsort(-score_mat, axis=1)
label_sorted = [label_mat[x, idx_sorted[x]] for x in range(label_mat.shape[0])]
label_sorted = np.array(label_sorted)
label_sorted = label_sorted.astype(float)
# check the number of matching images
num_positive = np.sum(label_sorted, axis=1)
idx = num_positive.nonzero()
# compute precision of top positives
if len(idx[0]) != 0:
precision = np.zeros((len(idx[0])))
for i,j in enumerate(idx[0]):
num = float(num_positive[j])
temp = label_sorted[j].nonzero()
den = float(temp[0][-1])
precision[i] = num/(den+1)
precision = float(
|
np.mean(precision)
|
numpy.mean
|
from sklearn import svm
import pandas as pd
import numpy as np
import pickle
from sklearn import metrics
from sklearn.model_selection import train_test_split
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-i", "--file", dest="filename", default='data/data_rh_all.csv',
help="PATH of training FILE")
parser.add_argument("-t", "--test", dest="test_size", default=0.3, type=float,
help="Test size. A number between 0 and 1. default value is 0.3")
parser.add_argument("-o", "--output", dest="output_file", default='model_svm',
help="Name of the saved model. default is 'model_svm'. the model will be saved in the 'models' folder with .sav extension")
args = parser.parse_args()
print(f'--> Loading dataset from {args.filename}')
df = pd.read_csv(args.filename, index_col=0)
print('DONE')
# prepare X and y variables
X = np.array(df.iloc[:,:-1])
y =
|
np.array(df['y'])
|
numpy.array
|
#! /usr/bin/env python2.7
"""
Author: <EMAIL>
"""
from __future__ import print_function
import numpy as np
flatten = lambda l: sum(map(flatten, l), []) if isinstance(l,list) else [l]
class Tree():
def __init__(self, bounds, noise_thres, root=False):
self.bounds = bounds
self.children = []
self.root = root
self.threshold = noise_thres
def insert(self, new_bounds, current_thres):
if new_bounds[0] < self.bounds[0] or new_bounds[1] > self.bounds[1]:
raise ValueError("child out of parents bounds")
#print(list(map(lambda x: x.bounds, self.children)))
fitting_child = filter(lambda x: x.bounds[0] <= new_bounds[0] and x.bounds[1] >= new_bounds[1],
self.children)
#fitting_child = list(fitting_child)
# recursive insert
if len(fitting_child) == 1:
fitting_child[0].insert(new_bounds, current_thres)
# or insert here
else:
self.children.append(Tree(new_bounds, current_thres))
#print('inserted bounds ', new_bounds)
def concat(self):
#print(self.bounds, map(lambda x: x.bounds, self.children))
if not self.root:
while len(self.children) == 1:
# only one child in list
new_children = self.children[0].children
# pull it up a layer
self.children = new_children
# and apply it recursively
[child.concat() for child in self.children]
def extendedges(self):
# if im the root myself do nothing
if not self.root and self.children != []:
# only at level two or so
innerbounds = []
for i in range(len(self.children)-1):
innerbounds.append(int(round((self.children[i].bounds[1] + self.children[i+1].bounds[0])/2.)))
new_bounds = [self.bounds[0]] + innerbounds + [self.bounds[1]]
for child, new_bounds in zip(self.children, zip(new_bounds, new_bounds[1:])):
child.bounds = new_bounds
[child.extendedges() for child in self.children]
def toflatList(self, l=[]):
if self.bounds[0] != 0 and self.children == []:
return self.bounds
else:
return flatten([t.toflatList() for t in self.children])
def __str__(self, space=""):
return "{}\n{}".format(space+str(self.bounds)+" [{:4.3e}]".format(self.threshold),
''.join([t.__str__(" "+space) for t in self.children]))
def detect_peak_recursive(array, thres, next_step):
"""
peakfinder with recursion and tree output
:param array:
:param thres: initial noise threshold
:param next_step: function eg lambda thres: thres*step
:return: pt.toflatList(), pt, thresholds
"""
pt = Tree((0,array.shape[0]), thres, root=True)
peaks = detect_peak_simple(array, lthres=thres)
[pt.insert((peak[0], peak[1]), thres) for peak in peaks]
thresholds = [thres]
while True:
thres = next_step(thres)
peaks = detect_peak_simple(array, lthres=thres)
thresholds.append(thres)
[pt.insert((peak[0], peak[1]), thres) for peak in peaks]
if peaks == []:
break
print(pt)
pt.concat()
print(pt)
pt.extendedges()
print(pt)
return pt.toflatList(), pt, thresholds
def detect_peak_simple(array, lthres):
"""
detect noise separated peaks
"""
ind =
|
np.where(array > lthres)
|
numpy.where
|
import numpy as np
import math
from emukit.core import ParameterSpace, ContinuousParameter
def stybtang2():
parameter_space = ParameterSpace([ContinuousParameter('x1', -5, 5), ContinuousParameter('x2', -5, 5)])
return _stybtang, parameter_space
def stybtang5():
parameter_space = ParameterSpace([ContinuousParameter('x1', -5, 5), ContinuousParameter('x2', -5, 5), ContinuousParameter('x3', -5, 5), ContinuousParameter('x4', -5, 5), ContinuousParameter('x5', -5, 5)])
return _stybtang, parameter_space
def stybtang10_scaled():
"""
A single global minimum `H(z) = -39.166166 * d` at `z = [-2.903534]^d`
"""
parameter_space = ParameterSpace([ContinuousParameter('x1', -0.05, 0.05), ContinuousParameter('x2', -0.05, 0.05), ContinuousParameter('x3', -5, 5), ContinuousParameter('x4', -5, 5), ContinuousParameter('x5', -5, 5),
ContinuousParameter('x6', -5, 5), ContinuousParameter('x7', -5, 5), ContinuousParameter('x8', -5, 5), ContinuousParameter('x9', -0.05, 0.05), ContinuousParameter('x10', -5, 5)])
return _stybtang, parameter_space
def stybtang10():
"""
A single global minimum `H(z) = -39.166166 * d` at `z = [-2.903534]^d`
"""
parameter_space = ParameterSpace([ContinuousParameter('x1', -5, 5), ContinuousParameter('x2', -5, 5), ContinuousParameter('x3', -5, 5), ContinuousParameter('x4', -5, 5), ContinuousParameter('x5', -5, 5),
ContinuousParameter('x6', -5, 5), ContinuousParameter('x7', -5, 5), ContinuousParameter('x8', -5, 5), ContinuousParameter('x9', -5, 5), ContinuousParameter('x10', -5, 5)])
return _stybtang, parameter_space
def _stybtang(x):
if len(x.shape) == 1:
y = 0.5 * np.sum(np.power(x, 4) - 16 * np.power(x, 2) + 5 * x)
return y[:,None]
else:
y = 0.5 * np.sum(np.power(x, 4) - 16 * np.power(x, 2) + 5 * x, axis=1)
return y[:,None]
def michalewiczw2():
parameter_space = ParameterSpace([ContinuousParameter('x1', 0, math.pi), ContinuousParameter('x2', 0, math.pi)])
return _michalewicz, parameter_space
def michalewiczw10():
parameter_space = ParameterSpace([ContinuousParameter('x1', 0, math.pi), ContinuousParameter('x2', 0, math.pi), ContinuousParameter('x3', 0, math.pi), ContinuousParameter('x4', 0, math.pi), ContinuousParameter('x5', 0, math.pi),
ContinuousParameter('x6', 0, math.pi), ContinuousParameter('x7', 0, math.pi), ContinuousParameter('x8', 0, math.pi), ContinuousParameter('x9', 0, math.pi), ContinuousParameter('x10', 0, math.pi)])
return _michalewicz, parameter_space
def _michalewicz(x):
assert len(x.shape) == 2, 'x input must be 2 dimensional array'
indx = np.arange(1.0, 1.0 + int(x.shape[1]))
indx = np.expand_dims(indx, 0)
y = -np.sum(np.sin(x) * np.sin(x * indx / np.pi) ** (2 * 10), axis=-1)
return y[:,None]
def Hartmann6():
parameter_space = ParameterSpace([ContinuousParameter('x1', 0, 1), ContinuousParameter('x2', 0, 1), ContinuousParameter('x3', 0, 1), ContinuousParameter('x4', 0, 1), ContinuousParameter('x5', 0, 1),
ContinuousParameter('x6', 0, 1)])
return _Hartmann6, parameter_space
def _Hartmann6(x):
"""
x: N X D
optimal value: -3.32237
optimizer: [(0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573)]
"""
alpha = np.array([1.0, 1.2, 3.0, 3.2])
A = np.array([
[10, 3, 17, 3.5, 1.7, 8],
[0.05, 10, 17, 0.1, 8, 14],
[3, 3.5, 1.7, 10, 17, 8],
[17, 8, 0.05, 10, 0.1, 14],
])
P = np.array([
[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381],
])
x = np.expand_dims(x, axis=-2) # N X 1 X D
A = np.expand_dims(A, axis=0) # 1 X 4 X D
P = np.expand_dims(P, axis=0) # 1 X 4 X D
inner_sum = np.sum(A * (x - 0.0001*P)**2, axis =-1) # N X 4
alpha =
|
np.expand_dims(alpha, axis=0)
|
numpy.expand_dims
|
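# Illustrative check (not part of the original module) of the optimum documented above for
# the Styblinski-Tang function: H(z) = -39.166166 * d at z = [-2.903534]^d. The formula is
# the same one used in _stybtang.
import numpy as np

d = 5
z = np.full((1, d), -2.903534)
y = 0.5 * np.sum(np.power(z, 4) - 16 * np.power(z, 2) + 5 * z, axis=1)
print(y)               # approximately [-195.83]
print(-39.166166 * d)  # -195.83083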
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Apply a NN to predict MW and Dmax.
Data are first resampled to get an estimate of the uncertainties.
"""
import argparse
import logging
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description='Apply NN model.')
parser.add_argument('type', type=str, help='p (protein), idp (intrinsically disordered protein) or na '
                                           '(nucleic acid)')
parser.add_argument('parameter', type=str, help='mw (molecular weight) or dmax (maximum intraparticle distance)')
parser.add_argument('dataPath', metavar='path', type=str, help='path to the data file')
parser.add_argument('I0', type=float, help='intensity in origin from AUTORG')
parser.add_argument('Rg', type=float, help='radius of gyration from AUTORG')
parser.add_argument('--units', type=str, default='nanometer', help='angular units: angstrom or nanometer')
parser.add_argument('--n', default=1000, type=int, help='how many times to resample')
parser.add_argument('--mode', default="WARNING", type=str, help='Logging level (default = WARNING), DEBUG, INFO')
# parser.add_argument('-o', '--output', type=str, default="", help='prefix to output CSV files')
args = parser.parse_args()
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import model_from_json
from gnnom.mysaxsdocument import saxsdocument
from gnnom.normalisation.meanvariance import normalise
import numpy as np
import json
import time
# from normalisation.meanvariance import normalise
import matplotlib
from utils.log import log_warning, log_and_raise_error, log_debug, log_info
if args.mode == 'DEBUG':
logging.basicConfig(level=logging.DEBUG)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
logging.getLogger('matplotlib.font_manager').disabled = True
set_matplotlib_formats('svg')
# from scipy import stats
smax3 = 1.0
smax2 = 4.980390e-01
smax1 = 1.960780e-01
smin0 = 0.0196078
multiplier = 1
# check arguments
mType = args.type
if mType not in ['p', 'idp', 'na']:
parser.error("Wrong type of molecule! Please choose between p, idp and na.")
par = args.parameter
if par not in ['mw', 'dmax']:
parser.error("Wrong Parameter! Please choose between mw and dmax.")
units = args.units
if units not in ['angstrom', 'nanometer']:
parser.error("Wrong units! Please choose between ANGSTROM and NANOMETER.")
n = args.n
inputFilename = args.dataPath
I0 = args.I0
Rg = args.Rg
# read saxs data, find smin and smax
try:
cur, __ = saxsdocument.read(inputFilename)
s = cur['s']
if units == "nanometer":
        s = [ss / 10.0 for ss in s]  # convert s to inverse angstroms
Rg = Rg / 10.0
smin = min(s)
smax = max(s)
if smin > smin0:
log_and_raise_error(logger, f"Insufficient angular range! smin = {smin} > {smin0} A^-1")
if smax >= smax3:
lastIndex = 256
elif smax >= smax2:
lastIndex = 129
elif smax >= smax1:
lastIndex = 52
else:
log_and_raise_error(logger, f"Insufficient angular range! smax = {smax} < {smax1} A^-1")
I = np.divide(cur['I'], I0)
Err = np.divide(cur['Err'], I0)
except Exception as e:
log_warning(logger, f"Error: Could not read {inputFilename}:")
raise Exception(e)
# read appropriate model
try:
modelPath = os.path.join(os.getcwd(), "gnnom/models", f"smax-index-{lastIndex}", f"{par}-3l-80u-{mType}",
f"gnnom-{par}-5-{lastIndex}-e100-u80")
jsonFilename = modelPath + ".json"
# load json and create model
jsonFile = open(jsonFilename, 'r')
loadedModelJson = jsonFile.read()
json_data = json.loads(loadedModelJson)
# Optional fields in json
mw_kda = 1.0
if 'Normalization coefficient' in json_data:
multiplier = float(json_data['Normalization coefficient'])
if 'meanIs' in json_data:
meanIs = json_data['meanIs']
stdIs = json_data['stdIs']
    else:
        log_warning(logger, f"{jsonFilename} does not contain normalization coefficients! "
                            f"Proceeding without normalization...")
mw_kda = 0.001
# Compulsory fields in json
    smin = float(json_data['smin'])
    smax = float(json_data['smax'])
    firstPointIndex = int(json_data['firstPointIndex'])
    lastPointIndex = int(json_data['lastPointIndex'])
jsonFile.close()
loadedModel = model_from_json(loadedModelJson)
# load weights into new model
h5Filename = modelPath + ".h5"
loadedModel.load_weights(h5Filename)
inputLength = loadedModel.input_shape[1] # I(s) points
log_debug(logger, f"Expected input: {inputLength} points.")
# outputLength = loadedModel.output_shape[1] # p(r) points
log_info(logger, "Model loaded. Yeah!")
except KeyError as e:
raise Exception(f"Error: Oops, model cannot be loaded! Missing value: {e}")
except Exception as e:
raise Exception(f"Error: {e}")
# generate a grid for model
sModel = np.linspace(smin, smax, inputLength)
halfStep = (sModel[1] - sModel[0]) / 2
# regrid data file to the grid from model
sNew, INew, ErrNew = ([] for i in range(3))
for sm in sModel:
sTemp, ITemp, ErrTemp = ([] for i in range(3))
for se, ie, erre in zip(s, I, Err):
if se - halfStep < sm <= se + halfStep:
sTemp.append(se)
ITemp.append(ie)
ErrTemp.append(erre)
elif sm > smax + halfStep:
break # to speed up
sNew.append(np.mean(sTemp))
INew.append(np.mean(ITemp))
er = np.sqrt(sum(np.square(ErrTemp))) / len(ErrTemp)
ErrNew.append(er)
# # DEBUG
# plt.scatter(s, np.log10(I), c='blue', alpha=0.5, edgecolors='black')
# plt.plot(sNew, np.log10(INew), c='red')
# plt.show()
# saxsdocument.write("SASDH39-regrid3.dat", {'s': sNew, 'I': INew, 'Err': ErrNew})
start = time.monotonic()
# resample n times and run the NN to do the prediction
data = []
for i in range(n):
Is = np.random.normal(INew, ErrNew)
# saxsdocument.write(f"{inputFilename}-resample-{i}.dat", {"s": sModel, "I": Is, 'Err': ErrNew})
# exit()
    try:
        Is, __, __ = normalise(Is, stdIs, meanIs)
        data.append(Is)
    except Exception:
        # normalisation coefficients were not loaded: use the resampled curve as is
        data.append(Is)
data =
|
np.array(data)
|
numpy.array
|
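# Illustrative sketch (not part of the original script) of the resampling step above: each
# of the n curves is drawn point-wise from N(I_regridded, Err_regridded) and the curves are
# stacked into one array for batch prediction. The values below are made up.
import numpy as np

rng = np.random.default_rng(0)
I_regridded = np.array([10.0, 8.0, 5.0, 2.0])
Err_regridded = np.array([0.5, 0.4, 0.3, 0.2])
n_resample = 1000

resampled = np.stack([rng.normal(I_regridded, Err_regridded) for _ in range(n_resample)])
print(resampled.shape)         # (1000, 4)
print(resampled.mean(axis=0))  # close to I_regridded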
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import random
# Enable font colors
class bcolors:
""" For the purpose of print in terminal with colors """
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def obs_to_state(obs, info):
"""
This function converts observation into state
Args:
obs: [x, y, v_x, v_y, cos(theta), sin(theta), theta_dot]
theta= robot orientation, alpha= angle between r->g and x-axis
info: {"goal_position", ...}
Returns:
state: [r_norm, p_norm, alpha, alpha_dot, beta, beta_dot]
r_norm: distance from map origin to robot
p_norm: distance from robot to goal
alpha: angle from map's x to r
beta: angle from robot's x to p
*_dot: angular velocity
"""
# compute states
r = obs[:2]
p = info["goal_position"] - obs[:2]
r_norm =
|
np.linalg.norm(r)
|
numpy.linalg.norm
|
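# Illustrative sketch (not part of the original module) of the distance terms described in
# the obs_to_state docstring above; alpha is shown with np.arctan2 for completeness. The
# observation values below are made up.
import numpy as np

obs = np.array([1.0, 2.0, 0.1, -0.2, 1.0, 0.0, 0.05])   # [x, y, v_x, v_y, cos(theta), sin(theta), theta_dot]
info = {"goal_position": np.array([4.0, 6.0])}

r = obs[:2]                          # map origin -> robot
p = info["goal_position"] - obs[:2]  # robot -> goal
r_norm = np.linalg.norm(r)           # ~2.236
p_norm = np.linalg.norm(p)           # 5.0
alpha = np.arctan2(r[1], r[0])       # angle of r w.r.t. the map x-axis, ~1.107 rad
print(r_norm, p_norm, alpha)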
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
def import_faces_as_cols(path):
entries = os.scandir(path)
entriesLst = list(entries)
# Randomly read a image to get the information of the image
img = cv2.imread(path+'/'+entriesLst[0].name, 0) # gray
height, width = img.shape
num = len(entriesLst)
columns =
|
np.zeros(((height*width), num))
|
numpy.zeros
|
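# Illustrative sketch (not part of the original module) of the "faces as columns" layout
# started above: each grayscale image is flattened into one column of a
# (height*width, num) matrix, the usual input layout for eigenface/PCA code. Synthetic
# arrays stand in for cv2.imread results.
import numpy as np

height, width, num = 4, 3, 5
images = [np.random.rand(height, width) for _ in range(num)]

columns = np.zeros((height * width, num))
for i, img in enumerate(images):
    columns[:, i] = img.flatten()

print(columns.shape)  # (12, 5)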
import os
import os.path as osp
from glob import glob
import numpy as np
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import time
import torch
import torch.nn as nn
import torchvision.ops as tv_ops
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch_scatter import scatter, scatter_softmax, scatter_max, scatter_log_softmax
from extensions.ray_aabb.jit import ray_aabb
from extensions.pcl_aabb.jit import pcl_aabb
import constants
import models.pointnet as pnet
import models.resnet_dilated as resnet_dilated
import models.implicit_net as im_net
import utils.point_utils as point_utils
import utils.vis_utils as vis_utils
import utils.loss_utils as loss_utils
from utils.training_utils import *
class LIDF(nn.Module):
def __init__(self, opt, device):
super(LIDF, self).__init__()
self.opt = opt
self.device = device
# build models
self.build_model()
def build_model(self):
# positional embedding
if self.opt.model.pos_encode:
self.embed_fn, embed_ch = im_net.get_embedder(self.opt.model.multires)
self.embeddirs_fn, embeddirs_ch = im_net.get_embedder(self.opt.model.multires_views)
else:
self.embed_fn, embed_ch = im_net.get_embedder(self.opt.model.multires, i=-1)
self.embeddirs_fn, embeddirs_ch = im_net.get_embedder(self.opt.model.multires_views, i=-1)
assert embed_ch == embeddirs_ch == 3
# rgb model
if self.opt.model.rgb_model_type == 'resnet':
self.resnet_model = resnet_dilated.Resnet34_8s(inp_ch=self.opt.model.rgb_in, out_ch=self.opt.model.rgb_out).to(self.device)
else:
raise NotImplementedError('Does not support RGB model: {}'.format(self.opt.model.rgb_model_type))
# pointnet model
if self.opt.model.pnet_model_type == 'twostage':
self.pnet_model = pnet.PointNet2Stage(input_channels=self.opt.model.pnet_in,
output_channels=self.opt.model.pnet_out, gf_dim=self.opt.model.pnet_gf).to(self.device)
else:
raise NotImplementedError('Does not support PNET model: {}'.format(self.opt.model.pnet_model_type))
# decoder input dim
if self.opt.model.rgb_embedding_type == 'ROIAlign':
dec_inp_dim = self.opt.model.pnet_out + self.opt.model.rgb_out * (self.opt.model.roi_out_bbox**2) \
+ 2 * embed_ch + embeddirs_ch
else:
raise NotImplementedError('Does not support RGB embedding: {}'.format(self.opt.model.rgb_embedding_type))
# offset decoder
if self.opt.model.offdec_type == 'IMNET':
self.offset_dec = im_net.IMNet(inp_dim=dec_inp_dim, out_dim=1,
gf_dim=self.opt.model.imnet_gf, use_sigmoid=self.opt.model.use_sigmoid).to(self.device)
elif self.opt.model.offdec_type == 'IEF':
self.offset_dec = im_net.IEF(self.device, inp_dim=dec_inp_dim, out_dim=1, gf_dim=self.opt.model.imnet_gf,
n_iter=self.opt.model.n_iter, use_sigmoid=self.opt.model.use_sigmoid).to(self.device)
else:
raise NotImplementedError('Does not support Offset Decoder Type: {}'.format(self.opt.model.offdec_type))
# prob decoder
if self.opt.loss.prob_loss_type == 'ray':
prob_out_dim = 1
if self.opt.model.probdec_type == 'IMNET':
self.prob_dec = im_net.IMNet(inp_dim=dec_inp_dim, out_dim=prob_out_dim,
gf_dim=self.opt.model.imnet_gf, use_sigmoid=self.opt.model.use_sigmoid).to(self.device)
else:
raise NotImplementedError('Does not support Prob Decoder Type: {}'.format(self.opt.model.probdec_type))
# loss function
self.pos_loss_fn = nn.L1Loss()
print('loss_fn at GPU {}'.format(self.opt.gpu_id))
def prepare_data(self, batch, exp_type, pred_mask):
# fetch data
batch = to_gpu(batch, self.device)
rgb_img = batch['rgb']
bs = rgb_img.shape[0]
h,w = rgb_img.shape[2],rgb_img.shape[3]
corrupt_mask = batch['corrupt_mask'].squeeze(1)
xyz_corrupt = batch['xyz_corrupt']
if 'valid_mask' in batch.keys():
valid_mask = batch['valid_mask'].squeeze(1)
else:
valid_mask = 1 - corrupt_mask
# flat h and w dim
xyz_corrupt_flat = xyz_corrupt.permute(0, 2, 3, 1).contiguous().reshape(bs,-1,3)
# arrange data in a dictionary
data_dict = {
'bs': bs,
'h': h,
'w': w,
'rgb_img': rgb_img,
'corrupt_mask': corrupt_mask,
'valid_mask': valid_mask,
'xyz_corrupt_flat': xyz_corrupt_flat,
'fx': batch['fx'].float(),
'fy': batch['fy'].float(),
'cx': batch['cx'].float(),
'cy': batch['cy'].float(),
'item_path': batch['item_path'],
}
# add pred_mask
if exp_type != 'train':
if self.opt.mask_type == 'pred':
data_dict['pred_mask'] = pred_mask
data_dict['valid_mask'] = 1 - pred_mask
elif self.opt.mask_type == 'all':
data_dict['pred_mask'] = torch.ones_like(data_dict['corrupt_mask'])
inp_zero_mask = (batch['depth_corrupt'] == 0).squeeze(1).float()
data_dict['valid_mask'] = 1 - inp_zero_mask
if exp_type == 'train' or exp_type == 'test':
xyz = batch['xyz']
xyz_flat = xyz.permute(0, 2, 3, 1).contiguous().reshape(bs,-1,3)
data_dict['xyz_flat'] = xyz_flat
return data_dict
def get_valid_points(self, data_dict):
'''
If valid_sample_num == -1, use all valid points. Otherwise uniformly sample valid points in a small block.
valid_idx: (valid_point_num,2), 1st dim is batch idx, 2nd dim is flattened img idx.
'''
bs,h,w = data_dict['bs'], data_dict['h'], data_dict['w']
if self.opt.grid.valid_sample_num != -1: # sample valid points
valid_idx = point_utils.sample_valid_points(data_dict['valid_mask'], self.opt.grid.valid_sample_num, block_x=8, block_y=8)
else: # get all valid points
valid_mask_flat = data_dict['valid_mask'].reshape(bs,-1)
valid_idx = torch.nonzero(valid_mask_flat, as_tuple=False)
valid_bid = valid_idx[:,0]
valid_flat_img_id = valid_idx[:,1]
# get rgb and xyz for valid points.
valid_xyz = data_dict['xyz_corrupt_flat'][valid_bid, valid_flat_img_id]
rgb_img_flat = data_dict['rgb_img'].permute(0,2,3,1).contiguous().reshape(bs,-1,3)
valid_rgb = rgb_img_flat[valid_bid, valid_flat_img_id]
# update intermediate data in data_dict
data_dict.update({
'valid_bid': valid_bid,
'valid_flat_img_id': valid_flat_img_id,
'valid_xyz': valid_xyz,
'valid_rgb': valid_rgb,
})
def get_occ_vox_bound(self, data_dict):
##################################
# Get occupied voxel in a batch
##################################
# setup grid properties
xmin = torch.Tensor(constants.XMIN).float().to(self.device)
xmax = torch.Tensor(constants.XMAX).float().to(self.device)
min_bb = torch.min(xmax- xmin).item()
part_size = min_bb / self.opt.grid.res
# we need half voxel margin on each side
xmin = xmin - 0.5 * part_size
xmax = xmax + 0.5 * part_size
# get occupied grid
occ_vox_bid_global_coord, revidx, valid_v_pid, \
valid_v_rel_coord, idx_grid = point_utils.batch_get_occupied_idx(
data_dict['valid_xyz'], data_dict['valid_bid'].unsqueeze(-1),
xmin=xmin, xmax=xmax,
crop_size=part_size, overlap=False)
# images in current minibatch do not have occupied voxels
if occ_vox_bid_global_coord.shape[0] == 0:
print('No occupied voxel', data_dict['item_path'])
return False
occ_vox_bid = occ_vox_bid_global_coord[:,0]
occ_vox_global_coord = occ_vox_bid_global_coord[:,1:]
''' compute occupied voxel bound '''
bound_min = xmin.unsqueeze(0) + occ_vox_global_coord * part_size
bound_max = bound_min + part_size
voxel_bound = torch.cat((bound_min,bound_max),1)
# update data_dict
data_dict.update({
'xmin': xmin,
'part_size': part_size,
'revidx': revidx,
'valid_v_pid': valid_v_pid,
'valid_v_rel_coord': valid_v_rel_coord,
'occ_vox_bid': occ_vox_bid,
'occ_vox_global_coord': occ_vox_global_coord,
'voxel_bound': voxel_bound,
})
return True
def get_miss_ray(self, data_dict, exp_type):
#####################################
# compute ray dir and img grid index
#####################################
bs,h,w = data_dict['bs'], data_dict['h'], data_dict['w']
fx,fy = data_dict['fx'], data_dict['fy']
cx,cy = data_dict['cx'], data_dict['cy']
y_ind, x_ind = torch.meshgrid(torch.arange(h), torch.arange(w))
x_ind = x_ind.unsqueeze(0).repeat(bs,1,1).float().to(self.device)
y_ind = y_ind.unsqueeze(0).repeat(bs,1,1).float().to(self.device)
# img grid index, (bs,h*w,2)
img_ind_flat = torch.stack((x_ind,y_ind),-1).reshape(bs,h*w,2).long()
cam_x = x_ind - cx.reshape(-1,1,1)
cam_y = (y_ind - cy.reshape(-1,1,1)) * fx.reshape(-1,1,1) / fy.reshape(-1,1,1)
cam_z = fx.reshape(-1,1,1).repeat(1,h,w)
ray_dir = torch.stack((cam_x,cam_y,cam_z),-1)
ray_dir = ray_dir / torch.norm(ray_dir,dim=-1,keepdim=True)
ray_dir_flat = ray_dir.reshape(bs,-1,3)
###################################
# sample miss points
        # (miss_point_num,2): 1st dim is batch idx, 2nd dim is flattened img idx.
###################################
if exp_type != 'train' and self.opt.mask_type in ['pred', 'all']:
pred_mask_flat = data_dict['pred_mask'].view(bs,-1)
miss_idx = torch.nonzero(pred_mask_flat, as_tuple=False)
else:
corrupt_mask_flat = data_dict['corrupt_mask'].view(bs,-1)
miss_idx = torch.nonzero(corrupt_mask_flat, as_tuple=False)
if exp_type == 'train' and self.opt.grid.miss_sample_num != -1 and bs*self.opt.grid.miss_sample_num < miss_idx.shape[0]:
''' randomly sample miss point. make them as continuous as possible '''
miss_bid = miss_idx[:,0]
# get max miss ray cnt for all examples inside a minibatch
miss_bid_nodup, _, miss_bid_cnt = torch.unique_consecutive(miss_bid,dim=0,return_counts=True,return_inverse=True)
# make sure cnt is sorted and fill in zero if non exist
miss_bid_cnt_sorted = scatter(miss_bid_cnt, miss_bid_nodup,
dim=0, dim_size=bs, reduce="sum")
miss_bid_sid_eid = torch.cumsum(miss_bid_cnt_sorted, 0)
miss_bid_sid_eid = torch.cat((torch.Tensor([0]).long().to(self.device), miss_bid_sid_eid),0)
sample_list = []
# iterate over examples in a batch
for i in range(miss_bid_sid_eid.shape[0]-1):
cur_sid = miss_bid_sid_eid[i].item()
cur_eid = miss_bid_sid_eid[i+1].item()
cur_cnt = miss_bid_cnt_sorted[i].item()
if cur_cnt > self.opt.grid.miss_sample_num: # sample random miss points
start_range = cur_cnt - self.opt.grid.miss_sample_num + 1
start_id = np.random.choice(start_range) + cur_sid
sample_list.append(miss_idx[start_id:start_id+self.opt.grid.miss_sample_num])
else: # add all miss points
sample_list.append(miss_idx[cur_sid:cur_eid])
miss_idx = torch.cat(sample_list,0)
total_miss_sample_num = miss_idx.shape[0]
miss_bid = miss_idx[:,0]
miss_flat_img_id = miss_idx[:,1]
# get ray dir and img index for sampled miss point
miss_ray_dir = ray_dir_flat[miss_bid, miss_flat_img_id]
miss_img_ind = img_ind_flat[miss_bid, miss_flat_img_id]
# update data_dict
data_dict.update({
'miss_bid': miss_bid,
'miss_flat_img_id': miss_flat_img_id,
'miss_ray_dir': miss_ray_dir,
'miss_img_ind': miss_img_ind,
'total_miss_sample_num': total_miss_sample_num
})
def compute_ray_aabb(self, data_dict):
##################################
# Run ray AABB slab test
# mask: (occ_vox_num_in_batch, miss_ray_num_in_batch)
# dist: (occ_vox_num_in_batch, miss_ray_num_in_batch,2). store in voxel dist and out voxel dist
##################################
mask, dist = ray_aabb.forward(data_dict['miss_ray_dir'], data_dict['voxel_bound'],
data_dict['miss_bid'].int(), data_dict['occ_vox_bid'].int())
mask = mask.long()
dist = dist.float()
# get idx of ray-voxel intersect pair
intersect_idx = torch.nonzero(mask, as_tuple=False)
occ_vox_intersect_idx = intersect_idx[:,0]
miss_ray_intersect_idx = intersect_idx[:,1]
# images in current mini batch do not have ray occ vox intersection pair.
if intersect_idx.shape[0] == 0:
print('No miss ray and occ vox intersection pair', data_dict['item_path'])
return False
data_dict.update({
'mask': mask,
'dist': dist,
'occ_vox_intersect_idx': occ_vox_intersect_idx,
'miss_ray_intersect_idx': miss_ray_intersect_idx,
})
return True
def compute_gt(self, data_dict):
###########################################
# Compute Groundtruth for position and ray termination label
###########################################
# get gt pos for sampled missing point
gt_pos = data_dict['xyz_flat'][data_dict['miss_bid'], data_dict['miss_flat_img_id']]
# pcl_mask(i,j) indicates if j-th missing point gt pos inside i-th voxel
pcl_mask = pcl_aabb.forward(gt_pos, data_dict['voxel_bound'], data_dict['miss_bid'].int(), data_dict['occ_vox_bid'].int())
pcl_mask = pcl_mask.long()
# compute gt label for ray termination
pcl_label = pcl_mask[data_dict['occ_vox_intersect_idx'], data_dict['miss_ray_intersect_idx']]
pcl_label_float = pcl_label.float()
# get intersected voxels
unique_intersect_vox_idx, occ_vox_intersect_idx_nodup2dup = torch.unique(data_dict['occ_vox_intersect_idx'], sorted=True, dim=0, return_inverse=True)
intersect_voxel_bound = data_dict['voxel_bound'][unique_intersect_vox_idx]
intersect_vox_bid = data_dict['occ_vox_bid'][unique_intersect_vox_idx]
# get sampled valid pcl inside intersected voxels
valid_intersect_mask = pcl_aabb.forward(data_dict['valid_xyz'], intersect_voxel_bound.contiguous(), data_dict['valid_bid'].int(), intersect_vox_bid.int().contiguous())
valid_intersect_mask = valid_intersect_mask.long()
try:
valid_intersect_nonzero_idx = torch.nonzero(valid_intersect_mask, as_tuple=False)
except:
print(data_dict['valid_xyz'].shape)
print(valid_intersect_mask.shape)
print(unique_intersect_vox_idx.shape, intersect_voxel_bound.shape)
print(data_dict['item_path'])
valid_xyz_in_intersect = data_dict['valid_xyz'][valid_intersect_nonzero_idx[:,1]]
valid_rgb_in_intersect = data_dict['valid_rgb'][valid_intersect_nonzero_idx[:,1]]
valid_bid_in_intersect = data_dict['valid_bid'][valid_intersect_nonzero_idx[:,1]]
# update data_dict
data_dict.update({
'gt_pos': gt_pos,
'pcl_label': pcl_label,
'pcl_label_float': pcl_label_float,
'valid_xyz_in_intersect': valid_xyz_in_intersect,
'valid_rgb_in_intersect': valid_rgb_in_intersect,
'valid_bid_in_intersect': valid_bid_in_intersect
})
def get_embedding(self, data_dict):
###########################
# Get embedding
##########################
bs,h,w = data_dict['bs'], data_dict['h'], data_dict['w']
''' Positional Encoding '''
# compute intersect pos
intersect_dist = data_dict['dist'][data_dict['occ_vox_intersect_idx'], data_dict['miss_ray_intersect_idx']]
intersect_enter_dist, intersect_leave_dist = intersect_dist[:,0], intersect_dist[:,1]
intersect_dir = data_dict['miss_ray_dir'][data_dict['miss_ray_intersect_idx']]
intersect_enter_pos = intersect_dir * intersect_enter_dist.unsqueeze(-1)
intersect_leave_pos = intersect_dir * intersect_leave_dist.unsqueeze(-1)
intersect_voxel_bound = data_dict['voxel_bound'][data_dict['occ_vox_intersect_idx']]
intersect_voxel_center = (intersect_voxel_bound[:,:3] + intersect_voxel_bound[:,3:]) / 2.
if self.opt.model.intersect_pos_type == 'rel':
inp_enter_pos = intersect_enter_pos - intersect_voxel_center
inp_leave_pos = intersect_leave_pos - intersect_voxel_center
else:
inp_enter_pos = intersect_enter_pos
inp_leave_pos = intersect_leave_pos
# positional encoding
intersect_enter_pos_embed = self.embed_fn(inp_enter_pos)
intersect_leave_pos_embed = self.embed_fn(inp_leave_pos)
intersect_dir_embed = self.embeddirs_fn(intersect_dir)
''' RGB Embedding '''
miss_ray_intersect_img_ind = data_dict['miss_img_ind'][data_dict['miss_ray_intersect_idx']]
miss_ray_intersect_bid = data_dict['miss_bid'][data_dict['miss_ray_intersect_idx']]
full_rgb_feat = self.resnet_model(data_dict['rgb_img'])
# ROIAlign to pool features
if self.opt.model.rgb_embedding_type == 'ROIAlign':
# compute input boxes for ROI Align
miss_ray_intersect_ul = miss_ray_intersect_img_ind - self.opt.model.roi_inp_bbox // 2
miss_ray_intersect_br = miss_ray_intersect_img_ind + self.opt.model.roi_inp_bbox // 2
# clamp is done in original image coords
miss_ray_intersect_ul[:,0] = torch.clamp(miss_ray_intersect_ul[:,0], min=0., max=w-1)
miss_ray_intersect_ul[:,1] = torch.clamp(miss_ray_intersect_ul[:,1], min=0., max=h-1)
miss_ray_intersect_br[:,0] = torch.clamp(miss_ray_intersect_br[:,0], min=0., max=w-1)
miss_ray_intersect_br[:,1] = torch.clamp(miss_ray_intersect_br[:,1], min=0., max=h-1)
roi_boxes = torch.cat((miss_ray_intersect_bid.unsqueeze(-1), miss_ray_intersect_ul, miss_ray_intersect_br),-1).float()
# sampled rgb features for ray-voxel intersect pair. (pair num,rgb_feat_len,roi_out_bbox,roi_out_bbox)
spatial_scale = 1.0
intersect_rgb_feat = tv_ops.roi_align(full_rgb_feat, roi_boxes,
output_size=self.opt.model.roi_out_bbox,
spatial_scale=spatial_scale,
aligned=True)
try:
intersect_rgb_feat = intersect_rgb_feat.reshape(intersect_rgb_feat.shape[0],-1)
except:
print(intersect_rgb_feat.shape)
print(roi_boxes.shape)
print(data_dict['miss_ray_intersect_idx'].shape, miss_ray_intersect_bid.shape, miss_ray_intersect_img_ind.shape)
print(data_dict['total_miss_sample_num'])
print(data_dict['item_path'])
else:
raise NotImplementedError('Does not support RGB embedding type: {}'.format(self.opt.model.rgb_embedding_type))
''' Voxel Embedding '''
valid_v_rgb = data_dict['valid_rgb'][data_dict['valid_v_pid']]
if self.opt.model.pnet_pos_type == 'rel': # relative position w.r.t voxel center
pnet_inp = torch.cat((data_dict['valid_v_rel_coord'], valid_v_rgb),-1)
else:
raise NotImplementedError('Does not support Pnet pos type: {}'.format(self.opt.model.pnet_pos_type))
# pointnet forward
if self.opt.model.pnet_model_type == 'twostage':
occ_voxel_feat = self.pnet_model(inp_feat=pnet_inp, vox2point_idx=data_dict['revidx'])
else:
raise NotImplementedError('Does not support pnet model type: {}'.format(self.opt.model.pnet_model_type))
intersect_voxel_feat = occ_voxel_feat[data_dict['occ_vox_intersect_idx']]
# update data_dict
data_dict.update({
'intersect_dir': intersect_dir,
'intersect_enter_dist': intersect_enter_dist,
'intersect_leave_dist': intersect_leave_dist,
'intersect_enter_pos': intersect_enter_pos,
'intersect_leave_pos': intersect_leave_pos,
'intersect_enter_pos_embed': intersect_enter_pos_embed,
'intersect_leave_pos_embed': intersect_leave_pos_embed,
'intersect_dir_embed': intersect_dir_embed,
'full_rgb_feat': full_rgb_feat,
'intersect_rgb_feat': intersect_rgb_feat,
'intersect_voxel_feat': intersect_voxel_feat
})
def get_pred(self, data_dict, exp_type, epoch):
########################################################
# Concat embedding and send to decoder
########################################################
inp_embed = torch.cat(( data_dict['intersect_voxel_feat'].contiguous(), data_dict['intersect_rgb_feat'].contiguous(),
data_dict['intersect_enter_pos_embed'].contiguous(),
data_dict['intersect_leave_pos_embed'].contiguous(), data_dict['intersect_dir_embed'].contiguous()),-1)
pred_offset = self.offset_dec(inp_embed)
pred_prob_end = self.prob_dec(inp_embed)
# scale pred_offset from (0,1) to (offset_range[0], offset_range[1]).
pred_scaled_offset = pred_offset * (self.opt.grid.offset_range[1] - self.opt.grid.offset_range[0]) + self.opt.grid.offset_range[0]
pred_scaled_offset = pred_scaled_offset * np.sqrt(3) * data_dict['part_size']
pair_pred_pos = data_dict['intersect_enter_pos'] + pred_scaled_offset * data_dict['intersect_dir']
# we detach the pred_prob_end. we don't want pos loss to affect ray terminate score.
if self.opt.loss.prob_loss_type == 'ray':
pred_prob_end_softmax = scatter_softmax(pred_prob_end.detach()[:,0], data_dict['miss_ray_intersect_idx'])
# training uses GT pcl_label to get max_pair_id (voxel with largest prob)
if exp_type == 'train' and epoch < self.opt.model.maxpool_label_epo:
_, max_pair_id = scatter_max(data_dict['pcl_label_float'], data_dict['miss_ray_intersect_idx'],
dim_size=data_dict['total_miss_sample_num'])
# test/valid uses pred_prob_end_softmax to get max_pair_id (voxel with largest prob)
else:
_, max_pair_id = scatter_max(pred_prob_end_softmax, data_dict['miss_ray_intersect_idx'],
dim_size=data_dict['total_miss_sample_num'])
if self.opt.model.scatter_type == 'Maxpool':
dummy_pos = torch.zeros([1,3]).float().to(self.device)
pair_pred_pos_dummy = torch.cat((pair_pred_pos, dummy_pos),0)
pred_pos = pair_pred_pos_dummy[max_pair_id]
else:
raise NotImplementedError('Does not support Scatter Type: {}'.format(self.opt.model.scatter_type))
assert pred_pos.shape[0] == data_dict['total_miss_sample_num']
# update data_dict
data_dict.update({
'pair_pred_pos': pair_pred_pos,
'max_pair_id': max_pair_id,
'pred_prob_end': pred_prob_end,
'pred_prob_end_softmax': pred_prob_end_softmax,
'pred_pos': pred_pos,
})
def compute_loss(self, data_dict, exp_type, epoch):
bs,h,w = data_dict['bs'], data_dict['h'], data_dict['w']
''' position loss '''
if self.opt.loss.pos_loss_type == 'single':
if not self.opt.loss.hard_neg:
pos_loss = self.pos_loss_fn(data_dict['pred_pos'], data_dict['gt_pos'])
else:
pos_loss_unreduce = torch.mean((data_dict['pred_pos'] - data_dict['gt_pos']).abs(),-1)
k = int(pos_loss_unreduce.shape[0] * self.opt.loss.hard_neg_ratio)
pos_loss_topk,_ = torch.topk(pos_loss_unreduce, k)
pos_loss = torch.mean(pos_loss_topk)
''' Ending probability loss '''
if self.opt.loss.prob_loss_type == 'ray':
pred_prob_end_log_softmax = scatter_log_softmax(data_dict['pred_prob_end'][:,0], data_dict['miss_ray_intersect_idx'])
pcl_label_idx = torch.nonzero(data_dict['pcl_label'], as_tuple=False).reshape(-1)
prob_loss_unreduce = -1*pred_prob_end_log_softmax[pcl_label_idx]
if not self.opt.loss.hard_neg:
prob_loss = torch.mean(prob_loss_unreduce)
else:
k = int(prob_loss_unreduce.shape[0] * self.opt.loss.hard_neg_ratio)
prob_loss_topk,_ = torch.topk(prob_loss_unreduce, k)
prob_loss = torch.mean(prob_loss_topk)
''' surface normal loss '''
if exp_type == 'train':
gt_pcl = data_dict['xyz_flat'].clone()
pred_pcl = data_dict['xyz_flat'].clone()
else:
gt_pcl = data_dict['xyz_corrupt_flat'].clone()
pred_pcl = data_dict['xyz_corrupt_flat'].clone()
gt_pcl[data_dict['miss_bid'], data_dict['miss_flat_img_id']] = data_dict['gt_pos']
gt_pcl = gt_pcl.reshape(bs,h,w,3).permute(0,3,1,2).contiguous()
gt_surf_norm_img,_,_ = point_utils.get_surface_normal(gt_pcl)
gt_surf_norm_flat = gt_surf_norm_img.permute(0,2,3,1).contiguous().reshape(bs,h*w,3)
gt_surf_norm = gt_surf_norm_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
pred_pcl[data_dict['miss_bid'], data_dict['miss_flat_img_id']] = data_dict['pred_pos']
pred_pcl = pred_pcl.reshape(bs,h,w,3).permute(0,3,1,2).contiguous()
pred_surf_norm_img, dx, dy = point_utils.get_surface_normal(pred_pcl)
pred_surf_norm_flat = pred_surf_norm_img.permute(0,2,3,1).contiguous().reshape(bs,h*w,3)
pred_surf_norm = pred_surf_norm_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
# surface normal loss
cosine_val = F.cosine_similarity(pred_surf_norm, gt_surf_norm, dim=-1)
surf_norm_dist = (1 - cosine_val) / 2.
if not self.opt.loss.hard_neg:
surf_norm_loss = torch.mean(surf_norm_dist)
else:
k = int(surf_norm_dist.shape[0] * self.opt.loss.hard_neg_ratio)
surf_norm_dist_topk,_ = torch.topk(surf_norm_dist, k)
surf_norm_loss = torch.mean(surf_norm_dist_topk)
# angle err
angle_err = torch.mean(torch.acos(torch.clamp(cosine_val,min=-1,max=1)))
angle_err = angle_err / np.pi * 180.
# smooth loss
dx_dist = torch.sum(dx*dx,1)
dx_dist_flat = dx_dist.reshape(bs,h*w)
miss_dx_dist = dx_dist_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
dy_dist = torch.sum(dy*dy,1)
dy_dist_flat = dy_dist.reshape(bs,h*w)
miss_dy_dist = dy_dist_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
if not self.opt.loss.hard_neg:
smooth_loss = torch.mean(miss_dx_dist) + torch.mean(miss_dy_dist)
else:
k = int(miss_dx_dist.shape[0] * self.opt.loss.hard_neg_ratio)
miss_dx_dist_topk,_ = torch.topk(miss_dx_dist, k)
miss_dy_dist_topk,_ = torch.topk(miss_dy_dist, k)
smooth_loss = torch.mean(miss_dx_dist_topk) + torch.mean(miss_dy_dist_topk)
''' loss net '''
loss_net = self.opt.loss.pos_w * pos_loss + self.opt.loss.prob_w * prob_loss
if self.opt.loss.surf_norm_w > 0 and epoch >= self.opt.loss.surf_norm_epo:
loss_net += self.opt.loss.surf_norm_w * surf_norm_loss
if self.opt.loss.smooth_w > 0 and epoch >= self.opt.loss.smooth_epo:
loss_net += self.opt.loss.smooth_w * smooth_loss
#######################
# Evaluation Metric
#######################
# ending accuracy for missing point
_, pred_label = scatter_max(data_dict['pred_prob_end_softmax'], data_dict['miss_ray_intersect_idx'],
dim_size=data_dict['total_miss_sample_num'])
_, gt_label = scatter_max(data_dict['pcl_label'], data_dict['miss_ray_intersect_idx'],
dim_size=data_dict['total_miss_sample_num'])
acc = torch.sum(torch.eq(pred_label, gt_label).float()) / torch.numel(pred_label)
# position L2 error: we don't want to consider 0 depth point in the position L2 error.
zero_mask = torch.sum(data_dict['gt_pos'].abs(),dim=-1)
zero_mask[zero_mask!=0] = 1.
elem_num = torch.sum(zero_mask)
if elem_num.item() == 0:
err = torch.Tensor([0]).float().to(self.device)
else:
err = torch.sum(torch.sqrt(torch.sum((data_dict['pred_pos'] - data_dict['gt_pos'])**2,-1))*zero_mask) / elem_num
# compute depth errors following cleargrasp
zero_mask_idx = torch.nonzero(zero_mask, as_tuple=False).reshape(-1)
if exp_type != 'train':
if bs != 1:
pred_depth = data_dict['pred_pos'][:,2]
gt_depth = data_dict['gt_pos'][:,2]
pred = pred_depth[zero_mask_idx]
gt = gt_depth[zero_mask_idx]
else:
# scale image to make sure it is same as cleargrasp eval metric
gt_xyz = data_dict['xyz_flat'].clone()
gt_xyz = gt_xyz.reshape(bs,h,w,3).cpu().numpy()
gt_depth = gt_xyz[0,:,:,2]
gt_depth = cv2.resize(gt_depth, (256, 144), interpolation=cv2.INTER_NEAREST)
gt_depth[np.isnan(gt_depth)] = 0
gt_depth[np.isinf(gt_depth)] = 0
mask_valid_region = (gt_depth > 0)
seg_mask = data_dict['corrupt_mask'].cpu().numpy()
seg_mask = seg_mask[0].astype(np.uint8)
seg_mask = cv2.resize(seg_mask, (256, 144), interpolation=cv2.INTER_NEAREST)
mask_valid_region = np.logical_and(mask_valid_region, seg_mask)
mask_valid_region = (mask_valid_region.astype(np.uint8) * 255)
pred_xyz = data_dict['xyz_corrupt_flat'].clone()
pred_xyz[data_dict['miss_bid'], data_dict['miss_flat_img_id']] = data_dict['pred_pos']
pred_xyz = pred_xyz.reshape(bs,h,w,3).cpu().numpy()
pred_depth = pred_xyz[0,:,:,2]
pred_depth = cv2.resize(pred_depth, (256, 144), interpolation=cv2.INTER_NEAREST)
gt = torch.from_numpy(gt_depth).float().to(self.device)
pred = torch.from_numpy(pred_depth).float().to(self.device)
mask = torch.from_numpy(mask_valid_region).bool().to(self.device)
gt = gt[mask]
pred = pred[mask]
# compute metrics
safe_log = lambda x: torch.log(torch.clamp(x, 1e-6, 1e6))
            safe_log10 = lambda x: torch.log10(torch.clamp(x, 1e-6, 1e6))
thresh = torch.max(gt / pred, pred / gt)
a1 = (thresh < 1.05).float().mean()
a2 = (thresh < 1.10).float().mean()
a3 = (thresh < 1.25).float().mean()
rmse = ((gt - pred)**2).mean().sqrt()
rmse_log = ((safe_log(gt) - safe_log(pred))**2).mean().sqrt()
log10 = (safe_log10(gt) - safe_log10(pred)).abs().mean()
abs_rel = ((gt - pred).abs() / gt).mean()
mae = (gt - pred).abs().mean()
sq_rel = ((gt - pred)**2 / gt).mean()
# update data_dict
data_dict.update({
'zero_mask_idx': zero_mask_idx,
'gt_surf_norm_img': gt_surf_norm_img,
'pred_surf_norm_img': pred_surf_norm_img
})
# loss dict
loss_dict = {
'pos_loss': pos_loss,
'prob_loss': prob_loss,
'surf_norm_loss': surf_norm_loss,
'smooth_loss': smooth_loss,
'loss_net': loss_net,
'acc': acc,
'err': err,
'angle_err': angle_err,
}
if exp_type != 'train':
loss_dict.update({
'a1': a1,
'a2': a2,
'a3': a3,
'rmse': rmse,
'rmse_log': rmse_log,
'log10': log10,
'abs_rel': abs_rel,
'mae': mae,
'sq_rel': sq_rel,
})
return loss_dict
def forward(self, batch, exp_type, epoch, pred_mask=None):
loss_dict = {}
# prepare input and gt data
data_dict = self.prepare_data(batch, exp_type, pred_mask)
# get valid points data
self.get_valid_points(data_dict)
# get occupied voxel data
occ_vox_flag = self.get_occ_vox_bound(data_dict)
if exp_type == 'train' and self.opt.dist.ddp:
# have to set barrier to wait for all processes finished forward pass
dist.barrier()
success_num = torch.Tensor([occ_vox_flag]).to(self.device)
dist.all_reduce(success_num, op=dist.ReduceOp.SUM)
# at least one gpu fails: clear grad buffer and return
if success_num[0] < self.opt.dist.ngpus_per_node:
print('gpu {}: {}'.format(self.opt.gpu_id, success_num[0]))
return False, data_dict, loss_dict
elif not occ_vox_flag:
return False, data_dict, loss_dict
# get miss ray data
self.get_miss_ray(data_dict, exp_type)
miss_sample_flag = (data_dict['total_miss_sample_num'] != 0)
if exp_type == 'train' and self.opt.dist.ddp:
# have to set barrier to wait for all processes finished forward pass
dist.barrier()
success_num = torch.Tensor([miss_sample_flag]).to(self.device)
dist.all_reduce(success_num, op=dist.ReduceOp.SUM)
# at least one gpu fails: clear grad buffer and return
if success_num[0] < self.opt.dist.ngpus_per_node:
print('gpu {}: {}'.format(self.opt.gpu_id, success_num[0]))
return False, data_dict, loss_dict
elif not miss_sample_flag:
return False, data_dict, loss_dict
# ray AABB slab test
intersect_pair_flag = self.compute_ray_aabb(data_dict)
if exp_type == 'train' and self.opt.dist.ddp:
# have to set barrier to wait for all processes finished forward pass
dist.barrier()
success_num = torch.Tensor([intersect_pair_flag]).to(self.device)
dist.all_reduce(success_num, op=dist.ReduceOp.SUM)
# at least one gpu fails: clear grad buffer and return
if success_num[0] < self.opt.dist.ngpus_per_node:
print('gpu {}: {}'.format(self.opt.gpu_id, success_num[0]))
return False, data_dict, loss_dict
elif not intersect_pair_flag:
return False, data_dict, loss_dict
# compute gt
if exp_type == 'train' or exp_type == 'test':
self.compute_gt(data_dict)
# get embedding
self.get_embedding(data_dict)
# get prediction
self.get_pred(data_dict, exp_type, epoch)
# compute loss
if exp_type == 'train' or exp_type == 'test':
loss_dict = self.compute_loss(data_dict, exp_type, epoch)
return True, data_dict, loss_dict
class RefineNet(nn.Module):
def __init__(self, opt, device):
super(RefineNet, self).__init__()
self.opt = opt
self.device = device
# build models
self.build_model()
def build_model(self):
# positional embedding
if self.opt.refine.pos_encode:
self.embed_fn, embed_ch = im_net.get_embedder(self.opt.refine.multires)
self.embeddirs_fn, embeddirs_ch = im_net.get_embedder(self.opt.refine.multires_views)
else:
self.embed_fn, embed_ch = im_net.get_embedder(self.opt.refine.multires, i=-1)
self.embeddirs_fn, embeddirs_ch = im_net.get_embedder(self.opt.refine.multires_views, i=-1)
assert embed_ch == embeddirs_ch == 3
# pointnet
if self.opt.refine.pnet_model_type == 'twostage':
self.pnet_model = pnet.PointNet2Stage(input_channels=self.opt.refine.pnet_in,
output_channels=self.opt.refine.pnet_out, gf_dim=self.opt.refine.pnet_gf).to(self.device)
else:
raise NotImplementedError('Does not support Pnet type for RefineNet: {}'.format(self.opt.refine.pnet_model_type))
# decoder input dim
dec_inp_dim = self.opt.refine.pnet_out + embed_ch + embeddirs_ch
if self.opt.model.rgb_embedding_type == 'ROIAlign':
dec_inp_dim += self.opt.model.rgb_out * (self.opt.model.roi_out_bbox**2)
else:
raise NotImplementedError('Does not support RGB embedding: {}'.format(self.opt.model.rgb_embedding_type))
# offset decoder
if self.opt.refine.offdec_type == 'IMNET':
self.offset_dec = im_net.IMNet(inp_dim=dec_inp_dim, out_dim=1,
gf_dim=self.opt.refine.imnet_gf, use_sigmoid=self.opt.refine.use_sigmoid).to(self.device)
elif self.opt.refine.offdec_type == 'IEF':
self.offset_dec = im_net.IEF(self.device, inp_dim=dec_inp_dim, out_dim=1, gf_dim=self.opt.refine.imnet_gf,
n_iter=self.opt.refine.n_iter, use_sigmoid=self.opt.refine.use_sigmoid).to(self.device)
else:
raise NotImplementedError('Does not support Offset Decoder Type: {}'.format(self.opt.refine.offdec_type))
# loss function
self.pos_loss_fn = nn.L1Loss()
print('loss_fn at GPU {}'.format(self.opt.gpu_id))
def compute_loss(self, data_dict, exp_type, epoch):
bs,h,w = data_dict['bs'], data_dict['h'], data_dict['w']
''' position loss '''
if self.opt.loss.pos_loss_type == 'single':
if not self.opt.loss.hard_neg:
pos_loss = self.pos_loss_fn(data_dict['pred_pos_refine'], data_dict['gt_pos'])
else:
pos_loss_unreduce = torch.mean((data_dict['pred_pos_refine'] - data_dict['gt_pos']).abs(),-1)
k = int(pos_loss_unreduce.shape[0] * self.opt.loss.hard_neg_ratio)
pos_loss_topk,_ = torch.topk(pos_loss_unreduce, k)
pos_loss = torch.mean(pos_loss_topk)
else:
raise NotImplementedError('Does not support pos_loss_type for refine model'.format(self.opt.loss.pos_loss_type))
''' surface normal loss '''
if exp_type == 'train':
gt_pcl = data_dict['xyz_flat'].clone()
pred_pcl = data_dict['xyz_flat'].clone()
else:
gt_pcl = data_dict['xyz_corrupt_flat'].clone()
pred_pcl = data_dict['xyz_corrupt_flat'].clone()
gt_pcl[data_dict['miss_bid'], data_dict['miss_flat_img_id']] = data_dict['gt_pos']
gt_pcl = gt_pcl.reshape(bs,h,w,3).permute(0,3,1,2).contiguous()
gt_surf_norm_img,_,_ = point_utils.get_surface_normal(gt_pcl)
gt_surf_norm_flat = gt_surf_norm_img.permute(0,2,3,1).contiguous().reshape(bs,h*w,3)
gt_surf_norm = gt_surf_norm_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
pred_pcl[data_dict['miss_bid'], data_dict['miss_flat_img_id']] = data_dict['pred_pos_refine']
pred_pcl = pred_pcl.reshape(bs,h,w,3).permute(0,3,1,2).contiguous()
pred_surf_norm_img, dx, dy = point_utils.get_surface_normal(pred_pcl)
pred_surf_norm_flat = pred_surf_norm_img.permute(0,2,3,1).contiguous().reshape(bs,h*w,3)
pred_surf_norm = pred_surf_norm_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
# surface normal loss
cosine_val = F.cosine_similarity(pred_surf_norm, gt_surf_norm, dim=-1)
surf_norm_dist = (1 - cosine_val) / 2.
if not self.opt.loss.hard_neg:
surf_norm_loss = torch.mean(surf_norm_dist)
else:
k = int(surf_norm_dist.shape[0] * self.opt.loss.hard_neg_ratio)
surf_norm_dist_topk,_ = torch.topk(surf_norm_dist, k)
surf_norm_loss = torch.mean(surf_norm_dist_topk)
# angle err
angle_err = torch.mean(torch.acos(torch.clamp(cosine_val,min=-1,max=1)))
angle_err = angle_err / np.pi * 180.
# smooth loss
dx_dist = torch.sum(dx*dx,1)
dx_dist_flat = dx_dist.reshape(bs,h*w)
miss_dx_dist = dx_dist_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
dy_dist = torch.sum(dy*dy,1)
dy_dist_flat = dy_dist.reshape(bs,h*w)
miss_dy_dist = dy_dist_flat[data_dict['miss_bid'], data_dict['miss_flat_img_id']]
if not self.opt.loss.hard_neg:
smooth_loss = torch.mean(miss_dx_dist) + torch.mean(miss_dy_dist)
else:
k = int(miss_dx_dist.shape[0] * self.opt.loss.hard_neg_ratio)
miss_dx_dist_topk,_ = torch.topk(miss_dx_dist, k)
miss_dy_dist_topk,_ = torch.topk(miss_dy_dist, k)
smooth_loss = torch.mean(miss_dx_dist_topk) + torch.mean(miss_dy_dist_topk)
''' loss net '''
loss_net = self.opt.loss.pos_w * pos_loss
if self.opt.loss.surf_norm_w > 0 and epoch >= self.opt.loss.surf_norm_epo:
loss_net += self.opt.loss.surf_norm_w * surf_norm_loss
if self.opt.loss.smooth_w > 0 and epoch >= self.opt.loss.smooth_epo:
loss_net += self.opt.loss.smooth_w * smooth_loss
#######################
# Evaluation Metric
#######################
# position L2 error: we don't want to consider 0 depth point in the position L2 error.
zero_mask = torch.sum(data_dict['gt_pos'].abs(),dim=-1)
zero_mask[zero_mask!=0] = 1.
elem_num = torch.sum(zero_mask)
if elem_num.item() == 0:
err = torch.Tensor([0]).float().to(self.device)
else:
err = torch.sum(torch.sqrt(torch.sum((data_dict['pred_pos_refine'] - data_dict['gt_pos'])**2,-1))*zero_mask) / elem_num
# compute depth errors following cleargrasp
zero_mask_idx = torch.nonzero(zero_mask, as_tuple=False).reshape(-1)
if exp_type != 'train':
if bs != 1:
pred_depth = data_dict['pred_pos_refine'][:,2]
gt_depth = data_dict['gt_pos'][:,2]
pred = pred_depth[zero_mask_idx]
gt = gt_depth[zero_mask_idx]
else:
# scale image to make sure it is same as cleargrasp eval metric
gt_xyz = data_dict['xyz_flat'].clone()
gt_xyz = gt_xyz.reshape(bs,h,w,3).cpu().numpy()
gt_depth = gt_xyz[0,:,:,2]
# same size as cleargrasp for fair comparison
gt_depth = cv2.resize(gt_depth, (256, 144), interpolation=cv2.INTER_NEAREST)
gt_depth[np.isnan(gt_depth)] = 0
gt_depth[np.isinf(gt_depth)] = 0
mask_valid_region = (gt_depth > 0)
seg_mask = data_dict['corrupt_mask'].cpu().numpy()
seg_mask = seg_mask[0].astype(np.uint8)
seg_mask = cv2.resize(seg_mask, (256, 144), interpolation=cv2.INTER_NEAREST)
mask_valid_region = np.logical_and(mask_valid_region, seg_mask)
mask_valid_region = (mask_valid_region.astype(np.uint8) * 255)
pred_xyz = data_dict['xyz_corrupt_flat'].clone()
pred_xyz[data_dict['miss_bid'], data_dict['miss_flat_img_id']] = data_dict['pred_pos_refine']
pred_xyz = pred_xyz.reshape(bs,h,w,3).cpu().numpy()
pred_depth = pred_xyz[0,:,:,2]
pred_depth = cv2.resize(pred_depth, (256, 144), interpolation=cv2.INTER_NEAREST)
gt = torch.from_numpy(gt_depth).float().to(self.device)
pred = torch.from_numpy(pred_depth).float().to(self.device)
mask = torch.from_numpy(mask_valid_region).bool().to(self.device)
gt = gt[mask]
pred = pred[mask]
# compute metrics
safe_log = lambda x: torch.log(torch.clamp(x, 1e-6, 1e6))
            safe_log10 = lambda x: torch.log10(torch.clamp(x, 1e-6, 1e6))
thresh = torch.max(gt / pred, pred / gt)
a1 = (thresh < 1.05).float().mean()
a2 = (thresh < 1.10).float().mean()
a3 = (thresh < 1.25).float().mean()
rmse = ((gt - pred)**2).mean().sqrt()
rmse_log = ((safe_log(gt) - safe_log(pred))**2).mean().sqrt()
log10 = (safe_log10(gt) - safe_log10(pred)).abs().mean()
abs_rel = ((gt - pred).abs() / gt).mean()
mae = (gt - pred).abs().mean()
sq_rel = ((gt - pred)**2 / gt).mean()
# update data_dict
data_dict.update({
'zero_mask_idx': zero_mask_idx,
'pred_surf_norm_img_refine': pred_surf_norm_img
})
# loss dict
loss_dict = {
'pos_loss': pos_loss,
'surf_norm_loss': surf_norm_loss,
'smooth_loss': smooth_loss,
'loss_net': loss_net,
'err': err,
'angle_err': angle_err,
}
if exp_type != 'train':
loss_dict.update({
'a1': a1,
'a2': a2,
'a3': a3,
'rmse': rmse,
'rmse_log': rmse_log,
'log10': log10,
'abs_rel': abs_rel,
'mae': mae,
'sq_rel': sq_rel,
})
return loss_dict
def get_pred_refine(self, data_dict, pred_pos, exp_type, cur_iter):
bs,h,w = data_dict['bs'], data_dict['h'], data_dict['w']
concat_dummy = lambda feat: torch.cat((feat, torch.zeros([1,feat.shape[1]]).to(feat.dtype).to(self.device)),0)
# manually perturb prediction by adding noise, we only perturb in 1st iter.
if exp_type == 'train' and self.opt.refine.perturb and cur_iter == 0 and np.random.random() < self.opt.refine.perturb_prob:
prob =
|
np.random.random()
|
numpy.random.random
|
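# Illustrative sketch (not part of the original model code) of the hard-negative mining
# pattern used throughout compute_loss above: keep only the largest hard_neg_ratio
# fraction of the per-point losses via torch.topk and average those. Tensors are made up.
import torch

pred = torch.randn(100, 3)
gt = torch.randn(100, 3)
hard_neg_ratio = 0.5

per_point_l1 = torch.mean((pred - gt).abs(), dim=-1)   # unreduced L1 per point
k = int(per_point_l1.shape[0] * hard_neg_ratio)
topk_losses, _ = torch.topk(per_point_l1, k)           # the k hardest points
pos_loss = torch.mean(topk_losses)
print(pos_loss)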
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
from PIL import Image
def section_2_3(data):
print('\n\n\n############# Section 2-3 ##################')
######################## Section 2 ##############################
# List keys of dataset
print(data.keys())
x0 = data['x'][0]
y0 = data['y'][0]
z0 = data['z'][0]
plt.figure(figsize=(10,10))
plt.plot(x0, linewidth=4)
plt.plot(y0, linewidth=4)
plt.plot(z0, linewidth=4)
plt.legend([r'$x_0$',r'$y_0$',r'$z_0$'],fontsize=15)
plt.xticks(range(0,31,1),range(400,710,10),rotation=90,fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Wavelength,'+r'$\lambda$'+'(nm)',fontsize=20)
plt.ylabel('color matching functions',fontsize=20)
plt.savefig('color_matching_function.tif')
plt.savefig('color_matching_function.png')
A_inv = np.array([[0.2430,0.8560,-0.0440],[-0.3910,1.1650,0.0870],[0.0100,-0.0080,0.5630]])
T = np.array([[x0],[y0],[z0]])
T = np.reshape(T,[T.shape[0],T.shape[-1]])
print('T = ',T.shape)
print('A_inv = ',A_inv.shape)
CMF = np.matmul(A_inv,T)
print('CMF = ',CMF.shape)
plt.figure(figsize=(10,10))
plt.plot(CMF[0,:], linewidth=4)
plt.plot(CMF[1,:], linewidth=4)
plt.plot(CMF[2,:], linewidth=4)
plt.legend([r'$l_0$',r'$m_0$',r'$s_0$'],fontsize=15)
plt.xticks(range(0,31,1),range(400,710,10),rotation=90,fontsize=15)
plt.yticks(np.divide(range(0,11,1),100),fontsize=15)
plt.xlabel('Wavelength,'+r'$\lambda$'+'(nm)',fontsize=20)
plt.ylabel('color matching functions',fontsize=20)
plt.savefig('CMF.tif')
plt.savefig('CMF.png')
D_65 = data['illum1'][0]
flour = data['illum2'][0]
plt.figure(figsize=(10,10))
plt.plot(D_65, linewidth=4)
plt.plot(flour, linewidth=4)
plt.legend([r'$D_{65}$','Fluorescent'],fontsize=15)
plt.xticks(range(0,31,1),range(400,710,10),rotation=90,fontsize=15)
plt.xlabel('Wavelength,'+r'$\lambda$'+'(nm)',fontsize=20)
plt.yticks(fontsize=15)
plt.ylabel('Spectrum',fontsize=20)
plt.savefig('Spectrum.tif')
plt.savefig('Spectrum.png')
########################## Section 3 ###############################
S = np.sum(T,axis=0)
S = np.reshape(S,[1,S.shape[0]])
print('S = ',S.shape)
x,y,z = x0/S,y0/S,z0/S
x,y,z = x.T,y.T,z.T
print('x = ',x.shape)
print('y = ',y.shape)
print('z = ',z.shape)
#D_65 primaries
R = [0.73467, 0.26533, 0.0]
G = [0.27376, 0.71741, 0.00883]
B = [0.16658, 0.00886, 0.82456]
    x_w,y_w,z_w = 0.3127, 0.3290, 0.3583  # D_65 white point
    x_e,y_e,z_e = 0.3333, 0.3333, 0.3333  # Equal energy white point
plt.figure(figsize=(10,10))
plt.plot(x,y, linewidth=4)
plt.xlabel('X',fontsize=20)
plt.yticks(fontsize=15)
plt.ylabel('Y',fontsize=20)
plt.title("Chromaticity Diagram (as a function of "+r'$\lambda$'+")",fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
L = 400
for xt,yt in zip(x,y):
label = r'$\lambda=$'+str(L)
plt.annotate(label, # this is the text
(xt,yt), # these are the coordinates to position the label
textcoords="offset points", # how to position the text
xytext=(10,0), # distance from text to points (x,y)
ha='center',
fontsize=15) # horizontal alignment can be left, right or center
L += 10
plt.savefig('Chromaticity.png')
plt.savefig('Chromaticity.tif')
plt.figure(figsize=(10,10))
plt.plot(x,y, linewidth=4, label='Chromaticity diagram')
plt.plot(R[0],R[1],marker="o", markersize=15, markeredgecolor="red", markerfacecolor="red", label='D_65_Red_Primary')
plt.plot(G[0],G[1],marker="o", markersize=15, markeredgecolor="green", markerfacecolor="green", label='D_65_Green_Primary')
plt.plot(B[0],B[1],marker="o", markersize=15, markeredgecolor="blue", markerfacecolor="blue", label='D_65_Blue_Primary')
plt.plot([R[0],G[0]],[R[1],G[1]], linewidth=4)
plt.plot([R[0],B[0]],[R[1],B[1]], linewidth=4)
plt.plot([B[0],G[0]],[B[1],G[1]], linewidth=4)
plt.plot(x_w,y_w, marker="o", markersize=15, markeredgecolor="Black", markerfacecolor="Black", label='D_65_white_point')
plt.plot(x_e,y_e, marker="o", markersize=15, markeredgecolor="Grey", markerfacecolor="Grey", label='Equal_energy_white_point')
plt.xlabel('X',fontsize=20)
plt.yticks(fontsize=15)
plt.ylabel('Y',fontsize=20)
plt.title("Chromaticity Diagram (D_65) (as a function of "+r'$\lambda$'+")",fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.annotate('R',(R[0],R[1]),textcoords="offset points",xytext=(10,10),ha='center',fontsize=20)
plt.annotate('G',(G[0],G[1]),textcoords="offset points",xytext=(10,10),ha='center',fontsize=20)
plt.annotate('B',(B[0],B[1]),textcoords="offset points",xytext=(10,10),ha='center',fontsize=20)
plt.legend()
plt.savefig('Chromaticity_Combined_D_65.png')
plt.savefig('Chromaticity_Combined_D_65.tif')
#Rec. 709RGB primaries
R = [0.640, 0.330, 0.030]
G = [0.300, 0.600, 0.100]
B = [0.150, 0.060, 0.790]
plt.figure(figsize=(10,10))
plt.plot(x,y, linewidth=4, label='Chromaticity diagram')
plt.plot(R[0],R[1],marker="o", markersize=15, markeredgecolor="red", markerfacecolor="red", label='Rec_709_RGB_Red_Primary')
plt.plot(G[0],G[1],marker="o", markersize=15, markeredgecolor="green", markerfacecolor="green", label='Rec_709_RGB_Green_Primary')
plt.plot(B[0],B[1],marker="o", markersize=15, markeredgecolor="blue", markerfacecolor="blue", label='Rec_709_RGB_Blue_Primary')
plt.plot([R[0],G[0]],[R[1],G[1]], linewidth=4)
plt.plot([R[0],B[0]],[R[1],B[1]], linewidth=4)
plt.plot([B[0],G[0]],[B[1],G[1]], linewidth=4)
plt.plot(x_w,y_w, marker="o", markersize=15, markeredgecolor="Black", markerfacecolor="Black", label='D_65_white_point')
plt.plot(x_e,y_e, marker="o", markersize=15, markeredgecolor="Grey", markerfacecolor="Grey", label='Equal_energy_white_point')
plt.legend()
plt.xlabel('X',fontsize=20)
plt.yticks(fontsize=15)
plt.ylabel('Y',fontsize=20)
plt.title("Chromaticity Diagram (Rec. 709 RGB) (as a function of "+r'$\lambda$'+")",fontsize=15)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.annotate('R',(R[0],R[1]),textcoords="offset points",xytext=(10,10),ha='center',fontsize=20)
plt.annotate('G',(G[0],G[1]),textcoords="offset points",xytext=(10,10),ha='center',fontsize=20)
plt.annotate('B',(B[0],B[1]),textcoords="offset points",xytext=(10,10),ha='center',fontsize=20)
plt.savefig('Chromaticity_Combined_709.png')
plt.savefig('Chromaticity_Combined_709.tif')
##################### Section 4 ###############################
def gamma_correction(gamma,img):
gamma_corrected_image = np.power((img/255),1/gamma)*255
return gamma_corrected_image
def section_4(illuminant,name,T):
print('\n\n\n############# Section 4 ##################')
data = np.load('./reflect.npy',allow_pickle=True)[()]
R = data['R']
print('R = ',R.shape)
print(name,' = ',illuminant.shape)
I = np.zeros(R.shape)
for i in range(0,R.shape[0]):
for j in range(0,R.shape[1]):
I[i,j,:] = np.multiply(R[i,j,:],illuminant)
print('I = ',I.shape)
XYZ =
|
np.zeros((I.shape[0],I.shape[1],T.shape[0]))
|
numpy.zeros
|
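# Illustrative sketch (not part of the original script) of the step started above:
# XYZ[i, j, :] is the projection of the per-pixel spectrum I[i, j, :] onto the colour
# matching functions T (a 3 x N matrix), i.e. a single einsum over the wavelength axis
# instead of the per-pixel loop. Shapes below are made up.
import numpy as np

n_wavelengths = 31
T = np.random.rand(3, n_wavelengths)       # stand-in for the stacked [x0; y0; z0]
I = np.random.rand(16, 16, n_wavelengths)  # stand-in for reflectance * illuminant

XYZ = np.einsum('ck,ijk->ijc', T, I)       # per-pixel integration over wavelength
print(XYZ.shape)                           # (16, 16, 3)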
#! /usr/bin/env python3
import rclpy
import random
import numpy as np
import math
import time
import copy
import lzma
import pickle
from collections import deque
from rclpy.action import ActionClient
from rclpy.node import Node
from nav2_msgs.action import ComputePathToPose, NavigateToPose
from nav_msgs.msg import OccupancyGrid
from multi_robot_explore.explore_util import ExploreUtil
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Pose, PoseStamped, Point
from multi_robot_interfaces.msg import Frontier, RobotTracks
from multi_robot_interfaces.srv import GetLocalMap, GetLocalMapAndFrontier, GetLocalMapAndFrontierCompress
from robot_control_interface.robot_control_node import RobotControlInterface
from multi_robot_explore.peer_interface_node import PeerInterfaceNode
from multi_robot_explore.map_frontier_merger import MapAndFrontierMerger
# import explore_util.ExploreUtil as explore_util
# input: occupancyGrid, robot_pos, window_size
# output: a set of frontiers
class GroupCoordinator(Node):
def __init__(self, robot_name):
super().__init__('group_coordinator_node_' + robot_name)
#robot_pos: tuple double(x, y)
self.local_map_ = None
self.local_frontiers_msg_ = None
self.window_frontiers_ = None
self.cluster_list_ = None
self.robot_name_ = robot_name
self.curr_pos_ = None
self.e_util = ExploreUtil()
self.peer_map_ = dict()
self.cluster_pose_dict_ = dict()
self.peer_local_frontiers_ = dict()
self.peer_data_updated_ = dict()
self.merge_map_frontier_timeout_ = 5
self.cluster_state_dict_ = dict()
self.merged_map_ = None
self.merged_frontiers_ = []
self.init_offset_to_world_dict_ = dict()
self.init_offset_to_current_robot_dict_ = dict()
self.robot_radius_ = 0.1
self.robot_radius_cell_size = 40
self.compute_path_client_dict_ = dict()
self.current_computing_robot_ = None
self.get_path_done_dict_ = dict()
self.window_frontiers_rank_ = None
self.corridor_distance_ = 2.5
self.debug_merge_frontiers_pub_ = self.create_publisher(OccupancyGrid, self.robot_name_ + '/merged_frontiers_debug', 10)
self.debug_merge_map_pub_ = self.create_publisher(OccupancyGrid, self.robot_name_ + '/merged_map_debug', 10)
self.robot_track_sub_ = self.create_subscription(
RobotTracks,
'robot_tracks',
self.robotTrackCallback,
10)
self.robot_track_sub_ # prevent unused variable warning
self.peer_merge_map_pub_dict_ = dict()
self.get_path_result_dict_ = dict()
self.peer_tracks_dict_ = dict()
def robotTrackCallback(self, msg):
peer_name = msg.robot_name.data
track = msg.robot_tracks
self.peer_tracks_dict_[peer_name] = track
def setGroupInfo(self, cluster_list, local_map, local_frontiers_msg, cluster_state_dict, init_offset_to_world_dict):
self.local_map_ = local_map
self.cluster_list_ = cluster_list #self.cluster_list_ includes self.robot_name_
self.local_frontiers_msg_ = local_frontiers_msg
self.cluster_state_dict_ = cluster_state_dict #self.cluster_state_dict_ includes state information for self.robot_name_
self.init_offset_to_world_dict_ = init_offset_to_world_dict
for peer in self.cluster_list_:
self.compute_path_client_dict_[peer] = ActionClient(self, ComputePathToPose, peer + '/compute_path_to_pose')
if peer != self.robot_name_:
self.peer_merge_map_pub_dict_[peer] = self.create_publisher(OccupancyGrid, peer + '/merged_map_debug', 10)
current_robot_offset_world_pose = self.init_offset_to_world_dict_[self.robot_name_]
for peer in self.init_offset_to_world_dict_:
if peer == self.robot_name_:
self.init_offset_to_current_robot_dict_[peer] = Pose()
self.init_offset_to_current_robot_dict_[peer].position.x = 0.0
self.init_offset_to_current_robot_dict_[peer].position.y = 0.0
self.init_offset_to_current_robot_dict_[peer].position.z = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.x = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.y = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.z = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.w = 1.0
else:
self.init_offset_to_current_robot_dict_[peer] = Pose()
self.init_offset_to_current_robot_dict_[peer].position.x = self.init_offset_to_world_dict_[peer].position.x - current_robot_offset_world_pose.position.x
self.init_offset_to_current_robot_dict_[peer].position.y = self.init_offset_to_world_dict_[peer].position.y - current_robot_offset_world_pose.position.y
self.init_offset_to_current_robot_dict_[peer].position.z = self.init_offset_to_world_dict_[peer].position.z - current_robot_offset_world_pose.position.z
self.init_offset_to_current_robot_dict_[peer].orientation.x = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.y = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.z = 0.0
self.init_offset_to_current_robot_dict_[peer].orientation.w = 1.0
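        # Note: these offsets are translation-only (orientation is left as identity);
        # a peer's initial offset is expressed in the current robot's frame simply by
        # subtracting the current robot's world offset from the peer's world offset.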
# getPathLengthToPose functions
def getPathLengthToPose(self, robot, target_pose):
self.get_path_done_dict_[robot] = False
self.current_computing_robot_ = robot
target_pose_stamped = PoseStamped()
target_pose_stamped.header.frame_id = self.current_computing_robot_ + "/map"
target_pose_stamped.pose = target_pose
goal_msg = ComputePathToPose.Goal()
goal_msg.pose = target_pose_stamped
goal_msg.planner_id = 'GridBased'
self.compute_path_client_dict_[robot].wait_for_server()
# send_goal_async test
self.send_goal_future = self.compute_path_client_dict_[robot].send_goal_async(goal_msg)
self.send_goal_future.add_done_callback(self.getPathLengthResponseCallback)
return self.send_goal_future
#send_goal test
# result = self.compute_path_client_.send_goal(goal_msg)
# path = result.path
# return len(path.poses)
def getPathLengthResponseCallback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('GetPathLengthGoal rejected')
return
self.get_logger().error('{}GetPathLengthGoal accepted'.format(self.current_computing_robot_))
self.get_result_future = goal_handle.get_result_async()
self.get_result_future.add_done_callback(self.getPathLengthResultCallback)
def getPathLengthResultCallback(self, future):
result = future.result().result
path_length = 0.0
pre_pose = result.path.poses[0]
for pose in result.path.poses:
if pose.pose.position.x == pre_pose.pose.position.x and pose.pose.position.y == pre_pose.pose.position.y:
continue
else:
path_length += math.sqrt((pose.pose.position.x - pre_pose.pose.position.x)*(pose.pose.position.x - pre_pose.pose.position.x) + (pose.pose.position.y - pre_pose.pose.position.y)*(pose.pose.position.y - pre_pose.pose.position.y))
pre_pose = pose
self.get_path_result_dict_[self.current_computing_robot_] = path_length
self.get_logger().warn('{},get the getPathLength result: {}'.format(self.current_computing_robot_, path_length))
self.get_path_done_dict_[self.current_computing_robot_] = True
def getPathLength(self, robot):
return self.get_path_result_dict_[robot]
# def distBetweenPoseAndFrontiers(self, pose, frontier, map, min_radius, max_radius):
# f_pt = self.extractTargetFromFrontier(frontier, map, min_radius, max_radius)
# return math.sqrt((f_pt[0] - pose.position.x)*(f_pt[0] - pose.position.x) + (f_pt[1] - pose.position.y)*(f_pt[1] - pose.position.y))
def distBetweenPoseAndFrontiers(self, pose, frontier, map, min_radius, max_radius):
fpt = (frontier.frontier[0].point.x, frontier.frontier[0].point.y)
return (fpt[0] - pose.position.x)*(fpt[0] - pose.position.x) + (fpt[1] - pose.position.y)*(fpt[1] - pose.position.y)
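    # Note: unlike the commented-out variant above, this version uses only the first
    # frontier point and returns the squared Euclidean distance (no sqrt), which is
    # enough for ranking frontiers by proximity.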
def extractTargetFromFrontier(self, frontier, map, min_radius, max_radius):
f_connect = []
for pt in frontier.frontier:
f_connect.append((pt.point.x, pt.point.y))
# print('f_connect append:{},{}'.format(pt.point.x, pt.point.y))
observe_pt_and_frontier_pt = self.e_util.getObservePtForFrontiers(f_connect, map, min_radius, max_radius)
        if observe_pt_and_frontier_pt is None:
return None
observe_pt = observe_pt_and_frontier_pt[0]
frontier_pt = observe_pt_and_frontier_pt[1]
return observe_pt
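    # Sketch (not part of the original class): convert a map-frame point to a flat index
    # into OccupancyGrid.data, assuming the standard ROS row-major layout. This is the
    # kind of lookup checkDirectLineCrossObs needs once map_array is built from map.data.
    def worldToCellIndex(self, pt, resolution, offset_x, offset_y, map_width):
        cell_x = int((pt[0] - offset_x) / resolution)
        cell_y = int((pt[1] - offset_y) / resolution)
        return cell_y * map_width + cell_x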
def checkDirectLineCrossObs(self, start, end, map):
#prerequisites: 'start' and 'end' and 'map' both in the current robot frame
#start: (x, y) coordinates in map frame
resolution = map.info.resolution
offset_x = map.info.origin.position.x
offset_y = map.info.origin.position.y
map_width = map.info.width
map_height = map.info.height
map_array =
|
np.asarray(map.data, dtype=np.int8)
|
numpy.asarray
|
# coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import numpy as np
import time
import random
import torch
import torch.nn.functional as F
try:
import cPickle as pickle
except ImportError:
import pickle
from modified_tokenizers import get_tokenizers
from data import split_imdb_files, split_snli_files
def genetic_attack(opt, model, dataset, genetic_test_num):
from ..attack_surface import WordSubstitutionAttackSurface, LMConstrainedAttackSurface
lm_file_path = opt.imdb_lm_file_path if opt.dataset=='imdb' else opt.snli_lm_file_path
if opt.lm_constraint_on_genetic_attack:
attack_surface = LMConstrainedAttackSurface.from_files(opt.substitution_dict_path, lm_file_path)
else:
attack_surface = WordSubstitutionAttackSurface.from_files(opt.substitution_dict_path, lm_file_path)
tokenizer, _ = get_tokenizers(opt.plm_type)
# process data
if dataset == 'imdb':
train_texts, train_labels, dev_texts, dev_labels, test_texts, test_labels = split_imdb_files(opt)
input_max_len = opt.imdb_input_max_len
# randomly select test examples
indexes = [i for i in range(len(test_labels))]
random.seed(opt.rand_seed)
random.shuffle(indexes)
test_texts = [test_texts[i] for i in indexes]
test_labels = [test_labels[i] for i in indexes]
indexes = []
for i, x in enumerate(test_texts):
words = x.split()
if attack_surface.check_in(words):
indexes.append(i)
test_texts = [test_texts[i] for i in indexes]
test_labels = [test_labels[i] for i in indexes]
test_num = min(len(test_labels), genetic_test_num)
test_texts = test_texts[:test_num]
test_labels = test_labels[:test_num]
wrapped_model = WrappedModelCls(tokenizer, input_max_len)
elif dataset == 'snli':
train_perms, train_hypos, train_labels, dev_perms, dev_hypos, dev_labels, test_perms, test_hypos, test_labels = split_snli_files(opt)
input_max_len = opt.snli_input_max_len
# randomly select test examples
indexes = [i for i in range(len(test_labels))]
random.seed(opt.rand_seed)
random.shuffle(indexes)
test_perms = [test_perms[i] for i in indexes]
test_hypos = [test_hypos[i] for i in indexes]
test_labels = [test_labels[i] for i in indexes]
indexes = []
for i, x_h in enumerate(test_hypos):
words = x_h.split()
if attack_surface.check_in(words):
indexes.append(i)
test_perms = [test_perms[i] for i in indexes]
test_hypos = [test_hypos[i] for i in indexes]
test_labels = [test_labels[i] for i in indexes]
test_num = min(len(test_labels), genetic_test_num)
test_perms = test_perms[:test_num]
test_hypos = test_hypos[:test_num]
test_labels = test_labels[:test_num]
wrapped_model = WrappedModelNli(tokenizer, input_max_len, opt.plm_type)
else:
raise NotImplementedError
model.plm.eval()
model.cls_to_logit.eval()
# genetic attack
genetic_adversary = GeneticAdversary(opt, attack_surface, num_iters=opt.genetic_iters, pop_size=opt.genetic_pop_size)
# run genetic attack by multiprocessing
from multiprocessing import Process, Pipe
conn_main = []
conn_p = []
for i in range(test_num):
c1, c2 = Pipe()
conn_main.append(c1)
conn_p.append(c2)
process_list = []
for i in range(test_num):
if dataset == 'imdb':
p = Process(target=genetic_adversary.attack_binary_classification, args=(conn_p[i], wrapped_model, test_texts[i], test_labels[i]))
elif dataset == 'snli':
p = Process(target=genetic_adversary.attack_nli, args=(conn_p[i], wrapped_model, test_perms[i], test_hypos[i], test_labels[i]))
else:
raise NotImplementedError
p.start()
process_list.append(p)
accuracy = process_queries_for_genetic_attack(model, test_num, input_max_len, process_list, conn_main)
#print("acc under genetic attack: ", accuracy)
return accuracy
def process_queries_for_genetic_attack(model, test_num, input_max_len, process_list, conn_main, batch_size = 32):
tested = 0
correct = 0
process_done=[False for i in range(test_num)]
    t_start = time.perf_counter()
device = model.device
text_x = torch.zeros(batch_size, input_max_len, dtype=torch.long).to(device)
attention_mask = torch.zeros(batch_size, input_max_len, dtype=torch.long).to(device)
    process_id = 0
bs_count=0
res_dict = {}
# polling
while(1):
# stop when all test examples are processed
if tested == test_num:
break
# collect queries
if process_done[process_id]==False:
cm=conn_main[process_id]
if cm.poll():
msg = cm.recv()
# msg == 0 or 1 means genetic attack for this example is finished
if msg == 0 or msg == 1:
tested += 1
correct += msg
cm.close()
process_done[process_id]=True
process_list[process_id].join()
                    print('acc under genetic {}/{}, time cost: {}'.format(correct, tested, time.perf_counter() - t_start))
else:
new_text_x, new_attention_mask = msg
text_x[bs_count] = new_text_x.to(device)
attention_mask[bs_count] = new_attention_mask.to(device)
res_dict[process_id]=bs_count
bs_count +=1
# process queries by batches
if bs_count==batch_size or bs_count>=(test_num-tested):
with torch.no_grad():
logit = model(text_x, attention_mask).detach().cpu()
for key in res_dict.keys():
cm=conn_main[key]
cm.send(logit[res_dict[key]])
bs_count = 0
res_dict = {}
# increase process_id
process_id=(process_id+1)%test_num
return correct/tested
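# The wrapped model classes (WrappedModelCls / WrappedModelNli) are not shown in this
# snippet. For the polling loop above to work, query() presumably tokenizes the text,
# sends (input_ids, attention_mask) over the pipe, and blocks until the main process
# answers with the logits. A minimal sketch of that assumed protocol, assuming a
# Hugging Face-style tokenizer:
class _WrappedModelSketch(object):
    def __init__(self, tokenizer, input_max_len):
        self.tokenizer = tokenizer
        self.input_max_len = input_max_len
    def query(self, x, conn_p):
        enc = self.tokenizer(x, max_length=self.input_max_len, padding='max_length',
                             truncation=True, return_tensors='pt')
        # child -> main: one query; main replies with the corresponding logit row
        conn_p.send((enc['input_ids'][0], enc['attention_mask'][0]))
        return conn_p.recv()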
class GeneticAdversary(object):
def __init__(self, opt, attack_surface, num_iters=20, pop_size=60):
super(GeneticAdversary, self).__init__()
self.attack_surface = attack_surface
self.num_iters = num_iters
self.pop_size = pop_size
self.opt = opt
def perturb_binary_classification(self, words, choices, model, y, conn_p):
if all(len(c) == 1 for c in choices): return words
good_idxs = [i for i, c in enumerate(choices) if len(c) > 1]
idx = random.sample(good_idxs, 1)[0]
x_list = [' '.join(words[:idx] + [w_new] + words[idx+1:])
for w_new in choices[idx]]
logits = [model.query(x, conn_p) for x in x_list]
preds = [F.softmax(logit, dim=-1).cpu().numpy() for logit in logits]
preds_of_y = [p[y] for p in preds]
best_idx = min(enumerate(preds_of_y), key=lambda x: x[1])[0]
cur_words = list(words)
cur_words[idx] = choices[idx][best_idx]
return cur_words, preds_of_y[best_idx]
def attack_binary_classification(self, conn_p, model, x, y):
random.seed(self.opt.rand_seed)
words = x.split()
y =
|
np.argmax(y)
|
numpy.argmax
|
# coding: utf-8
# In[1]:
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import horovod.keras as hvd
import horovod.tensorflow as hvd_tf
hvd.init()
assert hvd_tf.mpi_threads_supported()
# Make sure MPI is not re-initialized.
import mpi4py.rc
mpi4py.rc.initialize = False
from mpi4py import MPI
assert hvd.size() == MPI.COMM_WORLD.Get_size()
from model_definitions import *
import tensorflow as tf
import keras
from keras import backend as K
from keras.utils import plot_model
import importlib
from patch_display import make_mosaic_result, save_img, format_patches_for_display, format_patches_for_display_colormap
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau
import cnes_data
import cnes_data.common as common
import ml_metrics
#os.environ['NCCL_P2P_DISABLE'] = '1'
#os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
use_background_layer = False # use the background layer in the model
PATCH_SIZE = 64
N_TIMESTEPS = 11 #33
N_CHANNELS = 30 #10
NB_PATCH_PER_EPOCH = 10000
nb_valid_patch = 500
batch_size_weight_estimation = 32
nb_iterations_weight_estimation = 200 # could be done accurately enough on 100 iterations.
b_lstm = 0 # 1 to use LSTM model or 0 to use network duplication over timesteps
use_contours = False
if use_contours:
use_background_layer = True
PATCH_SIZE = 64
use_rf_annotations = False
in_notebook = False
try:
get_ipython
in_notebook = True
except NameError:
print("Running in terminal...")
# Parser creation
parser = argparse.ArgumentParser(description='Sentinel2 hvd training')
# Args
parser.add_argument('-rep', '--rep', metavar='[REP_IMAGE]', help='', required=True)
parser.add_argument('-tile', '--tile', metavar='[ID_TILE]', help='Tile ID', required=True)
parser.add_argument('-out', '--out', metavar='[OUT_DIR]', help='Output directory that will contain the learned model', required=True)
parser.add_argument('-recover', '--recover', metavar='[RECOVER]', help='true/false to allow to start training from a saved model', required=True)
parser.add_argument('-raster', '--raster', metavar='[RASTER_DATA]', help='Directory containing rasterized labeled data', required=True)
parser.add_argument('-epochs', '--epochs', metavar='[EPOCH_NUMBER]', help='Number of epochs', default=75, required=False)
# Command line parsing
args = vars(parser.parse_args())
DB_PATH = os.path.abspath(args['rep'])
t_tile_name = args['tile'].split(' ')
resume_training = args['recover'] == "true"
name_experiment = os.path.normpath(args['out'])
s_raster_dir = args['raster']
NUM_EPOCHS = int(args['epochs'])
use_hyperas_optim = 0
if not os.path.exists(DB_PATH):
print("Dataset file {} does not exist!".format(DB_PATH))
if not os.path.exists(name_experiment):
try:
os.makedirs(name_experiment)
except FileExistsError:
pass
snapshot_file_name = name_experiment+'/'+name_experiment+'_best_weights.h5'
if resume_training:
if not os.path.exists(snapshot_file_name):
raise Exception("ERROR: Trying to resume from non-existing snapshot {}".format(snapshot_file_name))
print("Training will resume from snapshot {}".format(snapshot_file_name))
def data_generator(batch_size, gen, epoch_len, temporal_seq = 0, use_background_layer=True, u_nb_tile=1, b_lstm=0):
""" Generates training sequences on demand
"""
cnes_gen_util = cnes_data.CnesGen10mUtilHvd(gen, PATCH_SIZE)
while True:
for idx in range(epoch_len):
# no need to exchange patchs between workers if just one tile as input
if u_nb_tile == 1:
X, Y = gen.generate_train_patch_fast(batch_size)
else:
X, Y = cnes_gen_util.generate_train_patch_using_sharing(batch_size)
# X : shape (4, 330, 64, 64) (if batch_size=4)
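            # Y: (batch, n_classes, H, W) -> (batch, H*W, n_classes), presumably to match
            # the network's per-pixel class predictions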
Y = np.reshape(Y,(Y.shape[0], Y.shape[1],Y.shape[2]*Y.shape[3]))
Y = np.transpose(Y,(0,2,1))
if temporal_seq > 0 and not b_lstm:
X = np.split(X,temporal_seq,axis=1)
if temporal_seq > 0 and b_lstm:
X = np.transpose(np.array(np.split(X, temporal_seq, axis=1)), (1,0,2,3,4)) # batch_size, nb_dates, nb_channels, 64,64) # (4, 11, 30, 64, 64)
#print(X.shape)
yield (X, np.array(Y))
def data_generator_without_mpi4py(batch_size, gen, epoch_len, temporal_seq = 0, use_background_layer=True, b_lstm=0):
""" Generates training sequences on demand
"""
while True:
for idx in range(epoch_len):
X, Y = gen.generate_train_patch_fast(batch_size)
Y = np.reshape(Y,(Y.shape[0], Y.shape[1],Y.shape[2]*Y.shape[3]))
Y = np.transpose(Y,(0,2,1))
if temporal_seq > 0 and not b_lstm:
X = np.split(X,temporal_seq,axis=1)
if temporal_seq > 0 and b_lstm:
X = np.transpose(np.array(np.split(X, temporal_seq, axis=1)), (1,0,2,3,4)) # batch_size, nb_dates, nb_channels, 64,64) # (4, 11, 30, 64, 64)
yield (X, np.array(Y))
def compute_class_stats_train(gen, batch_size, nb_iterations):
    # patch VP: do not base the weights on the class frequency in the annotations or in the images, but on the frequency in the generated patches
    # patch generation balances the classes, so the class frequency in the patches is not the same as in the annotations
cnes_gen_util = cnes_data.CnesGen10mUtilHvd(gen, PATCH_SIZE)
class_stats = np.zeros(len(CLASS_ID_SET))
for i in range(nb_iterations):
if hvd.rank() == 0 and i % 20 == 0:
print("{} / {}".format(i, nb_iterations))
patch, gt_patch = cnes_gen_util.generate_train_patch_using_sharing(batch_size)
for ct in range(len(CLASS_ID_SET)):
positive_positions = np.where(gt_patch[:,ct,...] > 0)
class_stats[ct] += len(positive_positions[0])
return class_stats
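# How these statistics are turned into loss weights is not shown in this snippet; a common
# choice (an assumption, not necessarily what this pipeline does) is median-frequency balancing:
def class_weights_from_stats(class_stats):
    freqs = class_stats / np.sum(class_stats)  # per-class pixel frequency in the generated patches
    return np.median(freqs) / np.maximum(freqs, 1e-12)  # rarer classes get larger weights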
# Draw a patch of samples to visually evaluate the network state
class DrawEstimates(keras.callbacks.Callback):
def set_data(self, data, mask,name_experiment='.'):
self.samples = data
self.masks = mask
self.preds = np.zeros(mask.shape)
self.epoch = 0
self.name_experiment = name_experiment
def on_epoch_begin(self, batch, logs={}):
self.epoch += 1
print("")
def on_epoch_end(self, batch, logs={}):
self.preds = self.model.predict(self.samples, batch_size=32, verbose=2)
# draw !
self.draw_and_save()
def draw_and_save(self):
# save
samples = np.concatenate(self.samples, axis=1)
masks = self.masks[:self.preds.shape[0],:self.preds.shape[1],:self.preds.shape[2]]
plot_patch, gt_patches_viz, preds_patches_viz = format_patches_for_display_colormap(samples, masks, self.preds,
input_ch=[2,1,0], input_gain=1, colormap=color_map)
save_img(make_mosaic_result(plot_patch, gt_patches_viz, preds_patches_viz),
name_experiment + '/sample_results_' + str(self.hvd_rank) + "_" + str(self.epoch))
def draw(self):
samples = np.concatenate(self.samples, axis=1)
plot_patch, gt_patches_viz, preds_patches_viz = format_patches_for_display_colormap(samples, self.masks, self.preds,
input_ch=[2,1,0], input_gain=5, colormap=color_map)
def __init__(self, hvd_rank):
self.hvd_rank = hvd_rank
class PrintClassStats(keras.callbacks.Callback):
epoch = 0
def set_gen(self, gen):
self.cnes_gen = gen
def on_epoch_begin(self, batch, logs={}):
self.epoch += 1
def on_epoch_end(self, batch, logs={}):
# get estimates
stats = self.cnes_gen.get_running_stats()
print("stats at rank {} : {}".format(hvd.rank(), stats))
stats_mat = np.zeros((len(CLASS_ID_SET)+1, 2), np.float32)
stats_mat[0,1] = stats[0]
idx = 1
for cid in CLASS_ID_SET:
stats_mat[idx,0] = cid
if cid in stats:
stats_mat[idx,1] = stats[cid]
idx+=1
print("Gathering stats from all MPI instances, rank {}".format(hvd.rank()))
all_stats = hvd.allgather(stats_mat) #comm.gather(stats, root=0)
total_px = 0
if hvd.rank() == 0:
print("Epoch {} class freqs:".format(self.epoch))
class_stats = {class_id:0 for class_id in CLASS_ID_SET}
for class_id in CLASS_ID_SET:
#print("Data for class {}: {}".format(class_id, all_stats[all_stats[:,0] == class_id, :]))
px_class = np.sum(all_stats[all_stats[:,0] == class_id, 1])
class_stats[class_id] += px_class
total_px += px_class
non_annot_px =
|
np.sum(all_stats[all_stats[:,0] == 0, 1])
|
numpy.sum
|
#!/usr/bin/env python
"""
Class to represent a 2D spline.
Hazen 12/13
"""
import math
import numpy
import numpy.linalg
import storm_analysis.spliner.spline1D as spline1D
class Spline2D(spline1D.Spline):
def __init__(self, d, coeff = False, verbose = False):
        if (d.shape[0] != d.shape[1]):
            raise ValueError("input matrix must be square!")
size = d.shape[0]
self.max_i = size - 1
#
# The coefficients have already been calculated, so just use them.
#
if (type(coeff) == type(
|
numpy.array([])
|
numpy.array
|
# # -*- coding: UTF-8 -*-
# trial on the : Satomi machine
# Created by Ush on 2018/5/18
# Project name : class10_ODE
# Please contact CHIH, HSIN-CHING/D0631008 before referring to this source code.
# NOTE: no liability is accepted for any loss or damage arising from the use of this source code; use it at your own risk.
from __future__ import division
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
import scipy.linalg as la
import numpy as np
import cmath
from rdp import rdp
# http://pycallgraph.readthedocs.io/en/master/examples/basic.html#source-code
from math import sqrt # call sqrt from cmath for complex number
from numpy import matrix
from scipy.integrate import odeint
from pylab import *
class NCM11:
def __init__(self, A, choice):
"do something here"
@staticmethod
# https://zh.wikipedia.org/wiki/道格拉斯-普克算法
# http://52north.github.io/wps-profileregistry/generic/dp-line-generalization.html
# https://github.com/nramm/maskiton/blob/master/server/plugins/appion/pyami/douglaspeucker.py
def RDP_middle(Px, Py, EPS):
result_x = []
result_y = []
recResults1_X = []
recResults1_Y = []
recResults2_X = []
recResults2_Y = []
dmax,index = 0,0
length = len(Py)
for i in range(1, length - 2):
d = NCM11.d(Px[0], Py[0], Px[i], Py[i], Px[length - 1], Py[length - 1])
if (d > dmax):
index = i
dmax = d
if (dmax >= EPS):
# Recursive call
recResults1_X, recResults1_Y = NCM11.RDP_middle(Px[: index + 1], Py[:index + 1], EPS)
recResults2_X, recResults2_Y = NCM11.RDP_middle(Px[index:], Py[index:], EPS)
# Build the result list
result_x = np.vstack((recResults1_X[:-1], recResults2_X))
result_y = np.vstack((recResults1_Y[:-1], recResults2_Y))
else:
result_x = np.vstack((Px[0], Px[-1]))
result_y = np.vstack((Py[0], Py[-1]))
return result_x, result_y
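    # NCM11.d is not included in this excerpt; the recursion above only needs a
    # point-to-line distance. A hypothetical equivalent (a sketch, not the author's
    # implementation) is:
    @staticmethod
    def perpendicular_distance(x1, y1, x0, y0, x2, y2):
        num = abs((y2 - y1) * x0 - (x2 - x1) * y0 + x2 * y1 - y2 * x1)
        den = sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
        return num / den if den > 0 else sqrt((x0 - x1) ** 2 + (y0 - y1) ** 2)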
@staticmethod
# FMI : find middle index
def FMI(Py):
middle = float(len(Py)) / 2
if middle % 2 != 0:
middle = int(middle - 0.5)
return middle
@staticmethod
# input : P Polyline { P1, P2 ....Pn }, epsilon : offset
# output : list simplification algorithms
def rdp_Ramer_Douglas_Pecker(Px, Py, EPS):
# https://pypi.org/project/rdp/
# input : P Polyline { P1, P2 ....Pn }, epsilon : offset
# output : list simplification algorithms
result = rdp(np.column_stack((Px, Py)), epsilon=EPS)
return [row[0] for row in result], [row[1] for row in result]
@staticmethod
def Standard_Deviation_Method(Px, Py, EPS):
result_x = []
result_y = []
MAF = []
x_start = Px[0]
y_start = Py[0]
max_samples = 3
EPS = EPS * 0.25
result_x = np.append(result_x, x_start)
result_y = np.append(result_y, y_start)
p_size = Py.shape[0]
for index in range(1, p_size - 1):
Pack1x = np.array([Px[index - 1], Px[index], Px[index + 1]])
SD1x = np.std(Pack1x)
Pack1y = np.array([Py[index - 1], Py[index], Py[index + 1]])
SD1y = np.std(Pack1y)
MAF = np.append(MAF, sqrt(SD1x ** 2 + SD1y ** 2))
Average = np.mean(MAF)
if len(MAF) == max_samples:
MAF = np.delete(MAF, 0)
print(index, sqrt(SD1x ** 2 + SD1y ** 2), Average)
if (sqrt(SD1x ** 2 + SD1y ** 2) - Average) > (EPS):
result_x = np.append(result_x, Px[index])
result_y = np.append(result_y, Py[index])
else:
pass
result_x = np.append(result_x, Px[p_size - 1])
result_y = np.append(result_y, Py[p_size - 1])
return result_x, result_y
@staticmethod
def Simplification_Perpendicular_Distance(Px, Py, epsilon):
# input : P Polyline { P1, P2 ....Pn }, epsilon : offset
# output : list simplification algorithms
result_x = []
result_y = []
x_start = Px[0]
y_start = Py[0]
result_x = np.append(result_x, x_start)
result_y = np.append(result_y, y_start)
p_size = Py.shape[0]
for index in range(1, p_size - 1):
x_target = Px[index]
y_target = Py[index]
x_end = Px[index + 1]
y_end = Py[index + 1]
d_result = NCM11.d(x_start, y_start, x_target, y_target, x_end, y_end)
if (d_result > epsilon): # keep the original data and save into output vector
result_x =
|
np.append(result_x, Px[index])
|
numpy.append
|
# Analysis
import xarray as xr
import numpy as np
from scipy.signal import butter, sosfiltfilt
from glob import glob
from datetime import timedelta
procdir = '/glade/work/mckinnon/obsLE/proc'
def LFCA(da, N=30, L=1/10, fs=12, order=3, landmask=None, monthly=True):
"""Perform LFCA (as per Wills et al, 2018, GRL) on a dataarray.
Parameters
----------
da : xarray.DataArray
Data to perform LFCA on (time x lat x lon)
N : int
Number of EOFs to retain
L : float
Cutoff frequency for lowpass filter (e.g. 1/10 for per decade)
fs : float
Sampling frequency (1/12 for monthly)
order : int
Order of the Butterworth filter
landmask : xarray.DataArray or None
If None, do not perform any masking
If DataArray, indicates land locations
monthly : bool
If True, perform lowpass filtering for each month separately
Returns
-------
LFPs : numpy.ndarray
2D array of N spatial patterns (nlat*nlon x N)
LFCs : numpy.ndarray
2D array of N time series (ntime x N)
"""
from eofs.xarray import Eof
# remove empirical seasonal cycle
da = da.groupby('time.month') - da.groupby('time.month').mean('time')
ntime, nlat, nlon = da.shape
if landmask is not None:
# expand land mask to ntime
        lnd_mask = np.repeat(landmask.values[np.newaxis, :, :], ntime, axis=0)
da = da.where(lnd_mask)
coslat = np.cos(np.deg2rad(da['lat'].values)).clip(0., 1.)
wgts = np.sqrt(coslat)[..., np.newaxis]
solver = Eof(da, weights=wgts)
eofs = solver.eofs(eofscaling=0) # normalized st L2 norm = 1
eigenvalues = solver.eigenvalues()
# Low pass filter data
if monthly:
fs = 1
nyq = 0.5 * fs # Nyquist frequency
low = L / nyq
sos = butter(order, low, btype='low', output='sos') # Coefficients for Butterworth filter
if monthly:
X_tilde = np.empty((da.shape))
for kk in range(12):
X_tilde[kk::12, :, :] = sosfiltfilt(sos, da.values[kk::12, :, :], padtype='even', axis=0)
else:
X_tilde = sosfiltfilt(sos, da.values, axis=0)
a_k = eofs.values[:N, :, :].reshape((N, nlat*nlon))
sigma_k = np.sqrt(eigenvalues.values[:N])
if landmask is not None:
        lnd_mask_vec = landmask.values.flatten()
else:
lnd_mask_vec = np.ones((nlat*nlon,), dtype=bool)
PC_tilde = np.empty((ntime, N))
for kk in range(N):
PC_tilde[:, kk] = 1/sigma_k[kk]*np.dot(X_tilde.reshape((ntime, nlat*nlon))[:, lnd_mask_vec],
a_k[kk, lnd_mask_vec])
R = np.dot(PC_tilde.T, PC_tilde)/(N - 1)
R_eigvals, e_k = np.linalg.eig(R) # eigenvalues already sorted
# eigenvalues are in columns
u_k = np.dot((a_k.T)/sigma_k, e_k)
LFPs = np.dot(sigma_k*(a_k.T), e_k)
# Time series:
LFCs = np.dot(da.values.reshape((ntime, nlat*nlon))[:, lnd_mask_vec], u_k[lnd_mask_vec, :])
return LFPs, LFCs
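# Minimal usage sketch of LFCA on synthetic data (illustration only; the analysis below
# uses the CESM precipitation fields instead).
def _lfca_smoke_test():
    import pandas as pd
    times = pd.date_range('1950-01-01', periods=240, freq='MS')
    lat = np.linspace(-89.0, 89.0, 18)
    lon = np.linspace(0.0, 357.5, 36)
    da = xr.DataArray(np.random.randn(240, 18, 36),
                      coords=[times, lat, lon], dims=['time', 'lat', 'lon'])
    LFPs, LFCs = LFCA(da, N=10, L=1/10, fs=12, monthly=False)
    return LFPs.shape, LFCs.shape  # ((648, 10), (240, 10))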
# Get land mask
land_dir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/lnd/proc/tseries/monthly/SOILWATER_10CM'
land_file = '%s/b.e11.B20TRC5CNBDRD.f09_g16.002.clm2.h0.SOILWATER_10CM.192001-200512.nc' % land_dir
ds_lnd = xr.open_dataset(land_file)['SOILWATER_10CM']
is_land = ~np.isnan(ds_lnd[0, ...])
nlat, nlon = is_land.shape
n_lfc_save = 5
all_LFP = []
all_LFC = []
valid_years = np.arange(1921, 2006)
nyrs = len(valid_years)
members = np.hstack((np.arange(1, 36), np.arange(101, 106)))
cesmdir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'
for m in members:
print(m)
files = glob('%s/PREC*/b.e11.B20TRC5CNBDRD.f09_g16.%03i.cam.h0.PREC*.??????-200512.nc' % (cesmdir, m))
ds_cesm = xr.open_mfdataset(files, concat_dim='precip_type', combine='by_coords')
da_cesm = ds_cesm['PRECC'] + ds_cesm['PRECL'] + ds_cesm['PRECSL'] + ds_cesm['PRECSC']
da_cesm = da_cesm.assign_coords(time=da_cesm.time-timedelta(days=1))
da_cesm = da_cesm.sel({'time': np.isin(da_cesm['time.year'], valid_years)})
da_cesm = da_cesm.assign_coords({'lat': np.round(da_cesm.lat, 3)})
# change to mm /day
da_cesm *= 1000*24*60*60
# need to load to speed up compute
da_cesm = da_cesm.load()
LFP_save = np.empty((nlat*nlon, n_lfc_save, 12))
LFC_save = np.empty((nyrs, n_lfc_save, 12))
for month in range(1, 13):
tmp_ds = da_cesm.sel(time=da_cesm['time.month'] == month)
ntime, nlat, nlon = tmp_ds.shape
LFPs, LFCs = LFCA(tmp_ds, fs=1, monthly=False, landmask=None)
if np.mean(LFCs[-5:, 0]) < np.mean(LFCs[:5, 0]):
multiplier = -1
else:
multiplier = 1
LFP_save[:, :, month-1] = multiplier*LFPs[:, :n_lfc_save]
LFC_save[:, :, month-1] = multiplier*LFCs[:, :n_lfc_save]
del LFCs, LFPs
LFP_da = xr.DataArray(LFP_save.reshape((nlat, nlon, n_lfc_save, 12)),
dims=['lat', 'lon', 'LFP', 'month'],
coords=[da_cesm.lat, da_cesm.lon, np.arange(1, 6), np.arange(1, 13)])
LFC_da = xr.DataArray(LFC_save,
dims=['year', 'LFP', 'month'],
coords=[
|
np.unique(da_cesm['time.year'])
|
numpy.unique
|
# -*- coding: utf-8 -*-
import logging
import warnings
from collections import namedtuple, defaultdict
try:
from collections import Sequence
except ImportError:
from collections.abc import Sequence
import numpy as np
from glypy.structure.glycan_composition import FrozenMonosaccharideResidue, HashableGlycanComposition
from glycopeptidepy.structure.fragmentation_strategy import StubGlycopeptideStrategy, _AccumulatorBag
from glycan_profiling.serialize import GlycanCombination, GlycanTypes
from glycan_profiling.database.disk_backed_database import PPMQueryInterval
from glycan_profiling.chromatogram_tree import Unmodified
from glycan_profiling.structure.denovo import MassWrapper, PathSet, PathFinder
logger = logging.getLogger("glycresoft.core_search")
hexnac = FrozenMonosaccharideResidue.from_iupac_lite("HexNAc")
hexose = FrozenMonosaccharideResidue.from_iupac_lite("Hex")
xylose = FrozenMonosaccharideResidue.from_iupac_lite("Xyl")
fucose = FrozenMonosaccharideResidue.from_iupac_lite("Fuc")
dhex = FrozenMonosaccharideResidue.from_iupac_lite("dHex")
neuac = FrozenMonosaccharideResidue.from_iupac_lite("NeuAc")
neugc = FrozenMonosaccharideResidue.from_iupac_lite("NeuGc")
def approximate_internal_size_of_glycan(glycan_composition):
terminal_groups = glycan_composition._getitem_fast(neuac) +\
glycan_composition._getitem_fast(neugc)
side_groups = glycan_composition._getitem_fast(fucose) + glycan_composition._getitem_fast(dhex)
n = sum(glycan_composition.values())
n -= terminal_groups
if side_groups > 1:
n -= 1
return n
def glycan_side_group_count(glycan_composition):
side_groups = glycan_composition._getitem_fast(
fucose) + glycan_composition._getitem_fast(dhex)
return side_groups
def isclose(a, b, rtol=1e-05, atol=1e-08):
return abs(a - b) <= atol + rtol * abs(b)
default_components = (hexnac, hexose, xylose, fucose,)
class CoreMotifFinder(PathFinder):
def __init__(self, components=None, product_error_tolerance=1e-5, minimum_peptide_mass=350.): # pylint: disable=super-init-not-called
if components is None:
components = default_components
self.components = list(map(MassWrapper, components))
self.product_error_tolerance = product_error_tolerance
self.minimum_peptide_mass = minimum_peptide_mass
def find_n_linked_core(self, groups, min_size=1):
sequence = [hexnac, hexnac, hexose, hexose, hexose]
expected_n = len(sequence)
terminals = dict()
for label, paths in groups.items():
label_i = 0
expected_i = 0
path_n = len(label)
while label_i < path_n and expected_i < expected_n:
edge = label[label_i]
label_i += 1
expected = sequence[expected_i]
if expected == edge:
expected_i += 1
elif edge == fucose:
continue
else:
break
if expected_i >= min_size:
for path in paths:
last_path = terminals.get(path[0].start)
if last_path is None:
terminals[path[0].start] = path
else:
terminals[path[0].start] = max((path, last_path), key=lambda x: x.total_signal)
return PathSet(terminals.values())
def find_o_linked_core(self, groups, min_size=1):
sequence = [(hexnac, hexose), (hexnac, hexose, fucose,), (hexnac, hexose, fucose,)]
expected_n = len(sequence)
terminals = dict()
for label, paths in groups.items():
label_i = 0
expected_i = 0
path_n = len(label)
while label_i < path_n and expected_i < expected_n:
edge = label[label_i]
label_i += 1
expected = sequence[expected_i]
if edge in expected:
expected_i += 1
else:
break
if expected_i >= min_size:
for path in paths:
last_path = terminals.get(path[0].start)
if last_path is None:
terminals[path[0].start] = path
else:
terminals[path[0].start] = max((path, last_path), key=lambda x: x.total_signal)
return PathSet(terminals.values())
def find_gag_linker_core(self, groups, min_size=1):
sequence = [xylose, hexose, hexose, ]
expected_n = len(sequence)
terminals = dict()
for label, paths in groups.items():
label_i = 0
expected_i = 0
path_n = len(label)
while label_i < path_n and expected_i < expected_n:
edge = label[label_i]
label_i += 1
expected = sequence[expected_i]
if expected == edge:
expected_i += 1
elif edge == fucose:
continue
else:
break
if expected_i >= min_size:
for path in paths:
last_path = terminals.get(path[0].start)
if last_path is None:
terminals[path[0].start] = path
else:
terminals[path[0].start] = max((path, last_path), key=lambda x: x.total_signal)
return PathSet(terminals.values())
def estimate_peptide_mass(self, scan, topn=100, mass_shift=Unmodified, query_mass=None):
graph = self._find_edges(scan, mass_shift=mass_shift)
paths = self._init_paths(graph)
groups = self._aggregate_paths(paths)
n_linked_paths = self.find_n_linked_core(groups)
o_linked_paths = self.find_o_linked_core(groups)
gag_linker_paths = self.find_gag_linker_core(groups)
peptide_masses = []
has_tandem_shift = abs(mass_shift.tandem_mass) > 0
# TODO: split the different motif masses up according to core type efficiently
# but for now just lump them all together
for path in n_linked_paths:
if path.start_mass < self.minimum_peptide_mass:
continue
peptide_masses.append(path.start_mass)
if has_tandem_shift:
peptide_masses.append(path.start_mass - mass_shift.tandem_mass)
for path in o_linked_paths:
if path.start_mass < self.minimum_peptide_mass:
continue
peptide_masses.append(path.start_mass)
if has_tandem_shift:
peptide_masses.append(path.start_mass - mass_shift.tandem_mass)
for path in gag_linker_paths:
if path.start_mass < self.minimum_peptide_mass:
continue
peptide_masses.append(path.start_mass)
if has_tandem_shift:
peptide_masses.append(path.start_mass - mass_shift.tandem_mass)
peptide_masses.sort()
return peptide_masses
def build_peptide_filter(self, scan, error_tolerance=1e-5, mass_shift=Unmodified, query_mass=None):
peptide_masses = self.estimate_peptide_mass(
scan, mass_shift=mass_shift, query_mass=query_mass)
out = []
if len(peptide_masses) == 0:
return IntervalFilter([])
last = PPMQueryInterval(peptide_masses[0], error_tolerance)
for point in peptide_masses[1:]:
interval = PPMQueryInterval(point, error_tolerance)
if interval.overlaps(last):
last.extend(interval)
else:
out.append(last)
last = interval
out.append(last)
return IntervalFilter(out)
class CoarseStubGlycopeptideFragment(object):
__slots__ = ['key', 'is_core', 'mass']
def __init__(self, key, mass, is_core):
self.key = key
self.mass = mass
self.is_core = is_core
def __eq__(self, other):
try:
return self.key == other.key and self.is_core == other.is_core
except AttributeError:
return self.key == other
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(int(self.mass))
def __lt__(self, other):
return self.mass < other.mass
def __gt__(self, other):
return self.mass > other.mass
def __reduce__(self):
return self.__class__, (self.key, self.mass, self.is_core)
def __repr__(self):
return "%s(%s, %f, %r)" % (
self.__class__.__name__,
self.key, self.mass, self.is_core
)
class GlycanCombinationRecordBase(object):
__slots__ = ['id', 'dehydrated_mass', 'composition', 'count', 'glycan_types',
'size', "_fragment_cache", "internal_size_approximation", "_hash",
'fragment_set_properties']
def is_n_glycan(self):
return GlycanTypes.n_glycan in self.glycan_types
def is_o_glycan(self):
return GlycanTypes.o_glycan in self.glycan_types
def is_gag_linker(self):
return GlycanTypes.gag_linker in self.glycan_types
def get_n_glycan_fragments(self):
if GlycanTypes.n_glycan not in self._fragment_cache:
strategy = StubGlycopeptideStrategy(None, extended=True)
shifts = strategy.n_glycan_composition_fragments(
self.composition, 1, 0)
fragment_structs = []
for shift in shifts:
if shift["key"]['HexNAc'] <= 2 and shift["key"]["Hex"] <= 3:
is_core = True
else:
is_core = False
fragment_structs.append(
CoarseStubGlycopeptideFragment(
shift['key'], shift['mass'], is_core))
self._fragment_cache[GlycanTypes.n_glycan] = sorted(
set(fragment_structs))
return self._fragment_cache[GlycanTypes.n_glycan]
else:
return self._fragment_cache[GlycanTypes.n_glycan]
def get_o_glycan_fragments(self):
if GlycanTypes.o_glycan not in self._fragment_cache:
strategy = StubGlycopeptideStrategy(None, extended=True)
shifts = strategy.o_glycan_composition_fragments(
self.composition, 1, 0)
fragment_structs = []
for shift in shifts:
shift['key'] = _AccumulatorBag(shift['key'])
fragment_structs.append(
CoarseStubGlycopeptideFragment(
shift['key'], shift['mass'], True))
self._fragment_cache[GlycanTypes.o_glycan] = sorted(
set(fragment_structs))
return self._fragment_cache[GlycanTypes.o_glycan]
else:
return self._fragment_cache[GlycanTypes.o_glycan]
def get_gag_linker_glycan_fragments(self):
if GlycanTypes.gag_linker not in self._fragment_cache:
strategy = StubGlycopeptideStrategy(None, extended=True)
shifts = strategy.gag_linker_composition_fragments(
self.composition, 1, 0)
fragment_structs = []
for shift in shifts:
shift['key'] = _AccumulatorBag(shift['key'])
fragment_structs.append(
CoarseStubGlycopeptideFragment(
shift['key'], shift['mass'], True))
self._fragment_cache[GlycanTypes.gag_linker] = sorted(
set(fragment_structs))
return self._fragment_cache[GlycanTypes.gag_linker]
else:
return self._fragment_cache[GlycanTypes.gag_linker]
def clear(self):
self._fragment_cache.clear()
try:
from glycan_profiling._c.tandem.core_search import GlycanCombinationRecordBase
except ImportError as err:
print(err)
pass
class GlycanCombinationRecord(GlycanCombinationRecordBase):
"""Represent a glycan combination compactly in memory
Attributes
----------
composition : :class:`~.HashableGlycanComposition`
The glycan combination's composition in monosaccharide units
count : int
The number of distinct glycans this combination contains
dehydrated_mass : float
The total mass shift applied to a peptide when this combination is attached
to it
glycan_types : list
The types of glycans combined to make this entity
"""
__slots__ = ()
@classmethod
def from_combination(cls, combination):
inst = cls(
id=combination.id,
dehydrated_mass=combination.dehydrated_mass(),
composition=combination.convert(),
count=combination.count,
glycan_types=tuple(set([
c.name for component_classes in combination.component_classes
for c in component_classes])),
)
return inst
@classmethod
def from_hypothesis(cls, session, hypothesis_id):
query = session.query(GlycanCombination).filter(
GlycanCombination.hypothesis_id == hypothesis_id).group_by(
GlycanCombination.composition, GlycanCombination.count).order_by(
GlycanCombination.dehydrated_mass()) # pylint: disable=no-value-for-parameter
candidates = query.all()
out = []
for candidate in candidates:
out.append(cls.from_combination(candidate))
return out
def _to_dict(self):
return {
"id": self.id,
"dehydrated_mass": self.dehydrated_mass,
"composition": str(self.composition),
"count": self.count,
"glycan_types": list(map(str, self.glycan_types)),
}
@classmethod
def _from_dict(cls, d):
d['composition'] = HashableGlycanComposition.parse(d['composition'])
d['glycan_types'] = [GlycanTypes[t] for t in d['glycan_types']]
return cls(**d)
def __init__(self, id, dehydrated_mass, composition, count, glycan_types):
self.id = id
self.dehydrated_mass = dehydrated_mass
self.composition = composition
self.size = sum(composition.values())
self.internal_size_approximation = self._approximate_total_size()
self.side_group_count = glycan_side_group_count(self.composition)
self.count = count
self.glycan_types = list(glycan_types)
self._fragment_cache = dict()
self._hash = hash(self.composition)
self.fragment_set_properties = dict()
def __eq__(self, other):
return (self.composition == other.composition) and (self.count == other.count) and (
self.glycan_types == other.glycan_types)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return self._hash
def _approximate_total_size(self):
return approximate_internal_size_of_glycan(self.composition)
def __reduce__(self):
return GlycanCombinationRecord, (self.id, self.dehydrated_mass, self.composition, self.count, self.glycan_types), self.__getstate__()
def __getstate__(self):
return {
"fragment_set_properties": self.fragment_set_properties
}
def __setstate__(self, state):
self.fragment_set_properties = state['fragment_set_properties']
def __repr__(self):
return "GlycanCombinationRecord(%s, %d)" % (self.composition, self.count)
class CoarseStubGlycopeptideMatch(object):
def __init__(self, key, mass, shift_mass, peaks_matched):
self.key = key
self.mass = mass
self.shift_mass = shift_mass
self.peaks_matched = peaks_matched
def __reduce__(self):
return self.__class__, (self.key, self.mass, self.shift_mass, self.peaks_matched)
def __repr__(self):
return "%s(%s, %f, %f, %r)" % (
self.__class__.__name__,
self.key, self.mass, self.shift_mass, self.peaks_matched
)
class CoarseGlycanMatch(object):
def __init__(self, matched_fragments, n_matched, n_theoretical, core_matched, core_theoretical):
self.fragment_matches = list(matched_fragments)
self.n_matched = n_matched
self.n_theoretical = n_theoretical
self.core_matched = core_matched
self.core_theoretical = core_theoretical
def __iter__(self):
        yield self.fragment_matches
yield self.n_matched
yield self.n_theoretical
yield self.core_matched
yield self.core_theoretical
def estimate_peptide_mass(self):
weighted_mass_acc = 0.0
weight_acc = 0.0
for fmatch in self.fragment_matches:
fmass = fmatch.shift_mass
for peak in fmatch.peaks_matched:
weighted_mass_acc += (peak.neutral_mass - fmass) * peak.intensity
weight_acc += peak.intensity
if weight_acc == 0:
return -1
return weighted_mass_acc / weight_acc
def __repr__(self):
template = (
"{self.__class__.__name__}({self.n_matched}, {self.n_theoretical}, "
"{self.core_matched}, {self.core_theoretical})")
return template.format(self=self)
class GlycanCoarseScorerBase(object):
def __init__(self, product_error_tolerance=1e-5, fragment_weight=0.56, core_weight=0.42):
self.product_error_tolerance = product_error_tolerance
self.fragment_weight = fragment_weight
self.core_weight = core_weight
def _match_fragments(self, scan, peptide_mass, shifts, mass_shift_tandem_mass=0.0):
fragment_matches = []
core_matched = 0.0
core_theoretical = 0.0
has_tandem_shift = abs(mass_shift_tandem_mass) > 0
for shift in shifts:
if shift.is_core:
is_core = True
core_theoretical += 1
else:
is_core = False
target_mass = shift.mass + peptide_mass
hits = scan.deconvoluted_peak_set.all_peaks_for(target_mass, self.product_error_tolerance)
if hits:
if is_core:
core_matched += 1
                fragment_matches.append(
                    CoarseStubGlycopeptideMatch(shift.key, target_mass, shift.mass, hits))
if has_tandem_shift:
shifted_mass = target_mass + mass_shift_tandem_mass
hits = scan.deconvoluted_peak_set.all_peaks_for(
shifted_mass, self.product_error_tolerance)
if hits:
if is_core:
core_matched += 1
fragment_matches.append(
CoarseStubGlycopeptideMatch(
shift.key, shifted_mass, shift.mass + mass_shift_tandem_mass, hits))
return CoarseGlycanMatch(
fragment_matches, float(len(fragment_matches)), float(len(shifts)), core_matched, core_theoretical)
    # consider adding the internal size approximation to this method and its Cython implementation.
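    # The coverage term below is
    #     coverage = (n_matched / n_theoretical) ** fragment_weight
    #                * (core_matched / core_theoretical) ** core_weight
    # and the per-peak contribution (truncated at the end of this excerpt) scales
    # log(peak.intensity) by a penalty on how far the observed neutral mass deviates
    # from the expected fragment mass.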
def _calculate_score(self, glycan_match):
ratio_fragments = (glycan_match.n_matched / glycan_match.n_theoretical)
ratio_core = glycan_match.core_matched / glycan_match.core_theoretical
coverage = (ratio_fragments ** self.fragment_weight) * (ratio_core ** self.core_weight)
score = 0
for fmatch in glycan_match.fragment_matches:
mass = fmatch.mass
            for peak in fmatch.peaks_matched:
score += np.log(peak.intensity) * (1 - (
|
np.abs(peak.neutral_mass - mass)
|
numpy.abs
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# local
from pynteractome.IO import IO
from pynteractome.utils import extract_triangular, fmt_g, fmt_e, log
from pynteractome.warning import warning
#plt.rc('text', usetex=True) # Activate LaTeX rendering
DEFAULT_PLOT_DIR = '../plots/'
AVAILABLE_FORMATS = (
'eps',
'png',
)
class Plotter:
_PLOT_DIR = None
@staticmethod
def _set_plot_dir(integrator):
plot_dir = DEFAULT_PLOT_DIR
namecode = integrator.interactome.namecode
if namecode is None:
warning('Plotting with unknown interactome. ' + \
                    'Default plot dir is used: "' + DEFAULT_PLOT_DIR + '"')
else:
plot_dir += namecode + '/'
Plotter._PLOT_DIR = plot_dir
@staticmethod
def _get_plot_dir_or_die():
if Plotter._PLOT_DIR is None:
raise ValueError(
'[Plotter] Plots dir has not been set yet. ' + \
'You are probably using :class:`Plotter` the wrong way. ' + \
                'Only call existing methods from this class.')
return Plotter._PLOT_DIR
@staticmethod
def save_fig(fig, path):
plot_dir = Plotter._get_plot_dir_or_die()
path = plot_dir + path
ridx = path.rfind('.')
if ridx > 0:
ext = path[ridx+1:]
if ext not in AVAILABLE_FORMATS:
warning('Unknown format: "{}". Setting default format ("{}").' \
.format(ext, AVAILABLE_FORMATS[0]))
path = path[:ridx+1] + AVAILABLE_FORMATS[0]
log('Saving figure to path "{}"'.format(path))
plt.savefig(path, bbox_inches='tight')
plt.close(fig)
@staticmethod
def plot_clustering(integrator, gene_mapping):
Plotter._set_plot_dir(integrator)
cache = integrator.interactome.get_clustering_cache()
ps = list()
hpo2genes = integrator.get_hpo2genes(gene_mapping)
for term, genes in hpo2genes.items():
genes &= integrator.interactome.genes
N = len(genes)
if N < 3:
continue
c = integrator.interactome.get_genes_clustering(genes, entrez=True)
if np.isnan(c):
print('C is ill defined on HPO term {}'.format(term))
continue
k = np.isnan(cache[N]).sum()
cache[N][np.isnan(cache[N])] = 0
if np.isnan(cache[N]).any():
print('Still NaN')
p = (cache[N] >= c).sum() / len(cache[N])
ps.append(p)
print('{} ps are still available'.format(len(ps)))
print(np.unique(ps))
ps = np.asarray(ps)
logps = np.log10(ps)
logps[ps == 0] = -10
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([np.log10(.05)]*2, [0, 100], 'k-.', label=r'$p = .05$')
Plotter.plot_pdf_and_cdf(logps, 20, 'salmon', 'r', 'log10(p)', ax=ax, remove_ticks=False)
Plotter.save_fig(fig, 'clustering.eps')
@staticmethod
def loc_hpo(integrator, gene_mapping):
Plotter._set_plot_dir(integrator)
interactome = integrator.interactome
integrator.reset_propagation()
for depth in [0, integrator.get_hpo_depth()]:#range(integrator.get_hpo_depth()):
integrator.propagate_genes(depth)
hpo2genes = integrator.get_hpo2genes(gene_mapping)
zs = dict()
#for (term, genes) in integrator.get_hpo2genes(gene_mapping).items():
for term in integrator.iter_leaves(1):
if term not in hpo2genes:
continue
genes = hpo2genes[term]
if len(genes) > 1:
zs[term] = interactome.get_lcc_score(genes, 0, shapiro=True)
Plotter._loc_hpo(integrator, zs, depth, gene_mapping)
@staticmethod
def _loc_hpo(integrator, zs, prop_depth, gene_mapping):
Plotter._set_plot_dir(integrator)
integrator.propagate_genes(prop_depth)
interactome = integrator.interactome
hpo2genes = integrator.get_hpo2genes(gene_mapping)
xs, ys = list(), list()
are_normal = list()
empirical_ps = list()
for term, (z_score, empirical_p, is_normal) in zs.items():
if term not in hpo2genes:
continue
if z_score is not None:
z = float(z_score)
genes = hpo2genes[term] & interactome.genes
if len(genes) > 1:
lcc = interactome.get_genes_lcc_size(interactome.verts_id(genes))
rel_size = lcc / len(genes)
xs.append(rel_size)
ys.append(z)
are_normal.append(is_normal)
empirical_ps.append(empirical_p)
print('')
xs = np.asarray(xs)
ys = np.asarray(ys)
are_normal = np.asarray(are_normal)
empirical_ps = np.asarray(empirical_ps)
Plotter.significance_bar_plot(
ys, empirical_ps,
'Significance via $p_{emp}$ or $z$ (HPO terms)',
'loc/barplot.significance.hpo.{}.{}.eps'.format(prop_depth, gene_mapping)
)
print(len(set(np.where(np.logical_and(ys >= 1.65, empirical_ps >= .05))[0])),
'terms have significant z but non-significant p')
print(len(set(np.where(np.logical_and(ys < 1.65, empirical_ps < .05))[0])),
'terms have non-significant z but significant p')
if prop_depth == 0:
title = 'Significance of |LCC| (non-propagated - {})'.format('$\gamma_\cup$' if gene_mapping == 'union' else '$\gamma_\cap$')
elif prop_depth == integrator.get_hpo_depth():
title = 'Significance of |LCC| (fully up-propagated - {})'.format('$\gamma_\cup$' if gene_mapping == 'union' else '$\gamma_\cap$')
else:
title = 'Significance of |LCC| (up-propagated by {} - {})'.format(prop_depth, '$\gamma_\cup$' if gene_mapping == 'union' else '$\gamma_\cap$')
empirical_ps[empirical_ps < 1e-10] = 1e-10
empirical_ps = np.log10(empirical_ps)
path = 'loc/prop.depth.{}.z.{}.eps'.format(prop_depth, gene_mapping)
Plotter._plot_loc_zs(xs, ys, title, path, are_normal)
path = 'loc/prop.depth.{}.empirical.p.{}.eps'.format(prop_depth, gene_mapping)
Plotter.plot_z_vs_empirical_p(ys, empirical_ps, title, path)
path = 'loc/prop.depth.{}.p.{}.eps'.format(prop_depth, gene_mapping)
Plotter._plot_loc_ps(xs, empirical_ps, title, path, are_normal)
@staticmethod
def loc_omim(integrator):
Plotter._set_plot_dir(integrator)
interactome = integrator.interactome
omim2genes = integrator.get_omim2genes()
xs, ys = list(), list()
are_normal = list()
empirical_ps = list()
for genes in omim2genes.values():
genes &= interactome.genes
if not genes or len(genes) <= 1:
continue
z, empirical_p, shapiro_p = interactome.get_lcc_score(genes, 0, shapiro=True)
if z is None:
continue
lcc = interactome.get_genes_lcc_size(interactome.verts_id(genes))
rel_size = lcc / len(genes)
assert rel_size <= 1
xs.append(rel_size)
ys.append(z)
are_normal.append(shapiro_p >= .05)
empirical_ps.append(empirical_p)
xs = np.asarray(xs)
ys = np.asarray(ys)
are_normal = np.asarray(are_normal)
empirical_ps = np.asarray(empirical_ps)
Plotter.significance_bar_plot(
ys, empirical_ps,
r'Significance via $p$ or $z$ (OMIM diseases)',
'barplot.significance.omim.eps'
)
empirical_ps[empirical_ps < 1e-10] = 1e-10
empirical_ps = np.log10(empirical_ps)
title = 'Significance of |LCC| (OMIM diseases)'
path = 'loc/omim.z.eps'
Plotter._plot_loc_zs(xs, ys, title, path, are_normal)
path = 'loc/omim.empirical.p.eps'
Plotter.plot_z_vs_empirical_p(ys, empirical_ps, title, path)
path = 'loc/omim.p.eps'
Plotter._plot_loc_ps(xs, empirical_ps, title, path, are_normal)
@staticmethod
def _plot_loc_zs(xs, zs, title, path, are_normal):
print('{}/{} are significant'.format((zs > 1.65).sum(), len(zs)))
print('{}/{} are < 0'.format((zs < 0).sum(), len(zs)))
print('{} out of {} are normal'.format(are_normal.sum(), len(are_normal)))
fig, axes = Plotter.dot_plot_with_hists(
xs, zs, 'Relative size: |LCC|/|S|', 'z-score', title, figsize=(6, 6)
)
ax = axes[0]
ax.plot([-0., 1], [1.65]*2, 'k-.')
ax.plot([0, 1], [-1.65]*2, 'k-.')
ax.grid(True)
ax.set_xlim([0, 1])
ax.set_xticks(np.arange(0, 11, 2)/10)
ax.set_xticklabels(map(fmt_g, ax.get_xticks()))
ax.get_xticklabels()[-1].set_ha('right')
ax.set_yticklabels(map(fmt_g, ax.get_yticks()))
axes[2].set_yticks(ax.get_yticks())
axes[2].set_ylim(ax.get_ylim())
axes[1].set_xticks(ax.get_xticks())
axes[1].set_xlim(ax.get_xlim())
Plotter.save_fig(fig, path)
@staticmethod
def plot_z_vs_empirical_p(zs, ps, title, path):
fig, axes = Plotter.dot_plot_with_hists(zs, ps, 'z-score', 'log10(Empirical p)', title, figsize=(6, 6))
ax = axes[0]
xlim = ax.get_xlim()
ylim = ax.get_ylim()
ax.plot(xlim, [
|
np.log10(.05)
|
numpy.log10
|
import numpy as np
def sampen(x, dim, r, scale=True):
return entropy(x, dim, r, scale=scale)
def cross_sampen(x1, x2, dim, r, scale=True):
return entropy([x1, x2], dim, r, scale)
def fuzzyen(x, dim, r, n, scale=True):
return entropy(x, dim, r, n=n, scale=scale, remove_baseline=True)
def cross_fuzzyen(x1, x2, dim, r, n, scale=True):
return entropy([x1, x2], dim, r, n, scale=scale, remove_baseline=True)
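# Usage sketch (illustration only): with x a 1-D signal,
#     sampen(x, dim=2, r=0.2)        # sample entropy
#     fuzzyen(x, dim=2, r=0.2, n=1)  # fuzzy entropy
# Both delegate to entropy() below, which builds the pattern matrices via pattern_mat().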
def pattern_mat(x, m):
"""
Construct a matrix of `m`-length segments of `x`.
Parameters
----------
x : (N, ) array_like
Array of input data.
m : int
Length of segment. Must be at least 1. In the case that `m` is 1, the
input array is returned.
Returns
-------
patterns : (m, N-m+1)
Matrix whose first column is the first `m` elements of `x`, the second
column is `x[1:m+1]`, etc.
Examples
--------
    >>> pattern_mat([1, 2, 3, 4, 5, 6, 7], 3)
array([[ 1., 2., 3., 4., 5.],
[ 2., 3., 4., 5., 6.],
[ 3., 4., 5., 6., 7.]])
"""
x = np.asarray(x).ravel()
if m == 1:
return x
else:
N = len(x)
patterns = np.zeros((m, N-m+1))
for i in range(m):
patterns[i, :] = x[i:N-m+i+1]
return patterns
def entropy(x, dim, r, n=1, scale=True, remove_baseline=False):
"""
Calculate the entropy of a signal.
Parameters
----------
x : (N, ) array_like
Input time series. May also be a length-2 list [x1, x2], in which case
the cross entropy between x1 and x2 is calculated.
dim : int
Embedding dimension.
r : float
Tolerance (max absolute difference between segments) for SampEn or the
width of the fuzzy exponential function for FuzzyEn. Larger `r` makes
the function wider. Typical range 0.2 -- 1.
n : float, optional
Step width of fuzzy exponential function for FuzzyEn. Larger `n` makes
the function more rectangular. Usually in the range 1 -- 5 or so.
Default is 1.
scale : bool, optional
If True, scale the data (zero mean, unit variance). Default is True.
remove_baseline : bool, optional
If True, remove the baseline from the pattern vectors. Used for
FuzzyEn. Default is False (SampEn).
Returns
-------
entropy : float
The calculated entropy.
"""
fuzzy = True if remove_baseline else False
cross = True if type(x) == list else False
N = len(x[0]) if cross else len(x)
if scale:
if cross:
x = [_scale(np.copy(x[0])), _scale(np.copy(x[1]))]
else:
x = _scale(
|
np.copy(x)
|
numpy.copy
|
from __future__ import print_function
from __future__ import absolute_import
from ku import generators as gr
from ku import generic as gen
from ku import image_utils as iu
from ku import model_helper as mh
from ku import applications as apps
from munch import Munch
import pandas as pd, numpy as np
import pytest, shutil
from matplotlib import pyplot as plt
from os import path
from keras.layers import Input
from keras.models import Model
def test_validation_save_best_multiple_training_rounds():
ids = pd.DataFrame(dict(a = np.arange(100),
b = np.flip(
|
np.arange(100)
|
numpy.arange
|
"""
This file contains some useful functions to perform computation with Yambo.
The module can be loaded in the notebook in one of the following way
>>> from mppi import Utilities as U
>>> U.build_SAVE
or to load directly some elements
>>> from mppi.Utilities import build_SAVE
>>> build_SAVE
"""
import numpy as np
import os
def build_SAVE(source_dir, run_dir, command = 'p2y -a 2', make_link = True, overwrite_if_found = False):
"""
Build the SAVE folder for a yambo computation.
The function creates the SAVE folder in the source_dir using the command provided
as the command parameter (the option -a 2 ensures that labelling of the
high-symmetry kpoints is consistent in both QE and Yambo) and create a symbolic
link (or a copy) of the SAVE folder in the run_dir. This procedure is performed only if the SAVE
folder is not already found in the run_dir, unless the ``overwrite_if_found`` parameter is True.
If the source_dir is not found an exception is raised.
Args:
source_dir (:py:class:`string`) : name of the folder with the source nscf QuantumESPRESSO computation
run_dir (:py:class:`string`) : folder where the SAVE folder is linked or copied
command (:py:class:`string`) : command for generation of the SAVE Folder. Default is 'p2y -a 2'
make_link (:py:class:`bool`) : if True create a symbolic link
overwrite_if_found (:py:class:`bool`) : if True delete the SAVE folder in the run_dir and the
r_setup (if found) and build them again
"""
SAVE_dir = os.path.join(run_dir,'SAVE')
if not os.path.isdir(source_dir): # check if the source_dir exists
raise ValueError('The source directory', source_dir,
' does not exists.')
if not os.path.isdir(run_dir):
os.mkdir(run_dir)
print('Create folder %s'%run_dir)
# Evaluate if the SAVE_dir folder has to be removed if found
if os.path.isdir(SAVE_dir):
if overwrite_if_found:
print('clean the run_dir %s to build a new SAVE folder'%run_dir)
comm_str = 'rm -r %s'%SAVE_dir
print('Executing command:', comm_str)
os.system(comm_str)
r_setup_files = os.path.join(run_dir,'r_setup')
comm_str = 'rm %s'%r_setup_files
print('Executing command:', comm_str)
os.system(comm_str)
else:
print('SAVE folder already present in %s. No operations performed.'%run_dir)
# Actions performed if the SAVE_dir is not present (or if it has been removed)
if not os.path.isdir(SAVE_dir):
comm_str = 'cd %s; %s'%(source_dir,command)
print('Executing command:', comm_str)
os.system(comm_str)
src = os.path.abspath(os.path.join(source_dir,'SAVE'))
dest = os.path.abspath(os.path.join(run_dir,'SAVE'))
if make_link: # copy (or create a symbolik link) of the SAVE folder in the run_dir
os.symlink(src,dest,target_is_directory=True)
print('Create a symlink of %s in %s'%(src,run_dir))
else:
from shutil import copytree
copytree(src,dest)
print('Create a copy of %s in %s'%(src,run_dir))
# build the r_setup
comm_str = 'cd %s;OMP_NUM_THREADS=1 yambo'%run_dir
print('Executing command:', comm_str)
os.system(comm_str)
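# Example usage (hypothetical paths): build the SAVE folder from a QE nscf run and link
# it into the folder where Yambo will be executed.
#
#     build_SAVE('nscf_bn.save', 'yambo_run', make_link=True)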
def make_FixSymm(run_dir, polarization= 'linear', Efield1 = [1.,0.,0.], Efield2 = [0.,1.,0.],
removeTimeReversal = True, overwrite_if_found = False):
"""
Perform the fixSymm procedure to remove the symmetries broken by the electric field.
The procedure creates the FixSymm folder into run_dir and run yambo_rt into the FixSymm to generate the r_setup.
If a SAVE folder is already present in the run_dir/FixSymm path no operations are performed,
unless the ``overwrite_if_found`` parameter is True.
Args:
run_dir (:py:class:`string`) : folder with the SAVE directory
polarization (:py:class:`string`) : specifies the linear or circular polarization of the field
Efield1 (:py:class:`list`) : direction of the first electric field
Efield2 (:py:class:`list`) : direction of the second electric field. Useful for the circular polarization case
removeTimeReversal (:py:class:`bool`) : if True remove the time reversal symmetry
overwrite_if_found (:py:class:`bool`) : if True delete the SAVE folder in the run_dir/FixSymm and the
r_setup (if found) and build them again.
Note:
        Although this function does not remove the content of the FixSymm folder, the folder
        is erased when 'ypp -y' is executed. Keep this in mind if there are relevant data in the FixSymm folder.
"""
from mppi import InputFiles as I, Calculators as C
fixsymm_dir = os.path.join(run_dir,'FixSymm')
SAVE_dir = os.path.join(fixsymm_dir,'SAVE')
# Evaluate if the SAVE_dir folder has to be removed if found
if os.path.isdir(SAVE_dir):
if overwrite_if_found:
print('clean the FixSymm folder %s to build a new SAVE folder'%fixsymm_dir)
comm_str = 'rm -r %s'%SAVE_dir
print('Executing command:', comm_str)
os.system(comm_str)
l_fixsyms_file = os.path.join(run_dir,'l_fixsyms')
comm_str = 'rm %s'%l_fixsyms_file
print('Executing command:', comm_str)
os.system(comm_str)
r_setup_file = os.path.join(fixsymm_dir,'r_setup')
comm_str = 'rm %s'%r_setup_file
print('Executing command:', comm_str)
os.system(comm_str)
l_Fixsymm_file = os.path.join(fixsymm_dir,'l-FixSymm_fixsyms')
comm_str = 'rm %s'%l_Fixsymm_file
print('Executing command:', comm_str)
os.system(comm_str)
r_Fixsymm_file = os.path.join(fixsymm_dir,'r-FixSymm_fixsyms')
comm_str = 'rm %s'%r_Fixsymm_file
print('Executing command:', comm_str)
os.system(comm_str)
else:
print('SAVE folder already present in %s. No operations performed.'%fixsymm_dir)
if not os.path.isdir(SAVE_dir):
print('Perform the fixSymm in the folder %s'%run_dir)
fixSymm_inp = I.YamboInput('ypp -y',folder=run_dir,filename='FixSymm.in')
if removeTimeReversal:
fixSymm_inp.removeTimeReversal()
if polarization == 'circular':
fixSymm_inp.set_ypp_extFields(Efield1=Efield1,Efield2=Efield2)
elif polarization == 'linear':
fixSymm_inp.set_ypp_extFields(Efield1=Efield1,Efield2=[0.,0.,0.])
else:
print('Specify a correct polarization for the field')
code = C.YamboCalculator(omp=1,mpi=1,executable='ypp',skip=False,verbose=False)
code.run(input=fixSymm_inp,name='FixSymm',run_dir=run_dir)
# build the real-time r_setup
command = 'cd %s; OMP_NUM_THREADS=1 yambo_rt'%fixsymm_dir
os.system(command)
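# Minimal usage sketch (not part of the original module): the directory names
# below are hypothetical, and the calls require a completed QE nscf run plus the
# yambo executables (p2y, yambo, ypp, yambo_rt) available in the PATH.
if __name__ == '__main__':
    build_SAVE(source_dir='nscf/prefix.save', run_dir='rt_run', make_link=True)
    make_FixSymm(run_dir='rt_run', polarization='linear', Efield1=[1., 0., 0.])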
def get_variable_from_db(ndb_file,var_name):
"""
Extract the value of a variable from a ndb database
Args:
ndb_file (:py:class:`string`) : the name of the database
var_name (:py:class:`string`) : name of the variable
Return:
:py:class:`numpy.ndarray` : array with the values of the variable
"""
from netCDF4 import Dataset as Ds
db = Ds(ndb_file)
var =
|
np.array(db.variables[var_name])
|
numpy.array
|
import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.floatingse.member as member
import wisdem.commonse.utilities as util
from wisdem.commonse import gravity as g
NULL = member.NULL
NHEIGHT = 6
NPTS = member.get_nfull(NHEIGHT)
myones = np.ones((NPTS,))
secones = np.ones((NPTS - 1,))
class TestInputs(unittest.TestCase):
def testDiscYAML_1Material(self):
inputs = {}
outputs = {}
discrete_inputs = {}
discrete_outputs = {}
# Test land based, 1 material
inputs["s"] = np.linspace(0, 1, 5)
inputs["layer_thickness"] = 0.25 * np.ones((1, 5))
inputs["joint1"] = np.zeros(3)
inputs["joint2"] = np.r_[np.zeros(2), 1e2]
inputs["outer_diameter_in"] = 8 * np.ones(5)
discrete_inputs["layer_materials"] = ["steel"]
discrete_inputs["ballast_materials"] = ["slurry", "slurry", "seawater"]
inputs["E_mat"] = 1e9 * np.ones((2, 3))
inputs["E_user"] = 0.0
inputs["G_mat"] = 1e8 * np.ones((2, 3))
inputs["sigma_y_mat"] = np.array([1e7, 1e7])
inputs["sigma_ult_mat"] = 1e7 * np.ones((2, 3))
inputs["wohler_exp_mat"] = np.array([1e1, 1e1])
inputs["wohler_A_mat"] = np.array([1e1, 1e1])
inputs["rho_mat"] = np.array([1e4, 1e5])
inputs["rho_water"] = 1e3
inputs["unit_cost_mat"] = np.array([1e1, 2e1])
inputs["outfitting_factor_in"] = 1.05
discrete_inputs["material_names"] = ["steel", "slurry"]
opt = {}
opt["n_height"] = [5]
opt["n_layers"] = [1]
opt["n_ballasts"] = [3]
myobj = member.DiscretizationYAML(options=opt, idx=0, n_mat=2)
myobj.compute(inputs, outputs, discrete_inputs, discrete_outputs)
myones = np.ones(4)
self.assertEqual(outputs["height"], 100.0)
npt.assert_equal(outputs["section_height"], 25.0 * myones)
npt.assert_equal(outputs["outer_diameter"], inputs["outer_diameter_in"])
npt.assert_equal(outputs["wall_thickness"], 0.25 * myones)
npt.assert_equal(outputs["E"], 1e9 * myones)
npt.assert_equal(outputs["G"], 1e8 * myones)
npt.assert_equal(outputs["sigma_y"], 1e7 * myones)
npt.assert_equal(outputs["sigma_ult"], 1e7 * myones)
npt.assert_equal(outputs["rho"], 1e4 * myones)
npt.assert_equal(outputs["unit_cost"], 1e1 * myones)
npt.assert_equal(outputs["outfitting_factor"], 1.05 * myones)
npt.assert_equal(outputs["ballast_density"], np.array([1e5, 1e5, 1e3]))
npt.assert_equal(outputs["ballast_unit_cost"], np.array([2e1, 2e1, 0.0]))
A = np.pi * (16 - 3.75 ** 2)
I = (256.0 - 3.75 ** 4) * np.pi / 4.0
npt.assert_equal(outputs["z_param"], 100 * np.linspace(0, 1, 5))
npt.assert_equal(outputs["sec_loc"], np.linspace(0, 1, 4))
# npt.assert_equal(outputs["str_tw"], np.zeros(nout))
# npt.assert_equal(outputs["tw_iner"], np.zeros(nout))
npt.assert_equal(outputs["mass_den"], 1e4 * A * myones)
npt.assert_equal(outputs["foreaft_iner"], 1e4 * I * myones)
npt.assert_equal(outputs["sideside_iner"], 1e4 * I * myones)
npt.assert_equal(outputs["foreaft_stff"], 1e9 * I * myones)
npt.assert_equal(outputs["sideside_stff"], 1e9 * I * myones)
npt.assert_equal(outputs["tor_stff"], 1e8 * 2 * I * myones)
npt.assert_equal(outputs["axial_stff"], 1e9 * A * myones)
# npt.assert_equal(outputs["cg_offst"], np.zeros(nout))
# npt.assert_equal(outputs["sc_offst"], np.zeros(nout))
# npt.assert_equal(outputs["tc_offst"], np.zeros(nout))
def testDiscYAML_2Materials(self):
inputs = {}
outputs = {}
discrete_inputs = {}
discrete_outputs = {}
# Test land based, 2 materials
inputs["s"] = np.linspace(0, 1, 5)
inputs["layer_thickness"] = np.array([[0.2, 0.2, 0.2, 0.0, 0.0], [0.0, 0.0, 0.0, 0.1, 0.1]])
inputs["joint1"] = np.zeros(3)
inputs["joint2"] = np.r_[np.zeros(2), 1e2]
inputs["outer_diameter_in"] = 8 * np.ones(5)
discrete_inputs["layer_materials"] = ["steel", "other"]
discrete_inputs["ballast_materials"] = ["slurry", "slurry", "seawater"]
inputs["E_mat"] = 1e9 * np.vstack((
|
np.ones((2, 3))
|
numpy.ones
|
#-------------------------------------------------------------------------------
# Author: <NAME> <<EMAIL>>
# Date: 13.09.2017
#-------------------------------------------------------------------------------
# This file is part of SSD-TensorFlow.
#
# SSD-TensorFlow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SSD-TensorFlow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SSD-Tensorflow. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import numpy as np
from collections import defaultdict
from utils import Size, prop2abs
IMG_SIZE = Size(1000, 1000)
#-------------------------------------------------------------------------------
def APs2mAP(aps):
"""
Take a mean of APs over all classes to compute mAP
"""
num_classes = 0.
sum_ap = 0.
for _, v in aps.items():
sum_ap += v
num_classes += 1
if num_classes == 0:
return 0
return sum_ap/num_classes
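# Minimal sketch (illustrative AP values, not part of the original file): mAP is
# the unweighted mean of the per-class average precisions.
if __name__ == '__main__':
    print(APs2mAP({'car': 0.75, 'person': 0.55}))  # -> 0.65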
class CurveAPCalculator:
#---------------------------------------------------------------------------
def __init__(self, minoverlap=0.0015):
"""
Initialize the calculator.
"""
self.minoverlap = minoverlap
self.clear()
#---------------------------------------------------------------------------
def add_detections(self, gt_boxes, boxes):
"""
Add new detections to the calculator.
        :param gt_boxes: ground truth boxes for the sample
:param boxes: a list of (float, Box) tuples representing
detections and their confidences, the detections
must have a correctly set label
"""
sample_id = len(self.gt_boxes)
self.gt_boxes.append(gt_boxes)
for conf, box in boxes:
self.det_params[box.label].append(np.array(box.sig))
self.det_confidence[box.label].append(conf)
self.det_sample_ids[box.label].append(sample_id)
#---------------------------------------------------------------------------
def compute_aps(self):
"""
Compute the average precision per class as well as mAP.
"""
#-----------------------------------------------------------------------
# Split the ground truth samples by class and sample
#-----------------------------------------------------------------------
counts = defaultdict(lambda: 0)
gt_map = defaultdict(dict)
for sample_id, boxes in enumerate(self.gt_boxes):
boxes_by_class = defaultdict(list)
for box in boxes:
counts[box.label] += 1
boxes_by_class[box.label].append(box)
for k, v in boxes_by_class.items():
arr = np.zeros((len(v), 2, len(box.sig[0])))
                match = np.zeros((len(v)), dtype=bool)
for i, box in enumerate(v):
arr[i] = np.array(box.sig)
gt_map[k][sample_id] = (arr, match)
#-----------------------------------------------------------------------
# Compare predictions to ground truth
#-----------------------------------------------------------------------
aps = {}
for k in gt_map:
#-------------------------------------------------------------------
# Create numpy arrays of detection parameters and sort them
# in descending order
#-------------------------------------------------------------------
params = np.array(self.det_params[k], dtype=np.float32)
confs = np.array(self.det_confidence[k], dtype=np.float32)
            sample_ids = np.array(self.det_sample_ids[k], dtype=int)
idxs_max = np.argsort(-confs)
params = params[idxs_max]
confs = confs[idxs_max]
sample_ids = sample_ids[idxs_max]
#-------------------------------------------------------------------
# Loop over the detections and count true and false positives
#-------------------------------------------------------------------
tps = np.zeros((params.shape[0])) # true positives
fps = np.zeros((params.shape[0])) # false positives
for i in range(params.shape[0]):
sample_id = sample_ids[i]
box = params[i]
#---------------------------------------------------------------
                # The image this detection comes from contains no objects
                # of this class
#---------------------------------------------------------------
if not sample_id in gt_map[k]:
fps[i] = 1
continue
#---------------------------------------------------------------
                # Compute the curve overlap and see if it's within the threshold
#---------------------------------------------------------------
gt = gt_map[k][sample_id][0]
matched = gt_map[k][sample_id][1]
iou = curve_overlap(box, gt)
max_idx = np.argmin(iou)
if iou[max_idx] > self.minoverlap:
fps[i] = 1
continue
#---------------------------------------------------------------
# Check if the max overlap ground truth box is already matched
#---------------------------------------------------------------
if matched[max_idx]:
fps[i] = 1
continue
tps[i] = 1
matched[max_idx] = True
#-------------------------------------------------------------------
# Compute the precision, recall
#-------------------------------------------------------------------
fps = np.cumsum(fps)
tps = np.cumsum(tps)
recall = tps/counts[k]
prec = tps/(tps+fps)
ap = 0
for r_tilde in np.arange(0, 1.01, 0.01):
prec_rec = prec[recall>=r_tilde]
if len(prec_rec) > 0:
ap +=
|
np.amax(prec_rec)
|
numpy.amax
|
import os.path
import shutil
import sys
import numpy as np
'''
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
'''
import tensorflow as tf
from nets import nets
def run(opt):
################################################################################################
# Read experiment to run
################################################################################################
print(opt.name)
################################################################################################
################################################################################################
# Define training and validation datasets through Dataset API
################################################################################################
# Initialize dataset and creates TF records if they do not exist
if opt.dataset.dataset_name == 'insideness':
from data import insideness_data
dataset = insideness_data.InsidenessDataset(opt)
else:
print("Error: no valid dataset specified")
sys.stdout.flush()
# Repeatable datasets for training
train_dataset = dataset.create_dataset(augmentation=False, standarization=False, set_name='train', repeat=True)
val_dataset = dataset.create_dataset(augmentation=False, standarization=False, set_name='val', repeat=True)
test_dataset = dataset.create_dataset(augmentation=False, standarization=False, set_name='test', repeat=True)
    # Handles to switch datasets
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle, train_dataset.output_types, train_dataset.output_shapes)
train_iterator = train_dataset.make_one_shot_iterator()
val_iterator = val_dataset.make_one_shot_iterator()
test_iterator = test_dataset.make_one_shot_iterator()
################################################################################################
################################################################################################
# Declare DNN
################################################################################################
# Get data from dataset dataset
image, y_ = iterator.get_next()
# Call DNN
dropout_rate = tf.placeholder(tf.float32)
y, _, _ = nets.Crossing(image, opt, dropout_rate, len(dataset.list_labels)*dataset.num_outputs)
flat_y = tf.reshape(tensor=y, shape=[-1, opt.dataset.image_size ** 2, len(dataset.list_labels)])
flat_y_ = tf.reshape(tensor=y_, shape=[-1, opt.dataset.image_size ** 2])
flat_image = tf.reshape(tensor=tf.cast(image, tf.int64), shape=[-1, opt.dataset.image_size ** 2])
flat_output = tf.argmax(flat_y, 2)
correct_prediction = tf.equal(tf.cast(flat_output * (1 - flat_image), tf.uint8), tf.cast(flat_y_,tf.uint8))
correct_prediction = tf.cast(correct_prediction, tf.float32)
error_images = tf.reduce_min(correct_prediction, 1)
with tf.Session() as sess:
# datasets
# The `Iterator.string_handle()` method returns a tensor that can be evaluated
# and used to feed the `handle` placeholder.
training_handle = sess.run(train_iterator.string_handle())
validation_handle = sess.run(val_iterator.string_handle())
test_handle = sess.run(test_iterator.string_handle())
################################################################################################
sess.run(tf.global_variables_initializer())
insideness = {}
# TRAINING SET
print("TRAIN SET")
insideness['train_img'] = []
insideness['train_gt'] = []
#err_idx = 0
# Steps for doing one epoch
for num_iter in range(int(dataset.num_images_training / opt.hyper.batch_size) + 1):
tmp_img, tmp_gt, net_out, err_img = sess.run([image, y_, flat_output, error_images], feed_dict={handle: training_handle,
dropout_rate: 1.0})
# Use network to generate ground-truth
tmp_gt = np.reshape(net_out, [opt.hyper.batch_size, 42, 42])
'''
if np.sum(np.int8(np.sum(np.sum(np.int8(tmp_img == 1), 1), 1) == 0)) > 0:
print("EMPTY")
err_img[(np.sum(np.sum(np.int8(tmp_img == 1), 1), 1) == 0)] = 0
idx = np.asarray(list(range(len(err_img))))
idx_acc = idx[err_img == 1]
missing = np.uint(np.sum(1 - err_img))
if missing > 0:
idx_not_acc = idx_acc[np.random.randint(idx_acc.shape[0], size=missing)]
idx[err_img == 0] = idx_not_acc
tmp_img = tmp_img[idx, :, :]
tmp_gt = tmp_gt[idx, :, :]
'''
''' VISUALIZE ERRORS
mm = (err_img == 0)
plt.imshow(np.reshape(tmp_img[mm, :, :],[100,100]))
plt.show()
plt.savefig(str(err_idx),
format="pdf", dpi=1000)
plt.close()
plt.imshow(np.reshape(tmp_gt[mm, :, :],[100,100]))
plt.show()
plt.savefig("gt"+str(err_idx),
format="pdf", dpi=1000)
plt.close()
oo = np.reshape(net_out[mm, :], [100,100])
oo = (oo-np.reshape(tmp_gt[mm, :, :], [100,100]))*(1-np.reshape(tmp_img[mm, :, :],[100,100]))
plt.imshow(oo)
plt.show()
plt.savefig("net"+str(err_idx),
format="pdf", dpi=1000)
plt.close()
err_idx = err_idx + 1
'''
insideness['train_img'].append(tmp_img.astype(np.uint8))
insideness['train_gt'].append(tmp_gt.astype(np.uint8))
insideness['train_img'] = [tmp for tmp in np.concatenate(insideness['train_img'])[:int(dataset.num_images_training), :, :]]
insideness['train_gt'] = [tmp for tmp in np.concatenate(insideness['train_gt'])[:int(dataset.num_images_training), :, :]]
# VALIDATION SET
print("VALIDATION SET")
insideness['val_img'] = []
insideness['val_gt'] = []
for num_iter in range(int(dataset.num_images_val / opt.hyper.batch_size) + 1):
tmp_img, tmp_gt, net_out, err_img = sess.run([image, y_, flat_output, error_images], feed_dict={handle: validation_handle,
dropout_rate: 1.0})
# Use network to generate ground-truth
tmp_gt = np.reshape(net_out, [opt.hyper.batch_size, 42, 42])
'''
if np.sum(np.int8(np.sum(np.sum(np.int8(tmp_img == 1),1),1)==0))>0:
print("EMPTY")
err_img[(np.sum(np.sum(np.int8(tmp_img == 1), 1), 1) == 0)] = 0
idx = np.asarray(list(range(len(err_img))))
idx_acc = idx[err_img == 1]
missing = np.uint(np.sum(1 - err_img))
if missing > 0:
idx_not_acc = idx_acc[np.random.randint(idx_acc.shape[0], size=missing)]
idx[err_img == 0] = idx_not_acc
tmp_img = tmp_img[idx, :, :]
tmp_gt = tmp_gt[idx, :, :]
'''
insideness['val_img'].append(tmp_img.astype(np.uint8))
insideness['val_gt'].append(tmp_gt.astype(np.uint8))
insideness['val_img'] = [tmp for tmp in np.concatenate(insideness['val_img'])[:int(np.maximum(dataset.num_images_val, 1)), :, :]]
insideness['val_gt'] = [tmp for tmp in np.concatenate(insideness['val_gt'])[:int(np.maximum(dataset.num_images_val, 1)), :, :]]
# TEST SET
print("TEST SET")
sys.stdout.flush()
insideness['test_img'] = []
insideness['test_gt'] = []
for num_iter in range(int(dataset.num_images_test / opt.hyper.batch_size) + 1):
tmp_img, tmp_gt, net_out, err_img = sess.run([image, y_, flat_output, error_images], feed_dict={handle: test_handle,
dropout_rate: 1.0})
# Use network to generate ground-truth
tmp_gt = np.reshape(net_out, [opt.hyper.batch_size, 42, 42])
'''
if np.sum(np.int8(np.sum(np.sum(np.int8(tmp_img == 1),1),1)==0))>0:
print("EMPTY")
err_img[(np.sum(np.sum(np.int8(tmp_img == 1), 1), 1) == 0)] = 0
idx = np.asarray(list(range(len(err_img))))
idx_acc = idx[err_img == 1]
missing = np.uint(np.sum(1 - err_img))
if missing > 0:
idx_not_acc = idx_acc[np.random.randint(idx_acc.shape[0], size=missing)]
idx[err_img == 0] = idx_not_acc
tmp_img = tmp_img[idx, :, :]
tmp_gt = tmp_gt[idx, :, :]
'''
insideness['test_img'].append(tmp_img.astype(np.uint8))
insideness['test_gt'].append(tmp_gt.astype(np.uint8))
insideness['test_img'] = [tmp for tmp in
|
np.concatenate(insideness['test_img'])
|
numpy.concatenate
|
# ============================================================================
# Convolutional Neural Network for training a classifier to determine the
# complexity of a faraday spectrum.
# Written using Keras and TensorFlow by <NAME>
# https://sheabrownastro.wordpress.com/
# https://astrophysicalmachinelearning.wordpress.com/
# ============================================================================
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (7,7) # Make the figures a bit bigger
np.random.seed(11) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution1D, MaxPooling1D, GaussianNoise
from keras.utils import np_utils
from keras import backend as K
# Function to regularize the data (subtract the global mean and divide by the
# global max; a per-sample, row-wise variant is left commented out in the body)
# ---------------------------------------------------------------
def regularizeData(data):
data=np.asarray(data)
reg=(data-data.mean())/data.max() #data.max(axis=1,keepdims=True)
return reg
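# Worked example (added for clarity, assuming a plain list input):
# regularizeData([1., 2., 3.]) -> array([-1/3, 0., 1/3]),
# since the global mean is 2 and the global max is 3.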
batch_size = 5
nb_classes = 2
nb_epoch = 5
# Load the training data
X_train=np.load('x_train.npy')
y_train=
|
np.load('y_train.npy')
|
numpy.load
|
import argparse
import sys
import numpy as np
import matplotlib.pyplot as plt
from imnn_tf.utils import TFRecords
from imnn_tf.lfi import GaussianApproximation
__version__ = "0.2.0"
__author__ = "<NAME>"
class GenerateGaussianNoise():
def __init__(self, input_shape=(10,), n_params=2, n_summaries=2, n_s=1000, n_d=1000, n_d_small=100,
θ_fid=np.array([0., 1.]), δθ=np.array([0.1, 0.1]), training_seed=0,
validation_seed=1):
self.input_shape = input_shape
self.n_params = n_params
self.n_summaries = n_summaries
self.n_s = n_s
self.n_d = n_d
self.n_d_small = n_d_small
self.θ_fid = θ_fid
self.δθ = δθ
self.half_δθ = δθ / 2.
self.training_seed = training_seed
self.validation_seed = validation_seed
def get_fiducial(self, seed, data):
return data[seed]
def get_derivative(self, seed, derivative, parameter, data):
return data[seed, derivative, parameter]
def check_selection(self, size):
if size not in ["full", "all", "small"]:
print("size must be `full`, `all` or `small` describing, respectively "
"whether just `n_d=n_s` is returned, or `n_d=n_s` and `n_d_small` "
"is returned, or `n_d=n_d_small` is returned.")
sys.exit()
def check_ftype(self, ftype):
if ftype not in ["both", "numpy", "tfrecords"]:
            print("ftype must be `both`, `numpy` or `tfrecords` describing, respectively "
"whether both `numpy` and `tfrecords` files are saved, or just either one.")
sys.exit()
def simulator(self, parameters, seed=None, simulator_args=None):
if seed is not None:
np.random.seed(seed)
if len(parameters.shape) == 1:
parameters = parameters[np.newaxis, :]
if self.n_params == 1:
parameters = np.repeat(parameters, 2, axis=1)
parameters[:, 0] = np.zeros_like(parameters[:, 0])
return np.moveaxis(
np.random.normal(
parameters[:, 0],
np.sqrt(parameters[:, 1]),
self.input_shape + (parameters.shape[0],)),
-1, 0)
def generate_data(self, size="full"):
self.check_selection(size)
details = dict(
input_shape=self.input_shape,
n_params=self.n_params,
n_summaries=self.n_summaries,
n_s=self.n_s,
n_d=self.n_d,
θ_fid=self.θ_fid,
δθ=self.δθ)
a_0 = self.simulator(
parameters=np.repeat(
self.θ_fid[np.newaxis, :],
self.n_s,
axis=0),
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
a_1 = self.simulator(
parameters=np.repeat(
self.θ_fid[np.newaxis, :],
self.n_s,
axis=0),
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
b_0 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] - self.half_δθ[0],
self.θ_fid[1]])[np.newaxis, :],
self.n_d,
axis=0),
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
b_1 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] - self.half_δθ[0],
self.θ_fid[1]])[np.newaxis, :],
self.n_d,
axis=0),
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
c_0 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] + self.half_δθ[0],
self.θ_fid[1]])[np.newaxis, :],
self.n_d,
axis=0),
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
c_1 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0] + self.half_δθ[0],
self.θ_fid[1]])[np.newaxis, :],
self.n_d,
axis=0),
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
d_0 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0],
self.θ_fid[1] - self.half_δθ[1]]
)[np.newaxis, :],
self.n_d,
axis=0),
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
d_1 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0],
self.θ_fid[1] - self.half_δθ[1]]
)[np.newaxis, :],
self.n_d,
axis=0),
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
e_0 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0],
self.θ_fid[1] + self.half_δθ[1]]
)[np.newaxis, :],
self.n_d,
axis=0),
seed=self.training_seed,
simulator_args={"input_shape": self.input_shape})
e_1 = self.simulator(
parameters=np.repeat(
np.array([
self.θ_fid[0],
self.θ_fid[1] + self.half_δθ[1]]
)[np.newaxis, :],
self.n_d,
axis=0),
seed=self.validation_seed,
simulator_args={"input_shape": self.input_shape})
f_0 = np.stack((np.stack((b_0, c_0)),
np.stack((d_0, e_0)))
).transpose(2, 1, 0, 3)
f_1 = np.stack((
|
np.stack((b_1, c_1))
|
numpy.stack
|
"""
lotka_volterra.py
----------------
Implementation to simulate a Lotka-Volterra model on a network.
author: <NAME>
Submitted as part of the 2019 NetSI Collabathon.
"""
from netrd.dynamics import BaseDynamics
import numpy as np
import networkx as nx
from numpy.random import uniform, normal
from scipy.integrate import ode
from ..utilities import unweighted
class LotkaVolterra(BaseDynamics):
"""Lotka-Volterra dynamics of species abundance."""
@unweighted
def simulate(
self,
G,
L,
init=None,
gr=None,
cap=None,
inter=None,
dt=1e-2,
stochastic=True,
pertb=None,
):
r"""Simulate time series on a network from the Lotka-Volterra model.
The Lotka-Volterra model was designed to describe dynamics of
species abundances in an ecosystem. Species :math:`i`'s abundance
change per time is :math:`\frac{d X_i}{d t} = r_i X_i \left(1 -
\frac{X_i}{K_i} + \sum_{j \neq i} W_{ij} \frac{X_j}{K_i}\right)`
where :math:`r_i` and :math:`K_i` are the growth rate and the
carrying capacity of species :math:`i` respectively, and
:math:`W_{ij}` are the relative interaction strength of species
:math:`j` on :math:`i`.
The results dictionary also stores the ground truth network as
`'ground_truth'` and the intermediate time steps as `'time_steps'`.
Parameters
----------
G (nx.Graph)
Underlying ground-truth network of simulated time series which
has :math:`N` nodes.
L (int)
Length of time series.
init (np.ndarray)
Length-:math:`N` 1D array of nodes' initial condition. If not
            specified, an initial condition is uniformly generated from 0 to
the nodes' carrying capacity.
gr (np.ndarray)
Length-:math:`N` 1D array of nodes' growth rate. If not
specified, default to 1 for all nodes.
cap (np.ndarray)
Length-:math:`N` 1D array of nodes' carrying capacity. If not
specified, default to 1 for all nodes.
inter (np.ndarray)
:math:`N \times N` array of interaction weights between
nodes. If not specified, default to a zero-diagonal matrix
whose [i, j] entry is :math:`\frac{sign(j - i)}{N - 1}`.
dt (float or np.ndarray)
Sizes of time steps when simulating the continuous-time
dynamics.
stochastic (bool)
Whether to simulate the stochastic or deterministic dynamics.
pertb (np.ndarray)
Length-:math:`N` 1D array of perturbation magnitude of nodes'
growth. If not specified, default to 0.01 for all nodes.
Returns
-------
TS (np.ndarray)
:math:`N \times L` array of `L` observations on :math:`N` nodes.
Notes
-----
        The deterministic dynamics is simulated through the fourth-order
Runge-Kutta method, and the stochastic one is simulated through
multiplicative noise with the Euler-Maruyama method.
The ground-truth network, time steps and the time series can be
        found in results['ground-truth'], results['time_steps'] and
results['time_series'] respectively.
"""
N = G.number_of_nodes()
adjmat = nx.to_numpy_array(G)
# Initialize the model's parameters if not specified
if gr is None:
gr = np.ones(N, dtype=float)
if cap is None:
cap = np.ones(N, dtype=float)
if inter is None:
wei = 1 / (N - 1)
full = np.full((N, N), wei, dtype=float)
inter = np.zeros((N, N), dtype=float)
inter += np.triu(full) - np.tril(full)
if stochastic and pertb is None:
pertb = 1e-2 * np.ones(N, dtype=float)
        # Randomly initialize an initial condition if not specified
TS = np.zeros((N, L), dtype=float)
if init is None:
init = uniform(low=0, high=cap)
TS[:, 0] = init
# Define the function of dynamics
mat = np.where(adjmat == 1, inter, 0.0) + np.diag(-np.ones(N))
mat /= cap[:, np.newaxis]
def dyn(t, state):
return state * (gr + np.dot(mat, state))
# Simulate the time series
if isinstance(dt, float):
dt = dt *
|
np.ones(L - 1)
|
numpy.ones
|
import numpy as np
import multiprocessing as mp
import scipy as sp
import matplotlib.pyplot as plt
def sample(seed):
np.random.seed(seed) #critical!!!!
return np.random.uniform()
pool = mp.Pool(mp.cpu_count()-1)
seed0=100
seed1=1e6
seedN=100000
seedH= (seed1-seed0)/(seedN-1)
result=pool.map(sample, 1035*
|
np.arange(seed0,seed1+seedH/2,seedH)
|
numpy.arange
|
import numpy as np
class DAlg:
def __init__(self, K, label=""):
self.label = label
self.K = K
self.alg_time = 0
def choose_arm(self):
pass
def update(self, arm, reward):
self.alg_time += 1
def play_once(self, bandit):
arm = self.choose_arm()
reward = bandit.play_arm(arm)
self.update(arm, reward)
return arm, reward
def play_T_times(self, bandit, T):
for _ in range(T):
self.play_once(bandit)
return
def reset(self):
self.alg_time = 0
class GenericIndexAlg(DAlg):
def __init__(self, K, label=""):
super().__init__(K, label=label)
self.indices = np.ones(self.K)
self.alg_n_plays = np.zeros(self.K)
self.mean_rewards = [0 for _ in range(self.K)]
def choose_arm(self):
if self.alg_time < self.K:
return self.alg_time
return np.argmax(self.indices)
def update(self, arm, reward):
super().update(arm, reward)
self.alg_n_plays[arm] += 1
N = self.alg_n_plays[arm]
self.mean_rewards[arm] = (self.mean_rewards[arm] * (N - 1) + reward) / N
def reset(self):
super().reset()
self.indices = np.ones(self.K)
self.alg_n_plays = np.zeros(self.K)
self.mean_rewards = [0 for _ in range(self.K)]
class UCB_a(GenericIndexAlg):
r"""
UCB-anytime U_a(t) = \hat \mu_a(t) + sig * \sqrt{ 2 * \log (t) / N_a(t)}
"""
def __init__(self, K, sig=1, label=""):
super().__init__(K, label=label)
self.sig = sig
def update(self, arm, reward):
super().update(arm, reward)
if self.alg_time < self.K:
return
self.indices = self.mean_rewards + self.sig * np.sqrt(
2 * np.log(self.alg_time) / (self.alg_n_plays)
)
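# Minimal sketch (not part of the original module): exercise UCB_a on a toy
# Bernoulli bandit. The _ToyBernoulliBandit class below is an assumption made
# for illustration; it only needs to expose play_arm(arm) -> reward.
if __name__ == "__main__":
    class _ToyBernoulliBandit:
        def __init__(self, means):
            self.means = means
        def play_arm(self, arm):
            return float(np.random.rand() < self.means[arm])

    bandit = _ToyBernoulliBandit([0.2, 0.5, 0.8])
    alg = UCB_a(K=3, sig=1, label="ucb")
    alg.play_T_times(bandit, 500)
    print(alg.mean_rewards, alg.indices)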
class MOSS_a(GenericIndexAlg):
""" MOSS-anytime"""
def __init__(self, K, sig=1, label=""):
super().__init__(K, label=label)
self.sig = sig
def update(self, arm, reward):
super().update(arm, reward)
if self.alg_time < self.K:
return
u = np.maximum(np.ones(self.K), self.alg_time / (self.alg_n_plays * self.K))
self.indices = self.mean_rewards + self.sig * np.sqrt(
2 * np.log(u) / self.alg_n_plays
)
class MOSS_f(GenericIndexAlg):
""" MOSS-horizon dependent"""
def __init__(self, K, sig=1, label="", *, T):
super().__init__(K, label=label)
self.sig = sig
self.T = T
def update(self, arm, reward):
super().update(arm, reward)
if self.alg_time < self.K:
return
u = np.maximum(np.ones(self.K), self.T / (self.alg_n_plays * self.K))
self.indices = self.mean_rewards + self.sig * np.sqrt(
2 * np.log(u) / (self.alg_n_plays)
)
class MaxUCB(UCB_a):
def __init__(self, K, sig_init=0, label=""):
super().__init__(K, sig=sig_init, label=label)
self.sig_init = sig_init
self.max_observed_reward = 0
self.min_observed_reward = 0
def update(self, arm, reward):
super().update(arm, reward)
self.max_observed_reward = max(self.max_observed_reward, reward)
self.min_observed_reward = min(self.min_observed_reward, reward)
self.sig = self.max_observed_reward - self.min_observed_reward
def reset(self):
super().reset()
self.max_observed_reward = 0
self.min_observed_reward = 0
self.sig = self.sig_init
class GenericKlUCB(GenericIndexAlg):
def __init__(self, K, label=""):
super().__init__(K, label=label)
def update(self, arm, reward):
super().update(arm, reward)
for i in range(self.K):
n, t = self.alg_n_plays[i], self.alg_time
expl = self.expl(t, n)
self.indices[i] = ucb_kl(self.mean_rewards[i], expl / n)
class klUCB(GenericKlUCB):
def expl(self, t, n):
return np.log(t)
class klUCBplusplus(GenericKlUCB):
def expl(self, t, n):
return np.log(np.maximum(1, t / (self.K * n)))
class KLUCB(GenericIndexAlg):
"Anytime version. Copied from PyMaBandits"
def __init__(self, K, label=""):
super().__init__(K, label=label)
self.obs_dict = [{1: 0} for _ in range(self.K)]
def expl(self, t, n):
return np.log(t)
def update(self, arm, reward):
super().update(arm, reward)
if reward in self.obs_dict[arm]:
self.obs_dict[arm][reward] += 1
else:
self.obs_dict[arm][reward] = 1
if self.alg_time <= self.K:
return
for i in range(self.K):
n = self.alg_n_plays[i]
expl = self.expl(self.alg_time, n) / n
self.indices[i] = self.compute_KL_index(i, expl)
def compute_KL_index(self, i, expl):
if expl == 0:
return self.mean_rewards[i]
else:
temp = np.array(list(self.obs_dict[i].values()))
p = temp / np.sum(temp)
V = np.array(list(self.obs_dict[i].keys()))
q = self._maxEV(p, V, expl)
return np.dot(q, V)
def _maxEV(self, p, V, expl):
Uq = np.zeros(np.size(p))
Kb = p > 0
K = p <= 0
if
|
np.any(K)
|
numpy.any
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import sys
import locale
import re
from collections import OrderedDict, Counter
from collections.abc import Iterable
from itertools import product
import numpy as np
__all__ = [
"BackoffNGramLM",
"write_arpa",
"ngram_counts_to_prob_list_mle",
"ngram_counts_to_prob_list_add_k",
"ngram_counts_to_prob_list_simple_good_turing",
"ngram_counts_to_prob_list_katz_backoff",
"ngram_counts_to_prob_list_absolute_discounting",
"ngram_counts_to_prob_list_kneser_ney",
"text_to_sents",
"sents_to_ngram_counts",
]
locale.setlocale(locale.LC_ALL, "C")
warnings.simplefilter("error", RuntimeWarning)
class BackoffNGramLM(object):
"""A backoff NGram language model, stored as a trie
This class is intended for two things: one, to prune backoff language models, and
two, to calculate the perplexity of a language model on a corpus. It is very
inefficient.
Parameters
----------
prob_list : sequence
See :mod:`pydrobert.torch.util.parse_arpa_lm`
sos : str, optional
The start-of-sequence symbol. When calculating the probability of a
sequence, :math:`P(sos) = 1` when `sos` starts the sequence. Defaults
to ``'<S>'`` if that symbol is in the vocabulary, otherwise
``'<s>'``
eos : str, optional
The end-of-sequence symbol. This symbol is expected to terminate each
sequence when calculating sequence or corpus perplexity. Defaults to
``</S>`` if that symbol is in the vocabulary, otherwise ``</s>``
unk : str, optional
The out-of-vocabulary symbol. If a unigram probability does not exist
for a token, the token is replaced with this symbol. Defaults to
``'<UNK>'`` if that symbol is in the vocabulary, otherwise
``'<unk>'``
"""
def __init__(self, prob_list, sos=None, eos=None, unk=None):
self.trie = self.TrieNode(0.0, 0.0)
self.vocab = set()
if not len(prob_list) or not len(prob_list[0]):
raise ValueError("prob_list must contain (all) unigrams")
for order, dict_ in enumerate(prob_list):
is_first = not order
is_last = order == len(prob_list) - 1
for context, value in dict_.items():
if is_first:
self.vocab.add(context)
context = (context,)
if is_last:
lprob, bo = value, 0.0
else:
lprob, bo = value
self.trie.add_child(context, lprob, bo)
if sos is None:
if "<S>" in self.vocab:
sos = "<S>"
else:
sos = "<s>"
if sos not in self.vocab:
raise ValueError(
'start-of-sequence symbol "{}" does not have unigram '
"entry.".format(sos)
)
self.sos = self.trie.sos = sos
if eos is None:
if "</S>" in self.vocab:
eos = "</S>"
else:
eos = "</s>"
if eos not in self.vocab:
raise ValueError(
'end-of-sequence symbol "{}" does not have unigram '
"entry.".format(eos)
)
self.eos = self.trie.eos = eos
if unk is None:
if "<UNK>" in self.vocab:
unk = "<UNK>"
else:
unk = "<unk>"
if unk in self.vocab:
self.unk = unk
else:
warnings.warn(
'out-of-vocabulary symbol "{}" does not have unigram count. '
"Out-of-vocabulary tokens will raise an error".format(unk)
)
self.unk = None
assert self.trie.depth == len(prob_list)
class TrieNode(object):
def __init__(self, lprob, bo):
self.lprob = lprob
self.bo = bo
self.children = OrderedDict()
self.depth = 0
self.sos = None
self.eos = None
def add_child(self, context, lprob, bo):
assert len(context)
next_, rest = context[0], context[1:]
child = self.children.setdefault(next_, type(self)(None, 0.0))
if rest:
child.add_child(rest, lprob, bo)
else:
child.lprob = lprob
child.bo = bo
self.depth = max(self.depth, child.depth + 1)
def conditional(self, context):
assert context and self.depth
context = context[-self.depth :]
cond = 0.0
while True:
assert len(context)
cur_node = self
idx = 0
while idx < len(context):
token = context[idx]
next_node = cur_node.children.get(token, None)
if next_node is None:
if idx == len(context) - 1:
cond += cur_node.bo
break
else:
cur_node = next_node
idx += 1
if idx == len(context):
return cond + cur_node.lprob
assert len(context) > 1 # all unigrams should exist
context = context[1:]
# should never get here
def log_prob(self, context, _srilm_hacks=False):
joint = 0.0
for prefix in range(2 if context[0] == self.sos else 1, len(context) + 1):
joint += self.conditional(context[:prefix])
if _srilm_hacks and context[0] == self.sos:
# this is a really silly thing that SRI does - it estimates
# the initial SOS probability with an EOS probability. Why?
# The unigram probability of an SOS is 0. However, we assume
# the sentence-initial SOS exists prior to the generation task,
# and isn't a "real" part of the vocabulary
joint += self.conditional((self.eos,))
return joint
def _gather_nodes_by_depth(self, order):
nodes = [(tuple(), self)]
nodes_by_depth = []
for _ in range(order):
last, nodes = nodes, []
nodes_by_depth.append(nodes)
for ctx, parent in last:
nodes.extend((ctx + (k,), v) for (k, v) in parent.children.items())
return nodes_by_depth
def _gather_nodes_at_depth(self, order):
nodes = [(tuple(), self)]
for _ in range(order):
last, nodes = nodes, []
for ctx, parent in last:
nodes.extend((ctx + (k,), v) for (k, v) in parent.children.items())
return nodes
def _renormalize_backoffs_for_order(self, order):
nodes = self._gather_nodes_at_depth(order)
base_10 = np.log(10)
for h, node in nodes:
if not len(node.children):
node.bo = 0.0
continue
num = 0.0
denom = 0.0
for w, child in node.children.items():
assert child.lprob is not None
num -= 10.0 ** child.lprob
denom -= 10.0 ** self.conditional(h[1:] + (w,))
# these values may be ridiculously close to 1, but still valid.
if num < -1.0:
raise ValueError(
"Too much probability mass {} on children of n-gram {}"
"".format(-num, h)
)
elif denom <= -1.0:
# We'll never back off. By convention, this is 0. (Pr(1.))
new_bo = 0.0
elif num == -1.0:
if node.bo > -10:
warnings.warn(
"Found a non-negligible backoff {} for n-gram {} "
"when no backoff mass should exist".format(node.bo, h)
)
continue
else:
new_bo = (np.log1p(num) - np.log1p(denom)) / base_10
node.bo = new_bo
def recalculate_depth(self):
max_depth = 0
stack = [(max_depth, self)]
while stack:
depth, node = stack.pop()
max_depth = max(max_depth, depth)
stack.extend((depth + 1, c) for c in node.children.values())
self.depth = max_depth
def renormalize_backoffs(self):
for order in range(1, self.depth): # final order has no backoffs
self._renormalize_backoffs_for_order(order)
def relative_entropy_pruning(self, threshold, eps=1e-8, _srilm_hacks=False):
nodes_by_depth = self._gather_nodes_by_depth(self.depth - 1)
base_10 = np.log(10)
while nodes_by_depth:
nodes = nodes_by_depth.pop() # highest order first
for h, node in nodes:
num = 0.0
denom = 0.0
logP_w_given_hprimes = [] # log P(w | h')
P_h = 10 ** self.log_prob(h, _srilm_hacks=_srilm_hacks)
for w, child in node.children.items():
assert child.lprob is not None
num -= 10.0 ** child.lprob
logP_w_given_hprime = self.conditional(h[1:] + (w,))
logP_w_given_hprimes.append(logP_w_given_hprime)
denom -= 10.0 ** logP_w_given_hprime
if num + 1 < eps or denom + 1 < eps:
warnings.warn(
"Malformed backoff weight for context {}. Leaving "
"as is".format(h)
)
continue
# alpha = (1 + num) / (1 + denom)
log_alpha = (np.log1p(num) - np.log1p(denom)) / base_10
if abs(log_alpha - node.bo) > 1e-2:
warnings.warn(
"Calculated backoff ({}) differs from stored "
"backoff ({}) for context {}"
"".format(log_alpha, node.bo, h)
)
if _srilm_hacks:
# technically these should match when well-formed, but
# re-calculating alpha allows us to re-normalize an ill-formed
# language model
log_alpha = node.bo
for idx, w in enumerate(tuple(node.children)):
child = node.children[w]
if child.bo:
continue # don't prune children with backoffs
logP_w_given_h = child.lprob
P_w_given_h = 10 ** logP_w_given_h
logP_w_given_hprime = logP_w_given_hprimes[idx]
P_w_given_hprime = 10 ** logP_w_given_hprime
new_num = num + P_w_given_h
new_denom = denom + P_w_given_hprime
log_alphaprime = np.log1p(new_num)
log_alphaprime -= np.log1p(new_denom)
log_alphaprime /= base_10
log_delta_prob = logP_w_given_hprime + log_alphaprime
log_delta_prob -= logP_w_given_h
KL = -P_h * (
P_w_given_h * log_delta_prob
+ (log_alphaprime - log_alpha) * (1.0 + num)
)
delta_perplexity = 10.0 ** KL - 1
if delta_perplexity < threshold:
node.children.pop(w)
# we don't have to set backoff properly (we'll renormalize at end).
# We just have to signal whether we can be pruned to our parents (do
# *we* have children?)
node.bo = float("nan") if len(node.children) else None
# recalculate depth in case it's changed
self.depth = -1
cur_nodes = (self,)
while cur_nodes:
self.depth += 1
next_nodes = []
for parent in cur_nodes:
next_nodes.extend(parent.children.values())
cur_nodes = next_nodes
assert self.depth >= 1
self.renormalize_backoffs()
def to_prob_list(self):
nodes_by_depth = self._gather_nodes_by_depth(self.depth)
prob_list = []
for order, nodes in enumerate(nodes_by_depth):
is_first = not order
is_last = order == self.depth - 1
dict_ = dict()
for context, node in nodes:
if is_first:
context = context[0]
if is_last:
assert not node.bo
value = node.lprob
else:
value = (node.lprob, node.bo)
dict_[context] = value
prob_list.append(dict_)
return prob_list
def prune_by_threshold(self, lprob):
for order in range(self.depth - 1, 0, -1):
for _, parent in self._gather_nodes_at_depth(order):
for w in set(parent.children):
child = parent.children[w]
if not child.children and child.lprob <= lprob:
del parent.children[w]
self.renormalize_backoffs()
self.recalculate_depth()
def prune_by_name(self, to_prune, eps_lprob):
to_prune = set(to_prune)
# we'll prune by threshold in a second pass, so no need to worry about
# parent-child stuff
extra_mass = -float("inf")
remainder = set()
stack = [((w,), c) for w, c in self.children.items()]
while stack:
ctx, node = stack.pop()
stack.extend((ctx + (w,), c) for w, c in node.children.items())
if len(ctx) == 1:
ctx = ctx[0]
if ctx in to_prune:
extra_mass = _log10sumexp(extra_mass, node.lprob)
node.lprob = eps_lprob
elif node.lprob > eps_lprob:
remainder.add(ctx)
elif ctx in to_prune:
node.lprob = eps_lprob
# we never *actually* remove unigrams - we set their probablities to roughly
# zero and redistribute the collected mass across the remainder
if not remainder:
raise ValueError("No unigrams are left unpruned!")
extra_mass -= np.log10(len(remainder))
for w in remainder:
child = self.children[w]
child.lprob = _log10sumexp(child.lprob, extra_mass)
self.prune_by_threshold(eps_lprob)
def conditional(self, context):
r"""Return the log probability of the last word in the context
`context` is a non-empty sequence of tokens ``[w_1, w_2, ..., w_N]``. This
method determines
.. math::
\log Pr(w_N | w_{N-1}, w_{N-2}, ... w_{N-C})
Where ``C`` is this model's maximum n-gram size. If an exact entry cannot be
found, the model backs off to a shorter context.
Parameters
----------
context : sequence
Returns
-------
cond : float or :obj:`None`
"""
if self.unk is None:
context = tuple(context)
else:
context = tuple(t if t in self.vocab else self.unk for t in context)
if not len(context):
raise ValueError("context must have at least one token")
return self.trie.conditional(context)
def log_prob(self, context):
r"""Return the log probability of the whole context
`context` is a non-empty sequence of tokens ``[w_1, w_2, ..., w_N]``. This
method determines
.. math::
\log Pr(w_1, w_2, ..., w_{N})
Which it decomposes according to the markov assumption (see :func:`conditional`)
Parameters
----------
context : sequence
Returns
-------
joint : float
"""
if self.unk is None:
context = tuple(context)
else:
context = tuple(t if t in self.vocab else self.unk for t in context)
if not len(context):
raise ValueError("context must have at least one token")
return self.trie.log_prob(context)
def to_prob_list(self):
return self.trie.to_prob_list()
def renormalize_backoffs(self):
r"""Ensure backoffs induce a valid probability distribution
Backoff models follow the same recursive formula for determining the probability
of the next token:
.. math::
Pr(w_n|w_1, \ldots w_{n-1}) = \begin{cases}
Entry(w_1, \ldots, w_n) &
\text{if }Entry(\ldots)\text{ exists}\\
Backoff(w_1, \ldots, w_{n-1})P(w_n|w_{n-1}, \ldots, w_2) &
\text{otherwise}
\end{cases}
Calling this method renormalizes :math:`Backoff(\ldots)` such that,
where possible, :math:`\sum_w Pr(w|\ldots) = 1`
"""
return self.trie.renormalize_backoffs()
def relative_entropy_pruning(self, threshold, _srilm_hacks=False):
r"""Prune n-grams with negligible impact on model perplexity
This method iterates through n-grams, highest order first, looking to absorb
their explicit probabilities into a backoff. The language model defines a
distribution over sequences, :math:`s \sim p(\cdot|\theta)`. Assuming this is
the true distribution of sequences, we can define an approximation of
:math:`p(\cdot)`, :math:`q(\cdot)`, as one that replaces one explicit n-gram
probability with a backoff. [stolcke2000]_ defines the relative change in model
perplexity as:
.. math::
\Delta PP = e^{D_{KL}(p\|q)} - 1
Where :math:`D_{KL}` is the KL-divergence between the two distributions. This
method will prune an n-gram whenever the change in model perplexity is
negligible (below `threshold`). More details can be found in [stolcke2000]_.
Parameters
----------
threshold : float
References
----------
        .. [stolcke2000] <NAME>, "Entropy-based pruning of Backoff Language Models,"
ArXiv ePrint, 2000
"""
return self.trie.relative_entropy_pruning(threshold, _srilm_hacks=_srilm_hacks)
def sequence_perplexity(self, sequence, include_delimiters=True):
r"""Return the perplexity of the sequence using this language model
Given a `sequence` of tokens ``[w_1, w_2, ..., w_N]``, the perplexity of the
sequence is
.. math::
Pr(sequence)^{-1/N} = Pr(w_1, w_2, ..., w_N)^{-1/N}
Parameters
----------
sequence : sequence
include_delimiters : bool, optional
If :obj:`True`, the sequence will be prepended with the
start-of-sequence symbol and appended with an end-of-sequence
symbol, assuming they do not already exist as prefix and suffix of
`sequence`
Notes
-----
If the first token in `sequence` is the start-of-sequence token (or
it is added using `include_delimiters`), it will not be included in
the count ``N`` because ``Pr(sos) = 1`` always. An end-of-sequence
token is always included in ``N``.
"""
sequence = list(sequence)
if include_delimiters:
if not len(sequence) or sequence[0] != self.sos:
sequence.insert(0, self.sos)
if sequence[-1] != self.eos:
sequence.append(self.eos)
if not len(sequence):
raise ValueError(
"sequence cannot be empty when include_delimiters is False"
)
N = len(sequence)
if sequence[0] == self.sos:
N -= 1
return 10.0 ** (-self.log_prob(sequence) / N)
def corpus_perplexity(self, corpus, include_delimiters=True):
r"""Calculate the perplexity of an entire corpus using this model
A `corpus` is a sequence of sequences ``[s_1, s_2, ..., s_S]``. Each
sequence ``s_i`` is a sequence of tokens ``[w_1, w_2, ..., w_N_i]``.
Assuming sentences are independent,
.. math::
Pr(corpus) = Pr(s_1, s_2, ..., s_S) = Pr(s_1)Pr(s_2)...Pr(s_S)
        We calculate the corpus perplexity as the inverse corpus probability
normalized by the total number of tokens in the corpus. Letting
:math:`M = \sum_i^S N_i`, the corpus perplexity is
.. math::
Pr(corpus)^{-1/M}
Parameters
----------
corpus : sequence
include_delimiters : bool, optional
Whether to add start- and end-of-sequence delimiters to each
            sequence (if necessary). See :func:`sequence_perplexity` for more
info
"""
joint = 0.0
M = 0
for sequence in corpus:
sequence = list(sequence)
if include_delimiters:
if not len(sequence) or sequence[0] != self.sos:
sequence.insert(0, self.sos)
if sequence[-1] != self.eos:
sequence.append(self.eos)
if not len(sequence):
warnings.warn("skipping empty sequence (include_delimiters is False)")
continue
N = len(sequence)
if sequence[0] == self.sos:
N -= 1
M += N
joint += self.log_prob(sequence)
return 10.0 ** (-joint / M)
def prune_by_threshold(self, lprob):
"""Prune n-grams with a log-probability <= a threshold
This method prunes n-grams with a conditional log-probability less than or equal
to some fixed threshold. The reclaimed probability mass is sent to the
(n-1)-gram's backoff.
This method never prunes unigrams. Further, it cannot prune n-grams which are a
prefix of some higher-order n-gram that has a conditional probability above that
threshold, since the higher-order n-gram may have need of the lower-order's
backoff.
Parameters
----------
lprob : float
The base-10 log probability of conditionals, below or at which the n-gram
will be pruned.
"""
self.trie.prune_by_threshold(lprob)
def prune_by_name(self, to_prune, eps_lprob=-99.999):
"""Prune n-grams by name
This method prunes n-grams of arbitrary order by name. For n-grams of order > 1,
the reclaimed probability mass is allotted to the appropriate backoff. For
unigrams, the reclaimed probability mass is distributed uniformly across the
remaining unigrams.
This method prunes nodes by setting their probabilities a small log-probability
(`eps_lprob`), then calling :func:`prune_by_threshold` with that small
log-probability. This ensures we do not remove the backoff of higher-order
n-grams (instead setting the probability of "pruned" nodes very low), and gets
rid of lower-order nodes that were previously "pruned" but had to exist for
their backoff when their backoff is now no longer needed.
Unigrams are never fully pruned - their log probabilities are set to
`eps_lprob`.
Parameters
----------
to_prune : set
A set of all n-grams of all orders to prune.
eps_lprob : float, optional
A base 10 log probability considered negligible
"""
self.trie.prune_by_name(to_prune, eps_lprob)
def write_arpa(prob_list, out=sys.stdout):
"""Convert an lists of n-gram probabilities to arpa format
The inverse operation of :func:`pydrobert.torch.util.parse_arpa_lm`
Parameters
----------
prob_list : list of dict
out : file or str, optional
Path or file object to output to
"""
if isinstance(out, str):
with open(out, "w") as f:
return write_arpa(prob_list, f)
entries_by_order = []
for idx, dict_ in enumerate(prob_list):
entries = sorted((k, v) if idx else ((k,), v) for (k, v) in dict_.items())
entries_by_order.append(entries)
out.write("\\data\\\n")
for idx in range(len(entries_by_order)):
out.write("ngram {}={}\n".format(idx + 1, len(entries_by_order[idx])))
out.write("\n")
for idx, entries in enumerate(entries_by_order):
out.write("\\{}-grams:\n".format(idx + 1))
if idx == len(entries_by_order) - 1:
for entry in entries:
out.write("{} {}\n".format(" ".join(entry[0]), entry[1]))
else:
for entry in entries:
out.write(
"{} {} {}\n".format(entry[1][0], " ".join(entry[0]), entry[1][1])
)
out.write("\n")
out.write("\\end\\\n")
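# Minimal sketch (toy log-probabilities, assumed purely for illustration): build
# a tiny bigram backoff model, compute a sequence perplexity, prune the lowest
# bigram by threshold, and dump the resulting model in ARPA format.
if __name__ == "__main__":
    _toy_prob_list = [
        {"<s>": (-99.999, -0.3), "a": (-0.5, -0.3), "b": (-0.5, -0.3),
         "</s>": (-0.5, 0.0), "<unk>": (-2.0, 0.0)},
        {("<s>", "a"): -0.1, ("a", "b"): -0.2, ("b", "</s>"): -0.1},
    ]
    _lm = BackoffNGramLM(_toy_prob_list)
    print(_lm.sequence_perplexity(["a", "b"]))  # ~1.36
    _lm.prune_by_threshold(-0.15)               # drops the ('a', 'b') bigram
    print(_lm.sequence_perplexity(["a", "b"]))  # ~1.71
    write_arpa(_lm.to_prob_list())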
def ngram_counts_to_prob_list_mle(ngram_counts, eps_lprob=-99.999):
r"""Determine probabilities based on MLE of observed n-gram counts
For a given n-gram :math:`p, w`, where :math:`p` is a prefix, :math:`w` is the next
word, the maximum likelihood estimate of the last token given the prefix is:
.. math::
Pr(w | p) = C(p, w) / (\sum_w' C(p, w'))
    Where :math:`C(x)` is the count of the sequence :math:`x`. Many counts will be zero,
especially for large n-grams or rare words, making this a not terribly generalizable
solution.
Parameters
----------
ngram_counts : sequence
A list of dictionaries. ``ngram_counts[0]`` should correspond to unigram counts
in a corpus, ``ngram_counts[1]`` to bi-grams, etc. Keys are tuples of tokens
(n-grams) of the appropriate length, with the exception of unigrams, whose keys
are the tokens themselves. Values are the counts of those n-grams in the corpus.
eps_lprob : float, optional
A very negative value substituted as "negligible probability"
Returns
-------
prob_list : sequence
Corresponding n-gram conditional probabilities. See
:mod:`pydrobert.torch.util.parse_arpa_lm`
Examples
--------
>>> from collections import Counter
>>> text = 'a man a plan a canal panama'
>>> ngram_counts = [
>>> Counter(
>>> tuple(text[offs:offs + order]) if order > 1
>>> else text[offs:offs + order]
>>> for offs in range(len(text) - order + 1)
>>> )
>>> for order in range(1, 4)
>>> ]
>>> ngram_counts[0]['<unk>'] = 0 # add oov to vocabulary
>>> ngram_counts[0]['a']
10
>>> sum(ngram_counts[0].values())
27
>>> ngram_counts[1][('a', ' ')]
3
>>> sum(v for (k, v) in ngram_counts[1].items() if k[0] == 'a')
9
>>> prob_list = ngram_counts_to_prob_list_mle(ngram_counts)
>>> prob_list[0]['a'] # (log10(10 / 27), eps_lprob)
    (-0.43136376415898736, -99.999)
>>> '<unk>' in prob_list[0] # no probability mass gets removed
False
>>> prob_list[1][('a', ' ')] # (log10(3 / 9), eps_lprob)
    (-0.47712125471966244, -99.999)
Notes
-----
To be compatible with back-off models, MLE estimates assign a negligible backoff
probability (`eps_lprob`) to n-grams where necessary. This means the probability
mass might not exactly sum to one.
"""
return ngram_counts_to_prob_list_add_k(ngram_counts, eps_lprob=eps_lprob, k=0.0)
def _get_cond_mle(order, counts, vocab, k):
n_counts = dict() # C(p, w) + k
d_counts = dict() # \sum_w' C(p, w') + k|V|
for ngram in product(vocab, repeat=order + 1):
c = counts.get(ngram if order else ngram[0], 0) + k
if not c:
continue
n_counts[ngram] = c
d_counts[ngram[:-1]] = d_counts.get(ngram[:-1], 0) + c
return dict(
(ng, np.log10(num) - np.log10(d_counts[ng[:-1]]))
for ng, num in n_counts.items()
)
def ngram_counts_to_prob_list_add_k(ngram_counts, eps_lprob=-99.999, k=0.5):
r"""MLE probabilities with constant discount factor added to counts
Similar to :func:`ngram_counts_to_prob_list_mle`, but with a constant added to each
count to smooth out probabilities:
.. math::
Pr(w|p) = (C(p,w) + k)/(\sum_w' C(p, w') + k|V|)
Where :math:`p` is a prefix, :math:`w` is the next word, and :math:`V` is the
vocabulary set. The initial vocabulary set is determined from the unique unigrams
:math:`V = U`. The bigram vocabulary set is the Cartesian product :math:`V = U
\times U`, trigrams :math:`V = U \times U \times U`, and so on.
Parameters
----------
ngram_counts : sequence
A list of dictionaries. ``ngram_counts[0]`` should correspond to unigram counts
in a corpus, ``ngram_counts[1]`` to bi-grams, etc. Keys are tuples of tokens
(n-grams) of the appropriate length, with the exception of unigrams, whose keys
are the tokens themselves. Values are the counts of those n-grams in the corpus.
eps_lprob : float, optional
A very negative value substituted as "negligible probability"
Returns
-------
prob_list : sequence
Corresponding n-gram conditional probabilities. See
:mod:`pydrobert.torch.util.parse_arpa_lm`
Examples
--------
>>> from collections import Counter
>>> text = 'a man a plan a canal panama'
>>> ngram_counts = [
>>> Counter(
>>> tuple(text[offs:offs + order]) if order > 1
>>> else text[offs:offs + order]
>>> for offs in range(len(text) - order + 1)
>>> )
>>> for order in range(1, 4)
>>> ]
>>> ngram_counts[0]['<unk>'] = 0 # add oov to vocabulary
>>> ngram_counts[0]['a']
10
>>> sum(ngram_counts[0].values())
27
>>> ('a', '<unk>') not in ngram_counts[1]
True
>>> sum(v for (k, v) in ngram_counts[1].items() if k[0] == 'a')
9
>>> prob_list = ngram_counts_to_prob_list_add_k(ngram_counts, k=1)
>>> prob_list[0]['a'] # (log10((10 + 1) / (27 + 8)), eps_lprob)
(-0.5026753591920505, -99.999)
>>> # Pr('a' | '<unk>') = (C('<unk>', 'a') + k) / (C('<unk>', .) + k|V|)
>>> # = 1 / 8
>>> prob_list[1][('<unk>', 'a')] # (log10(1 / 8), eps_lprob)
(-0.9030899869919435, -99.999)
>>> # Pr('<unk>' | 'a') = (C('a', '<unk>') + k) / (C('a', .) + k|V|)
>>> # = 1 / (9 + 8)
>>> prob_list[1][('a', '<unk>')] # (log10(1 / 17), eps_lprob)
(-1.2304489213782739, -99.999)
"""
max_order = len(ngram_counts) - 1
if not len(ngram_counts):
raise ValueError("At least unigram counts must exist")
vocab = set(ngram_counts[0])
prob_list = []
for order, counts in enumerate(ngram_counts):
probs = _get_cond_mle(order, counts, vocab, k)
if not order:
for v in vocab:
probs.setdefault((v,), eps_lprob)
if order != max_order:
probs = dict((ngram, (prob, eps_lprob)) for (ngram, prob) in probs.items())
prob_list.append(probs)
prob_list[0] = dict((ngram[0], p) for (ngram, p) in prob_list[0].items())
return prob_list
def _log10sumexp(*args):
if len(args) > 1:
return _log10sumexp(args)
args = np.array(args, dtype=float, copy=False)
x = args[0]
if np.any(np.isnan(x)):
return np.nan
if np.any(np.isposinf(x)):
return np.inf
x = x[np.isfinite(x)]
if not len(x):
return 0.0
max_ = np.max(x)
return np.log10((10 ** (x - max_)).sum()) + max_
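# Example (sketch): _log10sumexp adds probabilities that are stored as log10 values,
# e.g. _log10sumexp(-1.0, -2.0) ~= np.log10(10.0 ** -1 + 10.0 ** -2) ~= -0.9586.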
def _simple_good_turing_counts(counts, eps_lprob):
    # This follows section 6 of Gale & Sampson's "Good-Turing smoothing without
    # tears" pretty closely; read that section if the steps below look opaque.
N_r = Counter(counts.values())
max_r = max(N_r.keys())
N_r = np.array(tuple(N_r.get(i, 0) for i in range(max_r + 2)))
N_r[0] = 0
r = np.arange(max_r + 2)
N = (N_r * r).sum()
log_N = np.log10(N)
nonzeros = np.where(N_r != 0)[0]
# find S(r) = a r^b
Z_rp1 = 2.0 * N_r[1:-1]
j = r[1:-1]
diff = nonzeros - j[..., None]
i = j - np.where(-diff < 1, max_r, -diff).min(1)
i[0] = 0
k = j + np.where(diff < 1, max_r, diff).min(1)
k[-1] = 2 * j[-1] - i[-1]
Z_rp1 /= k - i
y = np.log10(Z_rp1[nonzeros - 1]) # Z_rp1 does not include r=0
x = np.log10(r[nonzeros])
# regress on y = bx + a
mu_x, mu_y = x.mean(), y.mean()
num = ((x - mu_x) * (y - mu_y)).sum()
denom = ((x - mu_x) ** 2).sum()
b = num / denom if denom else 0.0
a = mu_y - b * mu_x
log_Srp1 = a + b * np.log10(r[1:])
# determine direct estimates of r* (x) as well as regressed estimates of
# r* (y). Use x until absolute difference between x and y is statistically
# significant (> 2 std dev of gauss defined by x)
log_r_star = np.empty(max_r + 1, dtype=float)
log_Nr = log_r_star[0] = np.log10(N_r[1]) if N_r[1] else eps_lprob + log_N
switched = False
C, ln_10 = np.log10(1.69), np.log(10)
for r_ in range(1, max_r + 1):
switched |= not N_r[r_]
log_rp1 = np.log10(r_ + 1)
log_y = log_rp1 + log_Srp1[r_] - log_Srp1[r_ - 1]
if not switched:
if N_r[r_ + 1]:
log_Nrp1 = np.log10(N_r[r_ + 1])
else:
log_Nrp1 = eps_lprob + log_N + log_Nr
log_x = log_rp1 + log_Nrp1 - log_Nr
if log_y > log_x:
log_abs_diff = log_y + np.log1p(-np.exp(log_x - log_y))
            elif log_x > log_y:
log_abs_diff = log_x + np.log1p(-np.exp(log_y - log_x))
else:
log_abs_diff = -float("inf")
log_z = C + log_rp1 - log_Nr + 0.5 * log_Nrp1
log_z += 0.5 * np.log1p(N_r[r_ + 1] / N_r[r_]) / ln_10
if log_abs_diff <= log_z:
switched = True
else:
log_r_star[r_] = log_x
log_Nr = log_Nrp1
if switched:
log_r_star[r_] = log_y
# G&S tell us to renormalize the prob mass among the nonzero r terms. i.e.
# p[0] = r_star[0] / N
# p[i] = (1 - p[0]) r_star[i] / N'
# where N' = \sum_i>0 N_r[i] r_star[i]
# we convert back to counts so that our conditional MLEs are accurate
max_log_r_star = np.max(log_r_star[1:][nonzeros[:-1] - 1])
log_Np = np.log10((N_r[1:-1] * 10 ** (log_r_star[1:] - max_log_r_star)).sum())
log_Np += max_log_r_star
log_p_0 = log_r_star[0] - log_N
log_r_star[1:] += -log_Np + np.log10(1 - 10 ** log_p_0) + log_N
return log_r_star
def ngram_counts_to_prob_list_simple_good_turing(ngram_counts, eps_lprob=-99.999):
r"""Determine probabilities based on n-gram counts using simple good-turing
Simple Good-Turing smoothing discounts counts of n-grams according to the following
scheme:
.. math::
r^* = (r + 1) N_{r + 1} / N_r
Where :math:`r` is the original count of the n-gram in question, :math:`r^*` the
discounted, and :math:`N_r` is the count of the number of times any n-gram had a
count `r`.
    When :math:`N_r` becomes sparse, it is replaced with a log-log regression on the
    smoothed counts, :math:`\log S(r) = a + b \log r`. The :math:`r^*` for
    :math:`r > 0` are then renormalized so that :math:`\sum_r N_r r^* = \sum_r N_r r`.
We assume a closed vocabulary and that, for any order n-gram, :math:`N_0` is the
size of the set of n-grams with frequency zero. This method differs from traditional
Good-Turing, which assumes one unseen "event" (i.e. n-gram) per level. See below
notes for more details.
If, for a given order of n-gram, none of the terms have frequency zero, this
function will warn and use MLEs.
Parameters
----------
ngram_counts : sequence
A list of dictionaries. ``ngram_counts[0]`` should correspond to unigram counts
in a corpus, ``ngram_counts[1]`` to bi-grams, etc. Keys are tuples of tokens
(n-grams) of the appropriate length, with the exception of unigrams, whose keys
are the tokens themselves. Values are the counts of those n-grams in the corpus.
eps_lprob : float, optional
A very negative value substituted as "negligible probability."
Returns
-------
prob_list : sequence
Corresponding n-gram conditional probabilities. See
:mod:`pydrobert.torch.util.parse_arpa_lm`
Notes
-----
The traditional definition of Good-Turing is somewhat vague about how to assign
probability mass among unseen events. By setting :math:`r^* = N_1 / N` for :math:`r
= 0`, it's implicitly stating that :math:`N_0 = 1`, that is, there's only one
possible unseen event. This is consistent with introducing a special token, e.g.
``"<unk>"``, that does not occur in the corpus. It also collapses unseen n-grams
into one event.
We cannot bootstrap the backoff penalty to be the probability of the unseen term
because the backoff will be combined with a lower-order estimate, and Good-Turing
uses a fixed unseen probability.
As our solution, we assume the vocabulary is closed. Any term that appears zero
times is added to :math:`N_0`. If all terms appear, then :math:`N_0 = 0` and we
revert to the MLE. While you can simulate the traditional Good-Turing at the
unigram-level by introducing ``"<unk>"`` with count 0, this will not hold for
higher-order n-grams.
Warnings
--------
This function manually defines all n-grams of the target order given a vocabulary.
This means that higher-order n-grams will be very large.
Examples
--------
>>> from collections import Counter
>>> text = 'a man a plan a canal panama'
>>> ngram_counts = [
>>> Counter(
>>> tuple(text[offs:offs + order]) if order > 1
>>> else text[offs:offs + order]
>>> for offs in range(len(text) - order + 1)
>>> )
>>> for order in range(1, 4)
>>> ]
>>> ngram_counts[0]['<unk>'] = 0 # add oov to vocabulary
>>> sum(ngram_counts[0].values())
27
>>> Counter(ngram_counts[0].values())
Counter({2: 3, 10: 1, 6: 1, 4: 1, 1: 1, 0: 1})
>>> # N_1 = 1, N_2 = 3, N_3 = 1
>>> prob_list = ngram_counts_to_prob_list_simple_good_turing(ngram_counts)
>>> # Pr('<unk>') = Pr(r=0) = N_1 / N_0 / N = 1 / 27
>>> prob_list[0]['<unk>'] # (log10(1 / 27), eps_lprob)
(-1.4313637641589874, -99.999)
    >>> # Pr('a'|'<unk>') = Cstar('<unk>', 'a') / Cstar('<unk>', .)
>>> # = rstar[0] / (|V| * rstar[0]) = 1 / 8
>>> prob_list[1][('<unk>', 'a')] # (log10(1 / 8), eps_lprob)
(-0.9030899869919435, -99.999)
References
----------
.. [gale1995] <NAME> and <NAME>, "Good‐Turing frequency estimation without
tears," Journal of Quantitative Linguistics, vol. 2, no. 3, pp. 217-237, Jan.
1995.
"""
if len(ngram_counts) < 1:
raise ValueError("At least unigram counts must exist")
max_order = len(ngram_counts) - 1
vocab = set(ngram_counts[0])
prob_list = []
for order, counts in enumerate(ngram_counts):
N_0_vocab = set()
log_r_stars = _simple_good_turing_counts(counts, eps_lprob)
n_counts = dict()
d_counts = dict()
for ngram in product(vocab, repeat=order + 1):
r = counts.get(ngram if order else ngram[0], 0)
if r:
c = 10.0 ** log_r_stars[r]
n_counts[ngram] = c
d_counts[ngram[:-1]] = d_counts.get(ngram[:-1], 0) + c
else:
N_0_vocab.add(ngram)
N_0 = len(N_0_vocab)
if N_0:
c = (10 ** log_r_stars[0]) / N_0
for ngram in N_0_vocab:
n_counts[ngram] = c
d_counts[ngram[:-1]] = d_counts.get(ngram[:-1], 0) + c
probs = dict(
(ng, np.log10(n_counts[ng]) - np.log10(d_counts[ng[:-1]]))
for ng in n_counts
)
else:
warnings.warn(
"No {}-grams were missing. Using MLE instead" "".format(order + 1)
)
probs = _get_cond_mle(order, counts, vocab, 0)
if order != max_order:
probs = dict((ngram, (prob, eps_lprob)) for (ngram, prob) in probs.items())
prob_list.append(probs)
prob_list[0] = dict((ngram[0], p) for (ngram, p) in prob_list[0].items())
return prob_list
def _get_katz_discounted_counts(counts, k):
N_r = Counter(counts.values())
max_r = max(N_r.keys())
N_r = np.array(tuple(N_r.get(i, 0) for i in range(max_r + 2)))
N_r[0] = 1
r = np.arange(max_r + 2)
N = (N_r * r).sum()
log_N = np.log10(N)
with np.errstate(divide="ignore", invalid="ignore"):
log_Nr = np.log10(N_r)
log_rp1 = np.log10(r + 1)
log_r_star = log_rp1[:-1] + log_Nr[1:] - log_Nr[:-1]
if k + 1 < len(N_r):
log_d_rp1 = np.zeros(max_r, dtype=float)
log_num_minu = log_r_star[1 : k + 1] - log_rp1[:k]
log_subtra = np.log10(k + 1) + log_Nr[k + 1] - log_Nr[1]
if log_subtra >= 0:
raise ValueError("Your corpus is too small for this")
# np.log10((10 ** (x - max_)).sum()) + max_
log_num = log_num_minu + np.log1p(
-(10 ** (log_subtra - log_num_minu))
) / np.log(10)
log_denom = np.log1p(-(10 ** log_subtra)) / np.log(10)
log_d_rp1[:k] = log_num - log_denom
else:
log_d_rp1 = log_r_star[1:] - log_rp1[:-1]
log_r_star = np.empty(max_r + 1, dtype=float)
log_r_star[0] = log_Nr[1]
log_r_star[1:] = log_d_rp1 + log_rp1[:-2]
assert np.isclose(_log10sumexp(log_r_star + log_Nr[:-1]), log_N)
return log_r_star
def ngram_counts_to_prob_list_katz_backoff(
ngram_counts, k=7, eps_lprob=-99.999, _cmu_hacks=False
):
r"""Determine probabilities based on Katz's backoff algorithm
    Katz's backoff algorithm determines the conditional probability of the last token in
n-gram :math:`w = (w_1, w_2, ..., w_n)` as
.. math::
        Pr_{BO}(w_n|w_{n-1}, w_{n-2}, ..., w_1) = \begin{cases}
            d_w Pr_{MLE}(w_n|w_{n-1}, w_{n-2}, ..., w_1) & \text{if } C(w) > 0 \\
            \alpha(w_1, ..., w_{n-1}) Pr_{BO}(w_n|w_{n-1}, ..., w_2) & \text{else}
        \end{cases}
Where :math:`Pr_{MLE}` is the maximum likelihood estimate (based on frequencies),
:math:`d_w` is some discount factor (based on Good-Turing for low-frequency
n-grams), and :math:`\alpha` is an allowance of the leftover probability mass from
discounting.
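    Mirroring the comments in the implementation below, the backoff weight
    :math:`\alpha` is chosen so that each conditional distribution still sums to one:

    .. math::

        \alpha(w_1, ..., w_{n-1}) = \frac{
            1 - \sum_{w : C(w_1, ..., w_{n-1}, w) > 0} Pr_{BO}(w|w_{n-1}, ..., w_1)}{
            1 - \sum_{w : C(w_1, ..., w_{n-1}, w) > 0} Pr_{BO}(w|w_{n-1}, ..., w_2)}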
Parameters
----------
ngram_counts : sequence
A list of dictionaries. ``ngram_counts[0]`` should correspond to unigram counts
in a corpus, ``ngram_counts[1]`` to bi-grams, etc. Keys are tuples of tokens
(n-grams) of the appropriate length, with the exception of unigrams, whose keys
are the tokens themselves. Values are the counts of those n-grams in the corpus.
k : int, optional
`k` is a threshold such that, if :math:`C(w) > k`, no discounting will be
applied to the term. That is, the probability mass assigned for backoff will be
entirely from n-grams s.t. :math:`C(w) \leq k`.
eps_lprob : float, optional
A very negative value substituted as "negligible probability."
Warnings
--------
If the counts of the extensions of a prefix are all above `k`, no discounting will
be applied to those counts, meaning no probability mass can be assigned to unseen
events.
For example, in the Brown corpus, "Hong" is always followed by "Kong". The bigram
"Hong Kong" occurs something like 10 times, so it's not discounted. Thus
    :math:`Pr_{BO}(Kong|Hong) = 1` and :math:`Pr_{BO}(w|Hong) = 0` for every other word
    :math:`w`.
    A :class:`UserWarning` will be issued whenever this happens. If this bothers you, you
could try increasing `k` or, better yet, abandon Katz Backoff altogether.
Returns
-------
prob_list : sequence
Corresponding n-gram conditional probabilities. See
:mod:`pydrobert.torch.util.parse_arpa_lm`
Examples
--------
>>> from nltk.corpus import brown
>>> from collections import Counter
>>> text = tuple(brown.words())[:20000]
>>> ngram_counts = [
>>> Counter(
>>> text[offs:offs + order] if order > 1
>>> else text[offs]
>>> for offs in range(len(text) - order + 1)
>>> )
>>> for order in range(1, 4)
>>> ]
>>> del text
>>> prob_list = ngram_counts_to_prob_list_katz_backoff(ngram_counts)
References
----------
.. [katz1987] <NAME>, "Estimation of probabilities from sparse data for the
language model component of a speech recognizer," IEEE Transactions on Acoustics,
Speech, and Signal Processing, vol. 35, no. 3, pp. 400-401, Mar. 1987.
"""
if len(ngram_counts) < 1:
raise ValueError("At least unigram counts must exist")
if k < 1:
raise ValueError("k too low")
prob_list = []
max_order = len(ngram_counts) - 1
probs = _get_cond_mle(0, ngram_counts[0], set(ngram_counts[0]), 0)
if 0 != max_order:
probs = dict((ngram, (prob, 0.0)) for (ngram, prob) in probs.items())
prob_list.append(probs)
log_r_stars = [
_get_katz_discounted_counts(counts, k) for counts in ngram_counts[1:]
]
if _cmu_hacks:
# A note on CMU compatibility. First, the standard non-ML estimate of
# P(w|p) = C(p, w) / C(p) instead of P(w|p) = C(p, w) / sum_w' C(p, w')
# Second, this below loop. We add one to the count of a prefix whenever
# that prefix has only one child and that child's count is greater than
# k (in increment_context.cc). This ensures there's a non-zero backoff
# to assign to unseen contexts starting with that prefix (N.B. this
# hack should be extended to the case where all children have count
# greater than k, but I don't want to reinforce this behaviour). Note
# that it is applied AFTER the MLE for unigrams, and AFTER deriving
# discounted counts.
for order in range(len(ngram_counts) - 1, 0, -1):
prefix2children = dict()
for ngram, count in ngram_counts[order].items():
prefix2children.setdefault(ngram[:-1], []).append(ngram)
for prefix, children in prefix2children.items():
if len(children) == 1 and ngram_counts[order][children[0]] > k:
for oo in range(order):
pp = prefix[: oo + 1]
if not oo:
pp = pp[0]
ngram_counts[oo][pp] += 1
for order in range(1, len(ngram_counts)):
counts = ngram_counts[order]
probs = dict()
# P_katz(w|pr) = C*(pr, w) / \sum_x C*(pr, x) if C(pr, w) > 0
# alpha(pr) Pr_katz(w|pr[1:]) else
# alpha(pr) = (1 - sum_{c(pr, w) > 0} Pr_katz(w|pr)
# / (1 - sum_{c(pr, w) > 0} Pr_katz(w|pr[1:]))
# note: \sum_w C*(pr, w) = \sum_w C(pr, w), which is why we can
# normalize by the true counts
lg_num_subtras = dict() # logsumexp(log c*(pr,w)) for c(pr,w) > 0
lg_den_subtras = dict() # logsumexp(log Pr(w|pr[1:]) for c(pr, w) > 0
lg_pref_counts = dict() # logsumexp(log c(pr)) for c(pr,w) > 0
for ngram, r in counts.items():
if not r:
continue
log_r_star = log_r_stars[order - 1][r]
probs[ngram] = log_r_star
lg_num_subtras[ngram[:-1]] = _log10sumexp(
lg_num_subtras.get(ngram[:-1], -np.inf), log_r_star
)
lg_den_subtras[ngram[:-1]] = _log10sumexp(
lg_den_subtras.get(ngram[:-1], -np.inf), prob_list[-1][ngram[1:]][0]
)
lg_pref_counts[ngram[:-1]] = _log10sumexp(
lg_pref_counts.get(ngram[:-1], -np.inf), np.log10(r)
)
for ngram in probs:
prefix = ngram[:-1]
if _cmu_hacks:
if order == 1:
prefix = prefix[0]
lg_norm = np.log10(ngram_counts[order - 1][prefix])
else:
lg_norm = lg_pref_counts[prefix]
probs[ngram] -= lg_norm
for prefix, lg_num_subtra in lg_num_subtras.items():
lg_den_subtra = lg_den_subtras[prefix]
if _cmu_hacks:
if order == 1:
lg_norm = np.log10(ngram_counts[order - 1][prefix[0]])
else:
lg_norm = np.log10(ngram_counts[order - 1][prefix])
else:
lg_norm = lg_pref_counts[prefix]
num_subtra = 10.0 ** (lg_num_subtra - lg_norm)
den_subtra = 10.0 ** lg_den_subtra
if np.isclose(den_subtra, 1.0): # 1 - den_subtra = 0
# If the denominator is zero, it means nothing we're backing
# off to has a nonzero probability. It doesn't really matter
# what we put here, but let's not warn about it (we've already
# warned about the prefix)
log_alpha = 0.0
elif np.isclose(num_subtra, 1.0):
warnings.warn(
"Cannot back off to prefix {}. Will assign negligible "
"probability. If this is an issue, try increasing k"
"".format(prefix)
)
# If the numerator is zero and the denominator is nonzero,
# this means we did not discount any probability mass for
# unseen terms. The only way to make a proper distribution is
# to set alpha to zero
log_alpha = eps_lprob
else:
                log_alpha = np.log1p(-num_subtra) - np.log1p(-den_subtra)
# MIT License - Copyright <NAME> and contributors
# See the LICENSE.md file included in this source code package
"""The estimator methods.
Do not import this module directly.
Use the `estimate_mi` method in the main ennemi module instead.
"""
from __future__ import annotations
import numpy as np
from scipy.spatial import cKDTree
from typing import Union
from warnings import warn
try:
import numpy.typing as npt
FloatArray = npt.NDArray[np.float64]
except:
FloatArray = "" # type: ignore
def _estimate_single_entropy(x: FloatArray, k: int = 3) -> float:
"""Estimate the differential entropy of a n-dimensional random variable.
`x` must be a 2D array with columns denoting the variable dimensions.
1D arrays are promoted to 2D correctly.
Returns the estimated entropy in nats.
The calculation is described in Kraskov et al. (2004): Estimating mutual
information. Physical Review E 69. doi:10.1103/PhysRevE.69.066138
"""
if x.ndim == 1:
x = x.reshape((x.size,1))
N, ndim = x.shape
    grid = cKDTree(x)
# Search for the k'th neighbor of each point and store the distance
distances = grid.query(x, k=[k+1], p=np.inf)[0].flatten()
# The log(2) term is because the mean is taken over double the distances
return _psi(N) - _psi(k) + ndim * (np.mean(np.log(distances)) + np.log(2))
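# Sanity-check sketch (hypothetical usage): for a 1D standard normal sample the
# analytic differential entropy is 0.5 * np.log(2 * np.pi * np.e) ~= 1.4189 nats,
# which the estimate should approach as the sample grows, e.g.
#   rng = np.random.default_rng(0)
#   _estimate_single_entropy(rng.normal(size=(10_000, 1)), k=3)  # roughly 1.42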
def _estimate_discrete_entropy(x: FloatArray, k: int = 3) -> float:
"""Estimate the discrete entropy of a n-dimensional random variable.
This is done using the mathematical definition:
entropy = -sum P(x) log(P(x)).
"""
N = x.shape[0]
_assert_not_object(x)
_, counts = np.unique(x, axis=0, return_counts=True)
probs = counts / N
return -np.sum(np.dot(probs, np.log(probs)))
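# Sanity-check sketch (hypothetical usage): a fair coin has entropy log(2) ~= 0.693
# nats, e.g. _estimate_discrete_entropy(np.array([[0], [1], [0], [1]])) ~= np.log(2).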
def _assert_not_object(x: FloatArray) -> None:
if x.dtype.kind == "O":
# We may get 'object' data type especially from pandas (which stores strings as objects).
# We can only use np.unique with 1D arrays of objects.
# Give a more user-friendly error message instead of NumPy's.
raise TypeError("Data type 'object' is not supported." +
" Please pass only numeric, boolean, or string data." +
" If your data is in a pandas DataFrame, convert string categories" +
" to integers (pandas stores strings as objects).")
def _estimate_single_mi(x: FloatArray, y: FloatArray, k: int = 3) -> float:
"""Estimate the mutual information between two continuous variables.
Returns the estimated mutual information (in nats).
The calculation is based on Kraskov et al. (2004): Estimating mutual
information. Physical Review E 69. doi:10.1103/PhysRevE.69.066138
Parameters:
---
x, y : ndarray
The observed values.
The two arrays must have the same length.
k : int
The number of neighbors to consider. Default 3.
Must be smaller than the number of observations.
The algorithm used assumes a continuous distribution. If the data set
contains many identical observations, this method may return -inf. In that
case, add low-amplitude noise to the data and try again.
"""
N = len(x)
# Ensure that x and y are 2-dimensional
x = np.column_stack((x,))
y = np.column_stack((y,))
# We use the fastest O(N*sqrt(k)) time algorithm
# Create the 2D tree for finding the k-th neighbor and marginal 1D trees
xy = np.column_stack((x, y))
grid = cKDTree(xy)
x_grid = cKDTree(x)
y_grid = cKDTree(y)
# We have to subtract a small value from the radius
# because the algorithm expects strict inequality but cKDTree also allows equality.
# This assumes that the radius is of roughly unit magnitude.
# See https://github.com/polsys/ennemi/issues/76 for justification.
eps = grid.query(xy, k=[k+1], p=np.inf)[0].flatten()
nx = x_grid.query_ball_point(x, eps - 1e-12, p=np.inf, return_length=True)
ny = y_grid.query_ball_point(y, eps - 1e-12, p=np.inf, return_length=True)
# Calculate the estimate
return _psi(N) + _psi(k) - np.mean(_psi(nx) + _psi(ny))
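# Sanity-check sketch (hypothetical usage): for a bivariate normal with correlation
# rho the exact MI is -0.5 * np.log(1 - rho ** 2) nats (~0.51 for rho = 0.8), e.g.
#   rng = np.random.default_rng(0)
#   xy = rng.multivariate_normal([0.0, 0.0], [[1.0, 0.8], [0.8, 1.0]], size=10_000)
#   _estimate_single_mi(xy[:, 0], xy[:, 1], k=3)  # roughly 0.51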
def _estimate_conditional_mi(x: FloatArray, y: FloatArray, cond: FloatArray,
k: int = 3) -> float:
"""Estimate conditional mutual information between two continuous variables.
See the documentation for estimate_single_mi for usage.
The only difference is the additional continuous variable used for
conditioning.
The calculation is based on Frenzel & Pompe (2007): Partial Mutual
Information for Coupling Analysis of Multivariate Time Series.
Physical Review Letters 99. doi:10.1103/PhysRevLett.99.204101
"""
# Ensure that cond is 2-dimensional
cond = np.column_stack((cond,))
# The cKDTree class offers a lot of vectorization
# First, create N-dimensional trees for variables
xyz = np.column_stack((x, y, cond))
full_grid = cKDTree(xyz)
xz_grid = cKDTree(np.column_stack((x, cond)))
yz_grid = cKDTree(np.column_stack((y, cond)))
z_grid = cKDTree(cond)
# Find the distance to the k'th neighbor of each point
eps = full_grid.query(xyz, k=[k+1], p=np.inf)[0].flatten()
# Find the number of neighbors in marginal spaces
xz_proj = np.column_stack((x, cond))
yz_proj = np.column_stack((y, cond))
# We have to subtract a small value from the radius
# because the algorithm expects strict inequality but cKDTree also allows equality.
# This assumes that the radius is of roughly unit magnitude.
# See https://github.com/polsys/ennemi/issues/76 for justification.
nxz = xz_grid.query_ball_point(xz_proj, eps - 1e-12, p=np.inf, return_length=True)
nyz = yz_grid.query_ball_point(yz_proj, eps - 1e-12, p=np.inf, return_length=True)
nz = z_grid.query_ball_point(cond, eps - 1e-12, p=np.inf, return_length=True)
return _psi(k) - np.mean(_psi(nxz) + _psi(nyz) - _psi(nz))
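# Sanity-check sketch (hypothetical usage): if X and Y depend on each other only
# through Z, conditioning on Z should remove the dependence, e.g.
#   rng = np.random.default_rng(0)
#   z = rng.normal(size=10_000)
#   x = z + rng.normal(scale=0.5, size=10_000)
#   y = z + rng.normal(scale=0.5, size=10_000)
#   _estimate_single_mi(x, y, k=3)            # clearly positive
#   _estimate_conditional_mi(x, y, z, k=3)    # close to zero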
def _estimate_semidiscrete_mi(x: FloatArray, y: FloatArray, k: int = 3) -> float:
"""Estimate unconditional MI between discrete y and continuous x.
The calculation is based on Ross (2014): Mutual Information between
Discrete and Continuous Data Sets. PLoS ONE 9(2):e87357.
doi:10.1371/journal.pone.0087357
The only difference to basic estimation is that the distance metric
treats different y values as being further away from each other
than the marginal distance between any two x values.
"""
N = len(x)
# Ensure that x is 2-dimensional
x = np.column_stack((x,))
# Find the unique values of y
y_values, y_counts = np.unique(y, return_counts=True)
if len(y_values) > N / 4:
warn("The discrete variable has relatively many unique values." +
" Did you pass y and x in correct order?", UserWarning)
# Create trees for each y value and for the marginal x space
grids = [cKDTree(x[y==val]) for val in y_values]
x_grid = cKDTree(x)
# For each y value:
# - Find the distance to the k'th neighbor sharing the y value
# - Find the number of neighbors within that distance in the marginal x space
# See https://github.com/polsys/ennemi/issues/76 for (eps - 1e-12) tweak.
n_full = np.empty(N)
for i, val in enumerate(y_values):
subset = x[y==val]
eps = grids[i].query(subset, k=[k+1], p=np.inf)[0].flatten()
n_full[y==val] = x_grid.query_ball_point(subset, eps - 1e-12, p=np.inf, return_length=True)
# The mean of psi(y_counts) is taken over all sample points, not y buckets
weighted_y_counts_mean = np.sum(np.dot(_psi(y_counts), y_counts / N))
return _psi(N) + _psi(k) - np.mean(_psi(n_full)) - weighted_y_counts_mean
def _estimate_conditional_semidiscrete_mi(x: FloatArray, y: FloatArray, cond: FloatArray,
k: int = 3) -> float:
"""Estimate conditional MI between discrete y and continuous x and cond.
This is an adaptation of the CMI algorithm with the
discrete-continuous distance metric.
"""
# Ensure that cond is 2-dimensional
N = len(y)
cond = np.column_stack((cond,))
# Find the unique values of y
y_values = np.unique(y)
_verify_not_continuous(y_values, N)
# First, create N-dimensional trees for variables
# The full space is partitioned according to y levels
xz = np.column_stack((x, cond))
full_grids = [cKDTree(xz[y==val]) for val in y_values]
xz_grid = cKDTree(xz)
z_grid = cKDTree(cond)
# Similarly, the YZ marginal space is partitioned between y levels
yz_grids = [cKDTree(cond[y==val]) for val in y_values]
# Find the distance to the k'th neighbor of each point
# in the y-partitioned spaces, and find the number of neighbors
# in marginal spaces.
xz_proj = np.column_stack((x, cond))
nxz = np.empty(N)
nyz = np.empty(N)
nz = np.empty(N)
for i, val in enumerate(y_values):
subset = y==val
eps = full_grids[i].query(xz[subset], k=[k+1], p=np.inf)[0].flatten()
# See https://github.com/polsys/ennemi/issues/76 for (eps - 1e-12) tweak.
nxz[subset] = xz_grid.query_ball_point(xz_proj[subset], eps - 1e-12, p=np.inf, return_length=True)
nyz[subset] = yz_grids[i].query_ball_point(cond[subset], eps - 1e-12, p=np.inf, return_length=True)
nz[subset] = z_grid.query_ball_point(cond[subset], eps - 1e-12, p=np.inf, return_length=True)
return _psi(k) - np.mean(_psi(nxz) + _psi(nyz) - _psi(nz))
def _verify_not_continuous(values: FloatArray, N: int) -> None:
if len(values) > N / 4:
warn("A discrete variable has relatively many unique values." +
" Have you set marked the discrete variables in correct order?" +
" If both X and Y are discrete, the conditioning variable cannot be continuous" +
" (this limitation can be lifted in the future).", UserWarning)
def _estimate_discrete_mi(x: FloatArray, y: FloatArray) -> float:
"""Estimate unconditional MI between two discrete variables.
The calculation proceeds by the mathematical definition:
joint probabilities are calculated and then used as weights to compute
MI = sum log(P(x,y) / (P(x) * P(y)) * P(x,y).
"""
N = len(x)
# If one variable is string and the other an integer, this converts them both to strings.
# Without this, we get into trouble searching for strings in a dictionary of integers.
data = np.column_stack((x,y))
_assert_not_object(data)
x_vals, x_counts = np.unique(data[:,0], return_counts=True)
x_dict = dict(zip(x_vals, x_counts))
y_vals, y_counts = np.unique(data[:,1], return_counts=True)
y_dict = dict(zip(y_vals, y_counts))
joint_vals, joint_counts = np.unique(data, axis=0, return_counts=True)
_verify_not_continuous(x_vals, N)
_verify_not_continuous(y_vals, N)
def sum_term(a: FloatArray) -> float:
x_weight = x_dict[a[0]]
y_weight = y_dict[a[1]]
joint_weight = int(a[2]) # This too might have been converted to a string
return joint_weight * np.log(N * joint_weight / (x_weight * y_weight))
return np.sum(np.apply_along_axis(sum_term, 1, np.column_stack((joint_vals, joint_counts)))) / N
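# Sanity-check sketch (hypothetical usage): two independent fair coins give MI near
# zero, while a variable paired with itself gives its entropy, e.g.
#   x = np.array([0, 1, 0, 1])
#   _estimate_discrete_mi(x, x)  # ~ np.log(2) ~= 0.693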
def _estimate_conditional_discrete_mi(x: FloatArray, y: FloatArray, cond: FloatArray) -> float:
"""Estimate conditional MI between two discrete variables, with discrete condition.
The calculation proceeds by the mathematical definition:
joint probabilities are calculated and then used as weights to compute
MI = sum P(z) sum log(P(x,y|z) / (P(x|z) * P(y|z)) * P(x,y|z).
"""
N = len(x)
_assert_not_object(cond)
# Determine probabilities of the conditioning variable
cond_vals, cond_inverses, cond_counts = np.unique(cond,
axis=0, return_inverse=True, return_counts=True)
# For each condition, compute the conditional probability (given by basic MI on subset of data)
cond_probs = np.zeros(len(cond_vals))
for i in range(len(cond_vals)):
x_subset = x[cond_inverses == i]
y_subset = y[cond_inverses == i]
cond_probs[i] = cond_counts[i] * _estimate_discrete_mi(x_subset, y_subset)
# Return the weighted sum
return np.sum(cond_probs) / N
#
# Digamma
#
def _psi(x: Union[int, FloatArray]) -> FloatArray:
"""A replacement for scipy.special.psi, for non-negative integers only.
This is slightly faster than the SciPy version (not that it's a bottleneck),
and has consistent behavior for digamma(0).
"""
x = np.asarray(x)
# psi(0) = inf for SciPy compatibility
# The shape of result does not matter as inf will propagate in mean()
if np.any(x == 0):
return np.asarray(np.inf)
# Use the SciPy value for psi(1), because the expansion is not good enough
mask = (x != 1)
result = np.full(x.shape, -0.5772156649015331)
# For the rest, a good enough expansion is given by
# https://www.uv.es/~bernardo/1976AppStatist.pdf
y = np.asarray(x[mask], dtype=np.float64)
    # The truncated series below is the standard asymptotic expansion
    # psi(y) ~ log(y) - 1/(2y) - 1/(12y^2) + 1/(120y^4) - 1/(252y^6), factored by y^-6.
    result[mask] = np.log(y) - np.power(y, -6) * (
        np.power(y, 5) / 2 + np.power(y, 4) / 12 - np.power(y, 2) / 120 + 1 / 252)
    return result
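# Sanity-check sketch: the expansion agrees with the digamma function, e.g.
# psi(1) = -0.5772... (the Euler-Mascheroni constant) and psi(5) ~= 1.5061, so
# _psi(np.array([1.0, 5.0])) should be close to [-0.5772, 1.5061].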
#This program prints out data to *.dat files, for Fig. 6 in our paper
import numpy as np
from Zstats import DeltaP
#X-axis
n_array = np.arange(0,31,1)
#Fig 6: Discovery case [left panel]
#Set signal and background means
s=5
b=5
#Y-axis
#Calculate probabilities *DeltaP(n,m,tau,s,b)* as a function of event count *n* in the signal region, for a fixed bhat = m/tau
#For more information about DeltaP, use the Python help function *help(DeltaP)*
#tau=Infinity
temp1 = [DeltaP(n,np.inf,np.inf,s,b) for n in n_array]
#tau=3
temp2 = [DeltaP(n,b*3,3,s,b) for n in n_array]
#tau=1
temp3 = [DeltaP(n,b*1,1,s,b) for n in n_array]
#Printing data to a *.dat file
np.savetxt('fig6_disc.dat', np.transpose([n_array, temp1, temp2, temp3]))
import os
import tensorflow as tf
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.backend import resize_images
import cv2
import numpy as np
JOY_INDEX = 1
INPUT_INDEX = 1
IMG_INDEX = 1
PCT_VALIDATION = 0.2
PCT_TRAIN = 1 - PCT_VALIDATION
LABELS_PATH = 'data-png/labels/labels.npy'
IMGS_PATH = 'data-png/features/img-{}.png'
MODELS_PATH = 'models'
CPOINT_PATH = 'models/checkpoints/weights{epoch:02d}-{val_loss:.4f}.h5'
def resize(img):
return resize_images(img, 66, 200, 'channels_first')
def normalize(img):
    return (img / 255.0) - 0.5
def h_flip_image(img):
return cv2.flip(img, 1)
def change_image_brightness_bw(img, s_low=0.2, s_high=0.75):
img = img.astype(np.float32)
s = np.random.uniform(s_low, s_high)
img[:,:] *= s
    img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def add_random_shadow(img, w_low=0.6, w_high=0.85):
cols, rows = (img.shape[0], img.shape[1])
top_y = np.random.random_sample() * rows
bottom_y = np.random.random_sample() * rows
bottom_y_right = bottom_y + np.random.random_sample() * (rows - bottom_y)
top_y_right = top_y + np.random.random_sample() * (rows - top_y)
if np.random.random_sample() <= 0.5:
bottom_y_right = bottom_y - np.random.random_sample() * (bottom_y)
top_y_right = top_y - np.random.random_sample() * (top_y)
poly = np.asarray([[[top_y, 0], [bottom_y, cols], [bottom_y_right, cols], [top_y_right, 0]]], dtype=np.int32)
mask_weight = np.random.uniform(w_low, w_high)
origin_weight = 1 - mask_weight
mask = np.copy(img).astype(np.int32)
cv2.fillPoly(mask, poly, (0, 0, 0))
return cv2.addWeighted(img.astype(np.int32), origin_weight, mask, mask_weight, 0).astype(np.uint8)
def translate_image(img, st_angle, low_x_range, high_x_range, low_y_range, high_y_range, delta_st_angle_per_px):
rows, cols = (img.shape[0], img.shape[1])
translation_x = np.random.randint(low_x_range, high_x_range)
translation_y = np.random.randint(low_y_range, high_y_range)
st_angle += translation_x * delta_st_angle_per_px
    translation_matrix = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
from collections import defaultdict
import numpy as np
from six.moves import xrange
from scipy.special import comb
import tensorflow as tf
from itertools import combinations
def init_coverage_tables(model):
model_layer_dict = defaultdict(bool)
for layer in model.layers:
if 'Flatten' in layer.name or 'Input' in layer.name:
continue
for index in range(layer.output_shape[-1]):
model_layer_dict[(layer.name, index)] = False
# init_dict(model, model_layer_dict)
return model_layer_dict
# def init_dict(model, model_layer_dict):
# for layer in model.layers:
# if 'Flatten' in layer.name or 'Input' in layer.name:
# continue
# for index in range(layer.output_shape[-1]):
# model_layer_dict[(layer.name, index)] = False
def neuron_covered(model_layer_dict):
covered_neurons = len([v for v in model_layer_dict.values() if v])
total_neurons = len(model_layer_dict)
return covered_neurons, total_neurons, covered_neurons / float(total_neurons)
def scale(intermediate_layer_output, rmax=1, rmin=0):
X_std = (intermediate_layer_output - intermediate_layer_output.min()) / (
intermediate_layer_output.max() - intermediate_layer_output.min())
X_scaled = X_std * (rmax - rmin) + rmin
return X_scaled
# def update_coverage(sess, x, input_data, model, model_layer_dict, feed_dict, threshold=0):
# layer_names = [layer.name for layer in model.layers if
# 'Flatten' not in layer.name and 'Input' not in layer.name]
# intermediate_layer_outputs = []
# dict = model.fprop(x)
# for key in model.layer_names:
# if 'Flatten' not in key and 'Input' not in key:
# tensor = dict[key]
# feed = {x: input_data}
# if feed_dict is not None:
# feed.update(feed_dict)
# v = sess.run(tensor, feed_dict=feed)
# intermediate_layer_outputs.append(v)
# del v
#
# for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
# for j in range(len(intermediate_layer_output)):
# scaled = scale(intermediate_layer_output[j])
# for num_neuron in xrange(scaled.shape[-1]):
# if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
# model_layer_dict[(layer_names[i], num_neuron)] = True
# del intermediate_layer_outputs, intermediate_layer_output
# return model_layer_dict
def update_coverage(sess, x, input_data, model, model_layer_dict, feed_dict, threshold=0):
layer_names = [layer.name for layer in model.layers if
'Flatten' not in layer.name and 'Input' not in layer.name]
intermediate_layer_outputs = []
dict = model.fprop(x)
for key in model.layer_names:
if 'Flatten' not in key and 'Input' not in key:
tensor = dict[key]
feed = {x: input_data}
if feed_dict is not None:
feed.update(feed_dict)
layer_output = sess.run(tensor, feed_dict=feed)
layer_op = np.zeros((layer_output.shape[0], layer_output.shape[-1]))
for j in range(len(layer_output)):
scaled = scale(layer_output[j])
for num_neuron in xrange(scaled.shape[-1]):
layer_op[j][num_neuron] = np.mean(scaled[..., num_neuron])
intermediate_layer_outputs.append(layer_op)
del layer_output, layer_op, scaled
for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
for j in range(len(intermediate_layer_output)):
intermediate_neuron_output = intermediate_layer_output[j]
for num_neuron in xrange(len(intermediate_neuron_output)):
if intermediate_neuron_output[num_neuron] > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
model_layer_dict[(layer_names[i], num_neuron)] = True
del intermediate_layer_outputs, intermediate_layer_output, intermediate_neuron_output
return model_layer_dict
def neuron_boundary(sess, x, input_data, model, feed_dict):
boundary = []
dict = model.fprop(x)
for key in model.layer_names:
if 'Flatten' not in key and 'Input' not in key:
tensor = dict[key]
n_batches = int(np.ceil(1.0 * input_data.shape[0] / 256))
for i in range(n_batches):
start = i * 256
end = np.minimum(len(input_data), (i + 1) * 256)
feed= {x: input_data[start:end]}
if feed_dict is not None:
feed.update(feed_dict)
layer_output = sess.run(tensor, feed_dict=feed)
layer_op = np.zeros((layer_output.shape[0], layer_output.shape[-1]))
for j in range(len(layer_output)):
for num_neuron in xrange(layer_output[j].shape[-1]):
layer_op[j][num_neuron] = np.mean(layer_output[j][..., num_neuron])
layer_op = np.transpose(layer_op, (1, 0))
low = np.min(layer_op, axis=1)
high = np.max(layer_op, axis=1)
mean = np.mean(layer_op, axis=1)
std = np.std(layer_op, axis=1, ddof=1)
if i == 0:
layer = np.transpose(np.asarray([low, high, std, mean]), (1, 0))
else:
l = np.transpose(np.asarray([low, high, std, mean]), (1, 0))
n1 = start
n2 = end - start
for j in range(len(l)):
if l[j][0] < layer[j][0]:
layer[j][0] = l[j][0]
if l[j][1] > layer[j][1]:
layer[j][1] = l[j][1]
layer[j][2] = pow(((n1-1)*pow(layer[j][2],2)+(n2-1)*pow(l[j][2],2) + n1*n2*(pow(layer[j][3],2)+pow(l[j][3],2)-2*layer[j][3]*l[j][3])/(1.0*n1+n2)) / (1.0*n1+n2-1), 0.5)
boundary.append(layer[:,:3])
return boundary
# def calculate_layers(sess, x, input_data, model, feed_dict):
# layers_output = []
# dict = model.fprop(x)
# for key in model.layer_names:
# if 'Flatten' not in key and 'Input' not in key:
# tensor = dict[key]
# layer_output = []
# n_batches = int(np.ceil(1.0 * input_data.shape[0] / 32))
# for i in range(n_batches):
# start = i * 32
# end = np.minimum(len(input_data), (i + 1) * 32)
# feed = {x: input_data[start:end]}
# if feed_dict is not None:
# feed.update(feed_dict)
# v = sess.run(tensor, feed_dict=feed)
# layer_output = layer_output + v.tolist()
#
# layer_output = np.asarray(layer_output)
# layer_op = np.zeros((layer_output.shape[0], layer_output.shape[-1]))
# for j in range(len(layer_output)):
# for num_neuron in xrange(layer_output[j].shape[-1]):
# layer_op[j][num_neuron] = np.mean(layer_output[j][..., num_neuron])
#
# layer_op = np.transpose(layer_op, (1, 0)) # num_neurons * num_samples
# layers_output.append(layer_op)
#
# return np.asarray(layers_output)
# def update_multi_coverage_neuron(layers_output, k, boundary, k_coverage, boundary_coverage, std_range):
# for i in range(len(layers_output)):
# for j in range(len(layers_output[i])):
# lower_bound = boundary[i][j][0] - std_range * boundary[i][j][2]
# upper_bound = boundary[i][j][1] + std_range * boundary[i][j][2]
# for t in range(len(layers_output[i][j])):
# output = layers_output[i][j][t]
# lower = boundary[i][j][0]
# upper = boundary[i][j][1]
# if output < lower_bound:
# boundary_coverage[i][j][0] += 1
# elif output > upper_bound:
# boundary_coverage[i][j][1] += 1
# elif output >= lower and output <= upper:
# if output == lower:
# k_coverage[i][j][0] += 1
# else:
# addition = 1.0 * (upper - lower) / k
# if addition == 0.0:
# k_coverage[i][j] = np.add(np.asarray(k_coverage[i][j]), 1).tolist()
# else:
# section = int(np.ceil(1.0 * (output - lower) / addition)) - 1
# if section >= k:
# section = k - 1
# k_coverage[i][j][section] += 1
# return k_coverage, boundary_coverage
def init_coverage_metric(boundary, k_n):
size = 0
k_coverage = []
boundary_coverage = []
for i in range(len(boundary)):
k_coverage.append(np.zeros((len(boundary[i]), k_n)).astype('int').tolist())
boundary_coverage.append(np.zeros((len(boundary[i]),2)).astype('int').tolist())
size += len(boundary[i])
return k_coverage, boundary_coverage, size
def calculate_coverage_layer(layers_output, k_l, samples_num):
layer_coverage = []
for i in range(len(layers_output)):
layer_output = np.transpose(layers_output[i], (1,0)) # num_samples * num_neurons
for j in range(len(layer_output)):
layer_coverage.append(topk(layer_output[j], k_l))
layer_coverage = np.asarray(layer_coverage).reshape((layers_output.shape[0], samples_num))
del layer_output
return layer_coverage
def topk(neuron_output, k):
    top = np.argsort(neuron_output)
import numpy as np
from ase.neighborlist import NeighborList
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
from pymatgen.io.ase import AseAtomsAdaptor
from .structures import get_covalent_radii_array
from .linalg import shortest_vector_index, gauss_reduce
from .map_positions import are_same_except_order
from .pointgroup import SYMPREC
def find_layers( # pylint: disable=too-many-locals,too-many-statements,too-many-branches
asecell, factor=1.1
):
"""
Obtains all subunits of a given structure by looking
at the connectivity of the bonds.
:param asecell: the bulk unit cell (in ase.Atoms format)
:param factor: the skin factor
:return: a tuple with a boolean indicating if the material is layered, a list of layers in the structure (ase format),
a list of indices of the atoms in each layer, and a rotated bulk ASE cell (with stacking axis along z).
MOREOVER, it 1) returns layers ordered by stacking index and 2) makes sure the layer is connected when
removing the PBC along the third (stacking) axis.
"""
tol = 1.0e-6
nl = NeighborList(
factor * get_covalent_radii_array(asecell),
bothways=True,
self_interaction=False,
skin=0.0,
)
nl.update(asecell)
vector1, vector2, vector3 = asecell.cell
is_layered = True
layer_structures = []
layer_indices = []
visited = []
aselayer = None
final_layered_structures = None
# Loop over atoms (idx: atom index)
for idx in range(len(asecell)): # pylint: disable=too-many-nested-blocks
# Will contain the indices of the atoms in the "current" layer
layer = []
# Check if I already visited this atom
if idx not in visited:
# Update 'layer' and 'visited'
check_neighbors(idx, nl, asecell, visited, layer)
aselayer = asecell.copy()[layer]
layer_nl = NeighborList(
factor * get_covalent_radii_array(aselayer),
bothways=True,
self_interaction=False,
skin=0.0,
)
layer_nl.update(aselayer)
# We search for the periodic images of the first atom (idx=0)
# that are connected to at least one atom of the connected layer
neigh_vec = []
for idx2 in range(len(aselayer)):
_, offsets = layer_nl.get_neighbors(idx2)
for offset in offsets:
if not all(offset == [0, 0, 0]):
neigh_vec.append(offset)
# We define the dimensionality as the rank
dim = np.linalg.matrix_rank(neigh_vec)
if dim == 2:
cell = asecell.cell
vectors = list(np.dot(neigh_vec, cell))
iv = shortest_vector_index(vectors)
vector1 = vectors.pop(iv)
iv = shortest_vector_index(vectors)
vector2 = vectors.pop(iv)
vector3 = np.cross(vector1, vector2)
while np.linalg.norm(vector3) < tol:
iv = shortest_vector_index(vectors)
vector2 = vectors.pop(iv)
vector3 = np.cross(vector1, vector2)
vector1, vector2 = gauss_reduce(vector1, vector2)
vector3 = np.cross(vector1, vector2)
aselayer = _update_and_rotate_cell(
aselayer, [vector1, vector2, vector3], [list(range(len(aselayer)))]
)
disconnected = []
for i in range(-3, 4):
for j in range(-3, 4):
for k in range(-3, 4):
vector = i * cell[0] + j * cell[1] + k * cell[2]
if np.dot(vector3, vector) > tol:
disconnected.append(vector)
iv = shortest_vector_index(disconnected)
vector3 = disconnected[iv]
layer_structures.append(aselayer)
layer_indices.append(layer)
else:
is_layered = False
if is_layered:
newcell = [vector1, vector2, vector3]
if abs(np.linalg.det(newcell) / np.linalg.det(cell) - 1.0) > 1e-3:
raise ValueError(
"An error occurred. The new cell after rotation has a different volume than the original cell"
)
rotated_asecell = _update_and_rotate_cell(asecell, newcell, layer_indices)
# Re-order layers according to their projection
# on the stacking direction
vert_direction = np.cross(rotated_asecell.cell[0], rotated_asecell.cell[1])
vert_direction /= np.linalg.norm(vert_direction)
stack_proj = [
|
np.dot(layer.positions, vert_direction)
|
numpy.dot
|
# from cvxopt import matrix, solvers
import logging
import sys
from typing import List, Tuple
import numpy as np
from scipy.optimize import linprog # type: ignore
# TODO: at some point should probably use something better than scipy, do we have a license for
# ibm's cplex solver?
def is_redundant_constraint(
halfspace: np.ndarray, halfspaces: np.ndarray, epsilon=0.0001
) -> bool:
# Let h be a halfspace constraint in the set of contraints H.
# We have a constraint c^w >= 0 we want to see if we can minimize c^T w and get it to go below 0
# if not then this constraint is satisfied by the constraints in H, if we can, then we need to
# add c back into H.
# Thus, we want to minimize c^T w subject to Hw >= 0.
# First we need to change this into the form min c^T x subject to Ax <= b.
# Our problem is equivalent to min c^T w subject to -H w <= 0.
    halfspaces = np.array(halfspaces)
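    # Sketch of the LP described above (hypothetical, via scipy.optimize.linprog):
    # minimize halfspace @ w subject to -halfspaces @ w <= 0, with w kept bounded so
    # the problem cannot be unbounded; the constraint is redundant if the optimum
    # stays above -epsilon.
    #   result = linprog(c=halfspace, A_ub=-halfspaces, b_ub=np.zeros(len(halfspaces)),
    #                    bounds=(-1, 1), method="highs")
    #   redundant = result.fun >= -epsilon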
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Created on Apr 2018
Started to make into an OpenAI gym Jun 21 2018
Cleaned version to upload following IBPSA 2019 Rome Publication April 2019
@author: <NAME>
Building environment to be use in an RL setting similar to OpenAI Gym.
User inputs settings to create the building using a resistance capacitance (RC)
thermal network. User also specifies the temperature profiles of day; the class
will 'decide' which information to supply to the agent. Noise can be added to
weather predictions for added realism.
TODO
+ Setpoints change with time -> new occupant
EXTRA
+ Debug code: import pdb; pdb.set_trace()
"""
import gym
from gym import error, spaces, utils
from gym.utils import seeding
# fn_sim : helper functions for simulating the environment
# fn_env : helper functions for modifying the environment, reading ambient temperature from weather file, applying noise, shuffling order the agent sees
# bldg_models: buldings models, 2 simple models and the general model
from gym_BuildingControls.envs import bldg_models, fn_sim, fn_env
import numpy as np
class BuildingControlsEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
''' Define the building controls environment.'''
self.curr_episode = -1 # keep track of episode to change curriculum over time
# NOTE: Following vector to specify which training mode will be run for how many episodes; use '1' to practically skip that mode
        # NOTE: Training too much on simple cases results in an unrecoverable collapse of the agent; avoid this!
# self.curriculum = [5, 5, 5, 5, 50, 500, 1000, 10000, 1e100, 1e100, 1e100, 1e100,] # episodes per perturbation mode, in order, "last mode" goes on for total episodes to run
self.curriculum = [50, 50, 50, 50, 100, 500, 1000, 10000, 1e100, 1e100, 1e100, 1e100,] # episodes per perturbation mode, in order, "last mode" goes on for total episodes to run
self.bldg = 0 # 1st-order model: direct conditionning of room air
# self.bldg = 1 # 2nd-order model: radiant slab system to condition room air node
self._get_building_def()
# Weather prediction steps to consider, number of them and how far ahead
self.weather_pred_steps = [0, 15, 30, 60, 180, 240, 480] # minutes, in ascending order
self.weather_pred_uncert_std = [0., 0.05, 0.05, 0.05, 0.05, 0.07, 0.10] # standard deviation, temp, from statistical study
self.perturbation_mode = 0
self.change_perturbations_mode()
# settings for 1st-order model, otherwise settings for other models
self.settings_mode = 0 if (self.bldg == 0) else 1
self.change_settings_mode()
# What the agent can do
self.nA = 3 # -/0/+
self.action_space = spaces.Discrete(self.nA)
# What the environment keeps track of
self.reset()
# What the agent sees
self.nS = len(self.state)
low = np.hstack((
0.,
self.minA,
-1.,
0.,
-1.*np.ones(self.timeindicator.shape[1]),
-40.*np.ones(len(self.weather_pred_steps)),
-20,
))
high = np.hstack((
45.,
self.maxA,
1.,
self.nT,
1.*np.ones(self.timeindicator.shape[1]),
50.*np.ones(len(self.weather_pred_steps)),
20,
))
self.observation_space = spaces.Box(low, high, dtype=np.float32)
# For OpenAI Baseline A2C and PPO2 frameworks
self.nenvs = self.num_envs = 1
def get_state(self):
self.state = np.hstack((
self.T[self.T_sensor_node], # room temperature, where sensor is located
self.heat_cool_s, # heating cooling state
self.comfort,
self.nT - self.t, # time left for episode
self.timeindicator[self.t,], # NOTE: length 11
np.array([
self.TK[self.t+int(self.weather_pred_steps[i]/self.timestep)] + \
np.random.normal(0,self.weather_pred_uncert_std[i]) \
for i in range(len(self.weather_pred_steps))
]).flatten(), # NOTE: length = len(weather_pred_steps)
# TODO: add building type as an input: {light, heavy, office}
self.T[self.T_sensor_node] - 22.5, # room temperature difference vs 22.5
))
return self.state
def reset(self):
# Curriculum
self.curr_episode += 1
if (self.curr_episode > self.curriculum[self.perturbation_mode]):
print("Completed lesson. Incrementing perturbation mode.")
self.perturbation_mode += 1
self.change_perturbations_mode()
self.curr_episode = 0
else:
self._get_perturbations()
self.t = 0
self.T = np.random.normal(22.5, scale=self.T_start_std, size=self.nN) # initial temperatures
self.heat_cool_s = self.heat_cool_off # start with HVAC off
self.comfort = 0
return self.get_state()
def step(self, action):
''' One step in the world.
[observation, reward, done = env.step(action)]
action: which heating/cooling setting to use
@return observation: state temperatures
@return reward: the reward associated with the next state
@return done: True if the state is terminal
'''
done = False
reward = 0.
self.comfort = 0
# If action is passed as a numpy array instead of a scalar
if isinstance(action, np.ndarray): action = action[0]
# Action space
self.heat_cool_s += (action-1) # to have [0,1,2] -> [-1,0,+1]
self.heat_cool_s = np.max((self.heat_cool_s, self.minA)) # lower action bound
self.heat_cool_s = np.min((self.heat_cool_s, self.maxA)) # upper action bound
Q_applied = self.heat_cool_levels[self.heat_cool_s]['power']
self.T = self.calculate_next_T(Q_applied) # for all thermal loads
Tr = self.T[self.T_sensor_node] # temperature at sensor location
# HVAC energy cost
reward += self.cost_factor * self.heat_cool_levels[self.heat_cool_s]['cost']
if self.comfort_penalty_scheme == 'power':
# Thermal comfort: Using an exponential penalty based on distance from comfort bounds,
# no terminal penalty at an extreme temp, smoother penalty
# from RL paper: Deep Reinforcement Learning for Optimal Control of Space Heating, Nagy Kazmi et al, eq. 4
if (Tr < self.T_low_sp): # too cold
reward += -4*1.35**(self.T_low_sp - Tr)
self.comfort = -1
elif (Tr > self.T_high_sp): # too hot
reward += -3*1.30**(Tr - self.T_high_sp)
self.comfort = 1
# else:
# reward += 0
elif self.comfort_penalty_scheme == 'linear':
if (Tr < self.T_low_sp): # too cold
reward -= (self.T_low_sp - Tr)
self.comfort = -1
elif (Tr > self.T_high_sp): # too hot
reward -= (Tr - self.T_high_sp)
self.comfort = 1
# else:
# reward += 0
elif self.comfort_penalty_scheme == 'linear with termination':
            # NOTE: the following method has been deprecated in favour of a smoother penalty scheme
if (Tr < self.T_low_limit) or (Tr > self.T_high_limit):
reward += self.penalty_limit_factor*self.nT
done = True
elif (Tr < self.T_low_sp) or (Tr > self.T_high_sp):
reward += self.penalty_sp
done = False
# else:
# reward += 0
# done = False
# You're hot (1) then you're cold (-1), ok (0)
if (Tr > self.T_high_sp): self.comfort = 1
elif (Tr < self.T_low_sp): self.comfort = -1
else: self.comfort = 0
# Excessive toggling
reward += self.cost_factor * self.penalty_hvac_toggle*np.abs(action-1)
# Increment time
self.t += 1
if self.t >= (self.nT-1):
reward += self.reward_termination # Looks like we made it!
done = True
return self.get_state(), reward, done, {'T':self.T}
def render(self, mode='human', close=False):
return
# Calculate temperatures for next timesteps: T(t+1) = Q(t) * U^-1
# Q-vector: Q = Qhvac + Qin + F*TK(t+1) + C/dt*T(t)
def calculate_next_T(self, Q_applied):
Q_applied_1hot = np.eye(1,self.nN,self.heat_cool_node).flatten() * Q_applied
Q = Q_applied_1hot + self.Q[self.t] + np.dot(self.F,self.TK[self.t+1]) + np.multiply(self.C.T/self.dt, self.T).flatten()
return np.dot(Q, self.U_inv)
def change_perturbations_mode(self):
self.perturbation_loaded = False # Flag to keep track if perturbation weather file has been loaded
self._get_perturbations()
print("Perturbation mode: %s" % self.perturbation_mode)
def change_settings_mode(self):
self._get_settings()
print("Settings mode: %s" % self.settings_mode)
# Building model. For more details, please consult the "bldg_models" file.
def _get_building_def(self):
'''
building_def contains U, F, C matrices
nN: number of interior nodes
nM: number of boundary nodes
(U: how thermal nodes are connected to each other [nN x nN])
U_inv: thermal node matrix for implicit finite difference calculation [nN x nN]
F: how thermal nodes are connected to boundaries [nN x nM]
C: thermal capacitances at nodes [nN]
dt: time steps, in seconds
T_start: temperature of room nodes at the start [nN]
T_sensor_node: where the thermostat is located
heat_cool_levels: dictionary map actions to heat/cool outputs with cost penalty
heat_cool_node: where to apply the heating/cooling
'''
self.timestep = 15 # minutes
# self.timestep = 5 # minutes
self.dt = self.timestep*60. # timestep in seconds
if self.bldg == 0:
''' Typical single family house in Montreal, heating delivered to the space directly.
1st-order RC model with effective overall capactiance and resistance values. '''
print("Building: 0, 1st-order model.")
self.U_inv, self.F, self.C, self.nN, self.nM = bldg_models.mF1C1(F_in=250, C_in=12e6, dt=self.dt) # 1-node thermal network model
self.Q_solar_fraction = 0.5
self.T_sensor_node = 0 # thermostat measurement location node
self.heat_cool_node = 0 # heating/cooling applied to this thermal node
self.heat_cool_levels = {
# NOTE: must be in ascending order, no limitation on number
# 0: {'power': -5000., 'cost': -2.,}, # power in watts (-ve is cooling)
# 1: {'power': -2500., 'cost': -1.,},
0: {'power': -3000., 'cost': -2.,}, # power in watts (-ve is cooling)
1: {'power': 0., 'cost': 0.,},
2: {'power': 5000., 'cost': -2.,},
3: {'power': 10000., 'cost': -4.,},
# 3: {'power': 2500., 'cost': -1.,},
# 4: {'power': 5000., 'cost': -2.,},
# 5: {'power': 7500., 'cost': -3.,},
# 6: {'power': 10000., 'cost': -4.,},
}
elif self.bldg == 1:
''' Space where heating/cooling is applied on a thermal node not the same as the
temperature sensor node such as the case of a radiant slab-based conditionning
system. Here,
U_in: conductor between the air node and slab node;
F_in: conductor between the air node and the ambient node;
C_in: room air capacitance;
C_slab: slab capacitance; '''
print("Building: 1, 2nd-order model.")
self.U_inv, self.F, self.C, self.nN, self.nM = bldg_models.mU1F1C2(U_in=15.*185., F_in=250., C_in=2e6, C_slab=10e6, dt=self.dt) # 2-node thermal network model
self.Q_solar_fraction = 0.1
self.T_sensor_node = 0 # thermostat measurement location node (air)
self.heat_cool_node = 1 # heating/cooling applied to this thermal node (slab)
self.heat_cool_levels = {
# NOTE: must be in ascending order, no limitation on number
0: {'power': -3000., 'cost': -2.,}, # power in watts (-ve is cooling)
1: {'power': 0., 'cost': 0.,},
2: {'power': 5000., 'cost': -2.,},
3: {'power': 10000., 'cost': -4.,},
}
else:
print("Unknown model specified!")
# self.heat_cool_off = int((self.maxA-self.minA)/2) Old way
for key, val in self.heat_cool_levels.items():
if val['power'] == 0: self.heat_cool_off = key
keys = [j for i,j in enumerate(self.heat_cool_levels.keys())]
self.minA = min(keys)
self.maxA = max(keys)
# Perturbations to the model
def _get_perturbations(self):
'''
perturbations contains TK, Q matrices
nT: number of timesteps total
TK: temperatures at boundaries [nT x nM]
Q: heat input into interior nodes [nT x nN]
'''
# synthetic case 0
# fixed comfortable temp, no baseload
if (self.perturbation_mode == 0) and (not self.perturbation_loaded):
self.T_start_std = 0. # spread on initial temperatures
self.nT = 180 # total number of timesteps including t_0 initial condition, T(t+1) depends on TK(t+1)
self.timeindicator = np.zeros((self.nT, 11))
lenTK = int(self.nT + self.weather_pred_steps[-1]/self.timestep)
self.TK = fn_sim.periodic(22.5, 0., 15, 86400., self.dt, lenTK)[:,np.newaxis]
Q_solar = fn_sim.halfperiodic(0., 12., 86400., self.dt, self.nT)[:,np.newaxis]
Q_baseload = 0.
self.Q = Q_solar + Q_baseload
self.perturbation_loaded = True
print("Loaded perturbations.")
# synthetic case 1
# fixed comfortable temperature, fixed baseload = hvac setting
if (self.perturbation_mode == 1) and (not self.perturbation_loaded):
self.T_start_std = 0. # spread on initial temperatures
self.nT = 180 # total number of timesteps including t_0 initial condition, T(t+1) depends on TK(t+1)
self.timeindicator = np.zeros((self.nT, 11))
lenTK = int(self.nT + self.weather_pred_steps[-1]/self.timestep)
self.TK = fn_sim.periodic(22.5, 0., 15, 86400., self.dt, lenTK)[:,np.newaxis]
#self.TK = fn_sim.random_TK(dt, nT+6).reshape(nT+6,1)
Q_solar = fn_sim.halfperiodic(0., 12., 86400., self.dt, self.nT)[:,np.newaxis]
Q_baseload = 500.
self.Q = Q_solar + Q_baseload
self.perturbation_loaded = True
print("Loaded perturbations.")
# synthetic case 2
# temperature changes periodically, fixed baseload = hvac setting, longer time
if (self.perturbation_mode == 2) and (not self.perturbation_loaded):
self.T_start_std = 0.5 # spread on initial temperatures
self.nT = 360 # total number of timesteps including t_0 initial condition, T(t+1) depends on TK(t+1)
self.timeindicator = np.zeros((self.nT, 11))
lenTK = int(self.nT + self.weather_pred_steps[-1]/self.timestep)
self.TK = fn_sim.periodic(10., 10., 15, 86400., self.dt, lenTK)[:,np.newaxis]
#self.TK = fn_sim.random_TK(dt, nT+6).reshape(nT+6,1)
Q_solar = fn_sim.halfperiodic(0., 12., 86400., self.dt, self.nT)[:,np.newaxis] # sunny day, total heat gain through windows
Q_baseload = 500.
self.Q = Q_solar + Q_baseload
self.perturbation_loaded = True
print("Loaded perturbations.")
# synthetic case 3
# temperature changes periodically larger dT, smaller baseload + half-sine solar gains, longer time
if (self.perturbation_mode == 3) and (not self.perturbation_loaded):
self.T_start_std = 0.5 # spread on initial temperatures
self.nT = 360 # total number of timesteps including t_0 initial condition, T(t+1) depends on TK(t+1)
self.timeindicator = np.zeros((self.nT, 11))
lenTK = int(self.nT + self.weather_pred_steps[-1]/self.timestep)
# self.TK = fn_sim.periodic(20., 15., 15, 86400., self.dt, lenTK).reshape(lenTK,1)
self.TK = fn_sim.periodic(10., 15., 15, 86400., self.dt, lenTK)[:,np.newaxis]
#self.TK = fn_sim.random_TK(dt, nT+6).reshape(nT+6,1)
# Q_solar = fn_sim.halfperiodic(600., 12., 86400., self.dt, self.nT).reshape(self.nT,1) # sunny day, total heat gain through windows
Q_solar = fn_sim.halfperiodic(600., 12., 86400., self.dt, self.nT)[:,np.newaxis] # sunny day, total heat gain through windows
Q_baseload = 300.
self.Q = Q_solar + Q_baseload
self.perturbation_loaded = True
print("Loaded perturbations.")
# realistic case 0: real weather, daily, start on day 80
if (self.perturbation_mode == 4) and (not self.perturbation_loaded):
self.T_start_std = 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw") # XXX: load this once!
# self.timeweather = fn_env.return_env_data(self.df_timeweather, how=80, length='1day', extension_seconds=60*self.weather_pred_steps[-1])
self.timeweather = fn_env.return_env_data(self.df_timeweather, how=80, length_days=1, extension_seconds=60*self.weather_pred_steps[-1])
# self.nT = self.timeweather.shape[0] - 6 # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range
self.nT = int(24*60/self.timestep) # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range, 1 day
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload = 250.
self.Q = Q_solar + Q_baseload
self.perturbation_loaded = True
print("Loaded perturbations.")
# realistic case 1: real weather, daily, shuffled
if self.perturbation_mode == 5:
self.T_start_std = 1.5 # 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
if (not self.perturbation_loaded):
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw")
self.perturbation_loaded = True
print("Loaded perturbations.")
self.timeweather = fn_env.return_env_data(self.df_timeweather, how='random', length_days=1, extension_seconds=60*self.weather_pred_steps[-1])
# self.nT = self.timeweather.shape[0] - 6 # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range
self.nT = int(24*60/self.timestep) # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range, 1 day
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload = np.random.uniform(low=100.,high=800.)
self.Q = Q_solar + Q_baseload
# realistic case 1 (2-day variant): real weather, 2 days, shuffled
if self.perturbation_mode == 6:
self.T_start_std = 2.0 # 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
if (not self.perturbation_loaded):
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw")
self.perturbation_loaded = True
print("Loaded perturbations.")
self.timeweather = fn_env.return_env_data(self.df_timeweather, how='random', length_days=2, extension_seconds=60*self.weather_pred_steps[-1])
# self.nT = self.timeweather.shape[0] - 6 # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range
self.nT = int(24*60*2/self.timestep) # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range, 2 days
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload = np.random.uniform(low=100.,high=800.)
self.Q = Q_solar + Q_baseload
# realistic case 1 (random-length variant): real weather, random length of days [gamma distribution, k=2, theta=1], shuffled
if self.perturbation_mode == 7:
self.T_start_std = 2.0 # 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
if (not self.perturbation_loaded):
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw")
self.perturbation_loaded = True
print("Loaded perturbations.")
length_days = int(np.ceil(np.random.gamma(2, 1))) + 1
self.timeweather = fn_env.return_env_data(self.df_timeweather, how='random', length_days=length_days, extension_seconds=60*self.weather_pred_steps[-1])
# self.nT = self.timeweather.shape[0] - 6 # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range
self.nT = int(24*60*length_days/self.timestep) # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range, length_days days
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload = np.random.uniform(low=100.,high=800.)
self.Q = Q_solar + Q_baseload
# realistic case 2: real weather, weekly, shuffled
if self.perturbation_mode == 8:
self.T_start_std = 2.0 # 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
if (not self.perturbation_loaded):
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw")
self.perturbation_loaded = True
print("Loaded perturbations.")
self.timeweather = fn_env.return_env_data(self.df_timeweather, how='random', length_days=7, extension_seconds=60*self.weather_pred_steps[-1])
# fix the following to be longer by timestep (or just a day more?) and cut
self.nT = int((24*60*7)/self.timestep)
# self.nT = int((24*60*7-self.weather_pred_steps[-1])/self.timestep) # NOTE: cut short depending on what is supplied as predicted to states; otherwise, we will be out of range, 1 week minus predicted weather
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload = np.random.uniform(low=100.,high=800.)
self.Q = Q_solar + Q_baseload
# realistic case 3: real weather, different than previous, weekly, shuffled
if self.perturbation_mode == 9:
self.T_start_std = 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
if (not self.perturbation_loaded):
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_ON_Toronto.716240_CWEC.epw")
self.perturbation_loaded = True
print("Loaded perturbations.")
self.timeweather = fn_env.return_env_data(self.df_timeweather, how='random', length_days=7, extension_seconds=60*self.weather_pred_steps[-1])
self.nT = int((24*60*7)/self.timestep)
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload = np.random.uniform(low=100.,high=800.)
self.Q = Q_solar + Q_baseload
# test case: Montreal winter, real weather, weekly
if (self.perturbation_mode == 10) and (not self.perturbation_loaded):
self.T_start_std = 0.5 # spread on initial temperatures
# NOTE def load_env_data(resample='1h', weather_file="CAN_ON_Ottawa.716280_CWEC.epw")
self.df_timeweather = fn_env.load_env_data(str(self.timestep)+'min', weather_file="CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw") # XXX: load this once!
self.timeweather = fn_env.return_env_data(self.df_timeweather, how=2, length_days=7, extension_seconds=60*self.weather_pred_steps[-1])
self.nT = int((24*60*7)/self.timestep)
self.timeindicator = self.timeweather[:,0:11]
self.TK = self.timeweather[:,11][:,np.newaxis]
Q_solar = self.Q_solar_fraction*self.timeweather[:,12][:,np.newaxis]
Q_baseload =
|
np.random.uniform(low=100.,high=800.)
|
numpy.random.uniform
|
#---------------------------------------------------------------
import os, sys
sys.path.insert(0, os.getcwd()) # enables $ python examples/[EXAMPLE].py
#---------------------------------------------------------------
import unittest
import logging
import os
import numpy as np
from scipy.signal import convolve2d, correlate2d
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import log
from torch.autograd import Variable
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
from torchvision import transforms
import ummon.utils as uu
from ummon import *
# set fixed seed for reproducible results
torch.manual_seed(4)
np.random.seed(4)
def sigmoid(z):
z = np.clip(z, log(np.finfo(np.float64).tiny), log(np.finfo(np.float64).max) - 1.0)  # clamp to the range where exp() neither underflows nor overflows
return 1.0/(1.0+np.exp(-z))
class TestUmmon(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestUmmon, self).__init__(*args, **kwargs)
# BACKUP files
backup_dir = "__backup__"
files = os.listdir(".")
dir = "."
for file in files:
if file.endswith(Trainingstate().extension) or file.endswith(".log"):
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
os.rename(os.path.join(dir,file), os.path.join(backup_dir,file))
# test fully connected layer
def test_predict(self):
print('\n')
# create net
cnet = Sequential(
('line0', Linear([5], 7, 'xavier_uniform_', 0.001)),
('sigm0', nn.Sigmoid())
)
print(cnet)
# check weight matrix
w = cnet.line0.w
b = cnet.line0.b
print('Weight matrix:')
print(w)
print('bias:')
print(b)
# predict
x0 = np.random.randn(1,5).astype('float32')
x1 = Variable(torch.FloatTensor(x0), requires_grad=False)
y1 = cnet(x1)
y1 = y1.data.numpy()
print('Predictions:')
print(y1)
assert y1.shape[1] == 7
# check
x0 = x0.reshape((5, 1))
y2 = sigmoid(np.dot(w, x0) + b.reshape((7, 1)))
print('Reference predictions:')
print(y2.transpose())
assert np.allclose(y1, y2.transpose(), 0, 1e-5)
# test loss
def test_loss(self):
print('\n')
# test data
x0 = np.random.rand(6,5).astype('float32')
x1 = Variable(torch.FloatTensor(x0), requires_grad=False)
y0 = np.zeros((6,5), dtype=np.float32)
y0[:,2] = np.ones(6, dtype=np.float32) # log likelihood works only for one hot coded outputs
y1 = Variable(torch.FloatTensor(y0), requires_grad=False)
# test MSE loss
loss = nn.MSELoss(reduction='sum')
mse = loss(x1, y1).data.numpy()
print('MSE: ', mse)
mse_true = ((x0 - y0)**2).sum() # with reduction='sum', PyTorch sums squared errors without the conventional 1/2 factor
print('True MSE:', mse_true)
assert np.allclose(mse, mse_true, 0, 1e-3)
# test log likelihood loss function
y1 = Variable(torch.LongTensor(2*y0[:,2]), requires_grad=False) # pytorch takes class index, not one-hot coding
loss = nn.NLLLoss(reduction='sum')
LL = loss(x1, y1).data.numpy()
print('LL: ', LL)
# should be LL_true = (-y0*np.nan_to_num(np.log(x0))).sum(axis=1).mean(), but pytorch expects x1 to be already log'd by Log Softmax
LL_true = (-y0*x0).sum()
print('True LL:', LL_true)
assert np.allclose(LL, LL_true, 0, 1e-3)
# test pytorch cross entropy loss function (=logsoftmax + NLL)
loss = nn.CrossEntropyLoss(reduction='sum')
ce = loss(x1, y1).data.numpy()
print('CE: ', ce)
# pytorch CE is combination of log softmax and log likelihood
ce_true = (-x0[:,2] + np.log(np.exp(x0).sum(axis=1))).sum()
print('True CE: ', ce_true)
assert np.allclose(ce, ce_true, 0, 1e-3)
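# Hedged aside (added, not part of the original test): CrossEntropyLoss is LogSoftmax
# followed by NLLLoss, which is why ce_true applies the log-sum-exp term above.
ce_via_nll = nn.NLLLoss(reduction='sum')(F.log_softmax(x1, dim=1), y1).data.numpy()
assert np.allclose(ce, ce_via_nll, 0, 1e-3)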
# test binary cross entropy
loss = nn.BCELoss(reduction='sum')
y1 = Variable(torch.FloatTensor(y0), requires_grad=False)
bce = loss(x1, y1).data.numpy()
print('BCE: ', bce)
bce_true = (np.nan_to_num(-y0*np.log(x0)-(1-y0)*np.log(1-x0))).sum()
print('True BCE:', bce_true) # with reduction='sum' the summed values match directly
assert np.allclose(bce, bce_true, 0, 1e-3)
# test pytorch combined sigmoid and bce
loss = nn.BCEWithLogitsLoss(reduction='sum')
bce = loss(x1, y1).data.numpy()
print('BCEL: ', bce)
bce_true = (np.nan_to_num(-y0*np.log(sigmoid(x0))-(1-y0)*np.log(1-sigmoid(x0)))).sum()
print('TrueBCEL:', bce_true)
assert np.allclose(bce, bce_true, 0, 1e-3)
# test embedding hinge loss function of pytorch (this is not the true Hinge loss!)
x0 = np.random.rand(6,1).astype('float32')
x1 = Variable(torch.FloatTensor(x0), requires_grad=False)
y0 = np.ones((6, 1), dtype=np.float32)
y0[3:] = -1
y1 = Variable(torch.FloatTensor(y0), requires_grad=False)
loss = nn.HingeEmbeddingLoss()
hinge = loss(x1, y1).data.numpy()
print('HingeE: ', hinge)
hinge_true = (x0[:3].sum() + np.maximum(0, 1 - x0[3:]).sum())/6.0
print('TrueHinE:', hinge_true)
assert np.allclose(hinge, hinge_true, 0, 1e-3)
# test true standard hinge loss
loss = nn.MarginRankingLoss(reduction='sum', margin=1.0)
dummy = torch.FloatTensor(6,1).zero_()
# dummy variable must have same size as x1, but must be 0
hinge = loss(x1, dummy, y1).data.numpy()
print('Hinge: ', hinge)
hinge_true = (np.maximum(0, 1 - x0*y0)).sum()
print('True Hin:', hinge_true)
assert np.allclose(hinge, hinge_true, 0, 1e-3)
# check gradient
def test_gradient(self):
def L(w, b, x, y, lossfunc, act, tck=None):
# linear layer
x1 = x.transpose()
y1 = (np.dot(w, x1) + b.reshape((5, 1))).transpose()
# activation function
if act == 'sigmoid':
y2 = sigmoid(y1)
elif act == 'logsoftmax':
y2 = np.exp(y1)
denom = np.tile(y2.sum(axis=1).reshape((4,1)), (1,5))
y2 = np.log(y2/denom)
elif act == 'bspline':
y2 = sp.splev(y1, tck)
# loss
if lossfunc == 'mse':
lo = ((y - y2)**2).sum()
elif lossfunc == 'cross_entropy': # binary cross entropy
lo = (np.nan_to_num(-y*np.log(y2)-(1-y)*np.log(1-y2))).sum()
elif lossfunc == 'log_likelihood': # log likelihood
lo = (-y*y2).sum()
return lo
def check_grad_activation(lossfunc, act):
print('\n')
print("Loss function: {}, Activation: {}".format(lossfunc, act))
batch = 4
eta = 0.5
# activation
if act == 'sigmoid':
nonl = nn.Sigmoid()
elif act == 'logsoftmax':
nonl = nn.LogSoftmax(dim=1)
# net
cnet = Sequential(
('line0', Linear([3], 5, 'xavier_normal_')),
('nonl0', nonl)
)
print(cnet)
# loss
if lossfunc == 'mse':
loss = nn.MSELoss(reduction='sum')
elif lossfunc == 'cross_entropy':
loss = nn.BCELoss(reduction='sum')
elif lossfunc == 'log_likelihood':
loss = nn.NLLLoss(reduction='sum')
# get weights
w0 = cnet.line0.w
b0 = cnet.line0.b
# training data
x0 = 0.1*
|
np.random.rand(batch, 3)
|
numpy.random.rand
|
import argparse
import ast
import os
import sys
import dill
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import pandas as pd
import numpy as np
from my_utils import utils
from models import idc_models
import my_optimizers
import datasets.get_dataset as get_dataset
import possible_defenses
plt.switch_backend('agg')
parser = argparse.ArgumentParser(description='ResNets for Breast Cancer subtype classification (IDC) in pytorch')
# dataset paras
parser.add_argument('--path-dataset', help='path_dataset',
type=str, default='D:/Datasets/BC_IDC')
# saving path paras
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models and csv files',
default='D:/MyCodes/label_inference_attacks_against_vfl/saved_experiment_results', type=str)
# framework paras
parser.add_argument('--party-num', help='party-num',
type=int, default=2)
parser.add_argument('--overlap',
help='whether the attacker uses more features',
type=ast.literal_eval, default=False)
parser.add_argument('--use-top-model',
help='whether the vfl framework has a top model. If set to False, the program will automatically '
'evaluate the direct label inference attack.',
type=ast.literal_eval, default=True)
# attack paras
parser.add_argument('--use-mal-optim',
help='whether the attacker uses the malicious optimizer',
type=ast.literal_eval, default=True)
parser.add_argument('--use-mal-optim-all',
help='whether all participants use the malicious optimizer. If set to '
'True, use_mal_optim will be automatically set to True.',
type=ast.literal_eval, default=False)
parser.add_argument('--use-mal-optim-top',
help='whether the server(top model) uses the malicious optimizer',
type=ast.literal_eval, default=False)
# visualization paras
parser.add_argument('--if-cluster-outputsA', help='if_cluster_outputsA',
type=ast.literal_eval, default=False)
# possible defenses paras
parser.add_argument('--ppdl', help='turn_on_privacy_preserving_deep_learning',
type=ast.literal_eval, default=False)
parser.add_argument('--gc', help='turn_on_gradient_compression',
type=ast.literal_eval, default=False)
parser.add_argument('--lap-noise', help='turn_on_lap_noise',
type=ast.literal_eval, default=False)
parser.add_argument('--multistep_grad', help='turn on multistep-grad',
type=ast.literal_eval, default=False)
parser.add_argument('--sign-sgd', help='turn_on_sign_sgd',
type=ast.literal_eval, default=False)
# setting about possible defenses
parser.add_argument('--ppdl-theta-u', help='theta-u parameter for defense privacy-preserving deep learning',
type=float, default=0.25)
parser.add_argument('--gc-preserved-percent', help='preserved-percent parameter for defense gradient compression',
type=float, default=0.1)
parser.add_argument('--noise-scale', help='noise-scale parameter for defense noisy gradients',
type=float, default=1e-1)
parser.add_argument('--multistep_grad_bins', help='number of bins in multistep-grad',
type=int, default=6)
parser.add_argument('--multistep_grad_bound_abs', help='bound of multistep-grad',
type=float, default=3e-2)
# training paras
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=5, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=64, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=5e-2, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 5e-4)')
best_prec1 = 0
class IdcVflFramework(nn.Module):
def __init__(self, ppdl, gc, lap_noise, ss):
super(IdcVflFramework, self).__init__()
# counter for direct label inference attack
self.inferred_correct = 0
self.inferred_wrong = 0
self.direct_attack_on = False
# bottom model a can collect output_a for label inference attack
self.collect_outputs_a = False
self.outputs_a = torch.tensor([]).cuda()
# In order to evaluate attack performance, we need to collect label sequence of training dataset
self.labels_training_dataset = torch.tensor([], dtype=torch.long).cuda()
# flag controlling whether the training labels above are actually collected (attack evaluation only)
self.if_collect_training_dataset_labels = False
# adversarial options
self.defense_ppdl = ppdl
self.defense_gc = gc
self.defense_lap_noise = lap_noise
self.defense_ss = ss
self.defense_multistep_grad = args.multistep_grad
# loss funcs
self.loss_func_top_model = nn.CrossEntropyLoss(weight=torch.tensor([0.3, 1.0])).cuda()
self.loss_func_bottom_model = utils.keep_predict_loss
# top model
# By default, each bottom model has a 5-dim output
self.top_model = idc_models.TopModel(dims_in=5 * args.party_num)
# bottom models as a list. self.bottom_models[0] as the malicious party.
self.bottom_models = []
for bottom_model_id in range(args.party_num):
if args.use_top_model:
self.bottom_models.append(idc_models.BottomModel().cuda())
else:
self.bottom_models.append(idc_models.BottomModelForDirect().cuda())
# overlap features test
if args.overlap:
self.bottom_models[0] = idc_models.BottomModelOverlap().cuda()
# bottom model optimizers as a list.
self.bottom_model_optimizers = []
# This setting is for adversarial experiments except sign SGD
if args.use_mal_optim_top:
self.optimizer_top_model = my_optimizers.MaliciousSGD(self.top_model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
else:
self.optimizer_top_model = optim.SGD(self.top_model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.use_mal_optim:
self.bottom_model_optimizers.append(
my_optimizers.MaliciousSGD(
self.bottom_models[0].parameters(),
lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
)
else:
self.bottom_model_optimizers.append(
optim.SGD(
self.bottom_models[0].parameters(),
lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
)
if args.use_mal_optim_all:
for i in range(1, args.party_num):
self.bottom_model_optimizers.append(
my_optimizers.MaliciousSGD(
self.bottom_models[i].parameters(),
lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
)
else:
for i in range(1, args.party_num):
self.bottom_model_optimizers.append(
optim.SGD(
self.bottom_models[i].parameters(),
lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
)
def forward(self, x):
# in the vertical federated setting, each party holds non-overlapping features of the same sample
input_images_list = []
for i in range(args.party_num):
input_images_list.append(x[:, i:i + 1, :, :, :].squeeze(1))
bottom_model_outputs_list = []
for i in range(args.party_num):
# overlap features test
if i == 0 and args.overlap:
bottom_model_outputs_list.append(self.bottom_models[i](
torch.cat((input_images_list[0], input_images_list[1], input_images_list[2]), dim=3)))
else:
bottom_model_outputs_list.append(self.bottom_models[i](input_images_list[i]))
if not args.use_top_model:
out = None
for i in range(args.party_num):
if out is None:
out = bottom_model_outputs_list[i]
else:
out += bottom_model_outputs_list[i]
else:
bottom_model_output_all = torch.stack(bottom_model_outputs_list)
out = self.top_model(bottom_model_output_all)
return out
def simulate_train_round_per_batch(self, data, target):
timer_mal = 0
timer_benign = 0
# simulate: bottom models forward, top model forward, top model backward and update, bottom backward and update
# In order to evaluate attack performance, we need to collect label sequence of training dataset
if self.if_collect_training_dataset_labels:
self.labels_training_dataset = torch.cat((self.labels_training_dataset, target), dim=0)
# store grad of input of top model/outputs of bottom models
input_tensors_top_model = []
for i in range(args.party_num):
input_tensors_top_model.append(torch.tensor([], requires_grad=False))
output_tensors_bottom_model = []
# --bottom models forward--
input_images_list = []
for i in range(args.party_num):
input_images_list.append(data[:, i:i + 1, :, :, :].squeeze(1))
for i in range(args.party_num):
self.bottom_models[i].train(mode=True)
# overlap features test
if i == 0 and args.overlap:
output_tensors_bottom_model.append(self.bottom_models[i](
torch.cat((input_images_list[0], input_images_list[1], input_images_list[2]), dim=3)))
else:
output_tensors_bottom_model.append(self.bottom_models[i](input_images_list[i]))
if i == 0:
# bottom model a can collect output_a for label inference attack
if self.collect_outputs_a:
self.outputs_a = torch.cat((self.outputs_a, output_tensors_bottom_model[0].data))
input_tensors_top_model[i].data = output_tensors_bottom_model[i].data
grads_output_bottom_model_list = []
if args.use_top_model:
bottom_model_output_all = torch.tensor([]).cuda()
for i in range(args.party_num):
bottom_model_output_all = torch.cat((bottom_model_output_all, input_tensors_top_model[i]), dim=1)
# bottom_model_output_all = torch.stack(input_tensors_top_model)
bottom_model_output_all.requires_grad = True
# -top model forward-
self.top_model.train(mode=True)
output_framework = self.top_model(bottom_model_output_all)
# --top model backward/update--
# top model loss input tensor
loss_framework = idc_models.update_top_model_one_batch(optimizer=self.optimizer_top_model,
model=self.top_model,
output=output_framework,
batch_target=target,
loss_func=self.loss_func_top_model)
# read grad of: input of top model(also output of bottom models), which will be used as bottom model's
# target
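# (added note) each party's block is 5 columns wide because every bottom model outputs
# 5 dims by default (see TopModel dims_in=5 * party_num above)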
for i in range(args.party_num):
grads_output_bottom_model_list.append(bottom_model_output_all.grad[:, i * 5:(i + 1) * 5])
else:
for i in range(args.party_num):
input_tensors_top_model[i] = input_tensors_top_model[i].cuda()
input_tensors_top_model[i].requires_grad = True
output_framework = torch.zeros_like(input_tensors_top_model[0])
output_framework = output_framework.cuda()
# output_framework.require
for i in range(args.party_num):
output_framework += input_tensors_top_model[i]
loss_framework = self.loss_func_top_model(output_framework, target)
loss_framework.backward()
for i in range(args.party_num):
grads_output_bottom_model_list.append(input_tensors_top_model[i].grad)
# defenses here: the server (who controls the top model) can defend against label inference
# attacks by protecting the gradients sent to the bottom models
# print("before defense, grad_output_bottom_model_a:", grad_output_bottom_model_a)
model_all_layers_grads_list = grads_output_bottom_model_list
# privacy preserving deep learning
if self.defense_ppdl:
for tensor_id in range(len(model_all_layers_grads_list)):
possible_defenses.dp_gc_ppdl(epsilon=1.8, sensitivity=1,
layer_grad_list=[model_all_layers_grads_list[tensor_id]],
theta_u=args.ppdl_theta_u, gamma=0.001, tau=0.0001)
# gradient compression
if self.defense_gc:
tensor_pruner = possible_defenses.TensorPruner(zip_percent=args.gc_preserved_percent)
for tensor_id in range(len(model_all_layers_grads_list)):
tensor_pruner.update_thresh_hold(model_all_layers_grads_list[tensor_id])
# print("tensor_pruner.thresh_hold:", tensor_pruner.thresh_hold)
model_all_layers_grads_list[tensor_id] = tensor_pruner.prune_tensor(
model_all_layers_grads_list[tensor_id])
# differential privacy
if self.defense_lap_noise:
dp = possible_defenses.DPLaplacianNoiseApplyer(beta=args.noise_scale)
for tensor_id in range(len(model_all_layers_grads_list)):
model_all_layers_grads_list[tensor_id] = dp.laplace_mech(model_all_layers_grads_list[tensor_id])
# multistep gradient
if self.defense_multistep_grad:
for tensor_id in range(len(model_all_layers_grads_list)):
model_all_layers_grads_list[tensor_id] = possible_defenses.multistep_gradient(
model_all_layers_grads_list[tensor_id], bins_num=args.multistep_grad_bins,
bound_abs=args.multistep_grad_bound_abs)
# print("after defense, grad_output_bottom_model_a:", grad_output_bottom_model_a)
# server sends back output_tensor_server_a.grad to the adversary(participant a), so
# the adversary can use this gradient to perform direct label inference attack.
if self.direct_attack_on:
for sample_id in range(len(model_all_layers_grads_list[0])):
grad_per_sample = model_all_layers_grads_list[0][sample_id]
for logit_id in range(len(grad_per_sample)):
if grad_per_sample[logit_id] < 0:
inferred_label = logit_id
if inferred_label == target[sample_id]:
self.inferred_correct += 1
else:
self.inferred_wrong += 1
break
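# Hedged explanation (added): with a softmax cross-entropy head, the gradient of the loss
# w.r.t. the logits is softmax(z) - onehot(y), so the only negative entry sits at the true
# class -- which is exactly what the sign test above exploits. Tiny standalone check:
#   z = np.array([1.0, 2.0, 0.5]); y = 1
#   p = np.exp(z) / np.exp(z).sum()
#   g = p - np.eye(3)[y]            # g[1] < 0 while g[0] and g[2] are > 0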
# --bottom models backward/update--
# -bottom model 0: backward/update-
# print("malicious_bottom_model 0")
idc_models.update_bottom_model_one_batch(optimizer=self.bottom_model_optimizers[0],
model=self.bottom_models[0],
output=output_tensors_bottom_model[0],
batch_target=grads_output_bottom_model_list[0],
loss_func=self.loss_func_bottom_model)
# -benign bottom models: backward/update-
# print("benign_bottom_models")
for i in range(1, args.party_num):
idc_models.update_bottom_model_one_batch(optimizer=self.bottom_model_optimizers[i],
model=self.bottom_models[i],
output=output_tensors_bottom_model[i],
batch_target=grads_output_bottom_model_list[i],
loss_func=self.loss_func_bottom_model)
return loss_framework
def test_per_epoch(test_loader, framework, criterion):
test_loss = 0
correct = 0
right_samples_num = 0
TP_samples_num = 0
TN_samples_num = 0
FP_samples_num = 0
FN_samples_num = 0
wrong_samples_num = 0
with torch.no_grad():
for data, target in test_loader:
data = data.float().cuda()
target = target.long().cuda()
# set all sub-models to eval mode.
for i in range(args.party_num):
framework.bottom_models[i].eval()
framework.top_model.eval()
# run forward process of the whole framework
output_tensors_bottom_models = torch.tensor([]).cuda()
for i in range(args.party_num):
input_images = data[:, i:i + 1, :, :, :].squeeze(1)
# overlap test
if i == 0 and args.overlap:
output_tensors_bottom_models = torch.cat((output_tensors_bottom_models,
framework.bottom_models[i](torch.cat(
(data[:, i:i + 1, :, :, :],
data[:, i + 1:i + 2, :, :, :],
data[:, i + 2:i + 3, :, :, :],)
, dim=4).squeeze(1))),
dim=1)
elif args.use_top_model:
output_tensors_bottom_models = torch.cat((output_tensors_bottom_models,
framework.bottom_models[i](input_images)),
dim=1)
else:
if len(output_tensors_bottom_models.shape) == 1:
output_tensors_bottom_models = framework.bottom_models[i](input_images)
else:
output_tensors_bottom_models += framework.bottom_models[i](input_images)
if args.use_top_model:
output_framework = framework.top_model(output_tensors_bottom_models)
else:
output_framework = output_tensors_bottom_models
# sum up batch loss
test_loss += criterion(output_framework, target).data.item()
# get the index of the max log-probability
pred = output_framework.data.max(1, keepdim=True)[1]
# print(pred)
target_data = target.data.view_as(pred)
# print("target_data:", target_data)
correct += pred.eq(target_data).cpu().sum()
target_data = target_data.cpu()
pred = pred.cpu()
y_true = np.array(target_data)
y_pred =
|
np.array(pred)
|
numpy.array
|
import xgboost as xgb
import numpy as np
import pandas as pd
import math
from scipy.stats import norm
from collections import ChainMap
from collections import OrderedDict
from xgboostlss.utils import *
np.seterr(all="ignore")
########################################################################################################################
############################################### Expectile #####################################################
########################################################################################################################
# When a custom objective is provided XGBoost doesn't know its response function so the user is responsible for making
# the transformation for both objective and custom evaluation metric. For objective with identity link like squared
# error this is trivial, but for other link functions like log link or inverse link the difference is significant.
# For the Python package, the behaviour of the predictions can be controlled by the output_margin parameter in the
# predict function. When using the custom_metric parameter without a custom objective, the metric function will receive
# transformed predictions since the objective is defined by XGBoost. However, when a custom objective is also provided
# along with a custom metric, then both the objective and custom metric will receive raw predictions and hence must be
# transformed using the specified response functions.
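# Hedged illustration of the note above (sketch only, not part of XGBoostLSS; the names
# sq_err_obj and rmse_raw are invented for this example): with a custom objective, a custom
# metric receives *raw* margins and must apply the response function itself.
#
#   def sq_err_obj(predt, dtrain):
#       y = dtrain.get_label()
#       return predt - y, np.ones_like(predt)          # grad, hess of 0.5 * (predt - y)^2
#
#   def rmse_raw(predt, dtrain):                       # predt is untransformed here
#       y = dtrain.get_label()
#       return "rmse", float(np.sqrt(np.mean((predt - y) ** 2)))
#
#   bst = xgb.train({"max_depth": 2}, dtrain, num_boost_round=10,
#                   obj=sq_err_obj, custom_metric=rmse_raw)
#   margins = bst.predict(dtrain, output_margin=True)  # raw scores; identity link here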
class Expectile():
"""Expectile Distribution Class
"""
# Specifies the number of distributional parameters
@staticmethod
def n_dist_param():
"""Number of distributional parameter.
"""
n_param = len(Expectile.expectiles)
return n_param
###
# Parameter Dictionary
###
@staticmethod
def param_dict():
""" Dictionary that holds the expectiles and their corresponding response functions.
"""
param_dict = []
for i in range(len(Expectile.expectiles)):
param_dict.append({"expectile_" + str(Expectile.expectiles[i]): identity})
param_dict = dict(ChainMap(*param_dict))
param_dict = OrderedDict(sorted(param_dict.items(), key=lambda x: x[0]))
return param_dict
###
# Starting Values
###
@staticmethod
def initialize(y: np.ndarray):
""" Function that calculates the starting values, for each distributional parameter individually.
y: np.ndarray
Data from which starting values are calculated.
"""
expect_init=[]
for i in range(len(Expectile.expectiles)):
expect_init.append(np.mean(y))
start_values =
|
np.array([expect_init])
|
numpy.array
|
#!/usr/bin/env python3
"""
Process Outputs
"""
import tensorflow as tf
import numpy as np
class Yolo:
"""define the YOLO class"""
def __init__(self, model_path, classes_path, class_t, nms_t, anchors):
"""define and initialize attributes and variables"""
self.model = tf.keras.models.load_model(model_path)
with open(classes_path, 'r') as f:
self.class_names = [class_name[:-1] for class_name in f]
self.class_t = class_t
self.nms_t = nms_t
self.anchors = anchors
def process_outputs(self, outputs, image_size):
"""function that processes single-image predictions"""
boxes = []
box_confidences = []
box_class_probs = []
# Loop over the output feature maps (here 13x13, 26x26, 52x52)
# so i ranges from 0 to 2
for i, output in enumerate(outputs):
# print("output {}:".format(i))
grid_height = output.shape[0]
# print(grid_height)
grid_width = output.shape[1]
# print(grid_width)
anchor_boxes = output.shape[2]
# print(anchor_boxes)
boxs = output[..., :4]
# print("boxes:", boxes.shape, boxes)
# Extract the network output predictions ("raw" coordinates
# and dimensions) to be processed into bounding box predictions
t_x = boxs[..., 0]
t_y = boxs[..., 1]
t_w = boxs[..., 2]
t_h = boxs[..., 3]
# print("t_x:", t_x.shape, t_x)
# print("t_y:", t_y.shape, t_y)
# print("t_w:", t_w.shape, t_w)
# print("t_h:", t_h.shape, t_h)
# Create 3D arrays with the left-corner coordinates (c_x, c_y)
# of each grid cell. Values added in the b_x, b_y formulae (below)
# make a row vector of grid_width length
c_x = np.arange(grid_width).reshape(1, grid_width)
# print(c_x)
# make a 2D array of grid_width columns and grid_height rows,
# but do not transpose it
c_x = np.repeat(c_x, grid_height, axis=0)
# print(c_x)
# add the third axis, duplicating the coordinate values by
# anchor_boxes
c_x = np.repeat(c_x[..., np.newaxis], anchor_boxes, axis=2)
# print(c_x)
# make a row vector of grid_width length
c_y = np.arange(grid_width).reshape(1, grid_width)
# print(c_y)
# make a 2D array of grid_width columns and grid_height rows,
# and transpose it
c_y = np.repeat(c_y, grid_height, axis=0).T
# print(c_y)
# add the third axis, duplicating the coordinate values by
# anchor_boxes
c_y = np.repeat(c_y[..., np.newaxis], anchor_boxes, axis=2)
# print(c_y)
# The network output predictions are passed through a sigmoid
# function, which squashes the output in a range from 0 to 1,
# effectively keeping the center in the grid which is predicting.
# Add the top-left coordinates of the grid (c_x and c_y),
# because YOLO predicts offsets relative to the top-left corner
# of the grid cell which is predicting the object.
# The resultant predictions (b_x and b_y) are normalised by
# the width and height of the grid, e.g. 13 x 13. i.e., if the
# predictions b_x and b_y for the box containing the object
# are (0.3, 0.8), the actual centre coordinates of the box
# on the 13 x 13 feature map are (13 x 0.3, 13 x 0.8).
b_x = (self.sigmoid(t_x) + c_x) / grid_width
b_y = (self.sigmoid(t_y) + c_y) / grid_height
# The dimensions of the bounding box (b_w, b_h) are predicted by
# applying a log-space transform to the output, and then
# multiplying with the anchor dimensions for the box.
# The resultant predictions (b_w and b_h) are normalised by the
# width and height of the image input to the model,
# e.g. 416 x 416. i.e., if the predictions b_w and b_h for the
# box containing the object are (0.4, 0.6), the actual width
# and height of the box on the 416 x 416 image are
# (416 x 0.4, 416 x 0.6).
anchor_width = self.anchors[i, :, 0]
anchor_height = self.anchors[i, :, 1]
# In tf 2.0, on Google Colab:
# image_width = self.model.input.shape[1]
# image_height = self.model.input.shape[2]
# But in tf 1.2 (see Stackoverflow):
image_width = self.model.input.shape[1].value
image_height = self.model.input.shape[2].value
b_w = (anchor_width * np.exp(t_w)) / image_width
b_h = (anchor_height * np.exp(t_h)) / image_height
# Top-left corner coordinates of the bounding box
x_1 = b_x - b_w / 2
y_1 = b_y - b_h / 2
# Bottom right-corner coordinates of the bounding box
x_2 = x_1 + b_w
y_2 = y_1 + b_h
# Express the boundary box coordinates relative to
# the original image
x_1 *= image_size[1]
y_1 *= image_size[0]
x_2 *= image_size[1]
y_2 *= image_size[0]
# Update boxes according to the bounding box coordinates
# inferred above
boxs[..., 0] = x_1
boxs[..., 1] = y_1
boxs[..., 2] = x_2
boxs[..., 3] = y_2
# print(box)
# Append the boxes coordinates to the boxes list
boxes.append(boxs)
# Extract the network output box_confidence prediction
box_confidence = output[..., 4:5]
# The prediction is passed through a sigmoid function,
# which squashes the output in a range from 0 to 1,
# to be interpreted as a probability.
box_confidence = self.sigmoid(box_confidence)
# print(box_confidence)
# Append box_confidence to box_confidences
box_confidences.append(box_confidence)
# Extract the network output class_probability predictions
classes = output[..., 5:]
# The predictions are passed through a sigmoid function,
# which squashes the output in a range from 0 to 1,
# to be interpreted as a probability.
# Note: before v3, YOLO used to softmax the class scores.
# However, that design choice has been dropped in v3. The
# reason is that Softmaxing class scores assume that the
# classes are mutually exclusive. In simple words, if an object
# belongs to one class, then it's guaranteed it cannot belong
# to another class. Assumption that does not always hold true!
classes = self.sigmoid(classes)
# print(classes)
# Append class_probability predictions to box_class_probs
box_class_probs.append(classes)
return (boxes, box_confidences, box_class_probs)
def sigmoid(self, array):
"""define the sigmoid activation function"""
return 1 / (1 + np.exp(-1 * array))
def filter_boxes(self, boxes, box_confidences, box_class_probs):
"""function that filters boxes based on their objectness score"""
box_scores = []
box_classes = []
filtered_boxes = []
# Loop over the output feature maps (here 13x13, 26x26, 52x52)
# so i ranges from 0 to 2
for i, (box_confidence, box_class_prob, box) in enumerate(
zip(box_confidences, box_class_probs, boxes)):
# print("box_score #{}:".format(i))
# print(box_confidence.shape)
# print(box_class_prob.shape)
# print("box_confidence[..., 0]:", box_confidence[..., 0])
# Compute the box scores for each output feature map
# note on shapes:
# (13, 13, 3, 1) * (13, 13, 3, 80) = (13, 13, 3, 80)
# -> a box score is defined for each box_class_prob value
box_scores_per_ouput = box_confidence * box_class_prob
# print("box_scores_per_ouput:", box_scores_per_ouput.shape)
# For each individual box (3 per grid cell) keep the max of
# all the scores obtained (the one corresponding to the
# highest box_class_prob value)
max_box_scores =
|
np.max(box_scores_per_ouput, axis=3)
|
numpy.max
|
import argparse
import math
import time
import os
import sys
import numpy as np
import torch
import matplotlib
from tools import create_location_features_2d, factors
matplotlib.use('agg') # make sure to import this before traces and pyplot
from matplotlib.pyplot import figure, colorbar, imshow, show, plot
from torch import nn, optim
from torch.distributions import Multinomial
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from swarmlayer import SwarmLayer
from traces import Trace
import traceback
import sys
class SwarmTransformer(nn.Module):
def __init__(self, cells, n_emb, C, H, W,
K = None,
learnable_location_features = False,
):
"""
Create a SwarmTransformer module for generative modeling of images
:param cells: a list of SwarmConvLSTMCell
:param n_emb: size of positional embeddings and class conditional embeddings
:param C: number of image channels
:param H: image height in pixels
:param W: image width in pixels
:param K: number of classes (for class conditional generation)
:param learnable_location_features: if True, learn the location features otherwise use sinusoids
"""
super().__init__()
self.cells = nn.Sequential(*cells)
self.n_emb = n_emb
# it has to be multiple of 4 because we have per frequency (sine, cosine)x(vertical,horizontal)
assert (self.n_emb//4)*4 == self.n_emb
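# (added note) the divisibility-by-4 constraint comes from the sinusoidal location features
# pairing (sine, cosine) per frequency for each of the two image axes, i.e.
# n_emb = 4 * number_of_frequencies; see tools.create_location_features_2d for the actual construction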
# RGB/gray value embedding (8-bit images hard coded here)
self.input_embedding = nn.Embedding(256, n_emb)
self.input_embedding.weight.data.uniform_(-0.1, 0.1)
if K is not None:
# class conditional embeddings have the same size as location features (will be added later)
self.cond_embedding = nn.Embedding(K, n_emb)
self.cond_embedding.weight.data.uniform_(-0.1, 0.1)
else:
self.cond_embedding = None
self.ce_loss = nn.CrossEntropyLoss()
if K is not None:
assert K > 0
self.cond = K
self.n_channels = C
if self.n_channels>1:
# learnable RGB-channel embedding
self.channel_embedding = nn.Parameter(torch.zeros((self.n_emb,self.n_channels), dtype=torch.float32))
else:
self.channel_embedding = None
self.learnable_location_features = learnable_location_features
if self.learnable_location_features:
self.location_features = nn.Parameter(0.001*torch.randn(self.n_emb, H, W), requires_grad=True)
else:
self.location_features = nn.Parameter(create_location_features_2d(H, W, self.n_emb), requires_grad=False)
def prepare_data(self, X, Y=None):
"""
Prepare input data to be used for training. In order to use a 2d SwarmConvLSTMCell, the input's W and C
dimensions are flattened
:param X: channel-first batch of images, size (N,C,H,W)
:param Y: batch of labels, size (N)
:return: X_in, X_out (X_in: (N,n_emb,H,W*C), X_out: (N,H,W,C))
"""
N,C,H,W = X.size()
# 1. compute input embeddings for X
X_in = self.input_embedding(X) # (N,C,H,W,Demb)
X_in = X_in.transpose(4,1) # (N,Demb,H,W,C)
# 2. shift input by one to enforce causality
X_in = X_in.contiguous().view((N, self.n_emb, -1))
X_in = torch.cat( (torch.zeros_like(X_in[:,:, 0:1]),X_in[:,:,:-1]), dim=2)
X_in = X_in.view((N, self.n_emb, H, W,C)).contiguous()
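# (added note) the prepended zero column shifts the sequence right by one along the flattened
# pixel axis, [e0, e1, ..., e_{T-1}] -> [0, e0, ..., e_{T-2}], so the embedding fed at
# position t never contains the pixel the model has to predict at t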
# 3. compute location features
F = self.location_features
Df = F.size()[0]
F_in = F.view((1,Df,H,W,1)).expand((1,Df,H,W,C))
X_in = X_in+F_in
# 4. compute class conditional features
if self.cond_embedding is not None:
assert Y is not None
Y_in = self.cond_embedding(Y) # (N,Demb)
Y_in = Y_in.view( (N, self.n_emb, 1,1,1))
X_in = X_in+Y_in
# 5. compute channel embeddings
if self.channel_embedding is not None:
assert C == self.n_channels
X_in = X_in + self.channel_embedding.view((1,self.n_emb,1,1,self.n_channels))
# 6. flatten W and C channels in order to use a2d SwarmConvLSTMCell
X_in = X_in.view((N, self.n_emb, H, W*C))
# output is the raw input with channels last
X_out = X.transpose(1,2).transpose(2,3)
return X_in, X_out
def forward(self, x, y=None):
N, C, H, W = x.size()
X_in, X_out = self.prepare_data(x,y)
logits = self.cells(X_in)
# note, W and C dimensions are flattened, logits are (N,n_out,H,W*C)
# reshaping them back now
logits = logits.view( -1, 256, H,W,C)
loss = self.ce_loss(logits, X_out)
return loss, logits
def create_datasets(batch_size, name='MNIST'):
ds={}
ds['MNIST'] = datasets.MNIST
ds['FashionMNIST'] = datasets.FashionMNIST
ds['CIFAR10'] = datasets.CIFAR10
ds['BWCIFAR'] = datasets.CIFAR10
ds['SMALL'] = datasets.CIFAR10
ds['CIFAR100'] = datasets.CIFAR100
if name=='BWCIFAR':
transform = transforms.Compose([transforms.ToTensor(),
lambda x: (torch.mean(x, dim=0,keepdim=True) * 255).long()
])
elif name == 'SMALL':
transform = transforms.Compose([transforms.ToTensor(),
lambda x: (x*255).long()[:,8:24,8:24]
])
else:
transform = transforms.Compose([ transforms.ToTensor(),
#transforms.Normalize((0.,), (1./255,)),
lambda x: (x*255).long()
])
ds_train = ds[name]('./data/'+name, train=True, download=True, transform=transform)
ds_val = ds[name]('./data/'+name, train=False, transform=transform)
dl_train = DataLoader( ds_train, batch_size=batch_size, shuffle=True)
dl_val = DataLoader( ds_val, batch_size=batch_size, shuffle=True)
return dl_train, dl_val
def create_sample_fn( model, C,H,W, K, device):
"""
create a function that produces a sample plot of the model during training
:param model: the model
:param C: number of RGB channels
:param H: height in pixels
:param W: width in pixels
:param K: number of classes (or None)
:param device:
:return: sample function, that can be called without parameters and returns a figure handle
"""
def sample_fn():
fig = figure(figsize=(12,5))
model.eval()
if K is None:
n_samp = 12
Y = None
else:
n_samp = 2*K
Y = torch.arange(2*K, device=device)%K
X = torch.zeros( n_samp,C,H,W, device=device).long()
with torch.no_grad():
for h in range(H):
for w in range(W):
for c in range(C):
_,logits = model(X,Y)
m = Multinomial(logits=logits[:,:,h,w,c])
X_ = m.sample(torch.Size([]))
X[:,c,h,w] = torch.argmax(X_,dim=1)
X = X.cpu().numpy()
if C>1:
X = X.astype('float')/255.0
_ = imshow(X.reshape(2, n_samp//2, C, H, W).transpose(0, 3, 1, 4, 2).reshape(2 * H, n_samp//2 * W, C))
else:
_ = imshow(X.reshape(2, n_samp//2, H, W).transpose(0, 2, 1, 3).reshape(2 * H, n_samp//2 * W))
colorbar()
return fig
return sample_fn
from parse import parse
class ModelName(object):
#name_template = "%s-%d-%s-%d-%d-wc%.0f-lr%f"
# like "CIFAR10-2-relu-12-5-wc60-lr0.01"
name_template = "{}-{:d}-{}-{:d}-{:d}-wc{:g}-lr{:g}-bs{:d}-{}"
def create(self, opt):
name=ModelName.name_template.format(opt.data, opt.n_layers, opt.non_lin, opt.n_hidden,
opt.n_iter, opt.wc, opt.lr, opt.bs, opt.p)
return name
def parse(self, name, opt):
print(opt)
res = parse(ModelName.name_template, name)
if res is None:
raise ValueError("Could not parse model name {}".format(name))
(opt.data, opt.n_layers, opt.non_lin, opt.n_hidden,
opt.n_iter, opt.wc, opt.lr, opt.bs, opt.p) = tuple(res)
return opt
def validate(model, dl_val, device):
"""
run a complete validation epoch
:param model:
:param dl_val:
:param device:
:return: validation loss
"""
model.eval()
val_loss = 0
with torch.no_grad():
for X, Y in dl_val:
X = X.to(device)
Y = Y.to(device)
loss, _ = model(X, Y)
loss = loss.mean()
val_loss += loss.item()
val_loss /= len(dl_val)
return val_loss
def resume(model, optimizer, checkpoint_path, name=None):
"""
resume model parameters and optimizer state
:param model: model to be resumed
:param optimizer: optimizer to be resumed
:param checkpoint_path: filename of the saved pkl file
:param name: model name (must be identical to the name used in check point)
"""
checkpoint = torch.load(checkpoint_path)
if name is not None:
assert checkpoint['name'] == name
try:
model.load_state_dict(checkpoint['model'])
except:
Warning("Could not resume model from {}".format(checkpoint_path))
try:
optimizer.load_state_dict(checkpoint['optimizer'])
except:
Warning("Could not resume optimizer from {}".format(checkpoint_path))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-data', type=str, choices=['MNIST',
'FashionMNIST',
'CIFAR10',
'CIFAR100',
'BWCIFAR',
'SMALL'],
help='dataset to be used in the experiment')
parser.add_argument('-n_hidden', type=int, default=128,
help='number of hidden units inside the model')
parser.add_argument('-n_layers', type=int, default=1,
help='number of layers for mult-layered models')
parser.add_argument('-n_iter', type=int, default=5,
help='number of iterations to be done in Swarm layers')
parser.add_argument('-non_lin', default='relu', choices=['relu', 'elu', 'lrelu'],
help='non-linearity used between different layers')
parser.add_argument('-bs', type=int, default=100,
help='batch size')
parser.add_argument('-wc', type=float, default=60,
help='allowed wall clock time for training (in minutes)')
parser.add_argument('-update_interval', type=float, default=10,
help='update interval to generate trace and sample plots (in minutes)')
parser.add_argument('-lr', type=float, default=0.01,
help='learning rate')
parser.add_argument('-no_cuda', action='store_true',
help='dont use CUDA even if it is available')
parser.add_argument('-name', type=str, default=None,
help='you can provide a model name that will be parsed into cmd line options')
parser.add_argument('-dry_run', action='store_true',
help='just print out the model name and exit')
parser.add_argument('-to_stdout', action='store_true',
help='log all output to stdout instead of modelname/log')
parser.add_argument('-bt_horizon', type=float, default=0.1,
help='backtracking horizon')
parser.add_argument('-bt_alpha', type=float, default=0.9,
help='backtracking learning rate discount factor')
parser.add_argument('-cond', action='store_true',
help='do class conditional modeling')
parser.add_argument('-resume', type=str, default=None,
help='resume model from modelname/best.pkl')
parser.add_argument('-learn_loc', type=bool, default=False)
opt = parser.parse_args()
if opt.name is not None:
opt = ModelName().parse(opt.name, opt)
name = ModelName().create(opt)
assert opt.name is None or name==opt.name
print(name)
if opt.dry_run:
exit()
import sys
name_part = name+".part"
try:
os.mkdir(name_part)
except:
pass
if not opt.to_stdout:
sys.stdout = open(name_part+'/log', 'w')
opt.cuda = not opt.no_cuda
C,H,W,K = {'MNIST':(1,28,28,10),
'FashionMNIST':(1,28,28,10),
'CIFAR10':(3,32,32,10),
'CIFAR100':(3,32,32,100),
'BWCIFAR':(1,32,32,10),
'SMALL': (3,16,16,10),
} [opt.data]
n_classes = 256 # not dependent on the dataset so far
non_linearity = {'elu':nn.ELU(), 'relu':nn.ReLU(), 'lrelu':nn.LeakyReLU()} [opt.non_lin]
n_in = opt.n_hidden
n_hidden = opt.n_hidden
n_layers = opt.n_layers
n_iter = opt.n_iter
# in case the desired batch size does not fit into CUDA memory
# do batch iteration. In a loop, try the largest batch size and batch_iter=1 first.
# Decrease batch_size (increase batch_iter) by common factors until there is a model that does not throw an
# out-of-memory error
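# (added note, assuming factors() yields divisors in increasing order) e.g. for bs=100 the
# loop tries sub-batch sizes 100, 50, 25, ... until allocation succeeds; since the loss is
# divided by batch_iter and the optimizer steps every batch_iter iterations, the effective
# batch size stays opt.bs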
for batch_iter in factors(opt.bs):
print(type(opt.bs),type(int(opt.bs//batch_iter)))
print("trying batch size {} in {} iterations".format(opt.bs//batch_iter ,batch_iter))
try:
layers = []
n_out_last = n_in
for i in range(n_layers):
if i<n_layers-1:
layers.append( SwarmLayer(n_in=n_out_last, n_out=n_hidden, n_hidden=n_hidden, n_iter=n_iter, pooling='CAUSAL'))
layers.append( non_linearity)
n_out_last = n_hidden
else:
layers.append( SwarmLayer(n_in=n_out_last, n_out=n_classes, n_hidden=n_hidden, n_iter=n_iter, pooling='CAUSAL'))
model = SwarmTransformer(layers, C=C, W=W, H=H, K=K, n_emb=n_in,
learnable_location_features=opt.learn_loc)
device = torch.device('cuda' if opt.cuda else 'cpu')
if torch.cuda.device_count()>1:
model = nn.DataParallel(model)
model.to(device)
print(model)
print("backtracking {}% epochs with lr decrease factor {}".format(100*opt.bt_horizon, opt.bt_alpha))
# create datasets with batch sizes split by batch_iter
dl_train, dl_val = create_datasets( int(opt.bs//batch_iter), opt.data)
sample_fn = create_sample_fn( model, C,H,W,K, device)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if opt.resume is not None:
resume(model, optimizer, opt.resume)
for param_group in optimizer.param_groups:
param_group['lr'] = opt.lr
# create a tracing object, that records training and validation losses and other metrics and records 13 individual
# weights of every model parameter tensor
# every now and then it plots learning curves, weight traces and model samples to
# modelname/[metrics.png,weights.png,samples.png] respectively
traces = Trace(model, 13, sample_fn, name=name_part, columns=4)
best_val_loss = math.inf
val_loss_history = [np.inf]
t_start = time.time()
t_update = 0 # timer to count when the next traces update is due
t_no_training = 0 # time spent generating traces and samples
e = 0 # count the epochs
while True:
# inform the Traces object that a new epoch has begun
traces.on_epoch_begin(e)
for i, (X, Y) in enumerate(dl_train):
X = X.to(device)
Y = Y.to(device)
model.train()
if i%batch_iter==0:
optimizer.zero_grad()
norm = 0
loss, _ = model(X, Y)
loss = loss.mean()
(loss/batch_iter).backward()
if (i+1)%batch_iter==0:
# do an optimizer update step only every batch_iter iterations
norm = torch.nn.utils.clip_grad_norm_(model.parameters(), math.inf, norm_type=1)
optimizer.step()
print(i, "%.4f (norm=%.4g)" % (loss.item(), norm), end="\r")
# a dictionary of values and metrics that will be logged by the Traces object
logs = {'loss': loss.item(), 'norm': norm}
time_is_up = time.time()>t_start+60*opt.wc + t_no_training #or i>=250
if time_is_up:
print("preparing to complete")
if i+1 == len(dl_train) or time_is_up:
# we are done with the last iteration
# -> kick off a validation epoch now and add the val_loss to the log
val_loss = validate(model, dl_val, device)
print("%d: val_loss = %.4f" % (e, val_loss))
logs['val_loss'] = val_loss
logs['lr'] = [p['lr'] for p in optimizer.param_groups]
# now actually log the metrics for iteration i
traces.on_batch_end(i, logs)
sys.stdout.flush()
if time_is_up:
break
last_worse = np.argwhere(
|
np.array(val_loss_history)
|
numpy.array
|
import os, sys
import numpy as np
import imageio
import json
import random
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
import trimesh
import matplotlib.pyplot as plt
from run_nerf_helpers import *
from load_llff import load_llff_data, load_colmap_depth, load_realsense_data, load_realsense_depth
from load_dtu import load_dtu_data
from loss import SigmaLoss
from data import RayDataset
from torch.utils.data import DataLoader
from utils.generate_renderpath import generate_renderpath
import cv2
from yacs.config import CfgNode as CN  # CfgNode is used by real_camera_cfgs below
# import time
# concate_time, iter_time, split_time, loss_time, backward_time = [], [], [], [], []
def real_camera_cfgs(height=720, width=1280, fov=60):
"""
Returns a set of camera config parameters
Returns:
YACS CfgNode: Cam config params
"""
_C = CN()
_C.ZNEAR = 0.01
_C.ZFAR = 10
_C.WIDTH = width
_C.HEIGHT = height
_C.FOV = fov  # use the fov argument instead of a hard-coded 60
_ROOT_C = CN()
_ROOT_C.CAM = CN()
_ROOT_C.CAM.SIM = _C
_ROOT_C.CAM.REAL = _C
return _ROOT_C.clone()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# torch.cuda.set_device(2)
np.random.seed(0)
DEBUG = False
def batchify(fn, chunk):
"""Constructs a version of 'fn' that applies to smaller batches.
"""
if chunk is None:
return fn
def ret(inputs):
return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0)
return ret
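# (added usage note) e.g. fn_chunked = batchify(fn, 1024*32); fn_chunked(x) evaluates fn on
# 32768-row slices of x and concatenates the results along dim 0, capping peak memory at the
# cost of extra forward calls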
def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64):
"""Prepares inputs and applies network 'fn'.
"""
inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])
embedded = embed_fn(inputs_flat)
if viewdirs is not None:
input_dirs = viewdirs[:,None].expand(inputs.shape)
input_dirs_flat = torch.reshape(input_dirs, [-1, input_dirs.shape[-1]])
embedded_dirs = embeddirs_fn(input_dirs_flat)
embedded = torch.cat([embedded, embedded_dirs], -1)
outputs_flat = batchify(fn, netchunk)(embedded)
outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]])
return outputs
def batchify_rays(rays_flat, chunk=1024*32, **kwargs):
"""Render rays in smaller minibatches to avoid OOM.
"""
all_ret = {}
for i in range(0, rays_flat.shape[0], chunk):
ret = render_rays(rays_flat[i:i+chunk], **kwargs)
for k in ret:
if k not in all_ret:
all_ret[k] = []
all_ret[k].append(ret[k])
all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}
return all_ret
def render(H, W, focal, chunk=1024*32, rays=None, c2w=None, ndc=True,
near=0., far=1.,
use_viewdirs=False, c2w_staticcam=None, depths=None,
**kwargs):
"""Render rays
Args:
H: int. Height of image in pixels.
W: int. Width of image in pixels.
focal: float. Focal length of pinhole camera.
chunk: int. Maximum number of rays to process simultaneously. Used to
control maximum memory usage. Does not affect final results.
rays: array of shape [2, batch_size, 3]. Ray origin and direction for
each example in batch.
c2w: array of shape [3, 4]. Camera-to-world transformation matrix.
ndc: bool. If True, represent ray origin, direction in NDC coordinates.
near: float or array of shape [batch_size]. Nearest distance for a ray.
far: float or array of shape [batch_size]. Farthest distance for a ray.
use_viewdirs: bool. If True, use viewing direction of a point in space in model.
c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for
camera while using other c2w argument for viewing directions.
Returns:
rgb_map: [batch_size, 3]. Predicted RGB values for rays.
disp_map: [batch_size]. Disparity map. Inverse of depth.
acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
depth_map: [batch_size]. Predicted depth along each ray.
extras: dict with everything returned by render_rays().
"""
if c2w is not None:
# special case to render full image
rays_o, rays_d = get_rays(H, W, focal, c2w)
else:
# use provided ray batch
rays_o, rays_d = rays
if use_viewdirs:
# provide ray directions as input
viewdirs = rays_d
if c2w_staticcam is not None:
# special case to visualize effect of viewdirs
rays_o, rays_d = get_rays(H, W, focal, c2w_staticcam)
viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
viewdirs = torch.reshape(viewdirs, [-1,3]).float()
sh = rays_d.shape # [..., 3]
if ndc:
# for forward facing scenes
rays_o, rays_d = ndc_rays(H, W, focal, 1., rays_o, rays_d)
# Create ray batch
rays_o = torch.reshape(rays_o, [-1,3]).float()
rays_d = torch.reshape(rays_d, [-1,3]).float()
near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1])
rays = torch.cat([rays_o, rays_d, near, far], -1) # B x 8
if depths is not None:
rays = torch.cat([rays, depths.reshape(-1,1)], -1)
if use_viewdirs:
rays = torch.cat([rays, viewdirs], -1)
# Render and reshape
all_ret = batchify_rays(rays, chunk, **kwargs)
for k in all_ret:
k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
all_ret[k] = torch.reshape(all_ret[k], k_sh)
k_extract = ['rgb_map', 'disp_map', 'acc_map', 'depth_map']
ret_list = [all_ret[k] for k in k_extract]
ret_dict = {k : all_ret[k] for k in all_ret if k not in k_extract}
return ret_list + [ret_dict]
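# Hedged usage sketch (assumptions: 'pose' is a 4x4 camera-to-world matrix and the kwargs dict
# already contains the entries built by create_nerf below plus scene near/far bounds): passing
# c2w renders a full H x W image, exactly as render_path() does in its loop.
def _render_usage_example(H, W, focal, pose, render_kwargs_test):
    with torch.no_grad():
        rgb, disp, acc, depth, extras = render(H, W, focal, chunk=1024 * 32,
                                               c2w=pose[:3, :4], **render_kwargs_test)
    return rgb  # [H, W, 3] predicted colors in [0, 1]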
def render_path(render_poses, hwf, chunk, render_kwargs, gt_imgs=None, savedir=None, render_factor=0):
H, W, focal = hwf
if render_factor!=0:
# Render downsampled for speed
H = H//render_factor
W = W//render_factor
focal = focal/render_factor
rgbs = []
disps = []
t = time.time()
for i, c2w in enumerate(tqdm(render_poses)):
print(i, time.time() - t)
t = time.time()
rgb, disp, acc, depth, extras = render(H, W, focal, chunk=chunk, c2w=c2w[:3,:4], retraw=True, **render_kwargs)
rgbs.append(rgb.cpu().numpy())
disps.append(disp.cpu().numpy())
if i==0:
print(rgb.shape, disp.shape)
"""
if gt_imgs is not None and render_factor==0:
p = -10. * np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))
print(p)
"""
if savedir is not None:
rgb8 = to8b(rgbs[-1])
rgb8[np.isnan(rgb8)] = 0
filename = os.path.join(savedir, '{:03d}.png'.format(i))
imageio.imwrite(filename, rgb8)
depth = depth.cpu().numpy()
print("max:", np.nanmax(depth))
# depth = depth / 5 * 255
# depth_color = cv2.applyColorMap(depth.astype(np.uint8), cv2.COLORMAP_JET)[:,:,::-1]
# depth_color[np.isnan(depth_color)] = 0
# imageio.imwrite(os.path.join(savedir, '{:03d}_depth.png'.format(i)), depth_color)
imageio.imwrite(os.path.join(savedir, '{:03d}_depth.png'.format(i)), depth)
np.savez(os.path.join(savedir, '{:03d}.npz'.format(i)), rgb=rgb.cpu().numpy(), disp=disp.cpu().numpy(), acc=acc.cpu().numpy(), depth=depth)
rgbs = np.stack(rgbs, 0)
disps = np.stack(disps, 0)
return rgbs, disps
def render_test_ray(rays_o, rays_d, hwf, ndc, near, far, use_viewdirs, N_samples, network, network_query_fn, **kwargs):
H, W, focal = hwf
if use_viewdirs:
# provide ray directions as input
viewdirs = rays_d
viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
viewdirs = torch.reshape(viewdirs, [-1,3]).float()
if ndc:
# for forward facing scenes
rays_o, rays_d = ndc_rays(H, W, focal, 1., rays_o, rays_d)
# Create ray batch
rays_o = torch.reshape(rays_o, [-1,3]).float()
rays_d = torch.reshape(rays_d, [-1,3]).float()
near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1])
t_vals = torch.linspace(0., 1., steps=N_samples).to(device)
z_vals = near * (1.-t_vals) + far * (t_vals)
z_vals = z_vals.reshape([rays_o.shape[0], N_samples])
rgb, sigma, depth_maps, weights = sample_sigma(rays_o, rays_d, viewdirs, network, z_vals, network_query_fn)
return rgb, sigma, z_vals, depth_maps, weights
def create_nerf(args):
"""Instantiate NeRF's MLP model.
"""
embed_fn, input_ch = get_embedder(args.multires, args.i_embed)
input_ch_views = 0
embeddirs_fn = None
if args.use_viewdirs:
embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed)
output_ch = 5 if args.N_importance > 0 else 4
skips = [4]
if args.alpha_model_path is None:
model = NeRF(D=args.netdepth, W=args.netwidth,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
grad_vars = list(model.parameters())
else:
alpha_model = NeRF(D=args.netdepth_fine, W=args.netwidth_fine,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
print('Alpha model reloading from', args.alpha_model_path)
ckpt = torch.load(args.alpha_model_path)
alpha_model.load_state_dict(ckpt['network_fine_state_dict'])
if not args.no_coarse:
model = NeRF_RGB(D=args.netdepth, W=args.netwidth,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs, alpha_model=alpha_model).to(device)
grad_vars = list(model.parameters())
else:
model = None
grad_vars = []
model_fine = None
if args.N_importance > 0:
if args.alpha_model_path is None:
model_fine = NeRF(D=args.netdepth_fine, W=args.netwidth_fine,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs).to(device)
else:
model_fine = NeRF_RGB(D=args.netdepth_fine, W=args.netwidth_fine,
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=args.use_viewdirs, alpha_model=alpha_model).to(device)
grad_vars += list(model_fine.parameters())
network_query_fn = lambda inputs, viewdirs, network_fn : run_network(inputs, viewdirs, network_fn,
embed_fn=embed_fn,
embeddirs_fn=embeddirs_fn,
netchunk=args.netchunk)
# Create optimizer
optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))
start = 0
basedir = args.basedir
expname = args.expname
##########################
# Load checkpoints
if args.ft_path is not None and args.ft_path!='None':
ckpts = [args.ft_path]
else:
ckpts = [os.path.join(basedir, expname, f) for f in sorted(os.listdir(os.path.join(basedir, expname))) if 'tar' in f]
print('Found ckpts', ckpts)
if len(ckpts) > 0 and not args.no_reload:
ckpt_path = ckpts[-1]
print('Reloading from', ckpt_path)
ckpt = torch.load(ckpt_path)
start = ckpt['global_step']
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
# Load model
model.load_state_dict(ckpt['network_fn_state_dict'])
if model_fine is not None:
model_fine.load_state_dict(ckpt['network_fine_state_dict'])
##########################
render_kwargs_train = {
'network_query_fn' : network_query_fn,
'perturb' : args.perturb,
'N_importance' : args.N_importance,
'network_fine' : model_fine,
'N_samples' : args.N_samples,
'network_fn' : model,
'use_viewdirs' : args.use_viewdirs,
'white_bkgd' : args.white_bkgd,
'raw_noise_std' : args.raw_noise_std,
}
# NDC only good for LLFF-style forward facing data
if args.dataset_type != 'llff' or args.no_ndc:
print('Not ndc!')
render_kwargs_train['ndc'] = False
render_kwargs_train['lindisp'] = args.lindisp
else:
render_kwargs_train['ndc'] = True
render_kwargs_test = {k : render_kwargs_train[k] for k in render_kwargs_train}
render_kwargs_test['perturb'] = False
render_kwargs_test['raw_noise_std'] = 0.
if args.sigma_loss:
render_kwargs_train['sigma_loss'] = SigmaLoss(args.N_samples, args.perturb, args.raw_noise_std)
##########################
return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer
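# Hedged usage sketch (the near/far values are assumptions and depend on the dataset): the
# training script updates the kwargs returned by create_nerf with scene bounds before rendering.
def _create_nerf_usage_example(args):
    render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer = create_nerf(args)
    bds_dict = {'near': 2., 'far': 6.}
    render_kwargs_train.update(bds_dict)
    render_kwargs_test.update(bds_dict)
    return render_kwargs_train, render_kwargs_test, start, optimizer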
def render_rays(ray_batch,
network_fn,
network_query_fn,
N_samples,
retraw=False,
lindisp=False,
perturb=0.,
N_importance=0,
network_fine=None,
white_bkgd=False,
raw_noise_std=0.,
verbose=False,
pytest=False,
sigma_loss=None):
"""Volumetric rendering.
Args:
ray_batch: array of shape [batch_size, ...]. All information necessary
for sampling along a ray, including: ray origin, ray direction, min
dist, max dist, and unit-magnitude viewing direction.
network_fn: function. Model for predicting RGB and density at each point
in space.
network_query_fn: function used for passing queries to network_fn.
N_samples: int. Number of different times to sample along each ray.
retraw: bool. If True, include model's raw, unprocessed predictions.
lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
random points in time.
N_importance: int. Number of additional times to sample along each ray.
These samples are only passed to network_fine.
network_fine: "fine" network with same spec as network_fn.
white_bkgd: bool. If True, assume a white background.
raw_noise_std: float. Std dev of noise added to the raw sigma predictions, used as regularization.
verbose: bool. If True, print more debugging info.
Returns:
rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
disp_map: [num_rays]. Disparity map. 1 / depth.
acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.
raw: [num_rays, num_samples, 4]. Raw predictions from model.
rgb0: See rgb_map. Output for coarse model.
disp0: See disp_map. Output for coarse model.
acc0: See acc_map. Output for coarse model.
z_std: [num_rays]. Standard deviation of distances along ray for each
sample.
"""
N_rays = ray_batch.shape[0]
rays_o, rays_d = ray_batch[:,0:3], ray_batch[:,3:6] # [N_rays, 3] each
viewdirs = ray_batch[:,-3:] if ray_batch.shape[-1] > 9 else None
bounds = torch.reshape(ray_batch[...,6:8], [-1,1,2])
near, far = bounds[...,0], bounds[...,1] # [-1,1]
t_vals = torch.linspace(0., 1., steps=N_samples).to(device)
if not lindisp:
z_vals = near * (1.-t_vals) + far * (t_vals)
else:
z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
z_vals = z_vals.expand([N_rays, N_samples])
if perturb > 0.:
# get intervals between samples
mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
upper = torch.cat([mids, z_vals[...,-1:]], -1)
lower = torch.cat([z_vals[...,:1], mids], -1)
# stratified samples in those intervals
t_rand = torch.rand(z_vals.shape).to(device)
# Pytest, overwrite u with numpy's fixed random numbers
if pytest:
np.random.seed(0)
t_rand = np.random.rand(*list(z_vals.shape))
t_rand = torch.Tensor(t_rand).to(device)
z_vals = lower + (upper - lower) * t_rand
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]
# raw = run_network(pts)
if network_fn is not None:
raw = network_query_fn(pts, viewdirs, network_fn)
rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
else:
# rgb_map, disp_map, acc_map = None, None, None
# raw2alpha = lambda raw, dists, act_fn=F.relu: 1.-torch.exp(-act_fn(raw)*dists)
# noise = 0
# alpha = network_query_fn(pts, viewdirs, network_fine.alpha_model)[...,3]
if network_fine.alpha_model is not None:
raw = network_query_fn(pts, viewdirs, network_fine.alpha_model)
rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
else:
raw = network_query_fn(pts, viewdirs, network_fine)
rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
if N_importance > 0:
rgb_map_0, disp_map_0, acc_map_0 = rgb_map, disp_map, acc_map
z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])
z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.), pytest=pytest)
z_samples = z_samples.detach()
z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3]
run_fn = network_fn if network_fine is None else network_fine
# raw = run_network(pts, fn=run_fn)
raw = network_query_fn(pts, viewdirs, run_fn)
rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map, 'depth_map' : depth_map}
if retraw:
ret['raw'] = raw
if N_importance > 0:
ret['rgb0'] = rgb_map_0
ret['disp0'] = disp_map_0
ret['acc0'] = acc_map_0
ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False) # [N_rays]
if sigma_loss is not None and ray_batch.shape[-1] > 11:
depths = ray_batch[:,8]
ret['sigma_loss'] = sigma_loss.calculate_loss(rays_o, rays_d, viewdirs, near, far, depths, network_query_fn, network_fine)
for k in ret:
if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:
print(f"! [Numerical Error] {k} contains nan or inf.")
return ret
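# Hedged sketch of the compositing performed by raw2outputs (defined elsewhere) on the 'raw'
# predictions used above: alpha_i = 1 - exp(-relu(sigma_i) * delta_i), weights are alpha times
# the accumulated transmittance, and the output maps are weighted sums. Illustrative only.
def _composite_sketch(raw, z_vals, rays_d):
    dists = z_vals[..., 1:] - z_vals[..., :-1]
    dists = torch.cat([dists, 1e10 * torch.ones_like(dists[..., :1])], -1)
    dists = dists * torch.norm(rays_d[..., None, :], dim=-1)             # to metric distance
    rgb = torch.sigmoid(raw[..., :3])                                    # [N_rays, N_samples, 3]
    alpha = 1. - torch.exp(-torch.relu(raw[..., 3]) * dists)             # [N_rays, N_samples]
    trans = torch.cumprod(torch.cat([torch.ones_like(alpha[..., :1]),
                                     1. - alpha + 1e-10], -1), -1)[..., :-1]
    weights = alpha * trans
    rgb_map = torch.sum(weights[..., None] * rgb, -2)                    # [N_rays, 3]
    depth_map = torch.sum(weights * z_vals, -1)                          # [N_rays]
    return rgb_map, depth_map, weights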
def config_parser():
import configargparse
parser = configargparse.ArgumentParser()
parser.add_argument('--config', is_config_file=True,
help='config file path')
parser.add_argument("--expname", type=str,
help='experiment name')
parser.add_argument("--basedir", type=str, default='./logs/',
help='where to store ckpts and logs')
parser.add_argument("--datadir", type=str, default='./data/llff/fern',
help='input data directory')
# training options
parser.add_argument("--netdepth", type=int, default=8,
help='layers in network')
parser.add_argument("--netwidth", type=int, default=256,
help='channels per layer')
parser.add_argument("--netdepth_fine", type=int, default=8,
help='layers in fine network')
parser.add_argument("--netwidth_fine", type=int, default=256,
help='channels per layer in fine network')
parser.add_argument("--N_rand", type=int, default=32*32*4,
help='batch size (number of random rays per gradient step)')
parser.add_argument("--lrate", type=float, default=5e-4,
help='learning rate')
parser.add_argument("--lrate_decay", type=int, default=250,
help='exponential learning rate decay (in 1000 steps)')
parser.add_argument("--chunk", type=int, default=1024*32,
help='number of rays processed in parallel, decrease if running out of memory')
parser.add_argument("--netchunk", type=int, default=1024*64,
help='number of pts sent through network in parallel, decrease if running out of memory')
parser.add_argument("--no_batching", action='store_true',
help='only take random rays from 1 image at a time')
parser.add_argument("--no_reload", action='store_true',
help='do not reload weights from saved ckpt')
parser.add_argument("--ft_path", type=str, default=None,
help='specific weights npy file to reload for coarse network')
# rendering options
parser.add_argument("--N_samples", type=int, default=64,
help='number of coarse samples per ray')
parser.add_argument("--N_importance", type=int, default=0,
help='number of additional fine samples per ray')
parser.add_argument("--perturb", type=float, default=1.,
help='set to 0. for no jitter, 1. for jitter')
parser.add_argument("--use_viewdirs", action='store_true',
help='use full 5D input instead of 3D')
parser.add_argument("--i_embed", type=int, default=0,
help='set 0 for default positional encoding, -1 for none')
parser.add_argument("--multires", type=int, default=10,
help='log2 of max freq for positional encoding (3D location)')
parser.add_argument("--multires_views", type=int, default=4,
help='log2 of max freq for positional encoding (2D direction)')
parser.add_argument("--raw_noise_std", type=float, default=0.,
help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
parser.add_argument("--render_only", action='store_true',
help='do not optimize, reload weights and render out render_poses path')
parser.add_argument("--render_test", action='store_true',
help='render the test set instead of render_poses path')
parser.add_argument("--render_test_ray", action='store_true',
help='render the test set instead of render_poses path')
parser.add_argument("--render_train", action='store_true',
help='render the train set instead of render_poses path')
parser.add_argument("--render_mypath", action='store_true',
help='render the test path')
parser.add_argument("--render_factor", type=int, default=0,
help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
# training options
parser.add_argument("--precrop_iters", type=int, default=0,
help='number of steps to train on central crops')
parser.add_argument("--precrop_frac", type=float,
default=.5, help='fraction of img taken for central crops')
# dataset options
parser.add_argument("--dataset_type", type=str, default='llff',
help='options: llff / blender / deepvoxels')
parser.add_argument("--testskip", type=int, default=8,
help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
## deepvoxels flags
parser.add_argument("--shape", type=str, default='greek',
help='options : armchair / cube / greek / vase')
## blender flags
parser.add_argument("--white_bkgd", action='store_true',
help='set to render synthetic data on a white bkgd (always use for dvoxels)')
parser.add_argument("--half_res", action='store_true',
help='load blender synthetic data at 400x400 instead of 800x800')
## llff flags
parser.add_argument("--factor", type=int, default=8,
help='downsample factor for LLFF images')
parser.add_argument("--no_ndc", action='store_true',
help='do not use normalized device coordinates (set for non-forward facing scenes)')
parser.add_argument("--lindisp", action='store_true',
help='sampling linearly in disparity rather than depth')
parser.add_argument("--spherify", action='store_true',
help='set for spherical 360 scenes')
parser.add_argument("--llffhold", type=int, default=8,
help='will take every 1/N images as LLFF test set, paper uses 8')
# logging/saving options
parser.add_argument("--i_print", type=int, default=100,
help='frequency of console printout and metric logging')
parser.add_argument("--i_img", type=int, default=500,
help='frequency of tensorboard image logging')
parser.add_argument("--i_weights", type=int, default=10000,
help='frequency of weight ckpt saving')
parser.add_argument("--i_testset", type=int, default=50000,
help='frequency of testset saving')
parser.add_argument("--i_video", type=int, default=50000,
help='frequency of render_poses video saving')
# debug
parser.add_argument("--debug", action='store_true')
# new experiment by kangle
parser.add_argument("--N_iters", type=int, default=200000,
help='number of iters')
parser.add_argument("--alpha_model_path", type=str, default=None,
help='predefined alpha model')
parser.add_argument("--no_coarse", action='store_true',
help="Remove coarse network.")
parser.add_argument("--train_scene", nargs='+', type=int,
help='id of scenes used to train')
parser.add_argument("--test_scene", nargs='+', type=int,
help='id of scenes used to test')
parser.add_argument("--colmap_depth", action='store_true',
help="Use depth supervision by colmap.")
parser.add_argument("--depth_loss", action='store_true',
help="Use depth supervision by colmap - depth loss.")
parser.add_argument("--depth_lambda", type=float, default=0.1,
help="Depth lambda used for loss.")
parser.add_argument("--sigma_loss", action='store_true',
help="Use depth supervision by colmap - sigma loss.")
parser.add_argument("--sigma_lambda", type=float, default=0.1,
help="Sigma lambda used for loss.")
parser.add_argument("--weighted_loss", action='store_true',
help="Use weighted loss by reprojection error.")
parser.add_argument("--relative_loss", action='store_true',
help="Use relative loss.")
parser.add_argument("--depth_with_rgb", action='store_true',
help="single forward for both depth and rgb")
parser.add_argument("--normalize_depth", action='store_true',
help="normalize depth before calculating loss")
parser.add_argument("--depth_rays_prop", type=float, default=0.5,
help="Proportion of depth rays.")
return parser
def train():
parser = config_parser()
args = parser.parse_args()
if args.dataset_type == 'llff':
if args.colmap_depth:
depth_gts = load_colmap_depth(args.datadir, factor=args.factor, bd_factor=.75)
images, poses, bds, render_poses, i_test = load_llff_data(args.datadir, args.factor,
recenter=True, bd_factor=.75,
spherify=args.spherify)
hwf = poses[0,:3,-1]
poses = poses[:,:3,:4]
taxes = []
taxes_render = []
for i in range(images.shape[0]):
axis = trimesh.creation.axis(transform=np.concatenate([poses[i], np.array([[0, 0, 0, 1]])], axis=0))
taxes.append(axis)
# raxis = trimesh.creation.axis(transform=np.concatenate([poses[i], np.array([[0, 0, 0, 1]])], axis=0))
# taxes_render.append(axis)
scene = trimesh.Scene()
scene.add_geometry(taxes)
scene.show()
from IPython import embed; embed()
print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)
if not isinstance(i_test, list):
i_test = [i_test]
if args.llffhold > 0:
print('Auto LLFF holdout,', args.llffhold)
i_test = np.arange(images.shape[0])[::args.llffhold]
if args.test_scene is not None:
i_test = np.array([i for i in args.test_scene])
if i_test[0] < 0:
i_test = []
i_val = i_test
if args.train_scene is None:
i_train = np.array([i for i in np.arange(int(images.shape[0])) if
(i not in i_test and i not in i_val)])
else:
i_train = np.array([i for i in args.train_scene if
(i not in i_test and i not in i_val)])
print('DEFINING BOUNDS')
if args.no_ndc:
near = np.ndarray.min(bds) * .9
far = np.ndarray.max(bds) * 1.
else:
near = 0.
far = 1.
print('NEAR FAR', near, far)
print('here in load_llff')
from IPython import embed; embed()
elif args.dataset_type == 'dtu':
images, poses, hwf = load_dtu_data(args.datadir)
print('Loaded DTU', images.shape, poses.shape, hwf, args.datadir)
if args.test_scene is not None:
i_test = np.array([i for i in args.test_scene])
if i_test[0] < 0:
i_test = []
i_val = i_test
if args.train_scene is None:
i_train = np.array([i for i in np.arange(int(images.shape[0])) if
(i not in i_test and i not in i_val)])
else:
i_train = np.array([i for i in args.train_scene if
(i not in i_test and i not in i_val)])
near = 0.1
far = 5.0
if args.colmap_depth:
depth_gts = load_colmap_depth(args.datadir, factor=args.factor, bd_factor=.75)
elif args.dataset_type == 'realsense':
images, poses, hwf, render_poses = load_realsense_data(args.datadir)
# print('here after loading images, poses, hwf, render poses')
# from IPython import embed; embed()
# import trimesh
# trimesh for debugging camera poses
taxes = []
taxes_render = []
for i in range(images.shape[0]):
axis = trimesh.creation.axis(transform=np.concatenate([poses[i],
|
np.array([[0, 0, 0, 1]])
|
numpy.array
|
import numpy as np
def findchildren(swc, parent_id):
while np.argwhere(swc[:,6] == parent_id).size != 0:
direct_children_indices = np.argwhere(swc[:,6] == parent_id)
if len(direct_children_indices) > 1:
try:
children_nodes
except NameError:
# if first time running and children_nodes does not exist
children_nodes = direct_children_indices
else:
# when children_nodes already exist
children_nodes = np.concatenate((children_nodes, direct_children_indices))
try:
visited_node
except NameError:
# if first time running and visited node does not exist
visited_node = np.argwhere(swc[:,0] == parent_id)
else:
visited_node = np.concatenate((visited_node, np.argwhere(swc[:,0] == parent_id)))
# new parent id from the list of children
parent_id = swc[children_nodes[-1],0]
elif len(direct_children_indices) == 1:
try:
children_nodes
except NameError:
# if first time running and children_nodes does not exist
children_nodes = direct_children_indices
else:
# when children_nodes already exist
children_nodes =
|
np.concatenate((children_nodes, direct_children_indices))
|
numpy.concatenate
|
import numpy as np
import tensorflow as tf
import cv2
import glob
import tensorflow.contrib.slim as slim
from collections import OrderedDict
import os
def get_variables_to_restore(scope_to_include, suffix_to_exclude):
"""to parse which var to include and which
var to exclude"""
vars_to_include = []
for scope in scope_to_include:
vars_to_include += slim.get_variables(scope)
vars_to_exclude = set()
for scope in suffix_to_exclude:
vars_to_exclude |= set(
slim.get_variables_by_suffix(scope))
return [v for v in vars_to_include if v not in vars_to_exclude]
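# Hedged usage sketch (the 'generator' scope and checkpoint path are assumptions): restore only
# the variables under selected scopes while skipping optimizer slots, using the include/exclude
# parsing above (TF1 / slim style, matching the imports in this file).
def _restore_usage_example(sess, ckpt_path='./checkpoints/model.ckpt'):
    restore_vars = get_variables_to_restore(['generator'], ['Adam', 'Adam_1'])
    saver = tf.train.Saver(var_list=restore_vars)
    saver.restore(sess, ckpt_path)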
def remove_first_scope(name):
return '/'.join(name.split('/')[1:])
def collect_vars(scope, start=None, end=None, prepend_scope=None):
vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
var_dict = OrderedDict()
if isinstance(start, str):
for i, var in enumerate(vars):
var_name = remove_first_scope(var.op.name)
if var_name.startswith(start):
start = i
break
if isinstance(end, str):
for i, var in enumerate(vars):
var_name = remove_first_scope(var.op.name)
if var_name.startswith(end):
end = i
break
for var in vars[start:end]:
var_name = remove_first_scope(var.op.name)
if prepend_scope is not None:
var_name = os.path.join(prepend_scope, var_name)
var_dict[var_name] = var
return var_dict
# Helper functions for data augmentation
def data_augmentation_together(batch, bg, img_size):
batch_size = batch.shape[0]
# left-right flip
if np.random.rand(1) > 0.5:
batch = batch[:, :, ::-1, :]
bg = bg[:, :, ::-1, :]
# up-down flip
if np.random.rand(1) > 0.5:
batch = batch[:, ::-1, :, :]
bg = bg[:, ::-1, :, :]
# rotate 90
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=1) # 90
bg[id, :, :, :] = np.rot90(bg[id, :, :, :], k=1)
# rotate 180
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=2) # 180
bg[id, :, :, :] = np.rot90(bg[id, :, :, :], k=2) # 180
# rotate 270
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=-1) # 270
bg[id, :, :, :] = np.rot90(bg[id, :, :, :], k=-1) # 270
# random crop and resize 0.5~1.0
if np.random.rand(1) > 0.5:
IMG_SIZE = batch.shape[1]
scale = np.random.rand(1) * 0.5 + 0.5
crop_height = int(scale * img_size)
crop_width = int(scale * img_size)
x_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
y_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
x_nd = x_st + crop_width
y_nd = y_st + crop_height
for id in range(batch_size):
cropped_img = batch[id, y_st:y_nd, x_st:x_nd, :]
cropped_bg = bg[id, y_st:y_nd, x_st:x_nd, :]
batch[id, :, :, :] = cv2.resize(cropped_img, dsize=(img_size, img_size))
bg[id, :, :, :] = cv2.resize(cropped_bg, dsize=(img_size, img_size))
return batch, bg
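# Hedged usage sketch (batch size and image size are assumptions): the paired augmentation
# applies the same flips/rotations/crops to the cloudy batch and its clean background, so the
# two stay pixel-aligned for supervised training.
def _paired_augmentation_example():
    img_size = 256
    batch = np.random.rand(4, img_size, img_size, 3)    # cloudy inputs
    bg = np.random.rand(4, img_size, img_size, 3)       # matching clear backgrounds
    batch_aug, bg_aug = data_augmentation_together(batch, bg, img_size)
    return batch_aug.shape == bg_aug.shape               # True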
def data_augmentation(batch, img_size):
batch_size = batch.shape[0]
# left-right flip
if np.random.rand(1) > 0.5:
batch = batch[:, :, ::-1, :]
# up-down flip
if np.random.rand(1) > 0.5:
batch = batch[:, ::-1, :, :]
# rotate 90
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=1) # 90
# rotate 180
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=2) # 180
# rotate 270
if np.random.rand(1) > 0.5:
for id in range(batch_size):
batch[id, :, :, :] = np.rot90(batch[id, :, :, :], k=-1) # 270
# random crop and resize 0.5~1.0
if np.random.rand(1) > 0.5:
IMG_SIZE = batch.shape[1]
scale = np.random.rand(1) * 0.5 + 0.5
crop_height = int(scale * img_size)
crop_width = int(scale * img_size)
x_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
y_st = int((1 - scale) * np.random.rand(1) * (img_size - 1))
x_nd = x_st + crop_width
y_nd = y_st + crop_height
for id in range(batch_size):
cropped_img = batch[id, y_st:y_nd, x_st:x_nd, :]
batch[id, :, :, :] = cv2.resize(cropped_img, dsize=(img_size, img_size))
return batch
def img_mask_batch(DATA, batch_size, img_size, with_data_augmentation=True):
data1 = DATA['thin_cloud_images']
n, h, w, c = data1.shape
idx = np.random.choice(range(n), batch_size, replace=False)
batch = data1[idx, :, :, :]
data2 = DATA['background_images']
bg = data2[idx, :, :, :] # Extract the bg image corresponding to the idx number
if with_data_augmentation is True: # The images were selected and then enhanced
batch, bg = data_augmentation_together(batch, bg, img_size)
return batch, bg
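# Hedged usage sketch (batch/image sizes are assumptions; DATA follows the dict layout built by
# load_images below): draw an aligned (cloudy, background) pair for one training step.
def _training_batch_example(DATA):
    batch, bg = img_mask_batch(DATA, batch_size=8, img_size=256, with_data_augmentation=True)
    return batch.shape, bg.shape                          # both (8, 256, 256, 3)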
def test_batch(test_data, i):
data1 = test_data['test_thin_cloud_images']
data2 = test_data['test_background_images']
idx = np.array([i])
test_batch = data1[idx, :, :, :]
test_bg = data2[idx, :, :, :]
return test_batch, test_bg
def plot2x2(samples):
IMG_SIZE = samples.shape[1]
img_grid = np.zeros((2 * IMG_SIZE, 2 * IMG_SIZE, 3),np.uint8)
for i in range(len(samples)):
py, px = IMG_SIZE * int(i / 2), IMG_SIZE * (i % 2)
this_img = samples[i, :, :, :]
this_img = np.uint8(this_img*255)
img_grid[py:py + IMG_SIZE, px:px + IMG_SIZE, :] = this_img
return img_grid
def plot2x2_test(samples):
IMG_SIZE = samples.shape[1]
img_grid = np.zeros((IMG_SIZE, IMG_SIZE, 3),np.uint8)
for i in range(len(samples)):
py, px = IMG_SIZE * int(i / 2), IMG_SIZE * (i % 2)
this_img = samples[i, :, :, :]
this_img = np.uint8(this_img*255)
img_grid[py:py + IMG_SIZE, px:px + IMG_SIZE, :] = this_img
img_grid = cv2.cvtColor(img_grid, cv2.COLOR_YUV2RGB)
return img_grid
def load_images(image_dir, img_size):
data = {
'background_images': 0,
'thin_cloud_images': 0,
}
# load images
img_dirs = glob.glob(os.path.join(image_dir, 'label/*.png'))
m_tr_imgs = len(img_dirs)
image_buff = np.zeros((m_tr_imgs, img_size, img_size, 3))
for i in range(m_tr_imgs):
file_name = img_dirs[i]
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
img = cv2.resize(img, (img_size, img_size))/255.
image_buff[i, :, :, :] = img
i += 1
if np.mod(i, 100) == 0:
print('reading background images: ' + str(i) + ' / ' + str(m_tr_imgs))
data['background_images'] = image_buff
img_dirs = glob.glob(os.path.join(image_dir, 'cloud/*.png'))
m_tr_imgs = len(img_dirs)
image_buff =
|
np.zeros((m_tr_imgs, img_size, img_size, 3))
|
numpy.zeros
|
import copy
import numpy
import matplotlib.pyplot as plt
import pdb
import accelerated_functions as af
import constants as c
from Boundaries.boundary import Boundary
from Species.species import Species
from solver import location_indexes_inv
from timing import Timing
#Inner_2D_Rectangular (Inherits from Boundary):
#
#Definition = Inner rectangular boundary for a rectangular mesh
#Attributes:
# +type (string) = "Inner - 2D_Rectangular"
# +xmin (double) = Left limit of the domain (closest to the Sun).
# +xmax (double) = Right limit of the domain (farthest from the Sun).
# +ymin (double) = Bottom limit of the domain.
# +ymax (double) = Top limit of the domain.
# +bottom ([int]) = array of indices that represent the bottom of the boundary.
# +top ([int]) = array of indices that represent the top of the boundary.
# +left ([int]) = array of indices that represent the left of the boundary.
# +right ([int]) = array of indices that represent the right of the boundary.
# +ind_inner([int]) = array of indices that lie inside the object surrounded by this boundary.
# +Boundary attributes
#Methods:
# +Boundary methods.
# +applyParticleBoundary(Species) = Applies the boundary condition to the species passed as argument.
# type_boundary indicates the type of boundary method to apply to particles. 'open', the default method, deletes them. 'reflective' reflects them back into the domain.
# **kwargs may contain arguments necessary for inner methods.
# +applyParticleOpenBoundary(Species) = Deletes particles of Species outside of the boundaries.
# +applyParticleReflectiveBoundary(Species species, Species old_species) = Reflects the particles back into the domain.
# old_species refers to the state of species in the previous step.
# +createDummyBox([ind]location, PIC pic, Species species, [double] delta_n, [double] n_vel, [double] shift_vel) = for every location,
# create the dummy boxes outside of the domain with particles in them, using delta_n (density), n_vel (thermal velocity), shift_vel (velocity shared by all particles).
# +injectParticlesDummyBox([int] location, PIC pic, Field field, Species species, [double] delta_n, [double] n_vel, [double] shift_vel) =
# Inject the particles in location indices by creating dummy boxes around them, creating particles
# inside of them, moving the particles, and then adding the ones that entered into the computational domain.
# +createDistributionAtBorder([int] location, Motion_Solver part_solver, Species species, [double] delta_n): (([double,double] pos, [int] border), [int] repeats) =
# The function creates particle positions of 'species' along the region denoted by 'location', under a uniform distribution with a density 'delta_n', where
# delta_n indicates the density per 'location' node.
# Return: 'pos' is the numpy array indicating the positions of the new particles, 'border' indicates in which border they are created, and
# 'repeats' indicates for each position, how many particles are expected to be created.
# The tuple (pos, border) is referred to as flux in the program.
# +injectParticlesAtPositions('flux', Motion_Solver part_solver, Field field, Species species, [double] delta_n, [double] n_vel, double delta_pos) =
# The method creates 'delta_n' particles at each entry of 'pos' stored in the parameter 'flux' (See Documentation of 'createDistributionAtBorder').
# The new particles are stored in 'species', shifted 'delta_pos' away from their borders, initiated with 'n_vel' velocities and prepared in time
# according to the method used 'part_solver' for advancing particles.
class Inner_2D_Rectangular(Boundary):
type = "Inner - 2D_Rectangular"
def __init__(self, x_min, x_max , y_min, y_max, n_material):
self.material = n_material
self.xmin = x_min
self.xmax = x_max
self.ymin = y_min
self.ymax = y_max
self.bottom = []
self.top = []
self.left = []
self.right = []
self.ind_inner = []
self.location = []
self.directions = []
self.areas = []
self.adjacent = []
## +applyElectricBoundary(Electric_Field) = Applies the boundary condition to the electric field passed as argument. So far a 0V Dirichlet boundary condition is applied.
##NOTE: Modified to add a constant -20V at the right boundary (2020_03_12)
# def applyElectricBoundary(self, e_field):
# #Location of dirichlet and neumann boundaries
# dirichlet_loc_1 = numpy.arange(c.NX-1, c.NX*c.NY, c.NX)
# neumann_loc = numpy.delete(self.location, numpy.arange(c.NX-1,c.NX+(c.NY-1)*2, 2))
# dirichlet_loc_2 = numpy.arange(0, c.NX*c.NY, c.NX)
# #Values
# dirichlet_val_1 = -20*numpy.ones_like(dirichlet_loc_1)
# dirichlet_val_2 = numpy.zeros_like(dirichlet_loc_2)
# neumann_val = numpy.zeros_like(neumann_loc)
# #Applying values
# e_field.dirichlet(dirichlet_loc_1, dirichlet_val_1)
# e_field.dirichlet(dirichlet_loc_2, dirichlet_val_2)
# e_field.neumann(neumann_loc, neumann_val)
def checkPositionInBoundary(self, pos, surface = False):
xmin = self.xmin
xmax = self.xmax
ymin = self.ymin
ymax = self.ymax
#Inner boundary
if surface:
mask2 = af.geq_2D_p(pos, xmin, xmax, ymin, ymax)
else:
mask2 = af.g_2D_p(pos, xmin, xmax, ymin, ymax)
return mask2
# +applyElectricBoundary(Electric_Field) = Applies the boundary condition to the electric field passed as argument. So far a 0V Dirichlet boundary condition is applied.
def applyElectricBoundary(self, e_field):
values = e_field.potential[self.location]
e_field.dirichlet(values, self, e_field.pic.mesh.nx, e_field.pic.mesh.ny, e_field.pic.mesh.dx, e_field.pic.mesh.dy)
# +applyMagneticBoundary(Magnetic_Field) = Applies the boundary condition to the magnetic field passed as argument.
# No magnetic field so far
def applyMagneticBoundary(self, m_field):
pass
# +applyParticleBoundary(Species) = Applies the boundary condition to the species passed as argument.
# type_boundary indicates the type of boundary method to apply to particles. 'open', the default method, deletes them. 'reflective' reflects them back into the domain.
# **kwargs may contain arguments necessary for inner methods.
def applyParticleBoundary(self, species, type_boundary, albedo = None, **kwargs):
np = species.part_values.current_n
xmin = self.xmin
xmax = self.xmax
ymin = self.ymin
ymax = self.ymax
# Finding the particles out of domain
out_ind = af.l_2D_p(species.part_values.position[:np,:], xmin, xmax, ymin, ymax, prec = 0)
out_ind = numpy.flatnonzero(out_ind)
if type_boundary == 'mixed':
rand = numpy.random.rand(len(out_ind))
mask_albedo = rand < albedo
self.applyParticleReflectiveBoundary(species, out_ind[mask_albedo], old_position = kwargs['old_position'])
return self.applyParticleOpenBoundary(species, out_ind[numpy.logical_not(mask_albedo)], old_position = kwargs['old_position'])
elif type_boundary == 'open':
return self.applyParticleOpenBoundary(species, out_ind, old_position = kwargs['old_position'])
elif type_boundary == 'reflective':
return self.applyParticleReflectiveBoundary(species, out_ind, old_position = kwargs['old_position'])
else:
raise ValueError("Called invalid boundary method")
# +applyParticleOpenBoundary(Species) = Deletes particles at or outside of the boundaries. In this case the particles that are to be eliminated are sent in 'ind'.
def applyParticleOpenBoundary(self, species, ind, old_position = None, prec = 0):
#Just for convenience in writing
np = species.part_values.current_n
coord = None
vel = None
tan_vel = None
cos = None
if old_position is not None:
#Bottom
botind = numpy.nonzero((old_position[ind,1]-self.ymin) <= prec)[0]
slope = (species.part_values.position[ind[botind],0]-old_position[ind[botind],0])/\
(species.part_values.position[ind[botind],1]-old_position[ind[botind],1])
hit = slope*(self.ymin-old_position[ind[botind],1])+old_position[ind[botind],0]
hit_ind = numpy.nonzero(numpy.logical_and(self.xmin < hit, hit < self.xmax))[0]
coord = numpy.append(hit[hit_ind][:,None], numpy.append(self.ymin*numpy.ones_like((hit_ind))[:,None],\
numpy.zeros_like((hit_ind), dtype = numpy.short)[:,None], axis = 1), axis = 1)
coord = numpy.append(coord, species.part_values.spwt[ind[botind[hit_ind]]][:,None], axis = 1)
vel = copy.copy(species.part_values.velocity[ind[botind[hit_ind]],1])
tan_vel = copy.copy(species.part_values.velocity[ind[botind[hit_ind]],0])
cos = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Left
leftind = numpy.nonzero((old_position[ind,0]-self.xmin) <= prec)[0]
slope = (species.part_values.position[ind[leftind],1]-old_position[ind[leftind],1])/\
(species.part_values.position[ind[leftind],0]-old_position[ind[leftind],0])
hit = slope*(self.xmin-old_position[ind[leftind],0])+old_position[ind[leftind],1]
hit_ind = numpy.nonzero(numpy.logical_and(self.ymin < hit, hit < self.ymax))[0]
coord_l = numpy.append(self.xmin*numpy.ones_like((hit_ind))[:,None], numpy.append(hit[hit_ind][:,None],\
3*numpy.ones_like((hit_ind), dtype = numpy.short)[:,None], axis = 1), axis = 1)
coord_l = numpy.append(coord_l, species.part_values.spwt[ind[leftind[hit_ind]]][:,None], axis = 1)
vel_l = species.part_values.velocity[ind[leftind[hit_ind]], 0]
tan_vel_l = species.part_values.velocity[ind[leftind[hit_ind]], 1]
cos_l = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Right
rightind = numpy.nonzero((old_position[ind,0]-self.xmax) >= -prec)[0]
slope = (species.part_values.position[ind[rightind],1]-old_position[ind[rightind],1])/\
(species.part_values.position[ind[rightind],0]-old_position[ind[rightind],0])
hit = slope*(self.xmax-old_position[ind[rightind],0])+old_position[ind[rightind],1]
hit_ind = numpy.nonzero(numpy.logical_and(self.ymin < hit, hit < self.ymax))[0]
coord_r = numpy.append(self.xmax*numpy.ones_like((hit_ind))[:,None], numpy.append(hit[hit_ind][:,None],\
numpy.ones_like((hit_ind), dtype = numpy.short)[:,None], axis = 1), axis = 1)
coord_r = numpy.append(coord_r, species.part_values.spwt[ind[rightind[hit_ind]]][:,None], axis = 1)
vel_r = -species.part_values.velocity[ind[rightind[hit_ind]], 0]
tan_vel_r = species.part_values.velocity[ind[rightind[hit_ind]], 1]
cos_r = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Top
topind = numpy.nonzero((old_position[ind,1]-self.ymax) >= -prec)[0]
slope = (species.part_values.position[ind[topind],0]-old_position[ind[topind],0])/\
(species.part_values.position[ind[topind],1]-old_position[ind[topind],1])
hit = slope*(self.ymax-old_position[ind[topind],1])+old_position[ind[topind],0]
hit_ind = numpy.nonzero(numpy.logical_and(self.xmin < hit, hit < self.xmax))[0]
coord_t = numpy.append(hit[hit_ind][:,None], numpy.append(self.ymax*numpy.ones_like((hit_ind))[:,None],\
2*numpy.ones_like((hit_ind), dtype = numpy.short)[:,None], axis = 1), axis = 1)
coord_t = numpy.append(coord_t, species.part_values.spwt[ind[topind[hit_ind]]][:,None], axis = 1)
vel_t = -species.part_values.velocity[ind[topind[hit_ind]], 1]
tan_vel_t = species.part_values.velocity[ind[topind[hit_ind]], 0]
cos_t = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Preparing the arrays that will be returned
coord = numpy.concatenate((coord, coord_l, coord_r, coord_t), axis = 0)
vel = numpy.concatenate((vel, vel_l, vel_r, vel_t), axis = 0)
tan_vel = numpy.concatenate((tan_vel, tan_vel_l, tan_vel_r, tan_vel_t), axis = 0)
cos = numpy.concatenate((cos, cos_l, cos_r, cos_t), axis = 0)
#Evaluating that everything goes as expected
assert len(ind) == numpy.shape(coord)[0], "There should not be particles inside the boundaries prev. to this state, or duplicated particles"
# Eliminating particles
self.removeParticles(species,ind)
count2 = numpy.shape(ind)[0]
print('Number of {} eliminated - inner:'.format(species.name), count2)
#Positions of deleted particles for posterior processing of flux
return {'flux': (coord, vel, tan_vel, cos), 'del_ind': ind}
# +applyParticleOpenBoundaryInverse(Species) = This function, as 'applyParticleOpenBoundary', identifies where and under which parameters the particles cross the border,
# but does not eliminate them. The method is used for calculating the Outgoing flux.
def applyParticleOpenBoundaryInverse(self, species, ind, old_position = None):
#Just for convenience in writing
np = species.part_values.current_n
coord = None
vel = None
tan_vel = None
cos = None
if old_position is not None:
#Bottom
botind = numpy.nonzero(species.part_values.position[ind,1] < self.ymin)[0]
slope = (species.part_values.position[ind[botind],0]-old_position[ind[botind],0])/\
(species.part_values.position[ind[botind],1]-old_position[ind[botind],1])
hit = slope*(self.ymin-old_position[ind[botind],1])+old_position[ind[botind],0]
hit_ind = numpy.nonzero(numpy.logical_and(self.xmin < hit, hit < self.xmax))[0]
coord = numpy.append(hit[hit_ind][:,None], numpy.append(self.ymin*numpy.ones_like((hit_ind))[:,None],\
numpy.zeros_like((hit_ind)[:,None], dtype = numpy.short), axis = 1), axis = 1)
coord = numpy.append(coord, species.part_values.spwt[ind[botind[hit_ind]]][:,None], axis = 1)
vel = -copy.copy(species.part_values.velocity[ind[botind[hit_ind]],1])
#tan_vel = copy.copy(species.part_values.velocity[ind[botind[hit_ind]],0])
#cos = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Left
leftind = numpy.nonzero(species.part_values.position[ind,0] < self.xmin)[0]
slope = (species.part_values.position[ind[leftind],1]-old_position[ind[leftind],1])/\
(species.part_values.position[ind[leftind],0]-old_position[ind[leftind],0])
hit = slope*(self.xmin-old_position[ind[leftind],0])+old_position[ind[leftind],1]
hit_ind = numpy.nonzero(numpy.logical_and(self.ymin < hit, hit < self.ymax))[0]
coord_l = numpy.append(self.xmin*numpy.ones_like((hit_ind))[:,None], numpy.append(hit[hit_ind][:,None],\
3*numpy.ones_like((hit_ind)[:,None], dtype = numpy.short), axis = 1), axis = 1)
coord_l = numpy.append(coord_l, species.part_values.spwt[ind[leftind[hit_ind]]][:,None], axis = 1)
vel_l = -species.part_values.velocity[ind[leftind[hit_ind]], 0]
#tan_vel_l = species.part_values.velocity[ind[leftind[hit_ind]], 1]
#cos_l = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Right
rightind = numpy.nonzero(species.part_values.position[ind,0] > self.xmax)[0]
slope = (species.part_values.position[ind[rightind],1]-old_position[ind[rightind],1])/\
(species.part_values.position[ind[rightind],0]-old_position[ind[rightind],0])
hit = slope*(self.xmax-old_position[ind[rightind],0])+old_position[ind[rightind],1]
hit_ind = numpy.nonzero(numpy.logical_and(self.ymin < hit, hit < self.ymax))[0]
coord_r = numpy.append(self.xmax*numpy.ones_like((hit_ind))[:,None], numpy.append(hit[hit_ind][:,None],\
numpy.ones_like((hit_ind)[:,None], dtype = numpy.short), axis = 1), axis = 1)
coord_r = numpy.append(coord_r, species.part_values.spwt[ind[rightind[hit_ind]]][:,None], axis = 1)
vel_r = species.part_values.velocity[ind[rightind[hit_ind]], 0]
#tan_vel_r = species.part_values.velocity[ind[rightind[hit_ind]], 1]
#cos_r = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Top
topind = numpy.nonzero(species.part_values.position[ind,1] > self.ymax)[0]
slope = (species.part_values.position[ind[topind],0]-old_position[ind[topind],0])/\
(species.part_values.position[ind[topind],1]-old_position[ind[topind],1])
hit = slope*(self.ymax-old_position[ind[topind],1])+old_position[ind[topind],0]
hit_ind = numpy.nonzero(numpy.logical_and(self.xmin < hit, hit < self.xmax))[0]
coord_t = numpy.append(hit[hit_ind][:,None], numpy.append(self.ymax*numpy.ones_like((hit_ind))[:,None],\
2*numpy.ones_like((hit_ind)[:,None], dtype = numpy.short), axis = 1), axis = 1)
coord_t = numpy.append(coord_t, species.part_values.spwt[ind[topind[hit_ind]]][:,None], axis = 1)
vel_t = species.part_values.velocity[ind[topind[hit_ind]], 1]
#tan_vel_t = species.part_values.velocity[ind[topind[hit_ind]], 0]
#cos_t = 1/numpy.sqrt(slope[hit_ind]*slope[hit_ind]+1)
#Preparing the arrays that will be returned
coord = numpy.concatenate((coord, coord_l, coord_r, coord_t), axis = 0)
vel = numpy.concatenate((vel, vel_l, vel_r, vel_t), axis = 0)
#tan_vel = numpy.concatenate((tan_vel, tan_vel_l, tan_vel_r, tan_vel_t), axis = 0)
#cos = numpy.concatenate((cos, cos_l, cos_r, cos_t), axis = 0)
#Evaluating that everything goes as expected
assert len(ind) == numpy.shape(coord)[0], "There should not be particles inside the boundaries prev. to this state, or duplicated particles"
#Positions of deleted particles for posterior processing of flux
return {'flux': (coord, vel, tan_vel, cos)}
# +applyParticleReflectiveBoundary(Species species, Species old_species) = Reflects the particles back into the domain.
# old_species refers to the state of species in the previous step. ind are the particles that need to be treated.
def applyParticleReflectiveBoundary(self, species, ind, old_position = None):
delta = 1e-5
if old_position is not None:
#Bottom
botind = numpy.nonzero(old_position[ind,1] < self.ymin)[0]
hit = (species.part_values.position[ind[botind],0]-old_position[ind[botind],0])/\
(species.part_values.position[ind[botind],1]-old_position[ind[botind],1])*\
(self.ymin-old_position[ind[botind],1])+old_position[ind[botind],0]
hit_ind = numpy.nonzero(numpy.logical_and(self.xmin < hit, hit < self.xmax))[0]
species.part_values.position[ind[botind[hit_ind]], 1] = 2*self.ymin - species.part_values.position[ind[botind[hit_ind]],1]-delta
species.part_values.velocity[ind[botind[hit_ind]], 1] *= -1.0
#Left
leftind = numpy.nonzero(old_position[ind,0] < self.xmin)[0]
hit = (species.part_values.position[ind[leftind],1]-old_position[ind[leftind],1])/ \
(species.part_values.position[ind[leftind],0]-old_position[ind[leftind],0])* \
(self.xmin-old_position[ind[leftind],0])+old_position[ind[leftind],1]
hit_ind = numpy.nonzero(numpy.logical_and(self.ymin < hit, hit < self.ymax))[0]
species.part_values.position[ind[leftind[hit_ind]], 0] = 2*self.xmin - species.part_values.position[ind[leftind[hit_ind]],0]-delta
species.part_values.velocity[ind[leftind[hit_ind]], 0] *= -1.0
#Right
rightind = numpy.nonzero(old_position[ind,0] > self.xmax)[0]
hit = (species.part_values.position[ind[rightind],1]-old_position[ind[rightind],1])/ \
(species.part_values.position[ind[rightind],0]-old_position[ind[rightind],0])* \
(self.xmax-old_position[ind[rightind],0])+old_position[ind[rightind],1]
hit_ind = numpy.nonzero(numpy.logical_and(self.ymin < hit, hit < self.ymax))[0]
species.part_values.position[ind[rightind[hit_ind]], 0] = 2*self.xmax - species.part_values.position[ind[rightind[hit_ind]],0]+delta
species.part_values.velocity[ind[rightind[hit_ind]], 0] *= -1.0
#Top
topind = numpy.nonzero(old_position[ind,1] > self.ymax)[0]
hit = (species.part_values.position[ind[topind],0]-old_position[ind[topind],0])/ \
(species.part_values.position[ind[topind],1]-old_position[ind[topind],1])* \
(self.ymax-old_position[ind[topind],1])+old_position[ind[topind],0]
hit_ind = numpy.nonzero(numpy.logical_and(self.xmin < hit, hit < self.xmax))[0]
species.part_values.position[ind[topind[hit_ind]], 1] = 2*self.ymax - species.part_values.position[ind[topind[hit_ind]],1]+delta
species.part_values.velocity[ind[topind[hit_ind]], 1] *= -1.0
# +createDummyBox([ind]location, PIC pic, Species species, [double] delta_n, [double] n_vel, [double] shift_vel) = create the dummy boxes with particles in them.
def createDummyBox(self, location, pic, species, delta_n, n_vel, shift_vel, prec = 1e-5):
#Preparing things for numpy functions use
loc, u_ind = numpy.unique(location, return_index = True)
add_rand = numpy.random.rand(*numpy.shape(loc))
dv = numpy.max(pic.mesh.volumes)
mpf_new = delta_n[u_ind]*(dv-pic.mesh.volumes[loc])/species.spwt+\
species.mesh_values.residuals[loc]+add_rand
mp_new = mpf_new.astype(int)
species.mesh_values.residuals[loc] = mpf_new-mp_new
ind = numpy.arange(len(loc))
index = numpy.repeat(ind, mp_new)
#Setting up positions
pos = pic.mesh.getPosition(loc)[index]
random = numpy.random.rand(*numpy.shape(pos))
random += numpy.where(random == 0, 1e-3, 0)
shift = numpy.where(numpy.abs(pos[:,0]-self.xmin) < prec, random[:,0]/2*pic.mesh.dx, (random[:,0]-0.5)*pic.mesh.dx)
pos[:,0] = pos[:,0] + shift - numpy.where(numpy.abs(pos[:,0]-self.xmax) < prec, random[:,0]/2*pic.mesh.dx, 0)
shift = numpy.where(numpy.abs(pos[:,1]-self.ymin) < prec, random[:,1]/2*pic.mesh.dy, (random[:,1]-0.5)*pic.mesh.dy)
pos[:,1] = pos[:,1] + shift - numpy.where(numpy.abs(pos[:,1]-self.ymax) < prec, random[:,1]/2*pic.mesh.dy, 0)
#Setting up velocities
vel = super().sampleIsotropicVelocity(n_vel[u_ind], mp_new)+shift_vel[index]
#Adding particles
super().addParticles(species, pos, vel)
# +injectParticlesDummyBox([int] location, PIC pic, Field field, Species species, [double] delta_n, [double] n_vel, [double] shift_vel) =
# Inject the particles in location indices by creating dummy boxes around them, creating particles
# inside of them, moving the particles, and then adding the ones that entered into the computational domain.
@Timing
def injectParticlesDummyBox(self, location, part_solver, field, species, delta_n, n_vel, shift_vel):
# Creating temporary species
ghost = Species("temporary species", species.dt, species.q, species.m, species.debye, species.spwt, \
int(species.part_values.max_n/10), species.pos_dim, species.vel_dim, species.mesh_values.nPoints, numpy.asarray([0]))
ghost.mesh_values.residuals = species.mesh_values.residuals
self.createDummyBox(location, part_solver.pic, ghost, delta_n, n_vel, shift_vel)
species.mesh_values.residuals[location] = copy.copy(ghost.mesh_values.residuals[location])
#Preparing variables
np = ghost.part_values.current_n
#Entering particles into the mesh and adjusting them according to motion_solver
old_position = copy.copy(ghost.part_values.position)
ghost.part_values.position[:np,:] += ghost.part_values.velocity[:np,:]*ghost.dt
ind = numpy.flatnonzero(self.checkPositionInBoundary(ghost.part_values.position[:np,:]))
hit = self.applyParticleOpenBoundaryInverse(ghost, ind, old_position = old_position)['flux']
###Test
#np = ghost.part_values.current_n
##Test positioning
#fig = plt.figure(figsize=(8,8))
#plt.scatter(ghost.part_values.position[:np, 0], ghost.part_values.position[:np,1], marker = '.')
#plt.title(self.type+" - "+species.name)
#plt.show()
##Test velocity
#fig = plt.figure(figsize=(8,8))
#datamag = plt.hist(numpy.sqrt(ghost.part_values.velocity[:np,0]*ghost.part_values.velocity[:np,0]+ \
# ghost.part_values.velocity[:np,1]*ghost.part_values.velocity[:np,1]), 81, alpha=0.5, label=species.name)
#plt.title(self.type+" - "+species.name)
#plt.show()
#Leap-frog state
part_solver.initialConfiguration(ghost, field, ind)
#Adding particles
self.addParticles(species, ghost.part_values.position[ind,:], ghost.part_values.velocity[ind,:])
self.updateTrackers(species, len(ind))
#Calculating outgoing flux
part_solver.pic.scatterOutgoingFlux(species, hit)
print("Injected particles: ", len(ind))
print("Total {}".format(species.name),": ", species.part_values.current_n)
# +createDistributionAtBorder([int] location, Motion_Solver part_solver, Species species, [double] delta_n): (([double,double] pos, [int] border), [int] repeats) =
# The function creates particle positions of 'species' along the region denoted by 'location', under a uniform distribution with a surface density 'delta_n', where
# delta_n indicates the density per 'location' node [particle/m^2].
# Return: 'pos' is the numpy array indicating the positions of the new particles, 'border' indicates in which border they are created, and
# 'repeats' indicates for each position, how many particles are expected to be created.
# The tuple (pos, border) is referred to as flux in the program.
@Timing
def createDistributionAtBorder(self, location, part_solver, species, delta_n, prec = 1e-5):
add_rand = numpy.random.rand(len(location))
#This needs to be generalized later
#NOTE: Modified(2021/02/14) with no backward compatibility
local_loc = location_indexes_inv(location, store = False)
mpf_new = delta_n*self.areas[local_loc]
#Treating borders
mpf_new /= numpy.where(numpy.max(part_solver.pic.mesh.volumes)/part_solver.pic.mesh.volumes[location] < 1.5, 2, 1)
#Computing number of particles created
mpf_new = mpf_new/species.spwt+species.mesh_values.residuals[location]+add_rand
mp_new = mpf_new.astype(int)
species.mesh_values.residuals[location] = mpf_new-mp_new
#Assigning positions
pos_1 = numpy.repeat(part_solver.pic.mesh.getPosition(location), mp_new, axis = 0)
random = numpy.random.rand(numpy.shape(pos_1)[0])
random += numpy.where(random == 0, 1e-3, 0)
hit_1 = numpy.repeat(self.directions[local_loc], mp_new)
ind_b = numpy.flatnonzero(hit_1 == 0)
ind_l = numpy.flatnonzero(hit_1 == 3)
ind_r = numpy.flatnonzero(hit_1 == 1)
ind_t = numpy.flatnonzero(hit_1 == 2)
#Bottom
shifts = numpy.where(numpy.abs(pos_1[ind_b,0]-self.xmin) < prec, random[ind_b]*part_solver.pic.mesh.dx/2, (random[ind_b]-0.5)*part_solver.pic.mesh.dx)
shifts -= numpy.where(numpy.abs(pos_1[ind_b,0]-self.xmax) < prec, random[ind_b]*part_solver.pic.mesh.dx/2, 0)
pos_1[ind_b,0] += shifts
#Left
shifts = numpy.where(numpy.abs(pos_1[ind_l,1]-self.ymin) < prec, random[ind_l]*part_solver.pic.mesh.dy/2, (random[ind_l]-0.5)*part_solver.pic.mesh.dy)
shifts -= numpy.where(numpy.abs(pos_1[ind_l,1]-self.ymax) < prec, random[ind_l]*part_solver.pic.mesh.dy/2, 0)
pos_1[ind_l,1] += shifts
#Right
shifts = numpy.where(numpy.abs(pos_1[ind_r,1]-self.ymin) < prec, random[ind_r]*part_solver.pic.mesh.dy/2, (random[ind_r]-0.5)*part_solver.pic.mesh.dy)
shifts -= numpy.where(numpy.abs(pos_1[ind_r,1]-self.ymax) < prec, random[ind_r]*part_solver.pic.mesh.dy/2, 0)
pos_1[ind_r,1] += shifts
#Top
shifts = numpy.where(numpy.abs(pos_1[ind_t,0]-self.xmin) < prec, random[ind_t]*part_solver.pic.mesh.dx/2, (random[ind_t]-0.5)*part_solver.pic.mesh.dx)
shifts -= numpy.where(numpy.abs(pos_1[ind_t,0]-self.xmax) < prec, random[ind_t]*part_solver.pic.mesh.dx/2, 0)
pos_1[ind_t,0] += shifts
repeats = numpy.ones(numpy.shape(hit_1)[0], dtype = numpy.uint8)
return (numpy.append(pos_1, hit_1[:,None], axis = 1),), repeats
# +injectParticlesAtPositions('flux', Motion_Solver part_solver, Field field, Species species, [double] delta_n, [double] n_vel, double delta_pos) =
# The method creates 'delta_n' particles at each entry of 'pos' stored in the parameter 'flux' (See Documentation of 'createDistributionAtBorder').
# The new particles are stored in 'species', shifted 'delta_pos' away from their borders, initiated with 'n_vel' velocities and prepared in time
# according to the method used 'part_solver' for advancing particles.
@Timing
def injectParticlesAtPositions(self, hit, part_solver, field, species, delta_n, n_vel, delta_pos = 1e-5):
sum_particles = numpy.sum(delta_n)
if sum_particles > 0:
#Unfolding
border = numpy.repeat(hit[0][:,2], delta_n)
pos = numpy.repeat(hit[0][:,:2], delta_n, axis = 0)
pos_copy = copy.copy(pos)
#Assigning positions
#NOTE: This part is not necessary but I am including it to be cleaner. It can be deleted for time efficiency.
pos[:,1] += numpy.where(border == 0, -delta_pos, 0)
pos[:,0] += numpy.where(border == 1, delta_pos, 0)
pos[:,1] +=
|
numpy.where(border == 2, delta_pos, 0)
|
numpy.where
|
#!/usr/bin/env python
import numpy as np
import copy
def calc_tof(array, dgr, T, coeff, exact=True, verb=0):
"""Function to compute TOF using the energy span model.
Reproduces results from the AUTOF code (https://doi.org/10.1002/jcc.21669).
Adapted from the AUTOF implementation with contribution by <NAME>."""
coeff = np.array(coeff)
array = np.array(array)
h = 6.62607015e-34
k_b = 1.380649e-23
R = 8.314462618
n_S = array.size
n_TS = np.count_nonzero(coeff)
n_I = np.count_nonzero(coeff == 0)
if verb > 1:
print(f"Number of intermediates taken into account is {n_I}")
print(f"Number of TS taken into account is {n_TS}")
try:
assert array.size == coeff.size
except AssertionError:
print(
f"WARNING: The species number {n_S} does not seem to match the identified intermediates ({n_I}) plus TS ({n_TS})."
)
X_TOF = np.zeros((n_I, 2))
matrix_T_I = np.zeros((n_I, 2))
j = 0
for i in range(n_S):
if coeff[i] == 0:
matrix_T_I[j, 0] = array[i]
if i < n_S - 1:
if coeff[i + 1] == 1:
matrix_T_I[j, 1] = array[i + 1]
if coeff[i + 1] == 0:
if array[i + 1] > array[i]:
matrix_T_I[j, 1] = array[i + 1]
else:
matrix_T_I[j, 1] = array[i]
j += 1
if i == n_S - 1:
if dgr > array[i]:
matrix_T_I[j, 1] = dgr
else:
matrix_T_I[j, 1] = array[i]
if verb > 3:
print(f"From profile {array}, \n the reaction step matrix is: \n{matrix_T_I}")
if exact:
sum_span = 0
for i in range(n_I):
for j in range(n_I):
if i >= j:
sum_span += np.exp(
((matrix_T_I[i, 1] - matrix_T_I[j, 0] - dgr) * 4184) / (R * T)
)
if i < j:
sum_span += np.exp(
((matrix_T_I[i, 1] - matrix_T_I[j, 0]) * 4184) / (R * T)
)
TOF = ((k_b * T) / h) * ((np.exp((-dgr * 4184) / (R * T))) / sum_span)
for i in range(n_I):
sum_e = 0
for j in range(n_I):
if i >= j:
sum_e += np.exp(
((matrix_T_I[i, 1] - matrix_T_I[j, 0] - dgr) * 4184) / (R * T)
)
if i < j:
sum_e += np.exp(
((matrix_T_I[i, 1] - matrix_T_I[j, 0]) * 4184) / (R * T)
)
X_TOF[i, 1] = np.round(sum_e / sum_span, 4)
for j in range(n_I):
sum_e = 0
for i in range(n_I):
if i >= j:
sum_e += np.exp(
((matrix_T_I[i, 1] - matrix_T_I[j, 0] - dgr) * 4184) / (R * T)
)
if i < j:
sum_e += np.exp(
((matrix_T_I[i, 1] - matrix_T_I[j, 0]) * 4184) / (R * T)
)
X_TOF[j, 0] = np.round(sum_e / sum_span, 4)
else:
dE = np.zeros((n_I, n_I))
for i in range(n_I):
for j in range(n_I):
if i >= j:
dE[i, j] = matrix_T_I[i, 1] - matrix_T_I[j, 0]
if i < j:
dE[i, j] = matrix_T_I[i, 1] - matrix_T_I[j, 0] + dgr
Energy_Span =
|
np.amax(dE)
|
numpy.amax
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import pytest
import popart
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import numpy.random as npr
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
import torch_lamb
def compare_against_pytorch(optType, optMaps, batchesPerStep=5, scaled=False):
seed = 1015
npr.seed(seed)
torch.manual_seed(seed)
optkwargs = {}
if optType == "adam":
popartOpt = popart.Adam
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.L2Regularization
optkwargs["scaled_optimizer_state"] = scaled
elif optType == "adamw":
popartOpt = popart.Adam
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.Decay
optkwargs["scaled_optimizer_state"] = scaled
elif optType == "adamax":
popartOpt = popart.Adam
optkwargs["mode"] = popart.AdamMode.AdaMax
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.L2Regularization
optkwargs["scaled_optimizer_state"] = scaled
elif optType == "lamb":
popartOpt = popart.Adam
optkwargs["mode"] = popart.AdamMode.Lamb
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.Decay
optkwargs["scaled_optimizer_state"] = scaled
elif optType == "lambnobias":
popartOpt = popart.Adam
optkwargs["mode"] = popart.AdamMode.LambNoBias
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.Decay
optkwargs["scaled_optimizer_state"] = scaled
elif optType == "adagrad":
popartOpt = popart.Adaptive
optkwargs["mode"] = popart.AdaptiveMode.AdaGrad
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.L2Regularization
elif optType == "rmsprop":
popartOpt = popart.Adaptive
optkwargs["mode"] = popart.AdaptiveMode.RMSProp
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.L2Regularization
elif optType == "centeredrmsprop":
popartOpt = popart.Adaptive
optkwargs["mode"] = popart.AdaptiveMode.CenteredRMSProp
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.L2Regularization
elif optType == "adadelta":
popartOpt = popart.Adaptive
optkwargs["mode"] = popart.AdaptiveMode.AdaDelta
optkwargs["weight_decay_mode"] = popart.WeightDecayMode.L2Regularization
elif optType == "sgd0":
popartOpt = popart.SGD
elif optType == "sgd1":
popartOpt = popart.SGD
optkwargs[
"accumulatorAndMomentum"] = popart.SGDAccumulatorAndMomentum.Combined
elif optType == "sgd2":
popartOpt = popart.SGD
optkwargs[
"accumulatorAndMomentum"] = popart.SGDAccumulatorAndMomentum.Separate
else:
raise "Unknown optType: " + optType
#L1 loss value
lambda1 = 1.0
# tensor dimensions and replications
height = 2
numberOfSteps = len(optMaps)
sampleShape = [height, height]
replicationFactor = 1
accumulationFactor = 1
nVirtualGraphs = 1
samplesPerBatch = 4
divvyFactor = replicationFactor * accumulationFactor
samplesPerMicroBatch = samplesPerBatch // divvyFactor
nIPUs = replicationFactor * nVirtualGraphs
stepDataShape = [batchesPerStep, samplesPerBatch, height, height]
microBatchShape = [samplesPerMicroBatch, height, height]
stepDataInfo = popart.TensorInfo("FLOAT", stepDataShape)
microBatchInfo = popart.TensorInfo("FLOAT", microBatchShape)
#initial weight and input values
w0vals = np.array(npr.randn(height, height), dtype=np.float32)
w1vals = np.array(
|
npr.randn(height, height)
|
numpy.random.randn
|
"""
Internal wave functions
"""
import numpy as np
from scipy import linalg, sparse
from scipy.interpolate import interp1d
from scipy.integrate import solve_bvp
from scipy.optimize import least_squares, leastsq
import pdb
GRAV = 9.81
RHO0 = 1020.
###########
# Wave shape
###########
def gaussian(x, a_0, L_w):
sigma = L_w/4
return -a_0 * np.exp( - (x/sigma)**2. )
def sine(x, a_0, L_w, x0=0.):
k = 2*np.pi/L_w
eta = -a_0/2 - a_0/2 * np.sin(k*x + k*x0 + np.pi/2)
eta[x>x0+L_w/2] = 0.
eta[x<x0-L_w/2] = 0.
return eta
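# Editor's illustrative sketch (not part of the original module): both wave
# shapes return a negative displacement of amplitude a_0, and the sine shape
# is additionally windowed to a single wavelength centred on x0.  The numbers
# below are arbitrary assumptions chosen to be easy to check by hand.
def _demo_wave_shapes():
    x = np.linspace(-200., 200., 401)
    eta_g = gaussian(x, a_0=5., L_w=100.)
    eta_s = sine(x, a_0=5., L_w=100.)
    assert np.isclose(eta_g.min(), -5.)
    assert np.all(eta_s[np.abs(x) > 50.] == 0.)
    return eta_g, eta_s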
def wave_eta(x, a_0, c_n, L_w, wavefunc=gaussian, **kwargs):
"""
Initial gaussian wave
"""
#return -a_0 *c_n* np.exp( - (x/L_w)**2. )
return wavefunc(x, a_0, L_w, **kwargs)
def wave_init(x, rhoz, dz, d, a_0, L_w, mode=0, wavefunc=gaussian, **kwargs):
"""
Initialise a wavefield
"""
phi, cn, drho_dz = iwave_modes(rhoz, dz, d)
#drho_dz = np.gradient(rhoz, -dz)
eta = wave_eta(x, a_0, np.real(cn[mode]), L_w, wavefunc=wavefunc, **kwargs)
phi_n = phi[:,mode].squeeze()
phi_n /= np.abs(phi_n).max()
phi_n *= np.sign(phi_n[1])
rho_pr = eta*drho_dz[:,np.newaxis]*phi_n[:,np.newaxis]
return rhoz[:,np.newaxis] - rho_pr, phi_n
def wave_init_phi(x, rhoz, drho_dz, phi_n, cn, z, d, a_0, L_w, mode=0):
"""
Proper way to initialize the wavefield
"""
#phi, dphi, cn = iwave_modes(rhoz, dz, d)
Z = z[...,np.newaxis]
#drho_dz = np.gradient(rhoz, -dz)
eta = wave_eta(x, a_0, cn, L_w)
#phi_n = phi[:,mode].squeeze()
phi = phi_n / np.abs(phi_n).max()
phi *= np.sign(phi_n.sum())
#rho_pr = eta*drho_dz[:,np.newaxis]*phi[:,np.newaxis]
eta_pr = eta*phi[:,np.newaxis]
#print z.shape, rhoz.shape
# Interpolation function
Frho = interp1d(z, rhoz, axis=0)
eta = z[:,np.newaxis] - eta_pr
eta[eta>0.] = 0.
eta[eta<-d] = -d
# Find rho by interpolating eta
return Frho(eta), phi
#return rhoz[:,np.newaxis] - rho_pr, phi
#####
# Nondimensional parameters (Lamb definitions)
#####
def calc_alpha(phi, c, N2, dz):
"""
Holloway et al 1999 nonlinearity parameter
"""
phi_z = np.gradient(phi,-np.abs(dz))
num = 3*c*np.trapz( phi_z**3., dx=dz)
den = 2*np.trapz( phi_z**2., dx=dz)
return num/den
def calc_r20(phi, c, N2, dz):
phi_z = np.gradient(phi,-np.abs(dz))
S_20 = calc_S20(phi, c, N2, dz)
num = c*np.trapz( phi*S_20, dx=dz)
den = 2*np.trapz( phi_z**2., dx=dz)
return num/den
def calc_alpha_wshear(phi, c, U, dz):
"""
alpha with shear (see Stastna and Lamb 2002)
"""
# Stastna and Lamb defn
E = c/(c-U) * phi
E_z = np.gradient(E, -np.abs(dz))
num = 3*np.trapz((c-U)**2. * E_z**3., dx = np.abs(dz))
den = 2*np.trapz((c-U) * E_z**2., dx = np.abs(dz))
return num/den
def calc_alpha_wshear_liu(phi, c, U, dz):
"""
alpha with shear (see Liu et al 1988)
"""
# Liu et al definition
phi_z = np.gradient(phi, -
|
np.abs(dz)
|
numpy.abs
|
"""
This file contains the core algorithms for
* the forward mode (univariate Taylor polynomial arithmetic)
* the reverse mode
The functions operate solely on numpy data structures.
Rationale
---------
If speed is an issue, one can rather easily replace
the function implementations by C or Fortran functions.
"""
import math
import functools
import numpy
from numpy.lib.stride_tricks import as_strided, broadcast_arrays
try:
import scipy.linalg
import scipy.special
except ImportError:
pass
try:
import pytpcore
except ImportError:
pytpcore = None
from algopy import nthderiv
def _plus_const(x_data, c, out=None):
"""
Constants are only added to the d=0 slice of the data array.
A function like this is not so useful for multiplication by a constant,
because UTPM multiplication by a constant scales the entire data array
rather than acting on only the d=0 slice.
"""
if out is None:
y_data = numpy.copy(x_data)
else:
y_data = out
y_data[0] += c
return y_data
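# Editor's illustrative sketch (assumption, not part of the original module):
# adding a constant only shifts the zeroth Taylor coefficient; the higher
# order slices are copied unchanged.
def _demo_plus_const():
    x_data = numpy.array([[1.0], [2.0], [3.0]])   # shape (D, P) = (3, 1)
    y_data = _plus_const(x_data, 5.0)
    assert numpy.allclose(y_data[:, 0], [6.0, 2.0, 3.0])
    return y_data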
def _eval_slow_generic(f, x_data, out=None):
"""
This is related to summations associated with the name '<NAME>.'
@param f: f(X, out=None, n=0) computes nth derivative of f at X
@param x_data: something about algorithmic differentiation
@param out: something about algorithmic differentiation
@param return: something about algorithmic differentiation
"""
#FIXME: Improve or replace this function.
# It is intended to help with naive implementations
# of truncated taylor expansions
# of functions of a low degree polynomial,
# when the nth derivatives of the function of interest
# can be computed more or less directly.
y_data = nthderiv.np_filled_like(x_data, 0, out=out)
D, P = x_data.shape[:2]
# base point: d = 0
y_data[0] = f(x_data[0])
# higher order coefficients: d > 0
for d in range(1, D):
# Accumulate coefficients of truncated expansions of powers
# of the polynomial.
if d == 1:
accum = x_data[1:].copy()
else:
for i in range(D-2, 0, -1):
accum[i] = numpy.sum(accum[:i] * x_data[i:0:-1], axis=0)
accum[0] = 0.
# Add the contribution of this summation term.
y_data[1:] += f(x_data[0], n=d) * accum / float(math.factorial(d))
return y_data
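# Editor's illustrative sketch (assumption): feeding _eval_slow_generic a
# callable that returns the n-th derivative directly reproduces the Taylor
# coefficients of the composition.  Here f is sin, whose n-th derivative is
# sin(x + n*pi/2), and u = x0 + t, so the coefficients are the derivatives
# of sin at x0 divided by d!.
def _demo_eval_slow_generic(x0=0.7):
    def f(x, out=None, n=0):
        return numpy.sin(x + n * numpy.pi / 2.0)
    x_data = numpy.zeros((4, 1))
    x_data[0] = x0
    x_data[1] = 1.0
    y_data = _eval_slow_generic(f, x_data)
    expected = numpy.array([numpy.sin(x0), numpy.cos(x0),
                            -numpy.sin(x0) / 2.0, -numpy.cos(x0) / 6.0])
    assert numpy.allclose(y_data[:, 0], expected)
    return y_data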
def _black_f_white_fprime(f, fprime_data, x_data, out=None):
"""
The function evaluation is a black box, but the derivative is compound.
@param f: computes the scalar function directly
@param fprime_data: the array associated with the evaluated derivative
@param x_data: something about algorithmic differentiation
@param out: something about algorithmic differentiation
@param return: something about algorithmic differentiation
"""
y_data = nthderiv.np_filled_like(x_data, 0, out=out)
D, P = x_data.shape[:2]
# Do the direct computation efficiently (e.g. using the C implementation of erf).
y_data[0] = f(x_data[0])
# Compute the truncated series coefficients using discrete convolution.
#FIXME: one of these two loops can be vectorized
for d in range(1, D):
for c in range(d):
y_data[d] += fprime_data[d-1-c] * x_data[c+1] * (c+1)
y_data[d] /= d
return y_data
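# Editor's illustrative sketch (assumption): the black-box f is numpy.sin and
# the white-box derivative data is the Taylor expansion of cos(x0 + t); the
# convolution then recovers the Taylor coefficients of sin(x0 + t).
def _demo_black_f_white_fprime(x0=0.4):
    x_data = numpy.zeros((4, 1))
    x_data[0] = x0
    x_data[1] = 1.0
    fprime_data = numpy.array([[numpy.cos(x0)], [-numpy.sin(x0)],
                               [-numpy.cos(x0) / 2.0], [numpy.sin(x0) / 6.0]])
    y_data = _black_f_white_fprime(numpy.sin, fprime_data, x_data)
    expected = numpy.array([numpy.sin(x0), numpy.cos(x0),
                            -numpy.sin(x0) / 2.0, -numpy.cos(x0) / 6.0])
    assert numpy.allclose(y_data[:, 0], expected)
    return y_data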
def _taylor_polynomials_of_ode_solutions(
a_data, b_data, c_data,
u_data, v_data,
):
"""
This is a general O(D^2) algorithm for functions that are ODE solutions.
It is an attempt to implement Proposition 13.1
of "Evaluating Derivatives" by Griewank and Walther (2008).
The function must satisfy the identity
b(u) f'(u) - a(u) f(u) = c(u)
where a, b and c are already represented by their Taylor expansions.
Also u is represented as a Taylor expansion, and so is v.
But we are only given the first term of v, which is the recursion base.
In this function we use the notation from the book mentioned above.
"""
# define the number of terms allowed in the truncated series
D = u_data.shape[0]
d = D-1
# these arrays have elements that are scaled slightly differently
u_tilde_data = u_data.copy()
v_tilde_data = v_data.copy()
for j in range(1, D):
u_tilde_data[j] *= j
v_tilde_data[j] *= j
# this is just convenient temporary storage which is not so important
s = numpy.zeros_like(u_data)
# on the other hand the e_data is very important for recursion
e_data = numpy.zeros_like(u_data)
# do the dynamic programming to fill the v_data array
for k in range(D):
if k > 0:
for j in range(1, k+1):
s[k] += (c_data[k-j] + e_data[k-j]) * u_tilde_data[j]
for j in range(1, k):
s[k] -= b_data[k-j] * v_tilde_data[j]
v_tilde_data[k] = s[k] / b_data[0]
v_data[k] = v_tilde_data[k] / k
if k < d:
for j in range(k+1):
e_data[k] += a_data[j] * v_data[k-j]
return v_data
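# Editor's illustrative sketch (assumption): f(u) = exp(u) satisfies
# b*f' - a*f = c with a = 1, b = 1, c = 0, so the recursion above should
# reproduce exp(x0)/d! for the polynomial u = x0 + t.
def _demo_ode_taylor_exp(x0=0.3, D=4):
    u_data = numpy.zeros((D, 1))
    u_data[0] = x0
    u_data[1] = 1.0
    a_data = numpy.zeros((D, 1)); a_data[0] = 1.0
    b_data = numpy.zeros((D, 1)); b_data[0] = 1.0
    c_data = numpy.zeros((D, 1))
    v_data = numpy.zeros((D, 1)); v_data[0] = numpy.exp(x0)
    _taylor_polynomials_of_ode_solutions(a_data, b_data, c_data, u_data, v_data)
    expected = numpy.array([numpy.exp(x0) / math.factorial(d) for d in range(D)])
    assert numpy.allclose(v_data[:, 0], expected)
    return v_data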
def vdot(x,y, z = None):
"""
vectorized dot
z = vdot(x,y)
Rationale:
given two iterable containers (list, array, ...) x and y
this function computes::
z[i] = numpy.dot(x[i],y[i])
if z is None, this function allocates the necessary memory
Warning: the naming is inconsistent with numpy.vdot
Warning: this is a preliminary version that is likely to be changed
"""
x_shp = numpy.shape(x)
y_shp = numpy.shape(y)
if x_shp[-1] != y_shp[-2]:
raise ValueError('got x.shape = %s and y.shape = %s'%(str(x_shp),str(y_shp)))
if numpy.ndim(x) == 3:
P,N,M = x_shp
P,M,K = y_shp
retval = numpy.zeros((P,N,K))
for p in range(P):
retval[p,:,:] = numpy.dot(x[p,:,:], y[p,:,:])
return retval
elif numpy.ndim(x) == 4:
D,P,N,M = x_shp
D,P,M,K = y_shp
retval = numpy.zeros((D,P,N,K))
for d in range(D):
for p in range(P):
retval[d,p,:,:] = numpy.dot(x[d,p,:,:], y[d,p,:,:])
return retval
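# Editor's illustrative sketch (assumption): for 3D inputs of shape (P,N,M)
# and (P,M,K), vdot contracts each slice independently, which matches
# numpy.einsum('pnm,pmk->pnk', x, y).  Note that the optional z argument is
# currently ignored by the implementation above.
def _demo_vdot():
    x = numpy.random.rand(2, 3, 4)
    y = numpy.random.rand(2, 4, 5)
    z = vdot(x, y)
    assert numpy.allclose(z, numpy.einsum('pnm,pmk->pnk', x, y))
    return z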
def truncated_triple_dot(X,Y,Z, D):
"""
computes d^D/dt^D ( [X]_D [Y]_D [Z]_D) with t set to zero after differentiation
X,Y,Z are (DT,P,N,M) arrays s.t. the dimensions match to compute dot(X[d,p,:,:], dot(Y[d,p,:,:], Z[d,p,:,:]))
"""
import algopy.exact_interpolation
noP = False
if len(X.shape) == 3:
noP = True
DT,NX,MX = X.shape
X = X.reshape((DT,1,NX,MX))
if len(Y.shape) == 3:
noP = True
DT,NY,MY = Y.shape
Y = Y.reshape((DT,1,NY,MY))
if len(Z.shape) == 3:
noP = True
DT,NZ,MZ = Z.shape
Z = Z.reshape((DT,1,NZ,MZ))
DT,P,NX,MX = X.shape
DT,P,NZ,MZ = Z.shape
multi_indices = algopy.exact_interpolation.generate_multi_indices(3,D)
retval = numpy.zeros((P,NX,MZ))
for mi in multi_indices:
for p in range(P):
if mi[0] == D or mi[1] == D or mi[2] == D:
continue
retval[p] += numpy.dot(X[mi[0],p,:,:], numpy.dot(Y[mi[1],p,:,:], Z[mi[2],p,:,:]))
if noP == False:
return retval
else:
return retval[0]
def broadcast_arrays_shape(x_shp,y_shp):
if len(x_shp) < len(y_shp):
tmp = x_shp
x_shp = y_shp
y_shp = tmp
z_shp = numpy.array(x_shp,dtype=int)
for l in range(1,len(y_shp)-1):
if z_shp[-l] == 1: z_shp[-l] = y_shp[-l]
elif z_shp[-l] != 1 and y_shp[-l] != 1 and z_shp[-l] != y_shp[-l]:
raise ValueError('cannot broadcast arrays')
return z_shp
class RawAlgorithmsMixIn:
@classmethod
def _broadcast_arrays(cls, x_data, y_data):
""" UTPM equivalent of numpy.broadcast_arrays """
# transpose arrays s.t. numpy.broadcast can be used
Lx = len(x_data.shape)
Ly = len(y_data.shape)
x_data = x_data.transpose( tuple(range(2,Lx)) + (0,1))
y_data = y_data.transpose( tuple(range(2,Ly)) + (0,1))
# broadcast arrays
x_data, y_data = broadcast_arrays(x_data, y_data)
# transpose into the original format
Lx = len(x_data.shape)
Ly = len(y_data.shape)
x_data = x_data.transpose( (Lx-2, Lx-1) + tuple(range(Lx-2)) )
y_data = y_data.transpose( (Ly-2, Ly-1) + tuple(range(Lx-2)) )
return x_data, y_data
@classmethod
def _mul(cls, x_data, y_data, out=None):
"""
z = x*y
"""
if numpy.shape(x_data) != numpy.shape(y_data):
raise NotImplementedError
D, P = x_data.shape[:2]
#FIXME: there is a memoryview and buffer contiguity checking error
# which may or may not be caused by a bug in numpy or cython.
if pytpcore and all(s > 1 for s in x_data.shape):
# tp_mul is not careful about aliasing
z_data = numpy.empty_like(x_data)
x_data_reshaped = x_data.reshape((D, -1))
y_data_reshaped = y_data.reshape((D, -1))
z_data_reshaped = z_data.reshape((D, -1))
pytpcore.tp_mul(x_data_reshaped, y_data_reshaped, z_data_reshaped)
if out is not None:
out[...] = z_data_reshaped.reshape((z_data.shape))
return out
else:
return z_data
else:
# numpy.sum is careful about aliasing so we can use out=z_data
if out is None:
z_data = numpy.empty_like(x_data)
else:
z_data = out
for d in range(D)[::-1]:
numpy.sum(
x_data[:d+1,:,...] * y_data[d::-1,:,...],
axis=0,
out = z_data[d,:,...])
return z_data
@classmethod
def _minimum(cls, x_data, y_data, out=None):
if x_data.shape != y_data.shape:
raise NotImplementedError(
'algopy broadcasting is not implemented for this function')
D = x_data.shape[0]
xmask = numpy.less_equal(x_data[0], y_data[0])
ymask = 1 - xmask
z_data = numpy.empty_like(x_data)
for d in range(D):
numpy.add(xmask * x_data[d], ymask * y_data[d], out=z_data[d])
if out is not None:
out[...] = z_data[...]
return out
else:
return z_data
@classmethod
def _maximum(cls, x_data, y_data, out=None):
if x_data.shape != y_data.shape:
raise NotImplementedError(
'algopy broadcasting is not implemented for this function')
D = x_data.shape[0]
xmask = numpy.greater_equal(x_data[0], y_data[0])
ymask = 1 - xmask
z_data = numpy.empty_like(x_data)
for d in range(D):
numpy.add(xmask * x_data[d], ymask * y_data[d], out=z_data[d])
if out is not None:
out[...] = z_data[...]
return out
else:
return z_data
@classmethod
def _amul(cls, x_data, y_data, out = None):
"""
z += x*y
"""
z_data = out
if out is None:
raise NotImplementedError
(D,P) = z_data.shape[:2]
for d in range(D):
z_data[d,:,...] += numpy.sum(x_data[:d+1,:,...] * y_data[d::-1,:,...], axis=0)
@classmethod
def _itruediv(cls, z_data, x_data):
(D,P) = z_data.shape[:2]
tmp_data = z_data.copy()
for d in range(D):
tmp_data[d,:,...] = 1./ x_data[0,:,...] * ( z_data[d,:,...] - numpy.sum(tmp_data[:d,:,...] * x_data[d:0:-1,:,...], axis=0))
z_data[...] = tmp_data[...]
@classmethod
def _truediv(cls, x_data, y_data, out = None):
"""
z = x/y
"""
if out is None:
raise NotImplementedError
z_data = numpy.empty_like(out)
(D,P) = z_data.shape[:2]
for d in range(D):
z_data[d,:,...] = 1./ y_data[0,:,...] * ( x_data[d,:,...] - numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...], axis=0))
out[...] = z_data[...]
return out
@classmethod
def _reciprocal(cls, y_data, out=None):
"""
z = 1/y
"""
#FIXME: this function could use some attention;
# it was copypasted from div
z_data = numpy.empty_like(y_data)
D = y_data.shape[0]
if pytpcore:
y_data_reshaped = y_data.reshape((D, -1))
z_data_reshaped = z_data.reshape((D, -1))
pytpcore.tp_reciprocal(y_data_reshaped, z_data_reshaped)
else:
for d in range(D):
if d == 0:
z_data[d,:,...] = 1./ y_data[0,:,...] * ( 1 - numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...], axis=0))
else:
z_data[d,:,...] = 1./ y_data[0,:,...] * ( 0 - numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...], axis=0))
if out is not None:
out[...] = z_data[...]
return out
else:
return z_data
@classmethod
def _pb_reciprocal(cls, ybar_data, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
#FIXME: this is probably dumb
tmp = -cls._reciprocal(cls._square(x_data))
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _floordiv(cls, x_data, y_data, out = None):
"""
z = x // y
use L'Hospital's rule when leading coefficients of y_data are zero
"""
z_data = out
if out is None:
raise NotImplementedError
(D,P) = z_data.shape[:2]
x_data = x_data.copy()
y_data = y_data.copy()
#print x_data
#print y_data
# left shifting x_data and y_data if necessary
mask = Ellipsis
while True:
mask = numpy.where( abs(y_data[0, mask]) <= 1e-8)
if len(mask[0]) == 0:
break
elif len(mask) == 1:
mask = mask[0]
x_data[:D-1, mask] = x_data[1:, mask]
x_data[D-1, mask] = 0.
y_data[:D-1, mask] = y_data[1:, mask]
y_data[D-1, mask] = 0.
for d in range(D):
z_data[d,:,...] = 1./ y_data[0,:,...] * \
( x_data[d,:,...]
- numpy.sum(z_data[:d,:,...] * y_data[d:0:-1,:,...],
axis=0)
)
@classmethod
def _pow_real(cls, x_data, r, out = None):
""" y = x**r, where r is scalar """
y_data = out
if out is None:
raise NotImplementedError
(D,P) = y_data.shape[:2]
if type(r) == int and r >= 0:
if r == 0:
y_data[...] = 0.
y_data[0, ...] = 1.
return y_data
elif r == 1:
y_data[...] = x_data[...]
return y_data
elif r == 2:
return cls._square(x_data, out=y_data)
elif r >= 3:
y_data[...] = x_data[...]
for nr in range(r-1):
cls._mul(x_data, y_data, y_data)
return y_data
else:
raise NotImplementedError("power to %d is not implemented" % r)
y_data[0] = x_data[0]**r
for d in range(1,D):
y_data[d] = r * numpy.sum([y_data[d-k] * k * x_data[k] for k in range(1,d+1)], axis = 0) - \
numpy.sum([ x_data[d-k] * k * y_data[k] for k in range(1,d)], axis = 0)
y_data[d] /= x_data[0]
y_data[d] /= d
@classmethod
def _pb_pow_real(cls, ybar_data, x_data, r, y_data, out = None):
""" pullback function of y = pow(x,r) """
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
(D,P) = y_data.shape[:2]
# if r == 0:
# raise NotImplementedError('x**0 is special and has not been implemented')
# if type(r) == int:
# if r == 2:
# print 'r=',r
# print 'x_data=',x_data
# print 'y_data=',y_data
# print 'xbar_data=',xbar_data
# print 'ybar_data=',ybar_data
if type(r) == int:
if r > 0:
tmp = numpy.zeros_like(xbar_data)
cls._pow_real(x_data, r - 1, out = tmp)
tmp *= r
cls._mul(ybar_data, tmp, tmp)
xbar_data += tmp
else:
tmp = numpy.zeros_like(xbar_data)
cls._truediv(y_data, x_data, tmp)
tmp[...] = numpy.nan_to_num(tmp)
cls._mul(ybar_data, tmp, tmp)
tmp *= r
xbar_data += tmp
# print 'xbar_data=',xbar_data
@classmethod
def _max(cls, x_data, axis = None, out = None):
if out is None:
raise NotImplementedError('should implement that')
x_shp = x_data.shape
D,P = x_shp[:2]
shp = x_shp[2:]
if len(shp) > 1:
raise NotImplementedError('should implement that')
for p in range(P):
out[:,p] = x_data[:,p,numpy.argmax(x_data[0,p])]
@classmethod
def _argmax(cls, a_data, axis = None):
if axis is not None:
raise NotImplementedError('should implement that')
a_shp = a_data.shape
D,P = a_shp[:2]
return numpy.argmax(a_data[0].reshape((P,numpy.prod(a_shp[2:]))), axis = 1)
@classmethod
def _absolute(cls, x_data, out=None):
"""
z = |x|
"""
if out is None:
z_data = numpy.empty_like(x_data)
else:
z_data = out
D = x_data.shape[0]
if D > 1:
x_data_sign = numpy.sign(x_data[0])
for d in range(D):
if d == 0:
numpy.absolute(x_data[d], out=z_data[d])
else:
numpy.multiply(x_data[d], x_data_sign, out=z_data[d])
return z_data
@classmethod
def _pb_absolute(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = numpy.empty_like(x_data)
D = x_data.shape[0]
for d in range(D):
if d == 0:
numpy.sign(x_data[d], out=fprime_data[d])
else:
fprime_data[d].fill(0)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _negative(cls, x_data, out=None):
"""
z = -x
"""
return numpy.multiply(x_data, -1, out=out)
@classmethod
def _pb_negative(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = numpy.empty_like(x_data)
fprime_data[0].fill(-1)
fprime_data[1:].fill(0)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _square(cls, x_data, out=None):
"""
z = x*x
This can theoretically be twice as efficient as mul(x, x).
"""
if out is None:
z_data = numpy.empty_like(x_data)
else:
z_data = out
tmp = numpy.zeros_like(x_data)
D, P = x_data.shape[:2]
for d in range(D):
d_half = (d+1) // 2
if d:
AB = x_data[:d_half, :, ...] * x_data[d:d-d_half:-1, :, ...]
numpy.sum(AB * 2, axis=0, out=tmp[d, :, ...])
if (d+1) % 2 == 1:
tmp[d, :, ...] += numpy.square(x_data[d_half, :, ...])
z_data[...] = tmp[...]
return z_data
@classmethod
def _pb_square(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
cls._amul(ybar_data, x_data*2, out=out)
@classmethod
def _sqrt(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data = numpy.zeros_like(x_data)
D,P = x_data.shape[:2]
y_data[0] = numpy.sqrt(x_data[0])
for k in range(1,D):
y_data[k] = 1./(2.*y_data[0]) * ( x_data[k] - numpy.sum( y_data[1:k] * y_data[k-1:0:-1], axis=0))
out[...] = y_data[...]
return out
@classmethod
def _pb_sqrt(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
tmp = xbar_data.copy()
cls._truediv(ybar_data, y_data, tmp)
tmp /= 2.
xbar_data += tmp
return xbar_data
@classmethod
def _exp(cls, x_data, out=None):
if out is None:
y_data = numpy.empty_like(x_data)
else:
y_data = out
D,P = x_data.shape[:2]
if pytpcore:
x_data_reshaped = x_data.reshape((D, -1))
y_data_reshaped = y_data.reshape((D, -1))
tmp = numpy.empty_like(x_data_reshaped)
pytpcore.tp_exp(x_data_reshaped, tmp, y_data_reshaped)
else:
y_data[0] = numpy.exp(x_data[0])
xtctilde = x_data[1:].copy()
for d in range(1,D):
xtctilde[d-1] *= d
for d in range(1, D):
y_data[d] = numpy.sum(y_data[:d][::-1]*xtctilde[:d], axis=0)/d
return y_data
@classmethod
def _pb_exp(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
cls._amul(ybar_data, y_data, xbar_data)
@classmethod
def _expm1(cls, x_data, out=None):
fprime_data = cls._exp(x_data)
return _black_f_white_fprime(
nthderiv.expm1, fprime_data, x_data, out=out)
@classmethod
def _pb_expm1(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = cls._exp(x_data)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _logit(cls, x_data, out=None):
fprime_data = cls._reciprocal(x_data - cls._square(x_data))
return _black_f_white_fprime(
scipy.special.logit, fprime_data, x_data, out=out)
@classmethod
def _pb_logit(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = cls._reciprocal(x_data - cls._square(x_data))
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _expit(cls, x_data, out=None):
b_data = cls._reciprocal(_plus_const(cls._exp(x_data), 1))
fprime_data = b_data - cls._square(b_data)
return _black_f_white_fprime(
scipy.special.expit, fprime_data, x_data, out=out)
@classmethod
def _pb_expit(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
b_data = cls._reciprocal(_plus_const(cls._exp(x_data), 1))
fprime_data = b_data - cls._square(b_data)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _sign(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data = out
D, P = x_data.shape[:2]
y_data[0] = numpy.sign(x_data[0])
y_data[1:].fill(0)
return y_data
@classmethod
def _pb_sign(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
tmp = numpy.zeros_like(x_data)
cls._amul(ybar_data, tmp, xbar_data)
@classmethod
def _botched_clip(cls, a_min, a_max, x_data, out= None):
"""
In this function the args are permuted w.r.t numpy.
"""
if out is None:
raise NotImplementedError('should implement that')
y_data = out
D, P = x_data.shape[:2]
y_data[0] = numpy.clip(x_data[0], a_min, a_max)
mask = numpy.logical_and(
numpy.less_equal(x_data[0], a_max),
numpy.greater_equal(x_data[0], a_min))
for d in range(1, D):
y_data[d] *= mask
return y_data
@classmethod
def _pb_botched_clip(
cls, ybar_data, a_min, a_max, x_data, y_data, out=None):
"""
In this function the args are permuted w.r.t numpy.
"""
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
tmp = numpy.zeros_like(x_data)
numpy.multiply(
numpy.less_equal(x_data[0], a_max),
numpy.greater_equal(x_data[0], a_min),
out=tmp[0])
cls._amul(ybar_data, tmp, xbar_data)
@classmethod
def _log(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data = numpy.empty_like(x_data)
D,P = x_data.shape[:2]
# base point: d = 0
y_data[0] = numpy.log(x_data[0])
# higher order coefficients: d > 0
for d in range(1,D):
y_data[d] = (x_data[d]*d - numpy.sum(x_data[1:d][::-1] * y_data[1:d], axis=0))
y_data[d] /= x_data[0]
for d in range(1,D):
y_data[d] /= d
out[...] = y_data[...]
return out
@classmethod
def _pb_log(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
xbar_data += cls._truediv(ybar_data, x_data, numpy.empty_like(xbar_data))
return xbar_data
@classmethod
def _log1p(cls, x_data, out=None):
fprime_data = cls._reciprocal(_plus_const(x_data, 1))
return _black_f_white_fprime(
numpy.log1p, fprime_data, x_data, out=out)
@classmethod
def _pb_log1p(cls, ybar_data, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
xbar_data += cls._truediv(
ybar_data, _plus_const(x_data, 1), numpy.empty_like(xbar_data))
return xbar_data
@classmethod
def _dawsn(cls, x_data, out=None):
if out is None:
v_data = numpy.empty_like(x_data)
else:
v_data = out
# construct the u and v arrays
u_data = x_data
v_data[0, ...] = scipy.special.dawsn(u_data[0])
# construct values like in Table (13.2) of "Evaluating Derivatives"
a_data = -2 * u_data.copy()
b_data = _plus_const(numpy.zeros_like(u_data), 1)
c_data = _plus_const(numpy.zeros_like(u_data), 1)
# fill the rest of the v_data
_taylor_polynomials_of_ode_solutions(
a_data, b_data, c_data,
u_data, v_data)
return v_data
@classmethod
def _pb_dawsn(cls, ybar_data, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = _plus_const(-2*cls._mul(x_data, cls._dawsn(x_data)), 1)
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _tansec2(cls, x_data, out = None):
""" computes tan and sec in Taylor arithmetic"""
if out is None:
raise NotImplementedError('should implement that')
y_data, z_data = out
D,P = x_data.shape[:2]
# base point: d = 0
y_data[0] = numpy.tan(x_data[0])
z_data[0] = 1./(numpy.cos(x_data[0])*numpy.cos(x_data[0]))
# higher order coefficients: d > 0
for d in range(1,D):
y_data[d] = numpy.sum([k*x_data[k] * z_data[d-k] for k in range(1,d+1)], axis = 0)/d
z_data[d] = 2.*numpy.sum([k*y_data[k] * y_data[d-k] for k in range(1,d+1)], axis = 0)/d
return y_data, z_data
@classmethod
def _pb_tansec(cls, ybar_data, zbar_data, x_data, y_data, z_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
cls._mul(2*zbar_data, y_data, y_data)
y_data += ybar_data
cls._amul(y_data, z_data, xbar_data)
@classmethod
def _sincos(cls, x_data, out = None):
""" computes sin and cos in Taylor arithmetic"""
if out is None:
raise NotImplementedError('should implement that')
s_data,c_data = out
D,P = x_data.shape[:2]
# base point: d = 0
s_data[0] = numpy.sin(x_data[0])
c_data[0] = numpy.cos(x_data[0])
# higher order coefficients: d > 0
for d in range(1,D):
s_data[d] = numpy.sum([k*x_data[k] * c_data[d-k] for k in range(1,d+1)], axis = 0)/d
c_data[d] = numpy.sum([-k*x_data[k] * s_data[d-k] for k in range(1,d+1)], axis = 0)/d
return s_data, c_data
@classmethod
def _pb_sincos(cls, sbar_data, cbar_data, x_data, s_data, c_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
cls._amul(sbar_data, c_data, xbar_data)
cls._amul(cbar_data, -s_data, xbar_data)
@classmethod
def _arcsin(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data,z_data = out
D,P = x_data.shape[:2]
# base point: d = 0
y_data[0] = numpy.arcsin(x_data[0])
z_data[0] = numpy.cos(y_data[0])
# higher order coefficients: d > 0
for d in range(1,D):
y_data[d] = (d*x_data[d] - numpy.sum([k*y_data[k] * z_data[d-k] for k in range(1,d)], axis = 0))/(z_data[0]*d)
z_data[d] = -numpy.sum([k*y_data[k] * x_data[d-k] for k in range(1,d+1)], axis = 0)/d
return y_data, z_data
@classmethod
def _arccos(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data,z_data = out
D,P = x_data.shape[:2]
# base point: d = 0
y_data[0] = numpy.arccos(x_data[0])
z_data[0] = -numpy.sin(y_data[0])
# higher order coefficients: d > 0
for d in range(1,D):
y_data[d] = (d*x_data[d] - numpy.sum([k*y_data[k] * z_data[d-k] for k in range(1,d)], axis = 0))/(z_data[0]*d)
z_data[d] = -numpy.sum([k*y_data[k] * x_data[d-k] for k in range(1,d+1)], axis = 0)/d
return y_data, z_data
@classmethod
def _arctan(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data,z_data = out
D,P = x_data.shape[:2]
# base point: d = 0
y_data[0] = numpy.arctan(x_data[0])
z_data[0] = 1 + x_data[0] * x_data[0]
# higher order coefficients: d > 0
for d in range(1,D):
y_data[d] = (d*x_data[d] - numpy.sum([k*y_data[k] * z_data[d-k] for k in range(1,d)], axis = 0))/(z_data[0]*d)
z_data[d] = 2* numpy.sum([k*x_data[k] * x_data[d-k] for k in range(1,d+1)], axis = 0)/d
return y_data, z_data
@classmethod
def _sinhcosh(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
s_data,c_data = out
D,P = x_data.shape[:2]
# base point: d = 0
s_data[0] = numpy.sinh(x_data[0])
c_data[0] = numpy.cosh(x_data[0])
# higher order coefficients: d > 0
for d in range(1,D):
s_data[d] = (numpy.sum([k*x_data[k] * c_data[d-k] for k in range(1,d+1)], axis = 0))/d
c_data[d] = (numpy.sum([k*x_data[k] * s_data[d-k] for k in range(1,d+1)], axis = 0))/d
return s_data, c_data
@classmethod
def _tanhsech2(cls, x_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
y_data,z_data = out
D,P = x_data.shape[:2]
# base point: d = 0
y_data[0] = numpy.tanh(x_data[0])
z_data[0] = 1-y_data[0]*y_data[0]
# higher order coefficients: d > 0
for d in range(1,D):
y_data[d] = (numpy.sum([k*x_data[k] * z_data[d-k] for k in range(1,d+1)], axis = 0))/d
z_data[d] = -2*(numpy.sum([k*y_data[k] * y_data[d-k] for k in range(1,d+1)], axis = 0))/d
return y_data, z_data
@classmethod
def _erf(cls, x_data, out=None):
fprime_data = (2. / math.sqrt(math.pi)) * cls._exp(-cls._square(x_data))
return _black_f_white_fprime(
nthderiv.erf, fprime_data, x_data, out=out)
@classmethod
def _pb_erf(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = (2. / math.sqrt(math.pi)) * cls._exp(-cls._square(x_data))
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _erfi(cls, x_data, out=None):
fprime_data = (2. / math.sqrt(math.pi)) * cls._exp(cls._square(x_data))
return _black_f_white_fprime(
nthderiv.erfi, fprime_data, x_data, out=out)
@classmethod
def _pb_erfi(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
fprime_data = (2. / math.sqrt(math.pi)) * cls._exp(cls._square(x_data))
cls._amul(ybar_data, fprime_data, out=out)
@classmethod
def _dpm_hyp1f1(cls, a, b, x_data, out=None):
f = functools.partial(nthderiv.mpmath_hyp1f1, a, b)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_dpm_hyp1f1(cls, ybar_data, a, b, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._dpm_hyp1f1(a+1., b+1., x_data) * (float(a) / float(b))
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _hyp1f1(cls, a, b, x_data, out=None):
f = functools.partial(nthderiv.hyp1f1, a, b)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_hyp1f1(cls, ybar_data, a, b, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._hyp1f1(a+1., b+1., x_data) * (float(a) / float(b))
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _hyperu(cls, a, b, x_data, out=None):
f = functools.partial(nthderiv.hyperu, a, b)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_hyperu(cls, ybar_data, a, b, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._hyperu(a+1., b+1., x_data) * (-a)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _dpm_hyp2f0(cls, a1, a2, x_data, out=None):
f = functools.partial(nthderiv.mpmath_hyp2f0, a1, a2)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_dpm_hyp2f0(cls, ybar_data, a1, a2, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._dpm_hyp2f0(a1+1., a2+1., x_data) * float(a1) * float(a2)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _hyp2f0(cls, a1, a2, x_data, out=None):
f = functools.partial(nthderiv.hyp2f0, a1, a2)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_hyp2f0(cls, ybar_data, a1, a2, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._hyp2f0(a1+1., a2+1., x_data) * float(a1) * float(a2)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _hyp0f1(cls, b, x_data, out=None):
f = functools.partial(nthderiv.hyp0f1, b)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_hyp0f1(cls, ybar_data, b, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._hyp0f1(b+1., x_data) / float(b)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _polygamma(cls, m, x_data, out=None):
f = functools.partial(nthderiv.polygamma, m)
return _eval_slow_generic(f, x_data, out=out)
@classmethod
def _pb_polygamma(cls, ybar_data, m, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._polygamma(m+1, x_data)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _psi(cls, x_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
return _eval_slow_generic(nthderiv.psi, x_data, out=out)
@classmethod
def _pb_psi(cls, ybar_data, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._polygamma(1, x_data)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _gammaln(cls, x_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
return _eval_slow_generic(nthderiv.gammaln, x_data, out=out)
@classmethod
def _pb_gammaln(cls, ybar_data, x_data, y_data, out=None):
if out is None:
raise NotImplementedError('should implement that')
tmp = cls._polygamma(0, x_data)
cls._amul(ybar_data, tmp, out=out)
@classmethod
def _dot(cls, x_data, y_data, out = None):
"""
z = dot(x,y)
"""
if out is None:
new_shp = x_data.shape[:-1] + y_data.shape[2:-2] + (y_data.shape[-1],)
out = numpy.zeros(new_shp, dtype=numpy.promote_types(x_data.dtype, y_data.dtype) )
z_data = out
z_data[...] = 0.
D,P = x_data.shape[:2]
# print 'x_data.shape=', x_data.shape
# print 'y_data.shape=', y_data.shape
# print 'z_data.shape=', z_data.shape
for d in range(D):
for p in range(P):
for c in range(d+1):
tmp = numpy.dot(x_data[c,p,...],
y_data[d-c,p,...])
numpy.add(z_data[d,p,...], tmp, out=z_data[d,p, ...], casting='unsafe')
return out
@classmethod
def _dot_pullback(cls, zbar_data, x_data, y_data, z_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
(xbar_data, ybar_data) = out
xbar_data += cls._dot(zbar_data, cls._transpose(y_data), out = xbar_data.copy())
ybar_data += cls._dot(cls._transpose(x_data), zbar_data, out = ybar_data.copy())
return out
@classmethod
def _dot_non_UTPM_y(cls, x_data, y_data, out = None):
"""
z = dot(x,y)
"""
if out is None:
raise NotImplementedError('should implement that')
z_data = out
z_data[...] = 0.
D,P = x_data.shape[:2]
# print 'z_data=',z_data
for d in range(D):
for p in range(P):
z_data[d,p,...] = numpy.dot(x_data[d,p,...], y_data[...])
return out
@classmethod
def _dot_non_UTPM_x(cls, x_data, y_data, out = None):
"""
z = dot(x,y)
"""
if out is None:
raise NotImplementedError('should implement that')
z_data = out
z_data[...] = 0.
D,P = y_data.shape[:2]
for d in range(D):
for p in range(P):
z_data[d,p,...] = numpy.dot(x_data[...], y_data[d,p,...])
return out
@classmethod
def _outer(cls, x_data, y_data, out = None):
"""
z = outer(x,y)
"""
if out is None:
raise NotImplementedError('should implement that')
z_data = out
z_data[...] = 0.
D,P = x_data.shape[:2]
for d in range(D):
for p in range(P):
for c in range(d+1):
z_data[d,p,...] += numpy.outer(x_data[c,p,...], y_data[d-c,p,...])
return out
@classmethod
def _outer_non_utpm_y(cls, x_data, y, out = None):
"""
z = outer(x,y)
where x is UTPM and y is ndarray
"""
if out is None:
raise NotImplementedError('should implement that')
z_data = out
z_data[...] = 0.
D,P = x_data.shape[:2]
for d in range(D):
for p in range(P):
z_data[d,p,...] += numpy.outer(x_data[d,p,...], y)
return out
@classmethod
def _outer_non_utpm_x(cls, x, y_data, out = None):
"""
z = outer(x,y)
where y is UTPM and x is ndarray
"""
if out is None:
raise NotImplementedError('should implement that')
z_data = out
z_data[...] = 0.
D,P = y_data.shape[:2]
for d in range(D):
for p in range(P):
z_data[d,p,...] += numpy.outer(x, y_data[d,p,...])
return out
@classmethod
def _outer_pullback(cls, zbar_data, x_data, y_data, z_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
(xbar_data, ybar_data) = out
xbar_data += cls._dot(zbar_data, y_data, out = xbar_data.copy())
ybar_data += cls._dot(zbar_data, x_data, out = ybar_data.copy())
return out
@classmethod
def _inv(cls, x_data, out = None):
"""
computes y = inv(x)
"""
if out is None:
raise NotImplementedError('should implement that')
y_data, = out
(D,P,N,M) = y_data.shape
# tc[0] element
for p in range(P):
y_data[0,p,:,:] = numpy.linalg.inv(x_data[0,p,:,:])
# tc[d] elements
for d in range(1,D):
for p in range(P):
for c in range(1,d+1):
y_data[d,p,:,:] += numpy.dot(x_data[c,p,:,:], y_data[d-c,p,:,:],)
y_data[d,p,:,:] = numpy.dot(-y_data[0,p,:,:], y_data[d,p,:,:],)
return y_data
@classmethod
def _inv_pullback(cls, ybar_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
xbar_data = out
tmp1 = numpy.zeros(xbar_data.shape)
tmp2 = numpy.zeros(xbar_data.shape)
tmp1 = cls._dot(ybar_data, cls._transpose(y_data), out = tmp1)
tmp2 = cls._dot(cls._transpose(y_data), tmp1, out = tmp2)
xbar_data -= tmp2
return out
@classmethod
def _solve_pullback(cls, ybar_data, A_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
Abar_data = out[0]
xbar_data = out[1]
Tbar = numpy.zeros(xbar_data.shape)
cls._solve( A_data.transpose((0,1,3,2)), ybar_data, out = Tbar)
Tbar *= -1.
cls._iouter(Tbar, y_data, Abar_data)
xbar_data -= Tbar
return out
@classmethod
def _solve_non_UTPM_x_pullback(cls, ybar_data, A_data, x_data, y_data, out = None):
if out is None:
raise NotImplementedError('should implement that')
Abar_data = out
Tbar = numpy.zeros(y_data.shape)
cls._solve( A_data.transpose((0,1,3,2)), ybar_data, out = Tbar)
Tbar *= -1.
cls._iouter(Tbar, y_data, Abar_data)
return out, None
@classmethod
def _solve(cls, A_data, x_data, out = None):
"""
solves the linear system of equations for y::
A y = x
"""
if out is None:
raise NotImplementedError('should implement that')
y_data = out
x_shp = x_data.shape
A_shp = A_data.shape
D,P,M,N = A_shp
D,P,M,K = x_shp
# d = 0: base point
for p in range(P):
y_data[0,p,...] = numpy.linalg.solve(A_data[0,p,...], x_data[0,p,...])
# d = 1,...,D-1
dtype = numpy.promote_types(A_data.dtype, x_data.dtype)
tmp = numpy.zeros((M,K),dtype=dtype)
for d in range(1, D):
for p in range(P):
tmp[:,:] = x_data[d,p,:,:]
for k in range(1,d+1):
tmp[:,:] -= numpy.dot(A_data[k,p,:,:],y_data[d-k,p,:,:])
y_data[d,p,:,:] = numpy.linalg.solve(A_data[0,p,:,:],tmp)
return out
@classmethod
def _solve_non_UTPM_A(cls, A_data, x_data, out = None):
"""
solves the linear system of equations for y::
A y = x
when A is a simple (N,N) float array
"""
if out is None:
raise NotImplementedError('should implement that')
y_data = out
x_shp = numpy.shape(x_data)
A_shp = numpy.shape(A_data)
M,N = A_shp
D,P,M,K = x_shp
assert M == N
for d in range(D):
for p in range(P):
y_data[d,p,...] = numpy.linalg.solve(A_data[:,:], x_data[d,p,...])
return out
@classmethod
def _solve_non_UTPM_x(cls, A_data, x_data, out = None):
"""
solves the linear system of equations for y::
A y = x
where x is simple (N,K) float array
"""
if out is None:
raise NotImplementedError('should implement that')
y_data = out
x_shp = numpy.shape(x_data)
A_shp = numpy.shape(A_data)
D,P,M,N = A_shp
M,K = x_shp
assert M==N
# d = 0: base point
for p in range(P):
y_data[0,p,...] = numpy.linalg.solve(A_data[0,p,...], x_data[...])
# d = 1,...,D-1
tmp = numpy.zeros((M,K),dtype=float)
for d in range(1, D):
for p in range(P):
tmp[:,:] = 0.
for k in range(1,d+1):
tmp[:,:] -= numpy.dot(A_data[k,p,:,:],y_data[d-k,p,:,:])
y_data[d,p,:,:] = numpy.linalg.solve(A_data[0,p,:,:],tmp)
return out
@classmethod
def _cholesky(cls, A_data, L_data):
"""
compute the Cholesky decomposition in Taylor arithmetic of a symmetric
positive definite matrix A,
i.e.
.. math::
A = L L^T
"""
DT,P,N = numpy.shape(A_data)[:3]
# allocate (temporary) projection matrix
Proj =
|
numpy.zeros((N,N))
|
numpy.zeros
|
"""
atomtools for geometry
"""
import os
import math
import itertools
import numpy as np
from numpy.linalg import norm
import modlog
import chemdata
BASEDIR = os.path.dirname(os.path.abspath(__file__))
EXTREME_SMALL = 1e-5
logger = modlog.getLogger(__name__)
def cos(theta, arc=False):
factor = 1 if arc else math.pi/180.0
return math.cos(theta * factor)
def sin(theta, arc=False):
factor = 1 if arc else math.pi/180.0
return math.sin(theta * factor)
def acos(result, arc=False):
factor = 1 if arc else 180.0/math.pi
return math.acos(result) * factor
def asin(result, arc=False):
factor = 1 if arc else 180.0/math.pi
return math.asin(result) * factor
def get_positions(positions):
if hasattr(positions, 'positions'):
positions = positions.positions
return np.array(positions).reshape((-1, 3))
def get_atoms_size(positions):
if hasattr(positions, 'positions'):
positions = positions.positions
assert isinstance(positions, (np.ndarray, list)
), 'Please give Atoms, list or ndarray'
positions = np.array(positions).reshape((-1, 3))
size = [0.] * 3
for i in range(3):
size[i] = positions[:, i].max() - positions[:, i].min()
return tuple(size)
def normed(v):
v = np.array(v)
if norm(v) < EXTREME_SMALL:
return v
return v/norm(v)
def vector_angle(a, b):
return acos(np.dot(a, b)/(norm(a)*norm(b)))
def get_distance(positions, i, j):
positions = get_positions(positions)
return norm(positions[i]-positions[j])
def get_angle(positions, i, j, k):
positions = get_positions(positions)
v1 = positions[i] - positions[j]
v2 = positions[k] - positions[j]
return acos(normed(v1).dot(normed(v2)))
# return vector_angle(v1, v2)  # unreachable alternative kept for reference
def get_dihedral(positions, i, j, k, l):
positions = get_positions(positions)
v1 = normed(positions[i] - positions[j])
v2 = normed(positions[l] - positions[k])
vl = normed(positions[k] - positions[j])
return acos(v1.dot(v2)) * np.sign(v2.dot(np.cross(v1, vl)))
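# Editor's illustrative sketch (assumption): a four-atom geometry whose
# distance, bond angle and dihedral are easy to verify by hand (all angles
# are returned in degrees; the dihedral sign depends on the convention above).
def _demo_geometry_measures():
    positions = [[1., 0., 0.], [0., 0., 0.], [0., 0., 1.], [0., 1., 1.]]
    assert abs(get_distance(positions, 0, 1) - 1.0) < 1e-8
    assert abs(get_angle(positions, 0, 1, 2) - 90.0) < 1e-6
    assert abs(abs(get_dihedral(positions, 0, 1, 2, 3)) - 90.0) < 1e-6
    return positions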
def cartesian_to_zmatrix(positions, zmatrix_dict=None,
initial_num=0, indices=None):
def get_zmat_data(zmatrix_dict, keywords):
return zmatrix_dict[keywords] if zmatrix_dict is not None \
and keywords in zmatrix_dict else []
shown_length = get_zmat_data(zmatrix_dict, 'shown_length')
shown_angle = get_zmat_data(zmatrix_dict, 'shown_angle')
shown_dihedral = get_zmat_data(zmatrix_dict, 'shown_dihedral')
same_length = get_zmat_data(zmatrix_dict, 'same_length')
same_angle = get_zmat_data(zmatrix_dict, 'same_angle')
same_dihedral = get_zmat_data(zmatrix_dict, 'same_dihedral')
shown_length.sort()
#shown_length = []
#shown_angle = []
#shown_dihedral = []
positions = np.array(positions).reshape((-1, 3))
natoms = len(positions)
if indices is None:
indices = np.arange(natoms)
zmatrix = np.array([[[-1, -1], [-1, -1], [-1, -1]]]*natoms).tolist()
same_bond_variables = [''] * len(same_length)
variables = {}
for ai in range(natoms):
if ai == 0:
continue
elif ai == 1:
zmatrix[ai][0] = [0, get_distance(positions, 0, 1)]
continue
for a0, a1 in shown_length:
a0, a1 = indices[a0], indices[a1]
logger.debug(f"{a0}, {a1}")
if ai == a1:
alpha = 'R_'+str(a0+initial_num)+'_'+str(a1+initial_num)
write_variable = True
for index, same_length_group in enumerate(same_length):
# print((a0, a1), same_length_group)
if (a0, a1) in same_length_group:
# print("UES")
if same_bond_variables[index] == '':
same_bond_variables[index] = alpha
logger.debug(f"{index}, {same_bond_variables}")
else:
alpha = same_bond_variables[index]
write_variable = False
break
zmatrix[ai][0] = [a0, alpha]
if write_variable:
variables[alpha] = [(a0, a1), get_distance(positions, a0, a1)]
break
a0 = -1
a1 = -1
a2 = -1
a0 = zmatrix[ai][0][0]
if a0 == -1:
a0 = 0
dist = get_distance(positions, ai, a0)
logger.debug(f'dist:, {ai}, {a0}, {dist}')
zmatrix[ai][0] = [a0, dist]
a1 = zmatrix[ai][1][0]
if a1 == -1:
for a1 in range(0, ai):
if not a1 in [a0]:
break
if a1 == -1:
raise ValueError('a1 is still -1')
angle = get_angle(positions, ai, a0, a1)
logger.debug(f'angle:, {ai}, {a0}, {a1}, {angle}')
zmatrix[ai][1] = [a1, angle]
a2 = zmatrix[ai][2][0]
if ai >= 3 and a2 == -1:
for a2 in range(0, ai):
if not a2 in [a0, a1]:
break
if a2 == -1:
raise ValueError('a2 is still -1')
dihedral = get_dihedral(positions, ai, a0, a1, a2)
logger.debug(f'dihedral:, {dihedral}')
zmatrix[ai][2] = [a2, dihedral]
if initial_num != 0:
for zmat in zmatrix:
for zmat_x in zmat:
if zmat_x[0] != -1:
zmat_x[0] += initial_num
logger.debug(f"{zmatrix}, {variables}, {indices}")
return zmatrix, variables, indices
def cartesian_to_spherical(pos_o, pos_s):
pos_o = np.array(pos_o)
pos_s = np.array(pos_s)
logger.debug(f'cartesian to spherical:, {pos_o}, {pos_s}')
v_os = pos_s - pos_o
if norm(v_os) < 0.01:
return (0, 0, 0)
x, y, z = v_os
length = np.linalg.norm(v_os)
theta = acos(z/length)
xy_length = math.sqrt(x*x+y*y)
logger.debug(f'xy_length, {theta}, {xy_length}')
if xy_length < 0.05:
phi_x = 0.0
phi_y = 0.0
else:
phi_x = acos(x/xy_length)
phi_y = asin(y/xy_length)
if y >= 0:
phi = phi_x
else:
phi = -phi_x
return (length, theta, phi)
def spherical_to_cartesian(pos_o, length, space_angle, space_angle0=(0, 0)):
theta, phi = space_angle
theta0, phi0 = space_angle0
print(f'spherical to cartesian:, {theta}, {phi}')
pos_site = np.array(pos_o) + length * \
np.array([sin(theta+theta0) * cos(phi+phi0),
sin(theta+theta0) * sin(phi+phi0),
cos(theta+theta0)])
return pos_site
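# Editor's illustrative sketch (assumption): cartesian_to_spherical and
# spherical_to_cartesian are inverse to each other (angles in degrees), so a
# round trip should recover the original point.
def _demo_spherical_roundtrip():
    pos_o = np.array([0., 0., 0.])
    pos_s = np.array([1., 2., 2.])
    length, theta, phi = cartesian_to_spherical(pos_o, pos_s)
    recovered = spherical_to_cartesian(pos_o, length, (theta, phi))
    assert np.allclose(recovered, pos_s, atol=1e-6)
    return recovered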
def rotate_site_angle(site_angle, theta, phi):
for site_angle_i in site_angle:
theta_i, phi_i = site_angle_i
site_angle_i = [theta_i+theta, phi_i+phi]
return site_angle
def input_standard_pos_transform(inp_pos, std_pos, t_vals,
std_to_inp=True, is_coord=False):
t_vals = np.array(t_vals).copy()
std_O = np.array(std_pos)[-1].copy()
inp_O = np.array(inp_pos)[-1].copy()
std_pos = np.array(std_pos).copy() - std_O
inp_pos = np.array(inp_pos).copy() - inp_O
natoms = len(inp_pos)
if not is_coord:
inp_O = std_O = np.array([0, 0, 0])
R_mat = None
# return std_pos, inp_pos
for selection in itertools.combinations(range(natoms-1), 3):
selection = list(selection)
std_m = std_pos[selection]
inp_m = inp_pos[selection]
if np.linalg.det(std_m) > 0.01 and np.linalg.det(inp_m) > 0.01:
# std_m * R_mat = inp_m
# R_mat = std_m^-1 * inp_m
R_mat = np.dot(np.linalg.inv(std_m), inp_m)
logger.debug(f'selections:, {selection}')
logger.debug(f'{std_m}, {np.linalg.det(std_m)}')
logger.debug(f'{inp_m}, {np.linalg.det(inp_m)}')
break
if R_mat is None:
# dimension is less than 3
for selection in itertools.combinations(range(natoms-1), 2):
std_v0 = std_pos[selection[0]]
std_v1 = std_pos[selection[1]]
std_v2 = np.cross(std_v0, std_v1)
std_m = np.array([std_v0, std_v1, std_v2])
inp_v0 = inp_pos[selection[0]]
inp_v1 = inp_pos[selection[1]]
inp_v2 = np.cross(inp_v0, inp_v1)
inp_m = np.array([inp_v0, inp_v1, inp_v2])
if np.linalg.det(std_m) > 0.01:
R_mat = np.dot(np.linalg.inv(std_m), inp_m)
logger.debug(f'selections:, {selection}')
break
if R_mat is None:
# 2 atoms
std_v = std_pos[0]
inp_v = inp_pos[0]
R = np.cross(std_v, inp_v)
R = normed(R)
logger.debug(f'stdv, inpv:, {std_v}, {inp_v}, \nR:, {R}')
if std_to_inp:
return np.cross(R, t_vals-std_O)+inp_O
else:
return np.cross(t_vals-inp_O, R)+std_O
else:
# testification
# if debug:
# assert((np.dot(std_pos, R_mat)-inp_pos < 0.001).all())
# logger.debug('test complete')
if std_to_inp:
return
|
np.dot(t_vals - std_O, R_mat)
|
numpy.dot
|
import numpy as np
import matplotlib.pyplot as plt
class SolveMinProbl:
def __init__(self, y, A, y_val, A_val, y_test, A_test): #initialization
self.matr=A
self.Np=y.shape[0]
self.Nf=A.shape[1]
self.vect=y
self.vect_val=y_val
self.matr_val=A_val
self.matr_test=A_test
self.vect_test=y_test
self.sol=np.zeros((self.Nf, 1), dtype=float)
return
def plot_w(self, title='Solution'):
w=self.sol
n=np.arange(self.Nf)
plt.figure()
plt.plot(n, w)
plt.xlabel('n')
plt.ylabel('w(n)')
plt.title(title)
plt.grid()
plt.show()
return
def print_result(self, title):
print(title, '_:')
print("The optimum weight vector is:_")
print(self.sol)
return
def plot_err(self, title='Square_error', logx=0, logy=0):
err=self.err
plt.figure()
if (logy==0) and (logx==0):
plt.plot(err[:, 0], err[:, 1])
if (logy == 1) and (logx == 0):
plt.semilogy(err[:, 0], err[:, 1], label='Train')
if (logy == 0) and (logx == 1):
plt.semilogx(err[:, 0], err[:, 1])
if (logy == 1) and (logx == 1):
plt.loglog(err[:, 0], err[:, 1])
if (logy==0) and (logx==0):
plt.plot(err[:, 0], err[:, 2])
if (logy == 1) and (logx == 0):
plt.semilogy(err[:, 0], err[:, 2], label='Validation')
if (logy == 0) and (logx == 1):
plt.semilogx(err[:, 0], err[:, 2])
if (logy == 1) and (logx == 1):
plt.loglog(err[:, 0], err[:, 2])
plt.xlabel('n')
plt.ylabel('e(n)')
plt.legend()
plt.title(title)
plt.margins(0.01, 0.1)
plt.grid()
plt.show()
return
def plotyhattest(self, title):
w=self.sol
A_test=self.matr_test
y_test=self.vect_test
y_hat_test=np.dot(A_test, w)
plt.figure()
plt.scatter(y_test, y_hat_test)
plt.xlabel('y_test')
plt.ylabel('y_hat_test')
plt.title(title)
plt.grid()
plt.show()
def plotyhattrain(self, title):
w=self.sol
A_train=self.matr
y_train=self.vect
y_hat_train=np.dot(A_train, w)
plt.figure()
plt.scatter(y_train, y_hat_train)
plt.xlabel('y_train')
plt.ylabel('y_hat_train')
plt.title(title)
plt.grid()
plt.show()
class SolveGrad(SolveMinProbl):
def run(self, gamma=1e-5, Nit=1000):
np.random.seed(2)
self.err=np.zeros((Nit, 3), dtype=float)
A=self.matr
y=self.vect
A_val = self.matr_val
y_val = self.vect_val
w=np.random.rand(self.Nf, 1)
for it in range(Nit):
grad=2*np.dot(A.T, (np.dot(A,w)-y))
w=w-gamma*grad
self.err[it, 0]=it
self.err[it, 1]=np.linalg.norm(np.dot(A, w)-y)
self.err[it, 2]=np.linalg.norm(np.dot(A_val, w)-y_val)
self.sol=w
self.min=self.err[it, 1]
class SolveSteep(SolveMinProbl):
def run(self, Nit=1000):
np.random.seed(2)
self.err = np.zeros((Nit, 3), dtype=float)
#self.err_val[0, 1]=10000
A_val=self.matr_val
y_val=self.vect_val
A = self.matr
y = self.vect
w = np.random.rand(self.Nf, 1)
for it in range(Nit):
grad = 2 * np.dot(A.T, (np.dot(A, w) - y))
H=2*np.dot(A.T, A)
w=w-np.linalg.norm(grad)**2/np.dot(np.dot(grad.T, H), grad)*grad
self.err[it, 0] = it
self.err[it, 1] = np.linalg.norm(np.dot(A, w) - y)
self.err[it, 2]=np.linalg.norm(np.dot(A_val, w) - y_val)
self.sol = w
self.min = self.err[it, 1]
class SolveStocha(SolveMinProbl):
def run(self, Nit=100, gamma=1e-2):
np.random.seed(2)
self.err = np.zeros((Nit, 3), dtype=float)
A_val = self.matr_val
y_val = self.vect_val
A = self.matr
y = self.vect
w = np.random.rand(self.Nf, 1)
Ac = np.zeros((self.Nf, 1), dtype=float)
for it in range(Nit):
for i in range(self.Np):
for j in range(self.Nf):
Ac[j, 0] = A[i, j]
grad = (gamma * (np.dot(A[i], w) - y[i])) * Ac
w = w - grad
self.err[it, 0] = it
self.err[it, 1] = np.linalg.norm(np.dot(A, w) - y)
self.err[it, 2] = np.linalg.norm(
|
np.dot(A_val, w)
|
numpy.dot
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sufficient_input_subsets.sis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from sufficient_input_subsets import sis
# Function that returns the L2 norm over each set of coordinates in the batch.
_F_L2 = lambda batch_coords: np.linalg.norm(batch_coords, ord=2, axis=-1)
# Function that returns the sum over each array in the batch.
_F_SUM = lambda batch: np.array([np.sum(arr) for arr in batch])
# Function that computes the dot product between a known vector ([1, 2, 0, 1])
# and each array in the batch (analogous to linear regression).
_LINREGRESS_THETA = np.array([1, 2, 0, 1])
_F_LINREGRESS = lambda bt: np.array([np.dot(_LINREGRESS_THETA, b) for b in bt])
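# Editor's illustrative sketch (assumption): concrete values for the helper
# callables above, chosen so the expected outputs can be checked by hand.
def _example_model_functions():
    assert np.allclose(_F_L2(np.array([[3., 4.], [0., 1.]])), [5., 1.])
    assert np.allclose(_F_SUM(np.array([[1, 2, 3], [4, 5, 6]])), [6, 15])
    assert np.allclose(_F_LINREGRESS(np.array([[1, 1, 1, 1]])), [4])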
class SisTest(parameterized.TestCase):
def test_import(self):
self.assertIsNotNone(sis)
def _assert_backselect_stack_equal(self, actual_backselect_stack,
expected_backselect_stack):
"""Raises an AssertionError if two backselect stacks are not equal."""
if not expected_backselect_stack: # expected empty stack
np.testing.assert_equal(actual_backselect_stack,
expected_backselect_stack)
return
actual_idxs, actual_values = zip(*actual_backselect_stack)
expected_idxs, expected_values = zip(*expected_backselect_stack)
if not (np.array_equal(actual_idxs, expected_idxs) and
np.allclose(actual_values, expected_values)):
raise AssertionError(
'Backselect stacks not equal. Got %s, expected %s.' %
(str(actual_backselect_stack), str(expected_backselect_stack)))
@parameterized.named_parameters(
dict(
testcase_name='sis len 1',
sis_result=sis.SISResult(
sis=np.array([[0]]),
ordering_over_entire_backselect=np.array([[2], [1], [3], [0]]),
values_over_entire_backselect=np.array([10.0, 8.0, 5.0, 0.0]),
mask=np.array([True, False, False, False]),
),
expected_len=1),
dict(
testcase_name='sis, 2-dim idxs, len 3',
sis_result=sis.SISResult(
sis=np.array([[0, 1], [1, 2], [2, 3]]),
ordering_over_entire_backselect=np.array([[2], [1], [3], [0]]),
values_over_entire_backselect=np.array([10.0, 8.0, 5.0, 0.0]),
mask=np.array([True, False, False, False]),
),
expected_len=3),
)
def test_sisresult_len(self, sis_result, expected_len):
actual_len = len(sis_result)
self.assertEqual(actual_len, expected_len)
@parameterized.named_parameters(
dict(
testcase_name='sis equal',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=True,
),
dict(
testcase_name='sis not equal, values very slight different',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.000000001]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on sis',
sis1=sis.SISResult(
sis=np.array([[2]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on ordering',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[1], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on values',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 5.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, fractional difference in values',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 5.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 10.01]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on mask',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, False])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
)
def test_sis_result_equality(self, sis1, sis2, expected):
if expected:
self.assertEqual(sis1, sis2)
self.assertEqual(sis2, sis1)
else:
self.assertNotEqual(sis1, sis2)
self.assertNotEqual(sis2, sis1)
@parameterized.named_parameters(
dict(
testcase_name='sis equal',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=True,
),
dict(
testcase_name='sis equal, values very slight different',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.000000001]),
mask=np.array([False, True])),
expected=True,
),
dict(
testcase_name='sis not equal, values too different',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.01, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, different masks',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, False])),
expected=False,
),
)
def test_sis_result_approx_equality(self, sis1, sis2, expected):
if expected:
self.assertTrue(sis1.approx_equal(sis2))
self.assertTrue(sis2.approx_equal(sis1))
else:
self.assertFalse(sis1.approx_equal(sis2))
self.assertFalse(sis2.approx_equal(sis1))
@parameterized.named_parameters(
dict(testcase_name='2-dim', shape=(4, 3)),
dict(testcase_name='2-dim transposed', shape=(3, 4)),
dict(testcase_name='1-dim', shape=(3,)),
dict(testcase_name='3-dim', shape=(4, 3, 8)),
)
def test_make_empty_boolean_mask(self, shape):
actual_mask = sis.make_empty_boolean_mask(shape)
self.assertEqual(actual_mask.shape, shape)
self.assertTrue(np.all(actual_mask))
@parameterized.named_parameters(
dict(
testcase_name='2-dim mask over columns',
shape=(2, 3),
axis=0,
expected_shape=(1, 3)),
dict(
testcase_name='2-dim mask over columns, as tuple',
shape=(2, 3),
axis=(0,),
expected_shape=(1, 3)),
dict(
testcase_name='2-dim mask over rows',
shape=(2, 3),
axis=1,
expected_shape=(2, 1)),
dict(
testcase_name='2-dim mask over all',
shape=(2, 3),
axis=(0, 1),
expected_shape=(1, 1)),
dict(
testcase_name='3-dim mask over ax 1',
shape=(4, 5, 6),
axis=1,
expected_shape=(4, 1, 6)),
dict(
testcase_name='3-dim mask over ax (1, 2)',
shape=(4, 5, 6),
axis=(1, 2),
expected_shape=(4, 1, 1)),
)
def test_make_empty_boolean_mask_broadcast_over_axis(self, shape, axis,
expected_shape):
actual_mask = sis.make_empty_boolean_mask_broadcast_over_axis(shape, axis)
self.assertEqual(actual_mask.shape, expected_shape)
self.assertTrue(np.all(actual_mask))
@parameterized.named_parameters(
dict(
testcase_name='disjoint SIS-collection',
collection=[
sis.SISResult(
sis=np.array([[0], [1]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
sis.SISResult(
sis=np.array([[2], [3]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
]),)
def test_assert_sis_collection_disjoint(self, collection):
sis._assert_sis_collection_disjoint(collection)
@parameterized.named_parameters(
dict(
testcase_name='non-disjoint SIS-collection',
collection=[
sis.SISResult(
sis=np.array([[0], [1]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
sis.SISResult(
sis=np.array([[1], [2]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
]),)
def test_assert_sis_collection_disjoint_raises_error(self, collection):
with self.assertRaises(AssertionError):
sis._assert_sis_collection_disjoint(collection)
@parameterized.named_parameters(
dict(
testcase_name='1-dim idxs, 1 idx',
idx_array=np.array([[3]]),
expected_tuple=(np.array([0]), np.array([3]))),
dict(
testcase_name='1-dim idxs, 2 idxs',
idx_array=np.array([[1], [2]]),
expected_tuple=(np.array([0, 1]), np.array([1, 2]))),
dict(
testcase_name='2-dim idxs, 2 idxs',
idx_array=np.array([[0, 1], [1, 1]]),
expected_tuple=(np.array([0, 1]), np.array([0, 1]), np.array([1,
1]))),
dict(
testcase_name='3-dim idxs, 4 idxs',
idx_array=np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]),
expected_tuple=(np.array([0, 1, 2, 3]), np.array([1, 4, 7, 10]),
np.array([2, 5, 8, 11]), np.array([3, 6, 9, 12]))),
)
def test_transform_next_masks_index_array_into_tuple(self, idx_array,
expected_tuple):
actual_tuple = sis._transform_next_masks_index_array_into_tuple(idx_array)
self.assertLen(actual_tuple, len(expected_tuple))
for actual_column, expected_column in zip(actual_tuple, expected_tuple):
np.testing.assert_array_equal(actual_column, expected_column)
@parameterized.named_parameters(
dict(testcase_name='1-dim idxs, 1 idx', idx_array=np.array([1])),
dict(testcase_name='1-dim idxs, 2 idxs', idx_array=np.array([1, 2])),
dict(
testcase_name='3-dim idxs, 2 idxs',
idx_array=np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])),
)
def test_transform_next_masks_index_array_into_tuple_raises_error(
self, idx_array):
with self.assertRaises(TypeError):
_ = sis._transform_next_masks_index_array_into_tuple(idx_array)
@parameterized.named_parameters(
dict(
testcase_name='no values masked',
current_mask=np.array([True, True, True]),
expected_next_masks=np.array([[False, True,
True], [True, False, True],
[True, True, False]]),
expected_next_masks_idxs=np.array([[0], [1], [2]])),
dict(
testcase_name='partially masked',
current_mask=np.array([True, False, True]),
expected_next_masks=np.array([[False, False, True],
[True, False, False]]),
expected_next_masks_idxs=np.array([[0], [2]])),
dict(
testcase_name='partially masked 2',
current_mask=np.array([False, False, True]),
expected_next_masks=np.array([[False, False, False]]),
expected_next_masks_idxs=np.array([[2]])),
dict(
testcase_name='partially masked larger',
current_mask=np.array([True, True, False, True, True, False]),
expected_next_masks=np.array([
[False, True, False, True, True, False],
[True, False, False, True, True, False],
[True, True, False, False, True, False],
[True, True, False, True, False, False],
]),
expected_next_masks_idxs=np.array([[0], [1], [3], [4]])),
dict(
testcase_name='all values masked',
current_mask=np.array([False, False, False]),
expected_next_masks=np.array([]),
expected_next_masks_idxs=np.array([])),
dict(
testcase_name='(3, 1) input',
current_mask=np.array([[True], [True], [True]]),
expected_next_masks=np.array([[[False], [True], [True]],
[[True], [False], [True]],
[[True], [True], [False]]]),
expected_next_masks_idxs=np.array([[0, 0], [1, 0], [2, 0]])),
dict(
testcase_name='(1, 3) input',
current_mask=np.array([[True, True, True]]),
expected_next_masks=np.array([[[False, True, True]],
[[True, False, True]],
[[True, True, False]]]),
expected_next_masks_idxs=np.array([[0, 0], [0, 1], [0, 2]])),
dict(
testcase_name='(1, 3) input, partially masked',
current_mask=np.array([[True, False, True]]),
expected_next_masks=np.array([[[False, False, True]],
[[True, False, False]]]),
expected_next_masks_idxs=np.array([[0, 0], [0, 2]])),
dict(
testcase_name='(1, 3) input, all masked',
current_mask=np.array([[False, False, False]]),
expected_next_masks=np.array([]),
expected_next_masks_idxs=np.array([])),
dict(
testcase_name='(2, 2) input',
current_mask=np.array([[True, True], [True, True]]),
expected_next_masks=np.array([[[False, True], [True, True]],
[[True, False], [True, True]],
[[True, True], [False, True]],
[[True, True], [True, False]]]),
expected_next_masks_idxs=np.array([[0, 0], [0, 1], [1, 0], [1, 1]])),
)
def test_produce_next_masks(self, current_mask, expected_next_masks,
expected_next_masks_idxs):
actual_next_masks, actual_next_masks_idxs = sis._produce_next_masks(
current_mask)
np.testing.assert_array_equal(actual_next_masks, expected_next_masks)
np.testing.assert_array_equal(actual_next_masks_idxs,
expected_next_masks_idxs)
@parameterized.named_parameters(
dict(
testcase_name='1-dim, single mask',
input_to_mask=np.array([1, 2, 3, 4, 5]),
fully_masked_input=np.array([0, 0, 0, 0, 0]),
batch_of_masks=np.array([[False, True, False, True, True]]),
expected_masked_inputs=np.array([[0, 2, 0, 4, 5]])),
dict(
testcase_name='1-dim, multiple masks',
input_to_mask=np.array([1, 2, 3]),
fully_masked_input=np.array([0, 0, 0]),
batch_of_masks=np.array([[True, True, False], [True, True, True],
[False, False, False], [False, True,
False]]),
expected_masked_inputs=np.array([[1, 2, 0], [1, 2, 3], [0, 0, 0],
[0, 2, 0]])),
dict(
testcase_name='2-dim, single mask',
input_to_mask=np.array([[1, 2, 3], [4, 5, 6]]),
fully_masked_input=np.array([[0, 0, 0], [0, 0, 0]]),
batch_of_masks=np.array([[[True, False, False], [False, True,
True]]]),
expected_masked_inputs=np.array([[[1, 0, 0], [0, 5, 6]]])),
dict(
testcase_name='2-dim, multiple masks',
input_to_mask=np.array([[1, 2, 3], [4, 5, 6]]),
fully_masked_input=np.array([[0, 0, 0], [0, 0, 0]]),
batch_of_masks=np.array(
[[[True, True, True], [True, True, True]],
[[False, False, False], [False, False, False]],
[[True, False, True], [False, True, False]]]),
expected_masked_inputs=np.array([[[1, 2, 3], [4, 5, 6]],
[[0, 0, 0], [0, 0, 0]],
[[1, 0, 3], [0, 5, 0]]])),
dict(
testcase_name='1-dim, single mask, string inputs',
input_to_mask=np.array(['A', 'B', 'C', 'D']),
fully_masked_input=np.array(['-', '-', '-', '-']),
batch_of_masks=np.array([[False, True, False, True]]),
expected_masked_inputs=np.array([['-', 'B', '-', 'D']])),
)
def test_produce_masked_inputs(self, input_to_mask, fully_masked_input,
batch_of_masks, expected_masked_inputs):
actual_masked_inputs = sis.produce_masked_inputs(
input_to_mask, fully_masked_input, batch_of_masks)
np.testing.assert_array_equal(actual_masked_inputs, expected_masked_inputs)
@parameterized.named_parameters(
dict(
testcase_name='1-dim, single mask, no batch dimension',
input_to_mask=np.array([1, 2, 3]),
fully_masked_input=np.array([0, 0, 0]),
batch_of_masks=np.array([False, True, False])),)
def test_produce_masked_inputs_raises_error(
self, input_to_mask, fully_masked_input, batch_of_masks):
with self.assertRaises(TypeError):
_ = sis.produce_masked_inputs(input_to_mask, fully_masked_input,
batch_of_masks)
@parameterized.named_parameters(
dict(
testcase_name='L2 norm, 2-dim',
f=_F_L2,
current_input=np.array([1, 10]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([0]), 10), (np.array([1]), 0)]),
dict(
testcase_name='L2 norm, 2-dim, all masked',
f=_F_L2,
current_input=np.array([1, 10]),
current_mask=np.array([False, False]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[]),
dict(
testcase_name='L2 norm, 2-dim, reversed',
f=_F_L2,
current_input=np.array([10, 1]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([1]), 10), (np.array([0]), 0)]),
dict(
testcase_name='L2 norm, 2-dim, partially masked',
f=_F_L2,
current_input=np.array([10, 1]),
current_mask=np.array([False, True]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([1]), 0)]),
dict(
testcase_name='L2 norm, 2-dim, partially masked, reversed',
f=_F_L2,
current_input=np.array([10, 1]),
current_mask=np.array([True, False]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([0]), 0)]),
dict(
testcase_name='L2 norm, 3-dim, same value',
f=_F_L2,
current_input=np.array([10, 10, 10]),
current_mask=np.array([True, True, True]),
fully_masked_input=np.array([0, 0, 0]),
expected_backselect_stack=[(np.array([0]), np.sqrt(200)),
(np.array([1]), 10), (np.array([2]), 0)]),
dict(
testcase_name='L2 norm, 4-dim, diff values',
f=_F_L2,
current_input=np.array([0.1, 10, 5, 1]),
current_mask=np.array([True, True, True, True]),
fully_masked_input=np.array([0, 0, 0, 0]),
expected_backselect_stack=[(np.array([0]), np.sqrt(126)),
(np.array([3]), np.sqrt(125)),
(np.array([2]), 10), (
|
np.array([1])
|
numpy.array
|
import string
from itertools import product
import numpy as np
from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
import pandas as pd
from .pandas_vb_common import setup # noqa
class Melt(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
self.df['id1'] = np.random.randint(0, 10, 10000)
self.df['id2'] = np.random.randint(100, 1000, 10000)
def time_melt_dataframe(self):
melt(self.df, id_vars=['id1', 'id2'])
class Pivot(object):
goal_time = 0.2
def setup(self):
N = 10000
index = date_range('1/1/2000', periods=N, freq='h')
data = {'value': np.random.randn(N * 50),
'variable': np.arange(50).repeat(N),
'date': np.tile(index.values, 50)}
self.df = DataFrame(data)
def time_reshape_pivot_time_series(self):
self.df.pivot('date', 'variable', 'value')
class SimpleReshape(object):
goal_time = 0.2
def setup(self):
arrays = [np.arange(100).repeat(100),
np.roll(np.tile(np.arange(100), 100), 25)]
index = MultiIndex.from_arrays(arrays)
self.df = DataFrame(np.random.randn(10000, 4), index=index)
self.udf = self.df.unstack(1)
def time_stack(self):
self.udf.stack()
def time_unstack(self):
self.df.unstack(1)
class Unstack(object):
goal_time = 0.2
def setup(self):
m = 100
n = 1000
levels = np.arange(m)
index = MultiIndex.from_product([levels] * 2)
columns =
|
np.arange(n)
|
numpy.arange
|
#!/usr/bin/env python
from __future__ import print_function, division
from collections import deque
from util import cached_property, memoized, nop
from copy import copy
import numpy as np
import random
import sys
if sys.version_info[0] < 3:
range = xrange
class Cell():
empty = 0
shape = 1
block = 2
solid = 3
class Action():
left = 'left'
right = 'right'
turn_left = 'turnleft'
turn_right = 'turnright'
down = 'down'
up = 'up'
class Points():
line = [0, 0, 3, 6, 10]
tspin = [0, 5, 10]
perfect = 18
class Piece(object):
def __init__(self, value, name):
self.value = value
self.name = 'Piece.{}'.format(name)
def __repr__(self):
return self.name
def __getitem__(self, index):
return self.value[index]
@cached_property
def indexes(self):
return range(0, len(self.value))
@cached_property
def max_height(self):
return 1 + max(max(x[0]) for x in self.value)
@cached_property
def max_width(self):
return 1 + max(max(x[1]) for x in self.value)
@cached_property
def num_rotations(self):
return len(self.value)
@cached_property
def offsets(self):
return self.value
@cached_property
def pairs(self):
return tuple(tuple(zip(*x)) for x in self.value)
@cached_property
def rows(self):
return tuple(x[0] for x in self.value)
@cached_property
def cols(self):
return tuple(x[1] for x in self.value)
@cached_property
def heights(self):
return tuple(1 + max(x[0]) for x in self.value)
@cached_property
def widths(self):
return tuple(1 + max(x[1]) for x in self.value)
@staticmethod
def tspin_offsets():
return ((0, 0, 2, 2), (0, 2, 0, 2))
Piece.L = Piece([((0, 1, 1, 1), (2, 0, 1, 2)), ((0, 1, 2, 2), (1, 1, 1, 2)),
((1, 1, 1, 2), (0, 1, 2, 0)), ((0, 0, 1, 2), (0, 1, 1, 1))],
'L')
Piece.O = Piece([((0, 0, 1, 1), (0, 1, 0, 1))],
'O')
Piece.I = Piece([((1, 1, 1, 1), (0, 1, 2, 3)), ((0, 1, 2, 3), (2, 2, 2, 2))],
'I')
Piece.J = Piece([((0, 1, 1, 1), (0, 0, 1, 2)), ((0, 0, 1, 2), (1, 2, 1, 1)),
((1, 1, 1, 2), (0, 1, 2, 2)), ((0, 1, 2, 2), (1, 1, 0, 1))],
'J')
Piece.S = Piece([((0, 0, 1, 1), (1, 2, 0, 1)), ((0, 1, 1, 2), (1, 1, 2, 2))],
'S')
Piece.T = Piece([((0, 1, 1, 1), (1, 0, 1, 2)), ((0, 1, 1, 2), (1, 1, 2, 1)),
((1, 1, 1, 2), (0, 1, 2, 1)), ((0, 1, 1, 2), (1, 0, 1, 1))],
'T')
Piece.Z = Piece([((0, 0, 1, 1), (0, 1, 1, 2)), ((0, 1, 1, 2), (2, 1, 2, 1))],
'Z')
pieces = [Piece.L, Piece.O, Piece.I, Piece.J, Piece.S, Piece.T, Piece.Z]
# pieces = [Piece.O, Piece.S, Piece.T, Piece.Z]
# pieces = [Piece.O, Piece.I]
# pieces = [Piece.I]
piece_letters = {
'L': Piece.L,
'O': Piece.O,
'I': Piece.I,
'J': Piece.J,
'S': Piece.S,
'T': Piece.T,
'Z': Piece.Z
}
letter_pieces = {
Piece.L: 'L',
Piece.O: 'O',
Piece.I: 'I',
Piece.J: 'J',
Piece.S: 'S',
Piece.T: 'T',
Piece.Z: 'Z'
}
class Placement(object):
cache = {}
def __new__(cls, piece, rotation, row, col, *args, **kwargs):
key = (piece, rotation, row, col)
return Placement.cache.setdefault(key, super(Placement, cls).__new__(cls))
def __init__(self, piece, rotation, row, col):
self.piece = piece
self.rotation = rotation
self.row = row
self.col = col
def __repr__(self):
args = type(self).__name__, self.piece, self.rotation, self.row, self.col
return '{}(piece={}, rotation={}, row={}, col={})'.format(*args)
def _replace(self, piece = None, rotation = None, row = None, col = None):
if piece is None: piece = self.piece
if rotation is None: rotation = self.rotation
if row is None: row = self.row
if col is None: col = self.col
return Placement(piece, rotation, row, col)
@property
def to_tuple(self):
return (self.piece, self.rotation, self.row, self.col)
@cached_property
def left(self):
return self._replace(col = self.col - 1)
@cached_property
def right(self):
return self._replace(col = self.col + 1)
@cached_property
def up(self):
return self._replace(row = self.row - 1)
@cached_property
def down(self):
return self._replace(row = self.row + 1)
@cached_property
def turn_left(self):
r = self.rotation - 1
return self._replace(rotation = r) if r >= 0 else None
@cached_property
def turn_right(self):
r = self.rotation + 1
l = self.piece.num_rotations
return self._replace(rotation = r) if r < l else None
@cached_property
def rows(self):
return tuple(r + self.row for r in self.piece.rows[self.rotation])
@cached_property
def cols(self):
return tuple(c + self.col for c in self.piece.cols[self.rotation])
@cached_property
def row_bounds(self):
rows = self.rows
return (min(rows), max(rows) + 1)
@cached_property
def col_bounds(self):
cols = self.cols
return (min(cols), max(cols) + 1)
@cached_property
def offsets(self):
return (self.rows, self.cols)
@cached_property
def pairs(self):
return tuple((r + self.row, c + self.col)
for r, c in self.piece.pairs[self.rotation])
@cached_property
def nonnegative_offsets(self):
return tuple(zip(*(x for x in self.pairs if x[0] >= 0))) # and x[1] >= 0
@cached_property
def tspin_offsets(self):
offsets = Piece.tspin_offsets()
rows = tuple(r + self.row for r in offsets[0])
cols = tuple(c + self.col for c in offsets[1])
return (rows, cols)
@memoized
def is_inside(self, row_bounds, col_bounds):
return self.is_inside_rows(row_bounds) and self.is_inside_cols(col_bounds)
@memoized
def is_inside_rows(self, row_bounds):
if row_bounds is None:
return True
return all(row_bounds[0] <= r < row_bounds[1] for r in self.rows)
@memoized
def is_inside_cols(self, col_bounds):
if col_bounds is None:
return True
return all(col_bounds[0] <= c < col_bounds[1] for c in self.cols)
@cached_property
def moves(self):
actions = ['left', 'right', 'turnleft', 'turnright', 'down']
placements = [self.left, self.right, self.turn_left,
self.turn_right, self.down]
return tuple((a, p) for a, p in zip(actions, placements) if p)
@cached_property
def backward_moves(self):
actions = ['up', 'turnleft', 'turnright', 'left', 'right']
placements = [self.up, self.turn_left, self.turn_right,
self.left, self.right]
return tuple((a, p) for a, p in zip(actions, placements) if p)
def generate():
while True:
yield Placement(random.choice(pieces), 0, -1, 4)
class Field(object):
move_cache = {}
def __init__(self, width = 10, height = 20, cells = None, history = True):
self.width = width
self.height = height
self.reset_cells(cells)
self.reset_ceiling()
self.reset_heights()
self.reset_solid_lines()
self.history = [] if history else nop
def __repr__(self):
args = (type(self).__name__, self.width, self.height,
self.cells, self.history)
return '<{}: width={}, height={}, cells={}, history={}>'.format(*args)
def blank_cells(self):
return np.zeros((self.height, self.width), dtype = bool)
def reset_cells(self, cells):
if cells is None:
self.cells = self.blank_cells()
else:
self.cells = np.array(cells) >= Cell.block
def reset_ceiling(self):
for i in range(self.height):
if np.any(self.cells[i,:]):
break
self.ceiling = i
def reset_solid_lines(self):
for i in range(self.height, 0, -1):
if not np.all(self.cells[i - 1,:]):
break
self.solid_lines = self.height - i
def reset_heights(self):
self.heights = self.calculate_heights()
def calculate_heights(self, slice = slice(None,None,None)):
if self.height == self.ceiling:
return np.zeros((self.width,), dtype=np.int8)[slice]
arr = self.cells[self.ceiling:,slice]
top = ~arr[0,:]
heights = np.argmax(arr, axis=0)
zeros = np.where(heights == 0)
heights[zeros] = top[zeros] * arr.shape[0]
return self.height - self.ceiling - heights
def reset(self, cells):
self.reset_cells(cells)
self.reset_ceiling()
self.reset_heights()
self.reset_solid_lines()
self.history[:] = []
@property
def floor(self):
return self.height - min(self.heights)
@property
def second_floor(self):
return self.height - np.partition(self.heights, 1)[1]
@property
def fourth_floor(self):
return self.height - np.partition(self.heights, 3)[3]
@property
def real_height(self):
return self.height - self.solid_lines
@memoized
def can_contain(self, placement):
return placement.is_inside((-1, self.height), (0, self.width))
def can_fit(self, placement):
return (self.can_contain(placement) and
not np.any(self.cells[placement.nonnegative_offsets]) )
def can_base(self, placement):
try:
return (placement.row >= 0 and
np.any(self.cells[1:,:][placement.nonnegative_offsets]) )
except IndexError:
return True
def can_place(self, placement):
return self.can_fit(placement) and self.can_base(placement)
def ceiling_start(self, placement):
row = self.ceiling - placement.piece.max_height
return placement._replace(row = row) if row > placement.row else placement
def local_moves(self, placement):
return ((a, p) for a, p in placement.moves if self.can_contain(p))
def moves(self, placement):
placement = self.ceiling_start(placement)
# This was tested -- but found no speedup benefit
# key = self.heights.tostring()
# try:
# return Field.move_cache[(key, placement)]
# except KeyError:
# pass
moves = []
if not self.can_fit(placement):
return moves
visited = set([placement])
queue = deque([placement])
while queue:
placement = queue.popleft()
if self.can_base(placement):
moves.append(placement)
for _, neighbor in self.local_moves(placement):
if neighbor in visited or not self.can_fit(neighbor):
continue
visited.add(neighbor)
queue.append(neighbor)
# Field.move_cache[(key, placement)] = moves
return moves
def drops(self, placement):
moves = []
placement = self.ceiling_start(placement)
for rotation in placement.piece.indexes:
for col in range(-2, self.width):
p = placement._replace(rotation = rotation, col = col)
if not self.can_fit(p):
continue
while not self.can_base(p):
p = p.down
moves.append(p)
return moves
def path(self, start_placement, end_placement):
if not self.can_fit(start_placement):
return None
def extract_path(paths, placement):
path = ['drop']
while True:
parent, action = paths[placement]
if parent is None:
break
placement = parent
if len(path) <= 1 and action == 'down':
continue
path.append(action)
path.reverse()
return path
paths = {start_placement: (None, None)}
queue = deque([start_placement])
while queue:
placement = queue.popleft()
for action, neighbor in self.local_moves(placement):
if neighbor in paths or not self.can_fit(neighbor):
continue
paths[neighbor] = (placement, action)
if neighbor == end_placement:
return extract_path(paths, end_placement)
queue.append(neighbor)
return None
def check_empty(self):
return self.ceiling == self.height - self.solid_lines
def check_tspin(self, placement):
if ( placement.piece is not Piece.T or
not (0 <= placement.row < self.height - 2 and
0 <= placement.col < self.width - 2) ):
return False
offsets = placement.tspin_offsets
empties =
|
np.where(self.cells[offsets] == 0)
|
numpy.where
|
import typing
import numpy
import numpy.random
import numpy.typing
from scipy.stats import poisson
import pytest
from .model import HybridPoissonHMMv2 as HybridPoissonHMM, neg_bin
class ModelParameters(typing.NamedTuple):
transition_matrix: numpy.typing.NDArray
signal_matrix: numpy.typing.NDArray
@pytest.fixture(scope='module')
def pinned_rng():
# scipy stats uses numpy.random directly.
numpy.random.seed(seed=43902) # mutates global state.
return None
def make_demo_model_params(n: int, delta: float, rho_min: float, rho_max: float):
"""
:param n: number of hidden states
:param delta: probability of transition from s_i to s_j , j != i
:param rho_min: emission probability for least active state (state 0)
:param rho_max: emission probability for most active state (state n-1)
:return:
"""
assert n > 0
assert 0.0 <= delta
assert delta <= 1.0
assert 0.0 <= rho_min
assert rho_min <= rho_max
assert rho_max <= 1.0
diag = numpy.eye(n, dtype=numpy.float64)
if n > 1:
transition_matrix = (delta / (n - 1.0)) * (
numpy.ones((n, n), dtype=numpy.float64) - diag) + (1.0 - delta) * diag
else:
transition_matrix = numpy.eye(n, dtype=numpy.float64)
rho = numpy.linspace(rho_min, rho_max, n, dtype=numpy.float64)
assert rho.shape == (n,)
max_k = 1
signal_matrix = numpy.zeros((max_k + 1, n))
signal_matrix[1, :] = rho
signal_matrix[0, :] = 1.0 - rho
return ModelParameters(
transition_matrix=transition_matrix,
signal_matrix=signal_matrix,
)
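# A minimal worked example of the helper above (hand-computed, illustrative
# values only): make_demo_model_params(n=2, delta=0.2, rho_min=0.1, rho_max=0.9)
# yields
#   transition_matrix = [[0.8, 0.2],
#                        [0.2, 0.8]]
#   signal_matrix     = [[0.9, 0.1],   # row 0: P(no signal count | state)
#                        [0.1, 0.9]]   # row 1: P(signal count of 1 | state)
# i.e. each state is retained with probability 1 - delta and emits a unit
# signal count with its own rho.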
def make_prior(n: int, alpha: float, beta: float):
assert n > 0
assert alpha > 0.0
assert beta > 0.0
q0 = numpy.empty((n, 3), dtype=numpy.float64)
for i in range(n):
# Uniform prior for P(state)
q0[i, 0] = 1.0 / n
# Gamma distribution for P(lambda | state)
q0[i, 1] = alpha
q0[i, 2] = beta
return q0
def make_synthetic_observations(n_times: int, rho: float, noise_rate: float):
signals = numpy.asarray(numpy.random.uniform(0.0, 1.0, n_times) < rho, dtype=int)
noise = poisson.rvs(noise_rate, size=n_times)
observations = signals + noise
return observations
def test_single_state_noise_only_model_updates_gamma_posterior(pinned_rng):
"""
Sanity check that the noise-only model updates the Gamma distribution
according to the normal conjugate-update rules for a Poisson rate under a Gamma prior.
The hidden Markov model has 1 state and doesn't emit any signal.
Any event counts in the observations are entirely due to Poisson noise.
In the approximate posterior distribution
P(lambda, s | y_{1:t}) approx q(lambda, s | y_{1:t})
we have - by assumption - the decomposition
q(lambda, s | y_{1:t}) = q(lambda | s, y_{1:t}) q(s | y_{1:t})
where the first factor q(lambda | s, y_{1:t}) is a Gamma distribution.
If the prior for q was q0(lambda, s) = q0(lambda | s) q0(s)
where q0(lambda | s) = Gamma(lambda ; alpha_0, beta_0)
then we expect the final posterior factor q(lambda | s, y_{1:t}) to be
q(lambda | s, y_{1:t}) = Gamma(lambda ; alpha_0 + sum_t y_t , beta_0 + T)
where
T is the number of observations and
sum_{t=1}^T y_t is the total observed event count.
"""
n_states = 1
params = make_demo_model_params(
n=n_states,
delta=0.0,
rho_min=0.0,
rho_max=0.0,
)
prior_alpha = 1.0
prior_beta = 0.5
q0 = make_prior(n_states, alpha=prior_alpha, beta=prior_beta)
observations = make_synthetic_observations(n_times=100, rho=0.0, noise_rate=1.0)
n_observations = len(observations)
net_event_count = numpy.sum(observations)
expected_alpha = prior_alpha + net_event_count
expected_beta = prior_beta + n_observations
model = HybridPoissonHMM(
transition_matrix=params.transition_matrix,
signal_matrix=params.signal_matrix,
)
q, log_z = model.forward(observations, q0)
final_c = q[0, 0]
final_alpha = q[0, 1]
final_beta = q[0, 2]
assert (0.0 < final_c and
numpy.isclose(expected_alpha, final_alpha) and
numpy.isclose(expected_beta, final_beta))
def test_two_independent_state_noise_only_model_updates_gamma_posterior(pinned_rng):
"""
Trivial two-state model - states cannot transition or emit event counts.
Check that the distributions of the noise rate lambda given each state are
equal and exactly match what we expect for updating parameters of Gamma
conjugate prior distribution for observations assumed to be generated by a
Poisson distribution.
"""
n_states = 2
params = make_demo_model_params(
n=n_states,
delta=0.0, # no transitions between states permitted
rho_min=0.0,
rho_max=0.0,
)
prior_alpha = 1.0
prior_beta = 0.5
q0 = make_prior(n_states, alpha=prior_alpha, beta=prior_beta)
observations = make_synthetic_observations(n_times=100, rho=0.0, noise_rate=1.0)
n_observations = len(observations)
net_event_count = numpy.sum(observations)
expected_alpha = prior_alpha + net_event_count
expected_beta = prior_beta + n_observations
model = HybridPoissonHMM(
transition_matrix=params.transition_matrix,
signal_matrix=params.signal_matrix,
)
q, log_z = model.forward(observations, q0)
final_c_state_0 = q[0, 0]
final_alpha_state_0 = q[0, 1]
final_beta_state_0 = q[0, 2]
final_c_state_1 = q[1, 0]
final_alpha_state_1 = q[1, 1]
final_beta_state_1 = q[1, 2]
assert (0.0 < final_c_state_0 and
0.0 < final_c_state_1 and
numpy.isclose(final_c_state_0, final_c_state_1) and
|
numpy.isclose(expected_alpha, final_alpha_state_0)
|
numpy.isclose
|
import os
import joblib
import cv2
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import (
predict_pb2,
prediction_service_pb2_grpc
)
import grpc
from utils import norm_mean_std
class KuzuSegment:
def __init__(self,
img_size=(512, 512),
host="localhost",
port=8500,
input_name="input_image",
output_name="pred_mask",
model_spec_name="kuzu_segment",
model_sig_name="kuzu_segment_sig",
timeout=10):
self.img_size = img_size
self.input_name = input_name
self.output_name = output_name
# init channel
self.channel = grpc.insecure_channel("{}:{}".format(host, port))
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(
self.channel
)
# Create PredictRequest ProtoBuf from image data
self.request = predict_pb2.PredictRequest()
self.request.model_spec.name = model_spec_name
self.request.model_spec.signature_name = model_sig_name
self.timeout = timeout
def load_image(self,
oimg):
if isinstance(oimg, str):
oimg = cv2.imread(oimg)[:, :, ::-1]
h, w, _ = oimg.shape
img = cv2.resize(oimg, self.img_size)
img = norm_mean_std(img)
img = np.expand_dims(img, axis=0)
return img, oimg, h, w
def _grpc_client_request(self, img):
assert img.ndim == 4
self.request.inputs[self.input_name].CopyFrom(
tf.contrib.util.make_tensor_proto(
img,
dtype=np.float32,
shape=[*img.shape] # noqa
)
)
# Call the TFServing Predict API
predict_response = self.stub.Predict(
self.request, timeout=self.timeout
)
return predict_response
def predict(self,
img,
bbox_thres=0.01,
center_thres=0.02):
# img = self.load_image(img_fp)
result = self._grpc_client_request(img)
# parse result
pred_mask = tf.contrib.util.make_ndarray(
result.outputs[self.output_name]
)
pred_mask = pred_mask[0]
pred_bbox, pred_center = pred_mask[:, :, 0], pred_mask[:, :, 1]
pred_bbox = (pred_bbox > bbox_thres).astype(np.float32)
pred_center = (pred_center > center_thres).astype(np.float32)
return pred_bbox, pred_center
class KuzuClassify:
def __init__(self,
img_size=(64, 64),
host="localhost",
port=8500,
input_name="input_image",
output_name="y_pred",
model_spec_name="kuzu_classify",
model_sig_name="kuzu_classify_sig",
timeout=10):
self.img_size = img_size
self.input_name = input_name
self.output_name = output_name
# init channel
self.channel = grpc.insecure_channel("{}:{}".format(host, port))
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(
self.channel
)
# Create PredictRequest ProtoBuf from image data
self.request = predict_pb2.PredictRequest()
self.request.model_spec.name = model_spec_name
self.request.model_spec.signature_name = model_sig_name
self.timeout = timeout
self.load_le()
def load_le(self):
le_fp = os.path.abspath("./models/le.pkl")
assert os.path.exists(le_fp)
with open(le_fp, "rb") as f:
self.le = joblib.load(f)
def deunicode(self,
codepoint):
return chr(int(codepoint[2:], 16))
def load_image(self,
char_img):
if isinstance(char_img, str):
char_img = cv2.imread(char_img)[:, :, ::-1]
char_img = norm_mean_std(char_img)
char_img = cv2.resize(char_img, (64, 64))
char_img =
|
np.expand_dims(char_img, axis=0)
|
numpy.expand_dims
|
import os
import ast
import spacy
import json
import numpy as np
import xml.etree.ElementTree as ET
from errno import ENOENT
from collections import Counter
from bert_serving.client import BertClient
import logging
from torch.utils.data import DataLoader, Dataset
# logger = logging.getLogger(__name__)
nlp = spacy.load("en_core_web_sm")
bc = BertClient()
np.set_printoptions(threshold=np.inf)  # print arrays in full (a string threshold such as 'nan' raises a TypeError on recent numpy)
#
# def load_datasets_and_vocabs(FLAGS):
# train, test = get_dataset(FLAGS.dataset_name)  # the json files are returned as lists
#
# logger.info('Train set size: %s', len(train))
# logger.info('Test set size: %s,', len(test))
#
# # Build word vocabulary(part of speech, dep_tag) and save pickles.
# word_vecs, word_vocab = load_and_cache_vocabs(
# train+test, FLAGS)
# train_dataset = ASBA_Depparsed_Dataset(
# train, FLAGS, word_vocab)
# test_dataset = ASBA_Depparsed_Dataset(
# test, FLAGS, word_vocab)
#
# return train_dataset, test_dataset, word_vocab
#
# def get_dataset(dataset_name):
# '''
# Already preprocess the data and now they are in json format.(only for semeval14)
# Retrieve train and test set
# With a list of dict:
# e.g. {"sentence": "Boot time is super fast, around anywhere from 35 seconds to 1 minute.",
# "tokens": ["Boot", "time", "is", "super", "fast", ",", "around", "anywhere", "from", "35", "seconds", "to", "1", "minute", "."],
# "tags": ["NNP", "NN", "VBZ", "RB", "RB", ",", "RB", "RB", "IN", "CD", "NNS", "IN", "CD", "NN", "."],
# "predicted_dependencies": ["nn", "nsubj", "root", "advmod", "advmod", "punct", "advmod", "advmod", "prep", "num", "pobj", "prep", "num", "pobj", "punct"],
# "predicted_heads": [2, 3, 0, 5, 3, 5, 8, 5, 8, 11, 9, 9, 14, 12, 3],
# "dependencies": [["nn", 2, 1], ["nsubj", 3, 2], ["root", 0, 3], ["advmod", 5, 4], ["advmod", 3, 5], ["punct", 5, 6], ["advmod", 8, 7], ["advmod", 5, 8],
# ["prep", 8, 9], ["num", 11, 10], ["pobj", 9, 11], ["prep", 9, 12], ["num", 14, 13], ["pobj", 12, 14], ["punct", 3, 15]],
# "aspect_sentiment": [["Boot time", "positive"]], "from_to": [[0, 2]]}
# '''
# rest_train = 'data/restaurant/Restaurants_Train_v2_biaffine_depparsed_with_energy.json'
# rest_test = 'data/restaurant/Restaurants_Test_Gold_biaffine_depparsed_with_energy.json'
#
# laptop_train = 'data/laptop/Laptop_Train_v2_biaffine_depparsed.json'
# laptop_test = 'data/laptop/Laptops_Test_Gold_biaffine_depparsed.json'
#
#
# ds_train = {'rest': rest_train,
# 'laptop': laptop_train}
# ds_test = {'rest': rest_test,
# 'laptop': laptop_test}
#
# train = list(read_sentence_depparsed(ds_train[dataset_name]))
# logger.info('# Read %s Train set: %d', dataset_name, len(train))
#
# test = list(read_sentence_depparsed(ds_test[dataset_name]))
# logger.info("# Read %s Test set: %d", dataset_name, len(test))
# return train, test
#
# def read_sentence_depparsed(file_path):
# with open(file_path, 'r') as f:
# data = json.load(f)
# return data
#
# def load_and_cache_vocabs(data, args):
#
# word_vocab = None
# word_vecs = None
# return word_vecs, word_vocab
#
# class ASBA_Depparsed_Dataset(Dataset):
# '''
# Convert examples to features, numericalize text to ids.
# data:
# -list of dict:
# keys: sentence, tags, pos_class, aspect, sentiment,
# predicted_dependencies, predicted_heads,
# from, to, dep_tag, dep_idx, dependencies, dep_dir
#
# After processing,
# data:
# sentence
# tags
# pos_class
# aspect
# sentiment
# from
# to
# dep_tag
# dep_idx
# dep_dir
# predicted_dependencies_ids
# predicted_heads
# dependencies
# sentence_ids
# aspect_ids
# tag_ids
# dep_tag_ids
# text_len
# aspect_len
# if bert:
# input_ids
# word_indexer
#
# Return from getitem:
# sentence_ids
# aspect_ids
# dep_tag_ids
# dep_dir_ids
# pos_class
# text_len
# aspect_len
# sentiment
# deprel
# dephead
# aspect_position
# if bert:
# input_ids
# word_indexer
# input_aspect_ids
# aspect_indexer
# or:
# input_cat_ids
# segment_ids
# '''
#
# def __init__(self, data, FLAGS, word_vocab):
# self.data = data
# self.word_vocab = word_vocab
#
# self.convert_features()
#
# def __len__(self):
# return len(self.data)
#
# def __getitem__(self, idx):
# e = self.data[idx]
# items = e['dep_tag_ids'], \
# e['pos_class'], e['text_len'], e['aspect_len'], e['sentiment'],\
# e['dep_rel_ids'], e['predicted_heads'], e['aspect_position'], e['dep_dir_ids']
#
# bert_items = e['input_ids'], e['word_indexer'], e['input_aspect_ids'], e['aspect_indexer'], e['input_cat_ids'], e['segment_ids']
# # segment_id
# items_tensor = tuple(torch.tensor(t) for t in bert_items)
# items_tensor += tuple(torch.tensor(t) for t in items)
# return items_tensor
#
# def convert_features_bert(self, i):
# """
# BERT features.
# convert sentence to feature.
# """
# cls_token = "[CLS]"
# sep_token = "[SEP]"
# pad_token = 0
# # tokenizer = self.args.tokenizer
#
# tokens = []
# word_indexer = []
# aspect_tokens = []
# aspect_indexer = []
#
# for word in self.data[i]['sentence']:
# word_tokens = self.args.tokenizer.tokenize(word)
# token_idx = len(tokens)
# tokens.extend(word_tokens)
# # word_indexer is for indexing after bert, feature back to the length of original length.
# word_indexer.append(token_idx)
#
# # aspect
# for word in self.data[i]['aspect']:
# word_aspect_tokens = self.args.tokenizer.tokenize(word)
# token_idx = len(aspect_tokens)
# aspect_tokens.extend(word_aspect_tokens)
# aspect_indexer.append(token_idx)
#
# # The convention in BERT is:
# # (a) For sequence pairs:
# # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# # (b) For single sequences:
# # tokens: [CLS] the dog is hairy . [SEP]
# # type_ids: 0 0 0 0 0 0 0
#
# tokens = [cls_token] + tokens + [sep_token]
# aspect_tokens = [cls_token] + aspect_tokens + [sep_token]
# word_indexer = [i+1 for i in word_indexer]
# aspect_indexer = [i+1 for i in aspect_indexer]
#
# input_ids = self.args.tokenizer.convert_tokens_to_ids(tokens)
# input_aspect_ids = self.args.tokenizer.convert_tokens_to_ids(
# aspect_tokens)
#
# # check len of word_indexer equals to len of sentence.
# assert len(word_indexer) == len(self.data[i]['sentence'])
# assert len(aspect_indexer) == len(self.data[i]['aspect'])
#
# # THE STEP:Zero-pad up to the sequence length, save to collate_fn.
#
# if self.args.pure_bert:
# input_cat_ids = input_ids + input_aspect_ids[1:]
# segment_ids = [0] * len(input_ids) + [1] * len(input_aspect_ids[1:])
#
# self.data[i]['input_cat_ids'] = input_cat_ids
# self.data[i]['segment_ids'] = segment_ids
# else:
# input_cat_ids = input_ids + input_aspect_ids[1:]
# segment_ids = [0] * len(input_ids) + [1] * len(input_aspect_ids[1:])
#
# self.data[i]['input_cat_ids'] = input_cat_ids
# self.data[i]['segment_ids'] = segment_ids
# self.data[i]['input_ids'] = input_ids
# self.data[i]['word_indexer'] = word_indexer
# self.data[i]['input_aspect_ids'] = input_aspect_ids
# self.data[i]['aspect_indexer'] = aspect_indexer
#
# def convert_features(self):
# '''
# Convert sentence, aspects, pos_tags, dependency_tags to ids.
# '''
# for i in range(len(self.data)):
# if self.args.embedding_type == 'glove':
# self.data[i]['sentence_ids'] = [self.word_vocab['stoi'][w]
# for w in self.data[i]['sentence']]
# self.data[i]['aspect_ids'] = [self.word_vocab['stoi'][w]
# for w in self.data[i]['aspect']]
# elif self.args.embedding_type == 'elmo':
# self.data[i]['sentence_ids'] = self.data[i]['sentence']
# self.data[i]['aspect_ids'] = self.data[i]['aspect']
# else: # self.args.embedding_type == 'bert'
# self.convert_features_bert(i)
#
# self.data[i]['text_len'] = len(self.data[i]['sentence'])
# self.data[i]['aspect_position'] = [0] * self.data[i]['text_len']
# try: # find the index of aspect in sentence
# for j in range(self.data[i]['from'], self.data[i]['to']):
# self.data[i]['aspect_position'][j] = 1
# except:
# for term in self.data[i]['aspect']:
# self.data[i]['aspect_position'][self.data[i]
# ['sentence'].index(term)] = 1
#
# self.data[i]['dep_tag_ids'] = [self.dep_tag_vocab['stoi'][w]
# for w in self.data[i]['dep_tag']]
# self.data[i]['dep_dir_ids'] = [idx
# for idx in self.data[i]['dep_dir']]
# self.data[i]['pos_class'] = [self.pos_tag_vocab['stoi'][w]
# for w in self.data[i]['tags']]
# self.data[i]['aspect_len'] = len(self.data[i]['aspect'])
#
# self.data[i]['dep_rel_ids'] = [self.dep_tag_vocab['stoi'][r]
# for r in self.data[i]['predicted_dependencies']]
#####################################################################
# def trans(p):
# words = [] # create an empty list
# index = 0 # iterate over all the characters
# start = 0 # record the start position of each word
# while index < len(p): # while index is less than the length of p
# start = index # use start to record the position
# while p[index] != " " and p[index] not in [".", ","]: # if it is not a space, period, or comma
# index += 1 # increment index by one
# if index == len(p): # if the traversal is finished
# break # stop
# words.append(p[start:index])
# if index == len(p):
# break
# while p[index] == " " or p[index] in [".", ","]:
# if p[index] in [".", ","]:
# words.append(p[index:index+1])
# index += 1
# if index == len(p):
# break
# return words
def get_inputs(sentence, aspects, tokenizer):
"""
BERT features.
convert sentence to feature.
"""
cls_token = "[CLS]"
sep_token = "[SEP]"
pad_token = 0
# tokenizer = self.args.tokenizer
tokens = []
word_indexer = []
aspect_tokens = []
aspect_indexer = []
for word in sentence:
word_tokens = tokenizer.tokenize(word)
token_idx = len(tokens)
tokens.extend(word_tokens)
# word_indexer is for indexing after BERT, mapping features back to the original sentence length.
word_indexer.append(token_idx)
n = len(tokens)
# aspect
for word in aspects:
word_aspect_tokens = tokenizer.tokenize(word)
token_idx = n+len(aspect_tokens)
aspect_tokens.extend(word_aspect_tokens)
aspect_indexer.append(token_idx)
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
tokens = [cls_token] + tokens + [sep_token]
aspect_tokens = [cls_token] + aspect_tokens + [sep_token]
word_indexer = [i + 1 for i in word_indexer]
aspect_indexer = [i + 2 for i in aspect_indexer]
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_aspect_ids = tokenizer.convert_tokens_to_ids(
aspect_tokens)
# check len of word_indexer equals to len of sentence.
assert len(word_indexer) == len(sentence)
assert len(aspect_indexer) == len(aspects)
# THE STEP:Zero-pad up to the sequence length, save to collate_fn.
input_cat_ids = input_ids + input_aspect_ids[1:]
segment_ids = [0] * len(input_ids) + [1] * len(input_aspect_ids[1:])
return input_cat_ids, input_aspect_ids, segment_ids, word_indexer, aspect_indexer
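# A small worked example of get_inputs (hypothetical toy input, assuming every
# word maps to exactly one wordpiece):
#   sentence = ["the", "dog"], aspects = ["dog"]
#   tokens         -> [CLS] the dog [SEP]
#   aspect_tokens  -> [CLS] dog [SEP]
#   input_cat_ids  -> ids of "[CLS] the dog [SEP] dog [SEP]"
#   segment_ids    -> [0, 0, 0, 0, 1, 1]
#   word_indexer   -> [1, 2]   # "the", "dog" after the leading [CLS]
#   aspect_indexer -> [4]      # the aspect "dog" inside the concatenated ids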
def get_data_info(train_fname, test_fname, save_fname, pre_processed):
word2id, max_sentence_len, max_aspect_len = {}, 0, 0
word2id['<pad>'] = 0
if pre_processed:
if not os.path.isfile(save_fname):
raise IOError(ENOENT, 'Not a file', save_fname)
with open(save_fname, 'r') as f:
for line in f:
content = line.strip().split()
if len(content) == 3:
max_sentence_len = int(content[1])
max_aspect_len = int(content[2])
else:
word2id[content[0]] = int(content[1])
else:
if not os.path.isfile(train_fname):
raise IOError(ENOENT, 'Not a file', train_fname)
if not os.path.isfile(test_fname):
raise IOError(ENOENT, 'Not a file', test_fname)
words = []
train_tree = ET.parse(train_fname)
train_root = train_tree.getroot()
for sentence in train_root:
sptoks = nlp(sentence.find('text').text)
words.extend([sp.text.lower() for sp in sptoks])
if len(sptoks) > max_sentence_len:
max_sentence_len = len(sptoks)
for asp_terms in sentence.iter('aspectTerms'):
for asp_term in asp_terms.findall('aspectTerm'):
if asp_term.get('polarity') == 'conflict':
continue
t_sptoks = nlp(asp_term.get('term'))
if len(t_sptoks) > max_aspect_len:
max_aspect_len = len(t_sptoks)
word_count = Counter(words).most_common()
for word, _ in word_count:
if word not in word2id and ' ' not in word:
word2id[word] = len(word2id)
test_tree = ET.parse(test_fname)
test_root = test_tree.getroot()
for sentence in test_root:
sptoks = nlp(sentence.find('text').text)
words.extend([sp.text.lower() for sp in sptoks])
if len(sptoks) > max_sentence_len:
max_sentence_len = len(sptoks)
for asp_terms in sentence.iter('aspectTerms'):
for asp_term in asp_terms.findall('aspectTerm'):
if asp_term.get('polarity') == 'conflict':
continue
t_sptoks = nlp(asp_term.get('term'))
if len(t_sptoks) > max_aspect_len:
max_aspect_len = len(t_sptoks)
word_count = Counter(words).most_common()
for word, _ in word_count:
if word not in word2id and ' ' not in word:
word2id[word] = len(word2id)
with open(save_fname, 'w') as f:
f.write('length %s %s\n' % (max_sentence_len, max_aspect_len))
for key, value in word2id.items():
f.write('%s %s\n' % (key, value))
print('There are %s words in the dataset, the max length of sentence is %s, and the max length of aspect is %s' % (len(word2id), max_sentence_len, max_aspect_len))
return word2id, max_sentence_len, max_aspect_len
def get_loc_info(sptoks, from_id, to_id):
aspect = []
for sptok in sptoks:
if sptok.idx < to_id and sptok.idx + len(sptok.text) > from_id:
aspect.append(sptok.i)
loc_info = []
for _i, sptok in enumerate(sptoks):
loc_info.append(min([abs(_i - i) for i in aspect]) / len(sptoks))
return loc_info
def get_inputs2(tokens_, aspect_tokens_, s, a):
# s = 0
# a = 0
doc_vecs = bc.encode([tokens_,aspect_tokens_])
# for i in doc_vecs[0]:
# if np.all(i == 0):
# break
# else:
# s = s + 1
# for j in doc_vecs[1]:
# if np.all(j == 0):
# break
# else:
# a = a + 1
sentence =
|
np.random.normal(0, 0.05, [s, 768])
|
numpy.random.normal
|
import os
import urllib
import gzip
import numpy as np
import tensorflow as tf
from menpo.image import Image
from menpo.visualize import print_dynamic
from digitrecognition.base import src_dir_path
# MNIST url
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
# MNIST filenames
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
def download(filename, verbose=False):
r"""
Method that downloads the provided filename from SOURCE_URL and
stores it in the data path, if it doesn't already exist.
Parameters
----------
filename : `str`
The filename to download.
verbose : `bool`, optional
If `True`, then the progress will be printed.
Returns
-------
file_path : `pathlib.PosixPath`
The path where the file was stored.
"""
if verbose:
print_dynamic('Downloading {}'.format(filename))
# Path to store data
data_path = src_dir_path() / 'data'
# Check if data path exists, otherwise create it
if not os.path.isdir(str(data_path)):
os.makedirs(str(data_path))
# Check if file exists
file_path = data_path / filename
if not os.path.isfile(str(file_path)):
# It doesn't exist, so download it
urllib.request.urlretrieve(SOURCE_URL + filename,
filename=str(file_path))
# Return the path where the file is stored
return file_path
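# A usage sketch (illustrative; the exact data directory depends on
# src_dir_path()):
#   path = download(TRAIN_IMAGES, verbose=True)
#   # -> <src_dir>/data/train-images-idx3-ubyte.gz, fetched only if missing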
def _read32(bytestream):
r"""
Read bytes as 32-bit integers.
Parameters
----------
bytestream : `bytes`
The bytes to read.
Returns
-------
array : `array`
The 32-bit int data.
"""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
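# The dtype above forces big-endian byte order, so for example the four bytes
# b'\x00\x00\x08\x03' decode to 2051 -- the magic number expected at the start
# of an MNIST image file (checked in extract_images below).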
def extract_images(filename, as_images=False, verbose=False):
r"""
Extract images from gzip file.
Parameters
----------
filename : `pathlib.PosixPath`
The gzip file path.
as_images : `bool`, optional
If `True`, then the method returns a list containing a
`menpo.image.Image` object per image. If `False`, then it
returns a numpy array of shape `(n_images, height, width, n_channels)`.
verbose : `bool`, optional
If `True`, then the progress will be printed.
Returns
-------
images : `list` or `array`
The image data.
"""
if verbose:
print_dynamic('Extracting {}'.format(filename))
with open(str(filename), 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols, 1)
# Convert data array to list of menpo.image.Image, if required
if as_images:
return [Image(data[i, :, :, 0]) for i in range(data.shape[0])]
return data
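# A usage sketch (illustrative; shapes assume the standard MNIST training set):
#   data = extract_images(download(TRAIN_IMAGES))
#   # -> uint8 array of shape (60000, 28, 28, 1)
#   imgs = extract_images(download(TRAIN_IMAGES), as_images=True)
#   # -> list of 60000 menpo.image.Image objects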
def _convert_dense_to_one_hot(labels_dense):
r"""
Method that converts an array of labels to one-hot labels.
Parameters
----------
labels_dense : `array`
An `(n_images,)` array with an integer label per image.
Returns
-------
labels : `array`
An `(n_images, n_labels)` array with the one-hot labels.
"""
# Get number of labels and classes
num_labels = labels_dense.shape[0]
num_classes = labels_dense.max() + 1
# Create binary one-hot indicator
index_offset = np.arange(num_labels) * num_classes
labels_one_hot =
|
np.zeros((num_labels, num_classes))
|
numpy.zeros
|
import inspect
import copy
import numpy as np
import tensorflow as tf
from scipy import linalg
from sklearn.metrics import pairwise
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from adapt.utils import get_default_discriminator, check_sample_weight
from tensorflow.keras.optimizers import Adam
EPS = np.finfo(float).eps
def _estimator_predict(estimator, Xs, Xt, X):
if hasattr(estimator, "transform"):
args = [
p.name
for p in inspect.signature(estimator.transform).parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
if "domain" in args:
Xt = estimator.transform(Xt, domain="tgt")
Xs = estimator.transform(Xs, domain="src")
else:
Xt = estimator.transform(Xt)
Xs = estimator.transform(Xs)
elif hasattr(estimator, "predict_weights"):
sample_weight = estimator.predict_weights()
if len(X) != len(sample_weight):
sample_weight = np.ones(len(X))
sample_weight = check_sample_weight(sample_weight, X)
sample_weight /= sample_weight.sum()
bootstrap_index = np.random.choice(
len(X), size=len(X), replace=True, p=sample_weight)
Xs = X[bootstrap_index]
else:
raise ValueError("The Adapt model should implement"
" a transform or predict_weights methods")
return Xs, Xt
def _fit_alpha(Xs, Xt, centers, sigma):
"""
Fit alpha coefficients to compute the J-score.
"""
A = pairwise.rbf_kernel(Xt, centers, sigma)
b = np.mean(pairwise.rbf_kernel(centers, Xs, sigma), axis=1)
b = b.reshape(-1, 1)
alpha = np.ones((len(centers), 1)) / len(centers)
previous_objective = -np.inf
objective = np.mean(np.log(np.dot(A, alpha) + EPS))
k = 0
while k < 5000 and objective-previous_objective > 1e-6:
previous_objective = objective
alpha_p = np.copy(alpha)
alpha += 1e-4 * np.dot(
np.transpose(A), 1./(np.dot(A, alpha) + EPS)
)
alpha += b * ((((1-np.dot(np.transpose(b), alpha)) /
(np.dot(np.transpose(b), b) + EPS))))
alpha = np.maximum(0, alpha)
alpha /= (np.dot(np.transpose(b), alpha) + EPS)
objective = np.mean(np.log(np.dot(A, alpha) + EPS))
k += 1
return alpha
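# The loop above appears to perform projected gradient ascent on
# mean(log(A @ alpha)) subject to alpha >= 0 and b.T @ alpha = 1, i.e. a
# KLIEP-style density-ratio fit; both constraints are re-imposed after every
# gradient step.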
def make_uda_scorer(func, Xs, Xt, greater_is_better=False, **kwargs):
"""
Make a scorer function from an adapt metric.
The goal of an adapt metric is to measure the closeness between
a source input dataset `Xs` and a target input dataset `Xt`.
If `Xs` is close to `Xt`, it can be expected that a good
model trained on source will perform well on target.
The returned score function will apply `func` on
a transformation of `Xs` and `Xt` given to `make_uda_scorer`.
If the estimator given in the score function is a
feature-based method, the metric will be applied
on the encoded `Xs` and `Xt`. If the estimator is instead an
instance-based method, a weighted bootstrap sample of `Xs`
will be compared to `Xt`.
**IMPORTANT NOTE** : when the returned score function is used
with ``GridSearchCV`` from sklearn, the parameter
``return_train_score`` must be set to ``True``.
The adapt score then corresponds to the train scores.
Parameters
----------
func : callable
Adapt metric with signature
``func(Xs, Xt, **kwargs)``.
Xs : array
Source input dataset
Xt : array
Target input dataset
greater_is_better : bool, default=False
Whether the best outputs of ``func`` are the greatest
or the lowest. For all adapt metrics, low values
mean closeness between Xs and Xt.
kwargs : key, value arguments
Parameters given to ``func``.
Returns
-------
scorer : callable
A scorer function with signature
``scorer(estimator, X, y_true=None)``.
The scorer function transforms the parameters
`Xs` and `Xt` with the given ``estimator``.
Then it returns ``func(Xst, Xtt)`` with `Xst`
and `Xtt` the transformed data.
Notes
-----
When the returned score function is used
with ``GridSearchCV`` from sklearn, the parameter
``return_train_score`` must be set to ``True``.
The adapt score then corresponds to the train scores.
"""
def scorer(estimator, X, y_true=None):
"""
Scorer function for unsupervised domain adaptation.
        For feature-based methods, the scorer will apply the
        ``transform`` method of the fitted ``estimator``
        to the parameters `Xs` and `Xt` given when building the scorer.
        Then it computes a metric between the two transformed
        datasets.
        For instance-based methods, a weighted bootstrap of
        the input parameter `X` is performed with the weights returned
        by the ``predict_weights`` method of the fitted ``estimator``.
        Then it computes a metric between the bootstrapped `X` and `Xt`.
**IMPORTANT NOTE** : when scorer is used
with ``GridSearchCV`` from sklearn, the parameter
``return_train_score`` must be set to ``True``.
The adapt score then corresponds to the train scores.
Parameters
----------
estimator : Adapt estimator
        A fitted adapt estimator which should implement
a ``predict_weights`` or ``transform`` method.
X : array
Input source data
y_true : array (default=None)
Not used. Here for compatibility with sklearn.
Notes
-----
When scorer is used with ``GridSearchCV`` from sklearn,
the parameter ``return_train_score`` must be set to ``True``.
The adapt score then corresponds to the train scores.
"""
nonlocal Xs
nonlocal Xt
nonlocal greater_is_better
nonlocal kwargs
Xs, Xt = _estimator_predict(estimator, Xs=Xs, Xt=Xt, X=X)
score = func(Xs, Xt, **kwargs)
if not greater_is_better:
score *= -1
return score
return scorer
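# Illustrative usage sketch (not part of the original module): how the returned
# scorer is typically plugged into sklearn's GridSearchCV. `adapt_model`,
# `param_grid`, `Xs`, `ys` and `Xt` are caller-provided assumptions; any adapt
# feature-based or instance-based estimator could be used here.
def _demo_make_uda_scorer(adapt_model, param_grid, Xs, ys, Xt):
    from sklearn.model_selection import GridSearchCV
    scorer = make_uda_scorer(cov_distance, Xs, Xt)
    search = GridSearchCV(adapt_model, param_grid=param_grid,
                          scoring=scorer, return_train_score=True, cv=3)
    search.fit(Xs, ys)
    # as noted in the docstring, the adapt score is reported in the *train* scores
    return search.cv_results_["mean_train_score"]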
def cov_distance(Xs, Xt):
"""
Compute the mean absolute difference
    between the covariance matrices of Xs and Xt
Parameters
----------
Xs : array
Source array
Xt : array
Target array
Returns
-------
score : float
See also
--------
frechet_distance
CORAL
References
----------
.. [1] `[1] <https://arxiv.org/pdf/1511.05547.pdf>`_ <NAME>., <NAME>., <NAME>. \
"Return of frustratingly easy domain adaptation". In AAAI, 2016.
"""
cov_Xs = np.cov(Xs, rowvar=False)
cov_Xt = np.cov(Xt, rowvar=False)
return np.mean(np.abs(cov_Xs-cov_Xt))
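# Illustrative sketch (not part of the original module): cov_distance is small for
# two samples drawn from the same distribution and grows when the covariance
# structures differ. The toy Gaussian samples are assumptions for this example.
def _demo_cov_distance():
    rng = np.random.RandomState(0)
    Xs_demo = rng.normal(size=(500, 3))
    Xt_same = rng.normal(size=(500, 3))
    Xt_scaled = 3.0 * rng.normal(size=(500, 3))     # inflated covariance
    assert cov_distance(Xs_demo, Xt_same) < cov_distance(Xs_demo, Xt_scaled)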
def frechet_distance(Xs, Xt):
"""
    Compute the Fréchet distance
between Xs and Xt.
.. math::
\\Delta = ||\\mu_S - \\mu_T||_2^2 + Tr\\left(\\Sigma_S + \\Sigma_T
- 2 (\\Sigma_S \\cdot \\Sigma_T)^{\\frac{1}{2}} \\right)
Where:
- :math:`\\mu_S, \\mu_T` are the mean of Xs, Xt along first axis.
- :math:`\\Sigma_S, \\Sigma_T` are the covariance matrix of Xs, Xt.
Parameters
----------
Xs : array
Source array
Xt : array
Target array
Returns
-------
score : float
See also
--------
normalized_frechet_distance
linear_discrepancy
normalized_linear_discrepancy
References
----------
.. [1] `[1] <https://www.sciencedirect.com/science/article/pii/00\
47259X8290077X?via%3Dihub>`_ <NAME>; <NAME>. "The Fréchet \
distance between multivariate normal distributions". JMVA. 1982
"""
mu1 = np.mean(Xs, axis=0)
sigma1 = np.cov(Xs, rowvar=False)
mu2 = np.mean(Xt, axis=0)
sigma2 = np.cov(Xt, rowvar=False)
ssdiff = np.sum((mu1 - mu2)**2.0)
product = np.array(sigma1.dot(sigma2))
if product.ndim < 2:
product = product.reshape(-1, 1)
covmean = linalg.sqrtm(product)
if np.iscomplexobj(covmean):
covmean = covmean.real
return ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)
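# Illustrative sketch (not part of the original module): the Fréchet distance is
# numerically zero when a sample is compared with itself and grows with a mean
# shift. The shift of 2.0 per feature is an arbitrary choice for this example.
def _demo_frechet_distance():
    rng = np.random.RandomState(0)
    Xs_demo = rng.normal(size=(300, 2))
    Xt_shifted = Xs_demo + 2.0
    d_self = frechet_distance(Xs_demo, Xs_demo)       # ~0 up to numerical error
    d_shift = frechet_distance(Xs_demo, Xt_shifted)   # ~ ||mu_S - mu_T||^2 = 8
    assert d_self < 1e-6 and d_shift > 1.0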
def linear_discrepancy(Xs, Xt, power_method=False, n_iter=20):
"""
Compute the linear discrepancy
between Xs and Xt.
.. math::
\\Delta = \\max_{u \\in \\mathbb{R}^p} u^T (X_S^T X_S - X_T^T X_T) u
Where:
- :math:`p` is the number of features of Xs and Xt.
Parameters
----------
Xs : array
Source array
Xt : array
Target array
power_method : bool (default=False)
        Whether to use the power method
        approximation or not.
    n_iter : int (default=20)
        Number of iterations for the power method
Returns
-------
score : float
See also
--------
normalized_linear_discrepancy
frechet_distance
normalized_frechet_distance
References
----------
.. [1] `[1] <https://arxiv.org/pdf/0902.3430.pdf>`_ \
<NAME>, <NAME>, and <NAME>. "Domain \
adaptation: Learning bounds and algorithms". In COLT, 2009.
"""
M = (1/len(Xs)) * np.dot(
|
np.transpose(Xs)
|
numpy.transpose
|
#!/usr/bin/env python
# -*- np -*-
import collections
import math
import numpy as np
import skimage
import skimage.filters
import scipy.ndimage.filters
SimilarityMask = collections.namedtuple(
"SimilarityMask", ["size", "color", "texture", "fill"])
class Features:
def __init__(self,
image,
label,
n_region,
similarity_weight=SimilarityMask(1, 1, 1, 1)):
self.image = image
self.label = label
self.w = similarity_weight
self.imsize = float(label.shape[0] * label.shape[1])
self.size = self.__init_size(n_region)
self.color = self.__init_color(n_region)
self.bbox = self.__init_bounding_box(n_region)
self.texture = self.__init_texture(n_region)
def __init_size(self, n_region):
bincnt = np.bincount(self.label.ravel(), minlength=n_region)
return {i: bincnt[i] for i in range(n_region)}
def __init_color(self, n_region):
n_bin = 25
bin_width = int(math.ceil(255.0 / n_bin))
bins_color = [i * bin_width for i in range(n_bin + 1)]
bins_label = range(n_region + 1)
bins = [bins_label, bins_color]
r_hist = np.histogram2d(self.label.ravel(),
self.image[:, :, 0].ravel(),
bins=bins)[0] #shape=(n_region, n_bin)
g_hist = np.histogram2d(self.label.ravel(),
self.image[:, :, 1].ravel(),
bins=bins)[0]
b_hist = np.histogram2d(self.label.ravel(),
self.image[:, :, 2].ravel(),
bins=bins)[0]
hist = np.hstack([r_hist, g_hist, b_hist])
l1_norm = np.sum(hist, axis=1).reshape((n_region, 1))
hist = np.nan_to_num(hist / l1_norm)
return {i: hist[i] for i in range(n_region)}
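    # Illustrative sketch (not part of the original class): how np.histogram2d
    # builds one per-region histogram per colour channel, as done in
    # __init_color above. The tiny 2x2 label and channel arrays are assumptions
    # made purely for this example.
    @staticmethod
    def _demo_color_histogram():
        label = np.array([[0, 0], [1, 1]])           # two regions
        channel = np.array([[10, 200], [30, 40]])    # one colour channel
        bins = [range(2 + 1), [0, 128, 256]]         # region edges x 2 colour bins
        hist = np.histogram2d(label.ravel(), channel.ravel(), bins=bins)[0]
        # hist[r, b] counts the pixels of region r whose value falls in colour bin b
        return hist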
def __init_bounding_box(self, n_region):
bbox = dict()
for region in range(n_region):
I, J = np.where(self.label == region)
bbox[region] = (min(I), min(J), max(I), max(J))
return bbox
def __init_texture(self, n_region):
ar = np.ndarray((n_region, 240))
return {i: ar[i] for i in range(n_region)}
def __calc_gradient_histogram(self,
label,
gaussian,
n_region,
nbins_orientation=8,
nbins_inten=10):
op = np.array([[-1, 0, 1]], dtype=np.float32)
h = scipy.ndimage.filters.convolve(gaussian, op)
v = scipy.ndimage.filters.convolve(gaussian, op.transpose())
g = np.arctan2(v, h)
# define each axis for texture histogram
bin_width = 2 * math.pi / 8
bins_label = range(n_region + 1)
bins_angle = np.linspace(-math.pi, math.pi, nbins_orientation + 1)
bins_inten =
|
np.linspace(.0, 1., nbins_inten + 1)
|
numpy.linspace
|
import numpy as np
import pytest
from resqpy.grid import Grid
import resqpy.grid as grr
from resqpy.model import Model
import resqpy.grid._cell_properties as cp
import resqpy.property.grid_property_collection as gpc
def test_thickness_array_thickness_already_set(basic_regular_grid: Grid):
# Arrange
extent = basic_regular_grid.extent_kji
array_thickness = np.random.random(extent)
basic_regular_grid.array_thickness = array_thickness # type: ignore
# Act
thickness = cp.thickness(basic_regular_grid)
# Assert
np.testing.assert_array_almost_equal(thickness, array_thickness)
def test_thickness_array_thickness_already_set_cell_kji0(basic_regular_grid: Grid):
# Arrange
extent = basic_regular_grid.extent_kji
array_thickness = np.random.random(extent)
basic_regular_grid.array_thickness = array_thickness # type: ignore
cell_kji0 = (1, 1, 1)
# Act
thickness = cp.thickness(basic_regular_grid, cell_kji0 = cell_kji0)
# Assert
assert thickness == array_thickness[cell_kji0]
def test_thickness_faulted_grid(faulted_grid: Grid):
# Arrange
expected_thickness = np.array([[[20., 20., 20., 20., 20., 20., 20., 20.], [20., 20., 20., 20., 20., 20., 20., 20.],
[20., 20., 20., 20., 20., 20., 20., 20.], [20., 20., 20., 20., 20., 20., 20., 20.],
[20., 20., 20., 20., 20., 20., 20., 20.]],
[[20., 20., 20., 20., 20., 20., 20., 20.], [20., 20., 20., 20., 20., 20., 20., 20.],
[20., 20., 20., 20., 20., 20., 20., 20.], [20., 20., 20., 20., 20., 20., 20., 20.],
[20., 20., 20., 20., 20., 20., 20., 20.]],
[[10., 10., 5., 0., 0., 5., 10., 10.], [10., 10., 5., 0., 0., 5., 10., 10.],
[10., 10., 5., 0., 0., 5., 10., 10.], [10., 10., 5., 0., 0., 5., 10., 10.],
[10., 10., 5., 0., 0., 5., 10., 10.]]])
# Act
thickness = cp.thickness(faulted_grid)
# Assert
np.testing.assert_array_almost_equal(thickness, expected_thickness)
def test_thickness_blank_property_collection(basic_regular_grid: Grid):
# Arrange
property_collection = gpc.GridPropertyCollection()
# Act
thickness = cp.thickness(basic_regular_grid, property_collection = property_collection)
# Assert
assert thickness is None
def test_thickness_property_collection(example_model_with_properties: Model):
# Arrange
grid = example_model_with_properties.grid()
extent = grid.extent_kji
property_collection = grid.property_collection
thickness_array = np.random.random(extent)
property_collection.add_cached_array_to_imported_list(thickness_array,
'test data',
'DZ',
False,
uom = grid.z_units(),
property_kind = 'cell length',
facet_type = 'direction',
indexable_element = 'cells',
facet = 'K')
property_collection.write_hdf5_for_imported_list()
property_collection.create_xml_for_imported_list_and_add_parts_to_model()
if hasattr(grid, 'array_thickness'):
delattr(grid, 'array_thickness')
# Act
thickness = cp.thickness(grid, property_collection = property_collection)
# Assert
np.testing.assert_array_almost_equal(thickness, thickness_array)
def test_thickness_multiple_property_collection(example_model_with_properties: Model):
# Arrange
grid = example_model_with_properties.grid()
extent = grid.extent_kji
property_collection = grid.property_collection
thickness_array_gross = np.random.random(extent)
property_collection.add_cached_array_to_imported_list(thickness_array_gross,
'test data',
'DZ',
False,
uom = grid.z_units(),
property_kind = 'thickness',
facet_type = 'netgross',
indexable_element = 'cells',
facet = 'gross')
thickness_array_net = np.random.random(extent) / 2
property_collection.add_cached_array_to_imported_list(thickness_array_net,
'test data',
'DZ',
False,
uom = grid.z_units(),
property_kind = 'thickness',
facet_type = 'netgross',
indexable_element = 'cells',
facet = 'net')
property_collection.write_hdf5_for_imported_list()
property_collection.create_xml_for_imported_list_and_add_parts_to_model()
if hasattr(grid, 'array_thickness'):
delattr(grid, 'array_thickness')
# Act
thickness = cp.thickness(grid, property_collection = property_collection)
# Assert
np.testing.assert_array_almost_equal(thickness, thickness_array_gross)
def test_thickness_from_points(example_model_with_properties: Model):
# Arrange
grid = example_model_with_properties.grid()
if hasattr(grid, 'array_thickness'):
delattr(grid, 'array_thickness')
if hasattr(grid, 'property_collection'):
delattr(grid, 'property_collection')
# Act
thickness = cp.thickness(grid)
# Assert
np.testing.assert_array_almost_equal(thickness, 20.0)
def test_volume_array_volume_already_set(basic_regular_grid: Grid):
# Arrange
extent = basic_regular_grid.extent_kji
array_volume = np.random.random(extent)
basic_regular_grid.array_volume = array_volume # type: ignore
# Act
volume = cp.volume(basic_regular_grid)
# Assert
np.testing.assert_array_almost_equal(volume, array_volume)
def test_volume_array_volume_already_set_cell_kji0(basic_regular_grid: Grid):
# Arrange
extent = basic_regular_grid.extent_kji
array_volume = np.random.random(extent)
basic_regular_grid.array_volume = array_volume # type: ignore
cell_kji0 = (1, 1, 1)
# Act
volume = cp.volume(basic_regular_grid, cell_kji0 = cell_kji0)
# Assert
assert volume == array_volume[cell_kji0]
def test_volume_faulted_grid(faulted_grid: Grid):
# Arrange
expected_volume = np.array([[[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.]],
[[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.],
[200000., 200000., 200000., 200000., 200000., 200000., 200000., 200000.]],
[[100000., 100000., 50000., 0., 0., 50000., 100000., 100000.],
[100000., 100000., 50000., 0., 0., 50000., 100000., 100000.],
[100000., 100000., 50000., 0., 0., 50000., 100000., 100000.],
[100000., 100000., 50000., 0., 0., 50000., 100000., 100000.],
[100000., 100000., 50000., 0., 0., 50000., 100000., 100000.]]])
# Act
volume = cp.volume(faulted_grid)
# Assert
np.testing.assert_array_almost_equal(volume, expected_volume)
def test_volume_blank_property_collection(basic_regular_grid: Grid):
# Arrange
property_collection = gpc.GridPropertyCollection()
# Act
volume = cp.volume(basic_regular_grid, property_collection = property_collection)
# Assert
assert volume is None
def test_volume_property_collection(example_model_with_properties: Model):
# Arrange
grid = example_model_with_properties.grid()
extent = grid.extent_kji
property_collection = grid.property_collection
volume_array =
|
np.random.random(extent)
|
numpy.random.random
|
# -------- energy in eV, temperature in K
# assume every variable starting with a-h and o-z are real numbers
# common block named comcon
from __future__ import division
import sys
import gzip
import os
from os import walk
import subprocess
import math
import copy
import json
import pickle
import numpy as np
from scipy.constants import physical_constants
import scipy.constants as scipy_constants
from scipy.optimize import brentq, curve_fit
from scipy.integrate import cumtrapz, trapz, simps
from scipy.interpolate import interp1d, splev, splrep, BSpline
from scipy.integrate import quadrature
from scipy.interpolate import UnivariateSpline
from atomate.vasp.database import VaspCalcDb
from pymatgen.core import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
import dfttk.pyphon as ywpyphon
from dfttk.utils import sort_x_by_y
from dfttk.analysis.ywplot import myjsonout
from dfttk.analysis.ywutils import get_rec_from_metatag, get_used_pot
from dfttk.analysis.ywutils import formula2composition, reduced_formula, MM_of_Elements
import warnings
k_B = physical_constants['Boltzmann constant in eV/K'][0]
def substr(str1, str2, pos):
try:
if str1.index(str2)==pos:
#print("idx=",str1.index(str2))
return True
else:
return False
except ValueError:
return False
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def isint(value):
try:
int(value)
return True
except ValueError:
return False
# this is a FORTRAN function (e.g. 1 return value)
def pregetdos(f): # Line 186
"""
    Read a DOS file; the code can also handle WIEN2k DOS in units of eV
Parameters
----------
f : file descriptor for the DOS file
Returns
-------
    edn : lower energy to integrate over?
    eup : higher energy to integrate over?
    vde : band energy interval
    e (array): band energy mesh; the Fermi energy has been shifted to zero
DOS (array) : e dos
"""
# read the file
    lines = f.readlines() # read in all lines, then determine whether it is a WIEN2k DOS file (in units of eV) or a VASP DOS file
    # now the first line should be the one with the data; pull it out into its own variable
tmp = lines[0]
if substr(tmp,"# BAND", 0):
tmp = lines[1]
tmp1 = lines[2]
if substr(tmp, "#EF=",0) and substr(tmp1, "# ENERGY",0):
tmp1 = tmp[31:43].replace("NENRG=","")
if isint(tmp1):
n_dos = int(tmp1)
tmp = lines[2]
lines = lines[3:n_dos+3]
wienEdos = np.zeros(n_dos)
ve = np.zeros(n_dos)
for i, l in enumerate(lines):
split_l = l.split(' ')
split_l = [k for k in split_l if k != '']
ve[i], wienEdos[i] = (float(split_l[0]), float(split_l[1]))
edn = ve[0]
eup = ve[n_dos-1]
ve = np.linspace(edn, eup, n_dos)
vde = (eup - edn)/(n_dos-1) # This appears to be the change of v per electron, so what is v? Voltage in eV?
return edn, eup, vde, ve, wienEdos
tmp = lines[5]
data_line = tmp[0:32].split(' ') #n_dos >10000, no space left before it in VASP
data_line.extend(tmp[32:].split(' '))
# filter out empty spaces
data_line = [k for k in data_line if k != '']
#print (data_line)
eup, edn, n_dos, eFermi = (float(data_line[0]),
float(data_line[1]),
int(data_line[2]),
float(data_line[3])) # we're leaving the last number behind
lines = lines[6:n_dos+6]
# line 197 goes to line 209
eup = eup - eFermi
edn = edn - eFermi
vde = (eup - edn)/(n_dos-1) # This appears to be the change of v per electron, so what is v? Voltage in eV?
# vectors
ve = np.linspace(edn, eup, n_dos)
vaspEdos = np.zeros(n_dos)
for i, l in enumerate(lines):
# why do we need to do this?
split_l = l.split(' ')
# filter again
split_l = [k for k in split_l if k != '']
if len(split_l)>=5: #spin polarized
t, vaspEdos[i], y, vdos, x = (float(split_l[0]), float(split_l[1]), float(split_l[2]), float(split_l[3]), float(split_l[4]))
vaspEdos[i] += y
else:
t, vaspEdos[i], vdos = (float(split_l[0]), float(split_l[1]), float(split_l[2]))
_eFermi = CBMtoVBM(ve, vaspEdos)
return edn-_eFermi, eup-_eFermi, vde, ve-_eFermi, vaspEdos
def CBMtoVBM(ve, vaspEdos):
# move eFermi to VBM if it in CBM
vde = ve[1] - ve[0]
for i, dos in enumerate(vaspEdos):
if ve[i] >= -vde: break
if dos!=0.0:
_eFermi = ve[i]
if _eFermi < -3*vde:
print ("Fermi energy shifted from CBM", 0.0, "to VBM", _eFermi)
return _eFermi+vde
else: return 0.0
def getdos(xdn, xup, dope, NEDOS, gaussian, edn, eup, vde, ve, tdos): # Line 186
"""
    Parameters
    ----------
    xdn : float
        Minimum energy for integration
    xup : float
        Maximum energy for integration
    dope : float
        Number of electrons to dope (negative means to remove electrons, positive means add electrons)
    NEDOS : int
        Number of DOS points in the refined energy/density grid
    gaussian : float
        Gaussian parameter used to refine the grid mesh around the Fermi energy
    edn, eup : float
        Lower and upper bounds of the original band energy mesh
    vde : float
        Band energy interval of the original mesh
    ve : array
        Original band energy mesh
    tdos : array
        Original electronic DOS
    Returns
    -------
    tuple
        (NELECTRONS, E0, dF, e, dos, eBoF): the number of electrons, the 0 K band
        energy, the Fermi level shift due to doping, the refined energy mesh, the
        refined DOS, and the bottom of the conduction band.
"""
for i,energy in enumerate(ve):
if energy <-15.0 and tdos[i]==0.0:
xdn = energy
n_dos = len(tdos)
idx = closest(ve,0.0)
for i in range(idx,n_dos):
if tdos[i]!=0.0:
iBoF = i-1
break
eBoF = -1.0
if iBoF>=idx:
eBoF = ve[iBoF]
espr = tdos[iBoF+2]-tdos[iBoF+1]
if espr>0.0:
espr = tdos[iBoF+1]/espr*vde
if (espr < vde):
eBoF = ve[iBoF+1] - espr
#print("eBoF=", eBoF)
xdn = max(xdn,edn)
xup = min(xup,eup)
e = np.linspace(xdn,xup,NEDOS,dtype=float)
if gaussian != 0.0:
e = remesh(xdn, xup, gaussian, 0.0, eBoF, NEDOS)
dos = refdos(eBoF, 0.0, vde, edn, e, ve, tdos)
ados = cumtrapz(dos, e, initial=0.0)
idx = closest(e,0.0)
for idx1 in range(idx-1, 0, -1):
if ados[idx1] != ados[idx] : break
NELECTRONS = ados[idx] - e[idx]/(e[idx1]-e[idx])*(ados[idx1]-ados[idx])
dF = 0.0
if dope != 0.0:
NELECTRONS = NELECTRONS+dope
idx = closest(ados,NELECTRONS)
for idx1 in range(idx-1, 0, -1):
if ados[idx1] != ados[idx] : break
#if idx == (NEDOS-1) or ados[idx] == ados[idx+1]:
#print ("NELECTRONS=", NELECTRONS, "idx=", idx, ados[idx], "idx1=", idx1, ados[idx1], "NEDOS=", NEDOS)
if idx1 <= 0 or idx >= (NEDOS-1) or ados[idx] == ados[idx1]:
print ("NELECTRONS=", NELECTRONS, "idx=", idx, ados[idx], "idx1=", idx1, ados[idx1], "NEDOS=", NEDOS)
            # we are doping too much
raise ValueError('Too much doping')
dF = (NELECTRONS-ados[idx])/(ados[idx1] - ados[idx])*(e[idx1] - e[idx])+e[idx]
# dF is the shift in the Fermi energy due to doping
e = e - dF # This is done in a loop (line 289), but I think we can do without
if gaussian != 0.0 and abs(dope)>0.0001: # why did I do this ***********************
#if gaussian != 0.0:
e = remesh(xdn, xup, gaussian, dF, eBoF, NEDOS)
dos = refdos(eBoF, dF, vde, edn, e, ve, tdos)
edos = e*dos
ados = cumtrapz(dos, e, initial=0.0)
energy = cumtrapz(edos, e, initial=0.0)
idx = closest(e,0.0)
NELECTRONS = ados[idx] - e[idx]/(e[idx+1]-e[idx])*(ados[idx+1]-ados[idx])
E0 = energy[idx] - e[idx]/(e[idx+1]-e[idx])*(energy[idx+1]-energy[idx])
return NELECTRONS, E0, dF, e, dos, eBoF
def remesh(xdn, xup, gaussian, dF, eBoF, NEDOS):
"""
    refine the DOS mesh by using a denser mesh around the 0 K Fermi energy in order to decrease the numerical uncertainty
Parameters
----------
    xdn, xup : lower and upper bounds of the energy mesh
    gaussian : parameter used to refine the e mesh near the Fermi energy
    dF : Fermi energy change due to doping
    eBoF : Conduction band minimum
    NEDOS : number of points in the refined e mesh
Return
------
e : refined e mesh
"""
e = np.zeros(NEDOS)
e[0] = xdn - dF
xde = 2.0*(xup - xdn)/(NEDOS-1)
if eBoF>0.0:
xde = 3.0*(xup - xdn)/(NEDOS-1)
sigma = -0.5*(gaussian/(xup-xdn))**2
fac = gaussian/(math.sqrt(2.0*math.pi))
for i in range(1,NEDOS):
f1 = 1.0 + fac*math.exp(sigma*(e[i-1])**2)
if eBoF>0.0:
if dF < eBoF:
f1 += fac*math.exp(sigma*((e[i-1]-eBoF+dF))**2)
else:
f1 += fac*math.exp(sigma*((e[i-1]+dF))**2)
e[i] = e[i-1]+xde/f1
return e
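# Illustrative sketch (not part of the original module): remesh concentrates mesh
# points around the (possibly doped) Fermi level. The numerical values below are
# arbitrary choices made for this example.
def _demo_remesh():
    e = remesh(xdn=-10.0, xup=10.0, gaussian=1000.0, dF=0.0, eBoF=-1.0, NEDOS=2001)
    # the spacing is smallest close to e = 0, i.e. at the Fermi level
    assert abs(e[np.argmin(np.diff(e))]) < 0.1
    return e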
def refdos(eBoF, dF, vde, edn, e, ve, tdos):
"""
    interpolate the original DOS onto the refined energy mesh around the 0 K Fermi energy in order to decrease the numerical uncertainty
    Parameters
    ----------
eBoF : Conduction band minimum
dF : Fermi energy change due to doping
e : refined e mesh
ve : original e mesh
tdos : original e dos
Return
------
dos : refined e dos
"""
dos = np.zeros(len(e))
n_dos = len(tdos)
for i in range(0, len(e)):
tx = e[i] + dF
kx = int((tx-edn)/vde) # Converted to int, remember the type!
kx = max([kx,0]) # XXX: is this translated correctly? What is the 1 in fortran?
kx = min([n_dos-2, kx]) # TODO: the ndos-1 was here before. could be a source of error
if tdos[kx+1]==0.0 and ve[kx+1]>0.0 and ve[kx+1]<vde:
# handling near the Top of valence band
if tx >= 0.0:
dos[i] = 0.0
else:
dos[i] = tdos[kx]*tx/ve[kx]
#dos[i] = tdos[kx]*(tx/ve[kx])**2
elif eBoF > 0.0 and tdos[kx]==0.0 and ve[kx+1]-eBoF<vde and ve[kx+1]-eBoF>0.0:
# handling near the bottom of conduction band
if tx <= eBoF:
dos[i] = 0.0
else:
dos[i] = tdos[kx+1]*(tx-eBoF)/(ve[kx+1]-eBoF)
else:
dos[i] = tdos[kx] + (tdos[kx+1] - tdos[kx])/vde*(tx - ve[kx])
return dos
def closest(e,val):
"""
find the index of the band energy which is the close to the energy val
Parameters
----------
e : float
array of band energy for the e dos
val : given value of band energy
Return
------
index of e that closest to the energy val
"""
idx = np.abs(e-val).argmin()
if e[idx] < val:
idx = idx + 1
return idx
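# Illustrative sketch (not part of the original module): `closest` returns the
# index of the nearest mesh point, rounded up so that e[idx] >= val.
def _demo_closest():
    e = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    assert closest(e, 0.3) == 3      # e[3] = 1.0 is the first point >= 0.3
    assert closest(e, -1.0) == 1     # an exact match keeps its own index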
def gfind(mu_el, pe, pdos, NELECTRONS, Beta, IntegrationFunc=trapz):
"""
    Calculate the difference in electron count from 0 K for a given chemical potential. The purpose is to find the
    chemical potential that makes this difference zero.
Parameters
----------
mu_el : chemical potential, :math:`\mu`, in the Fermi distribution
pe : eigenenergies
    pdos : density of states :math:`n(\varepsilon)`
NELECTRONS : Total number of electrons in the system at 0K
Beta : :math:`\frac{1}{T*k_{B}}`
Returns
-------
    The difference in electron count from 0 K at the given chemical potential
"""
tc = Beta*(pe-mu_el)
tc = tc[np.where(tc<200)]
k = len(tc)
fn = pdos[0:k]/(np.exp(tc[0:k])+1.0)
return IntegrationFunc(fn, pe[0:k])- NELECTRONS
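# Illustrative sketch (not part of the original module): with a flat DOS of one
# state per eV on [-5, 5] eV and 5 electrons, the chemical potential that zeroes
# gfind is ~0 eV by symmetry. The temperature and mesh size are arbitrary choices.
def _demo_gfind():
    pe = np.linspace(-5.0, 5.0, 1001)
    pdos = np.ones_like(pe)
    NELECTRONS = 5.0                    # states occupied below 0 eV at 0 K
    Beta = 1.0 / (k_B * 300.0)          # 300 K
    mu = brentq(gfind, -1.0, 1.0, args=(pe, pdos, NELECTRONS, Beta))
    assert abs(mu) < 1e-6
    return mu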
# line 363
def caclf(pe, pdos, NELECTRONS, Beta, mu_ref=0.0, dF=0.0, IntegrationFunc=trapz): #line 363
"""
Calculate thermal free energy from electronic density of states (e DOS)
Parameters
----------
pe : band energy array
pdos : e DOS
NELECTRONS : total number of electrons
Beta : 1/(kB*T)
Returns
-------
    electron chemical potential, internal energy, entropy, carrier amount, coefficient used to calculate the Seebeck coefficient
"""
#print ("dF=", dF)
if 1==1:
deltaE = 2
for i in range(8):
try:
mu_el = brentq(gfind, mu_ref-deltaE, mu_ref+deltaE, args=(pe, pdos, NELECTRONS, Beta, IntegrationFunc), maxiter=10000)
break
except:
deltaE *= 2
else:
t0 = mu_ref
d0 = gfind(t0, pe, pdos, NELECTRONS, Beta, IntegrationFunc)
if d0 > 0.0: td = -0.1
elif d0 <0.0: td = 0.1
else: return t0
for i in range(999):
t1 = t0 + td
d1 = gfind(t1, pe, pdos, NELECTRONS, Beta, IntegrationFunc)
if d1*d0 < 0.0: break
elif d1*d0 == 0.0: break
t0 = t1
d0 = d1
td = td + td
for i in range(999):
t2 = (t0 + t1)*0.5
d2 = gfind(t2, pe, pdos, NELECTRONS, Beta, IntegrationFunc)
if d2*d0 < 0.0:
t1 = t2
d1 = d2
else:
t0 = t2
d0 = d2
if abs(t1-t0) <1.e-8:
mu_el = 0.5*(t0+t1)
break
tc = Beta*(pe-mu_el)
tc = tc[np.where(tc<200)]
k1 = len(tc)
tf = 1.0/(np.exp(tc)+1.0)
fn = pdos[0:k1]*pe[0:k1]*tf
u = IntegrationFunc(fn, pe[0:k1])
k0 = closest(tc,-200)
tf0 = tf[k0:]
pdos = pdos[k0:k1]
pe = pe[k0:k1]
tf1 = 1.0 - tf0 + 1.e-60 # 1.e-60 is used to avoid log exception
fn = pdos*(tf0*np.log(tf0)+tf1*np.log(tf1))
s = IntegrationFunc(fn, pe)
tf = tf0*(1.0-tf0)
fn = pdos*tf
fn2 = pdos*tf*(pe-mu_el)
Q_el = IntegrationFunc(fn, pe)
Q_p = IntegrationFunc(fn[pe<=dF], pe[pe<=dF])
Q_e = IntegrationFunc(fn[pe>dF], pe[pe>dF])
Y_el = IntegrationFunc(fn2, pe)
fn = pdos*(pe-mu_el)*tf
if Q_el!=0.0:
e_ = IntegrationFunc(fn, pe)/Q_el
fn = pdos[0:k1]*(pe[0:k1]-mu_el-e_)**2*tf
cv = IntegrationFunc(fn, pe[0:k1])
else:
cv = 0.0
fn = pdos[0:k1]*(pe[0:k1]-mu_el)**2*tf
c_mu = IntegrationFunc(fn, pe[0:k1])
# hole/electron concentration by effective carrier
tf = tf0*(1.0-tf0)
fn = pdos*tf
f2 = interp1d(pe, fn, kind='linear')
fmu = f2(mu_el)
x = np.hstack([pe[pe<mu_el],mu_el])
y = np.hstack([fn[pe<mu_el],fmu])
W_p = IntegrationFunc(y,x)
x = np.hstack([mu_el, pe[pe>mu_el]])
y = np.hstack([fmu, fn[pe>mu_el]])
W_e = IntegrationFunc(y,x)
#W_e = IntegrationFunc(fn[pe>mu_el], pe[pe>mu_el])
#W_e = IntegrationFunc(fn[pe>dF], pe[pe>dF])
    # hole/electron concentration by alternative definition
fn = pdos*(1.0-tf0)
f2 = interp1d(pe, fn, kind='linear')
#fmu = f2(mu_el)
#x = np.hstack([pe[pe<mu_el],mu_el])
#y = np.hstack([fn[pe<mu_el],fmu])
try:
fmu = f2(dF)
except:
fmu = 0.
x = np.hstack([pe[pe<dF],dF])
y = np.hstack([fn[pe<dF],fmu])
Y_p = IntegrationFunc(y,x)
fn = pdos*tf0
f2 = interp1d(pe, fn, kind='linear')
#fmu = f2(mu_el)
#x = np.hstack([mu_el, pe[pe>mu_el]])
#y = np.hstack([fmu, fn[pe>mu_el]])
#print ("mu_el", mu_el, dF)
try:
fmu = f2(dF)
except:
fmu = 0.
x = np.hstack([dF, pe[pe>dF]])
y = np.hstack([fmu, fn[pe>dF]])
Y_e = IntegrationFunc(y,x)
return mu_el, u, -s*k_B, cv*k_B*Beta*Beta, Q_el, Y_el, Q_p, Q_e, c_mu*k_B*Beta*Beta, W_p, W_e, Y_p, Y_e
def T_remesh(t0, t1, td, _nT=-1):
T = []
if td > 0:
for t in np.arange(t0,t1+td, td):
T.append(t)
return np.array(T)
if _nT <= 0: nT = 51
else: nT = _nT
a = 100./nT
dT_new = abs(td)/(1+(nT-1)*0.5*a)
for i in range (nT):
T.append(t0+i*dT_new*(1+i*a))
T = np.array(T)
p = (t1-t0)/(max(T)-t0)
for i in range (nT):
T[i] = round((T[i]-t0)*p+t0,2)
return T
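# Illustrative sketch (not part of the original module): a negative td asks
# T_remesh for a non-uniform temperature mesh that is denser at low temperature.
# The bounds and _nT value are arbitrary choices made for this example.
def _demo_T_remesh():
    T = T_remesh(0.0, 1000.0, -10.0, _nT=11)
    dT = np.diff(T)
    assert len(T) == 11 and T[0] == 0.0 and T[-1] == 1000.0 and dT[0] < dT[-1]
    return T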
def runthelec(t0, t1, td, xdn, xup, dope, ndosmx, gaussian, natom,
_T=[], dos=sys.stdin, fout=sys.stdout, vol=None, IntegrationFunc=trapz):
"""
Calculate thermal free energy from electronic density of states (e DOS)
Parameters
----------
t0 : float
Low temperature limit
t1 : float
High temperature limit
td : float
Temperature increment
xdn : float
Minimum energy for integration
xup : float
Maximum energy for integration
dope : float
Number of electrons to dope (negative means to remove electrons, positive means add electrons)
ndosmx : int
Refined number of DOS points for the energy/density grid
    gaussian : float
        Gaussian parameter used to refine the grid mesh around the Fermi energy
    natom : int
        Default 1. Number of atoms in the unit cell if one wants to renormalize
        the calculated properties to units of per atom
    dos : file descriptor or pymatgen dos object
        File descriptor for the VASP DOSCAR, or a pymatgen DOS object
    fout : file descriptor
        Output file descriptor for the calculated properties
Return
------
    Tuple of 14 float arrays containing
    the thermal electron free energy, entropy, specific heat, M_el, Seebeck coefficients,
    effective number of charge carriers, Q_p, Q_e, constant-chemical-potential specific heat, and temperature.
    Other quantities are for research purposes
"""
if hasattr(dos, 'read'):
edn, eup, vde, dos_energies, vaspEdos = pregetdos(dos) # Line 186
else:
e_fermi = dos.efermi
eup = np.max(dos.energies) - e_fermi
edn = np.min(dos.energies) - e_fermi
n_dos = len(dos.energies) # number of points in DOS
vde = (eup - edn)/(n_dos-1) # change in energy per step
dos_energies = np.linspace(edn, eup, n_dos) # linearize: sometimes rounding errors in DOSCAR
vaspEdos = np.array(dos.get_densities())
_eFermi = CBMtoVBM(dos_energies, vaspEdos)
eup -= _eFermi
edn -= _eFermi
dos_energies -= _eFermi
NELECTRONS, E0, dF, e, dos, Eg = getdos(xdn, xup, dope, ndosmx, gaussian, edn, eup, vde, dos_energies, vaspEdos)
if Eg < 0.0: Eg = 0.0
if vol == None:
fout.write('#Bandgap= {} eV. '.format(Eg))
else:
fout.write('#Bandgap= {} eV at volume= {} Angstrom^3/cell. '.format(Eg,vol))
fout.write('Fermi energy was shifted {} due to doping of {} resulting Ne={} \n'.format(dF, dope, NELECTRONS))
# for all temperatures
if len(_T)!=0:
T=copy.deepcopy(_T)
elif td>0.0:
T = np.arange(t0,t1+td,td) # temperature
else:
if self.debug:
T = T_remesh(t0,t1,td,_nT=65)
else:
T = T_remesh(t0,t1,td,_nT=self.nT)
nT = len(T)
U_el = np.zeros(nT)
S_el = np.zeros(nT)
C_el = np.zeros(nT) # electronic specific heat
C_mu = np.zeros(nT) # electronic specific heat at constant chemical potential
M_el = np.zeros(nT) # electronic chemical potential, i.e., absolute thermal electric force
Q_el = np.zeros(nT) # total number of thermal Carrier
Y_el = np.zeros(nT)
Q_p =
|
np.zeros(nT)
|
numpy.zeros
|
##-----------------------------------------------------------------------------
## Import
##-----------------------------------------------------------------------------
from re import S
import numpy as np
from os import listdir
from fnmatch import filter
import scipy.io as sio
import cupy as cp
from time import time
import warnings
warnings.filterwarnings("ignore")
##-----------------------------------------------------------------------------
## Function
##-----------------------------------------------------------------------------
def matching(template_extr, mask_extr, temp_dir, threshold=0.38, use_cuda=True):
"""
Description:
Match the extracted template with database.
Input:
template_extr - Extracted template.
mask_extr - Extracted mask.
threshold - Threshold of distance.
temp_dir - Directory contains templates.
Output:
List of strings of matched files, 0 if not, -1 if no registered sample.
"""
# Get the number of accounts in the database
n_files = len(filter(listdir(temp_dir), '*.mat'))
if n_files == 0:
return -1
result_list = []
dir_list = listdir(temp_dir)
result_list = allmatchingPool(dir_list, template_extr, mask_extr, temp_dir, use_cuda)
filenames = result_list[0]
hm_dists = result_list[1]
# Remove NaN elements
ind_valid =
|
np.where(hm_dists>0)
|
numpy.where
|
#
# ENVISIoN
#
# Copyright (c) 2020 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
from pathlib import Path
import h5py
import numpy as np
import re
import sys
def fermi_parser(hdf_file_path, vasp_dir_path):
"""
    Reads OUTCAR and EIGENVAL to create the data structure for visualization of Fermi surfaces
Parameters
----------
hdf_file_path: str
Path where hdf file will be written to
vasp_dir_path: str
        Path of directory containing OUTCAR and EIGENVAL files
Returns
-------
None
"""
# Check for files
# ---------------
outcar_file_path = Path(vasp_dir_path).joinpath('OUTCAR')
eigenval_file_path = Path(vasp_dir_path).joinpath('EIGENVAL')
if not outcar_file_path.exists() or not eigenval_file_path.exists():
raise FileNotFoundError('Cannot find one of the two vasp files in directory %s' % vasp_dir_path)
# Parse OUTCAR file for fermi energy and reciprocal lattice vectors
# https://www.vasp.at/wiki/index.php/OUTCAR
# --------------------------------------------------------------
with outcar_file_path.open('r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if 'E-fermi' in line:
fermi_energy = float(re.findall(r'-?[\d.]+', line)[0])
if 'reciprocal lattice vectors' in line:
base_x = re.findall(r'-?[\d.]+', lines[i + 1])[3:]
base_x = [float(x) for x in base_x]
base_y = re.findall(r'-?[\d.]+', lines[i + 2])[3:]
base_y = [float(x) for x in base_y]
base_z = re.findall(r'-?[\d.]+', lines[i + 3])[3:]
base_z = [float(x) for x in base_z]
basis = np.array([base_x, base_y, base_z])
# Parse EIGENVAL file for all calculated K-Points and band energies
# https://www.vasp.at/wiki/index.php/EIGENVAL
# ----------------------------------------------------------------
with eigenval_file_path.open('r') as f:
lines = f.readlines()
# collect meta data
[_, _, _, nspin] = [int(v) for v in re.findall(r'[\d]+', lines[0])]
nelectrons, nkpoints, nbands = [int(v) for v in re.findall(r'[\d]+', lines[5])]
kpoints = np.zeros(shape=(nkpoints, 4))
evalues = np.zeros(shape=(nkpoints, nbands, nspin), dtype=np.float32)
kpoint_index = 0
for i, line in enumerate(lines[7:]):
regex = re.findall(r'[-\d.E+]+', line)
# kpoint
if len(regex) == 4:
kpoints[kpoint_index, :] = [float(v) for v in regex]
kpoint_index += 1
# eigenvalue
elif len(regex) > 0:
band_index = int(regex[0])
values = [float(v) for v in regex[1:1+nspin:]]
evalues[kpoint_index - 1, band_index - 1, :] = values
# derive dimensions from unique kpoints
nkpoints_x = len(set(kpoints[:, 0]))
nkpoints_y = len(set(kpoints[:, 1]))
nkpoints_z = len(set(kpoints[:, 2]))
# Write data to HDF5
# ------------------
hdf_file = h5py.File(hdf_file_path, 'a')
hdf_file.create_dataset('fermi_energy', data=
|
np.array(fermi_energy)
|
numpy.array
|
from functools import partial
from itertools import product
from itertools import chain
from itertools import permutations
import warnings
import re
import numpy as np
from scipy import linalg
import pytest
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize, LabelBinarizer
from sklearn.utils.validation import check_random_state
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_div0
from sklearn.utils._testing import assert_no_warnings
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_score
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics._classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_classification_report_dictionary_output():
# Test performance report with dictionary output
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = {'setosa': {'precision': 0.82608695652173914,
'recall': 0.79166666666666663,
'f1-score': 0.8085106382978724,
'support': 24},
'versicolor': {'precision': 0.33333333333333331,
'recall': 0.096774193548387094,
'f1-score': 0.15000000000000002,
'support': 31},
'virginica': {'precision': 0.41860465116279072,
'recall': 0.90000000000000002,
'f1-score': 0.57142857142857151,
'support': 20},
'macro avg': {'f1-score': 0.5099797365754813,
'precision': 0.5260083136726211,
'recall': 0.596146953405018,
'support': 75},
'accuracy': 0.5333333333333333,
'weighted avg': {'f1-score': 0.47310435663627154,
'precision': 0.5137535108414785,
'recall': 0.5333333333333333,
'support': 75}}
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, output_dict=True)
# assert the 2 dicts are equal.
assert(report.keys() == expected_report.keys())
for key in expected_report:
if key == 'accuracy':
assert isinstance(report[key], float)
assert report[key] == expected_report[key]
else:
assert report[key].keys() == expected_report[key].keys()
for metric in expected_report[key]:
assert_almost_equal(expected_report[key][metric],
report[key][metric])
assert type(expected_report['setosa']['precision']) == float
assert type(expected_report['macro avg']['precision']) == float
assert type(expected_report['setosa']['support']) == int
assert type(expected_report['macro avg']['support']) == int
@pytest.mark.parametrize('zero_division', ["warn", 0, 1])
def test_classification_report_zero_division_warning(zero_division):
y_true, y_pred = ["a", "b", "c"], ["a", "b", "d"]
with warnings.catch_warnings(record=True) as record:
classification_report(
y_true, y_pred, zero_division=zero_division, output_dict=True)
if zero_division == "warn":
assert len(record) > 1
for item in record:
msg = ("Use `zero_division` parameter to control this "
"behavior.")
assert msg in str(item.message)
else:
assert not record
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert accuracy_score(y1, y2) == 0.5
assert accuracy_score(y1, y1) == 1
assert accuracy_score(y2, y2) == 1
assert accuracy_score(y2, np.logical_not(y2)) == 0
assert accuracy_score(y1, np.logical_not(y1)) == 0
assert accuracy_score(y1, np.zeros(y1.shape)) == 0
assert accuracy_score(y2, np.zeros(y1.shape)) == 0
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F-scores behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert 1. == precision_score([1, 1], [1, 1])
assert 1. == recall_score([1, 1], [1, 1])
assert 1. == f1_score([1, 1], [1, 1])
assert 1. == fbeta_score([1, 1], [1, 1], 0)
assert 0. == precision_score([-1, -1], [-1, -1])
assert 0. == recall_score([-1, -1], [-1, -1])
assert 0. == f1_score([-1, -1], [-1, -1])
assert 0. == fbeta_score([-1, -1], [-1, -1], float('inf'))
assert fbeta_score([-1, -1], [-1, -1], float('inf')) == pytest.approx(
fbeta_score([-1, -1], [-1, -1], beta=1e5))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
with pytest.raises(ValueError):
recall_score(y_true_bin, y_pred_bin, labels=np.arange(6),
average=average)
with pytest.raises(ValueError):
recall_score(y_true_bin, y_pred_bin, labels=np.arange(-1, 4),
average=average)
# tests non-regression on issue #10307
y_true = np.array([[0, 1, 1], [1, 0, 0]])
y_pred = np.array([[1, 1, 1], [1, 0, 1]])
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='samples',
labels=[0, 1])
assert_almost_equal(np.array([p, r, f]), np.array([3 / 4, 1, 5 / 6]))
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert (recall_13(average=average) !=
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
err_msg = "multiclass format is not supported"
with pytest.raises(ValueError, match=err_msg):
average_precision_score(y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert average_precision_score(y_true, y_score) == 1
def test_average_precision_score_tied_values():
    # Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert average_precision_score(y_true, y_score) != 1.
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
with pytest.raises(ValueError):
precision_recall_fscore_support(y_true, y_pred, beta=-0.1)
# Bad pos_label
with pytest.raises(ValueError):
precision_recall_fscore_support(y_true, y_pred,
pos_label=2,
average='binary')
# Bad average option
with pytest.raises(ValueError):
precision_recall_fscore_support([0, 1, 2], [1, 2, 0],
average='mega')
def test_precision_recall_f_unused_pos_label():
# Check warning that pos_label unused when set to non-default value
# but average != 'binary'; even if data is binary.
assert_warns_message(UserWarning,
"Note that pos_label (set to 2) is "
"ignored when average != 'binary' (got 'macro'). You "
"may use labels=[pos_label] to specify a single "
"positive class.", precision_recall_fscore_support,
[1, 2, 1], [1, 2, 2], pos_label=2, average='macro')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_multilabel_confusion_matrix_binary():
# Test multilabel confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = multilabel_confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[[17, 8], [3, 22]],
[[22, 3], [8, 17]]])
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_multilabel_confusion_matrix_multiclass():
# Test multilabel confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = multilabel_confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[[47, 4], [5, 19]],
[[38, 6], [28, 3]],
[[30, 25], [2, 18]]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels)
assert_array_equal(cm, [[[47, 4], [5, 19]],
[[30, 25], [2, 18]],
[[38, 6], [28, 3]]])
# compute confusion matrix with super set of present labels
labels = ['0', '2', '1', '3'] if string_type else [0, 2, 1, 3]
cm = multilabel_confusion_matrix(y_true, y_pred, labels=labels)
assert_array_equal(cm, [[[47, 4], [5, 19]],
[[30, 25], [2, 18]],
[[38, 6], [28, 3]],
[[75, 0], [0, 0]]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_multilabel_confusion_matrix_multilabel():
# Test multilabel confusion matrix - multilabel-indicator case
from scipy.sparse import csc_matrix, csr_matrix
y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
y_true_csr = csr_matrix(y_true)
y_pred_csr = csr_matrix(y_pred)
y_true_csc = csc_matrix(y_true)
y_pred_csc = csc_matrix(y_pred)
# cross test different types
sample_weight = np.array([2, 1, 3])
real_cm = [[[1, 0], [1, 1]],
[[1, 0], [1, 1]],
[[0, 2], [1, 0]]]
trues = [y_true, y_true_csr, y_true_csc]
preds = [y_pred, y_pred_csr, y_pred_csc]
for y_true_tmp in trues:
for y_pred_tmp in preds:
cm = multilabel_confusion_matrix(y_true_tmp, y_pred_tmp)
assert_array_equal(cm, real_cm)
# test support for samplewise
cm = multilabel_confusion_matrix(y_true, y_pred, samplewise=True)
assert_array_equal(cm, [[[1, 0], [1, 1]],
[[1, 1], [0, 1]],
[[0, 1], [2, 0]]])
# test support for labels
cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0])
assert_array_equal(cm, [[[0, 2], [1, 0]],
[[1, 0], [1, 1]]])
# test support for labels with samplewise
cm = multilabel_confusion_matrix(y_true, y_pred, labels=[2, 0],
samplewise=True)
assert_array_equal(cm, [[[0, 0], [1, 1]],
[[1, 1], [0, 0]],
[[0, 1], [1, 0]]])
# test support for sample_weight with sample_wise
cm = multilabel_confusion_matrix(y_true, y_pred,
sample_weight=sample_weight,
samplewise=True)
assert_array_equal(cm, [[[2, 0], [2, 2]],
[[1, 1], [0, 1]],
[[0, 3], [6, 0]]])
def test_multilabel_confusion_matrix_errors():
y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 1]])
# Bad sample_weight
with pytest.raises(ValueError, match="inconsistent numbers of samples"):
multilabel_confusion_matrix(y_true, y_pred, sample_weight=[1, 2])
with pytest.raises(ValueError, match="bad input shape"):
multilabel_confusion_matrix(y_true, y_pred,
sample_weight=[[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
# Bad labels
err_msg = r"All labels must be in \[0, n labels\)"
with pytest.raises(ValueError, match=err_msg):
multilabel_confusion_matrix(y_true, y_pred, labels=[-1])
err_msg = r"All labels must be in \[0, n labels\)"
with pytest.raises(ValueError, match=err_msg):
multilabel_confusion_matrix(y_true, y_pred, labels=[3])
# Using samplewise outside multilabel
with pytest.raises(ValueError, match="Samplewise metrics"):
multilabel_confusion_matrix([0, 1, 2], [1, 2, 0], samplewise=True)
# Bad y_type
err_msg = "multiclass-multioutput is not supported"
with pytest.raises(ValueError, match=err_msg):
multilabel_confusion_matrix([[0, 1, 2], [2, 1, 0]],
[[1, 2, 0], [1, 0, 2]])
@pytest.mark.parametrize(
"normalize, cm_dtype, expected_results",
[('true', 'f', 0.333333333),
('pred', 'f', 0.333333333),
('all', 'f', 0.1111111111),
(None, 'i', 2)]
)
def test_confusion_matrix_normalize(normalize, cm_dtype, expected_results):
y_test = [0, 1, 2] * 6
y_pred = list(chain(*permutations([0, 1, 2])))
cm = confusion_matrix(y_test, y_pred, normalize=normalize)
assert_allclose(cm, expected_results)
assert cm.dtype.kind == cm_dtype
def test_confusion_matrix_normalize_wrong_option():
y_test = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 0, 0, 0, 0, 0, 0, 0]
with pytest.raises(ValueError, match='normalize must be one of'):
confusion_matrix(y_test, y_pred, normalize=True)
def test_confusion_matrix_normalize_single_class():
y_test = [0, 0, 0, 0, 1, 1, 1, 1]
y_pred = [0, 0, 0, 0, 0, 0, 0, 0]
cm_true = confusion_matrix(y_test, y_pred, normalize='true')
assert cm_true.sum() == pytest.approx(2.0)
# additionally check that no warnings are raised due to a division by zero
with pytest.warns(None) as rec:
cm_pred = confusion_matrix(y_test, y_pred, normalize='pred')
assert not rec
assert cm_pred.sum() == pytest.approx(1.0)
with pytest.warns(None) as rec:
cm_pred = confusion_matrix(y_pred, y_test, normalize='true')
assert not rec
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert kappa == cohen_kappa_score(y2, y1)
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert cohen_kappa_score(y1, y2, labels=[0, 1]) == kappa
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2,
weights="linear"), 0.9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2,
weights="quadratic"), 0.9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert matthews_corrcoef([0], [1]) == 0.0
assert matthews_corrcoef([0, 0], [0, 1]) == 0.0
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef_against_jurman():
# Check that the multiclass matthews_corrcoef agrees with the definition
# presented in Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC
# and CEN Error Measures in MultiClass Prediction
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
sample_weight = rng.rand(20)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
N = len(C)
cov_ytyp = sum([
C[k, k] * C[m, l] - C[l, k] * C[k, m]
for k in range(N) for m in range(N) for l in range(N)
])
cov_ytyt = sum([
C[:, k].sum() *
np.sum([C[g, f] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
cov_ypyp = np.sum([
C[k, :].sum() *
np.sum([C[f, g] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
mcc_jurman = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
mcc_ours = matthews_corrcoef(y_true, y_pred, sample_weight)
assert_almost_equal(mcc_ours, mcc_jurman, 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"])
y_true_inv2 = np.where(y_true_inv2, 'a', 'b')
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_div0(matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_div0(matthews_corrcoef, y_true, ['a'] * len(y_true))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude
mask = [1] * 10 + [0] * 10
# Now the first half of the vector elements are alone given a weight of 1
# and hence the mcc will not be a perfect 0 as in the previous case
with pytest.raises(AssertionError):
assert_almost_equal(matthews_corrcoef(y_1, y_2,
sample_weight=mask), 0.)
def test_matthews_corrcoef_multiclass():
rng = np.random.RandomState(0)
ord_a = ord('a')
n_classes = 4
y_true = [chr(ord_a + i) for i in rng.randint(0, n_classes, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# with multiclass > 2 it is not possible to achieve -1
y_true = [0, 0, 1, 1, 2, 2]
y_pred_bad = [2, 2, 0, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_bad), -.5)
# Maximizing false positives and negatives minimizes the MCC
    # The minimum will be different depending on the input
y_true = [0, 0, 1, 1, 2, 2]
y_pred_min = [1, 1, 0, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_min),
-12 / np.sqrt(24 * 16))
# Zero variance will result in an mcc of zero and a Runtime Warning
y_true = [0, 1, 2]
y_pred = [3, 3, 3]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred)
assert_almost_equal(mcc, 0.0)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [0, 1, 2, 0, 1, 2, 0, 1, 2]
y_2 = [1, 1, 1, 2, 2, 2, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# We can test that binary assumptions hold using the multiclass computation
# by masking the weight of samples not in the first two classes
# Masking the last label should let us get an MCC of -1
y_true = [0, 0, 1, 1, 2]
y_pred = [1, 1, 0, 0, 2]
sample_weight = [1, 1, 1, 1, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred, sample_weight), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
y_true = [0, 0, 1, 2]
y_pred = [0, 0, 1, 2]
sample_weight = [1, 1, 0, 0]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred,
sample_weight)
# But will output 0
assert_almost_equal(mcc, 0.)
@pytest.mark.parametrize('n_points', [100, 10000])
def test_matthews_corrcoef_overflow(n_points):
# https://github.com/scikit-learn/scikit-learn/issues/9622
rng = np.random.RandomState(20170906)
def mcc_safe(y_true, y_pred):
conf_matrix = confusion_matrix(y_true, y_pred)
true_pos = conf_matrix[1, 1]
false_pos = conf_matrix[1, 0]
false_neg = conf_matrix[0, 1]
n_points = len(y_true)
pos_rate = (true_pos + false_neg) / n_points
activity = (true_pos + false_pos) / n_points
mcc_numerator = true_pos / n_points - pos_rate * activity
mcc_denominator = activity * pos_rate * (1 - activity) * (1 - pos_rate)
return mcc_numerator / np.sqrt(mcc_denominator)
def random_ys(n_points): # binary
x_true = rng.random_sample(n_points)
x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5)
y_true = (x_true > 0.5)
y_pred = (x_pred > 0.5)
return y_true, y_pred
arr = np.repeat([0., 1.], n_points) # binary
assert_almost_equal(matthews_corrcoef(arr, arr), 1.0)
arr = np.repeat([0., 1., 2.], n_points) # multiclass
assert_almost_equal(matthews_corrcoef(arr, arr), 1.0)
y_true, y_pred = random_ys(n_points)
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
mcc_safe(y_true, y_pred))
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
with pytest.raises(ValueError):
precision_score(y_true, y_pred, average="samples")
with pytest.raises(ValueError):
recall_score(y_true, y_pred, average="samples")
with pytest.raises(ValueError):
f1_score(y_true, y_pred, average="samples")
with pytest.raises(ValueError):
fbeta_score(y_true, y_pred, average="samples", beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
@pytest.mark.parametrize('average',
['samples', 'micro', 'macro', 'weighted', None])
def test_precision_refcall_f1_score_multilabel_unordered_labels(average):
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_binary_averaged():
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
average=None)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='macro')
assert p == np.mean(ps)
assert r == np.mean(rs)
assert f == np.mean(fs)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='weighted')
support = np.bincount(y_true)
assert p == np.average(ps, weights=support)
assert r == np.average(rs, weights=support)
assert f == np.average(fs, weights=support)
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
# a label not in y_true should result in zeros for that row/column
extra_label = np.max(y_true) + 1
cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
assert_array_equal(cm, [[18, 0],
[0, 0]])
# check for exception when none of the specified labels are in y_true
with pytest.raises(ValueError):
confusion_matrix(y_true, y_pred,
labels=[extra_label, extra_label + 1])
def test_confusion_matrix_dtype():
y = [0, 1, 1]
weight = np.ones(len(y))
# confusion_matrix returns int64 by default
cm = confusion_matrix(y, y)
assert cm.dtype == np.int64
# The dtype of confusion_matrix is always 64 bit
for dtype in [np.bool_, np.int32, np.uint64]:
cm = confusion_matrix(y, y,
sample_weight=weight.astype(dtype, copy=False))
assert cm.dtype == np.int64
for dtype in [np.float32, np.float64, None, object]:
cm = confusion_matrix(y, y,
sample_weight=weight.astype(dtype, copy=False))
assert cm.dtype == np.float64
# np.iinfo(np.uint32).max should be accumulated correctly
weight = np.full(len(y), 4294967295, dtype=np.uint32)
cm = confusion_matrix(y, y, sample_weight=weight)
assert cm[0, 0] == 4294967295
assert cm[1, 1] == 8589934590
# np.iinfo(np.int64).max should cause an overflow
weight = np.full(len(y), 9223372036854775807, dtype=np.int64)
cm = confusion_matrix(y, y, sample_weight=weight)
assert cm[0, 0] == 9223372036854775807
assert cm[1, 1] == -2
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
accuracy 0.53 75
macro avg 0.53 0.60 0.51 75
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert report == expected_report
def test_classification_report_multiclass_balanced():
y_true, y_pred = [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]
expected_report = """\
precision recall f1-score support
0 0.33 0.33 0.33 3
1 0.33 0.33 0.33 3
2 0.33 0.33 0.33 3
accuracy 0.33 9
macro avg 0.33 0.33 0.33 9
weighted avg 0.33 0.33 0.33 9
"""
report = classification_report(y_true, y_pred)
assert report == expected_report
def test_classification_report_multiclass_with_label_detection():
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
accuracy 0.53 75
macro avg 0.53 0.60 0.51 75
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert report == expected_report
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
accuracy 0.53333 75
macro avg 0.52601 0.59615 0.50998 75
weighted avg 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert report == expected_report
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
accuracy 0.53 75
macro avg 0.53 0.60 0.51 75
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert report == expected_report
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
accuracy 0.53 75
macro avg 0.53 0.60 0.51 75
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert report == expected_report
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue\xa2", "green\xa2", "red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
accuracy 0.53 75
macro avg 0.53 0.60 0.51 75
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert report == expected_report
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green" * 5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
accuracy 0.53 75
macro avg 0.53 0.60 0.51 75
weighted avg 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert report == expected_report
def test_classification_report_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
assert_warns_message(UserWarning,
"labels size, 2, does not "
"match size of target_names, 3",
classification_report,
y_true, y_pred, labels=[0, 2],
target_names=target_names)
def test_classification_report_no_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
err_msg = ("Number of classes, 2, does not "
"match size of target_names, 3. "
"Try specifying the labels parameter")
with pytest.raises(ValueError, match=err_msg):
classification_report(y_true, y_pred, target_names=target_names)
@ignore_warnings
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
micro avg 0.50 0.51 0.50 104
macro avg 0.45 0.51 0.46 104
weighted avg 0.45 0.51 0.46 104
samples avg 0.46 0.42 0.40 104
"""
report = classification_report(y_true, y_pred)
assert report == expected_report
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert zero_one_loss(y1, y2) == 0.5
assert zero_one_loss(y1, y1) == 0
assert zero_one_loss(y2, y2) == 0
assert zero_one_loss(y2, np.logical_not(y2)) == 1
assert zero_one_loss(y1, np.logical_not(y1)) == 1
assert zero_one_loss(y1, np.zeros(y1.shape)) == 1
assert zero_one_loss(y2, np.zeros(y1.shape)) == 1
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert hamming_loss(y1, y2) == 1 / 6
assert hamming_loss(y1, y1) == 0
assert hamming_loss(y2, y2) == 0
assert hamming_loss(y2, 1 - y2) == 1
assert hamming_loss(y1, 1 - y1) == 1
assert hamming_loss(y1, np.zeros(y1.shape)) == 4 / 6
assert hamming_loss(y2, np.zeros(y1.shape)) == 0.5
assert hamming_loss(y1, y2, sample_weight=w) == 1. / 12
assert hamming_loss(y1, 1-y2, sample_weight=w) == 11. / 12
assert hamming_loss(y1, np.zeros_like(y1), sample_weight=w) == 2. / 3
# sp_hamming only works with 1-D arrays
assert hamming_loss(y1[0], y2[0]) == sp_hamming(y1[0], y2[0])
assert_warns_message(FutureWarning,
"The labels parameter is unused. It was"
" deprecated in version 0.21 and"
" will be removed in version 0.23",
hamming_loss, y1, y2, labels=[0, 1])
def test_jaccard_score_validation():
y_true = np.array([0, 1, 0, 1, 1])
y_pred = np.array([0, 1, 0, 1, 1])
err_msg = r"pos_label=2 is not a valid label: array\(\[0, 1\]\)"
with pytest.raises(ValueError, match=err_msg):
jaccard_score(y_true, y_pred, average='binary', pos_label=2)
y_true = np.array([[0, 1, 1], [1, 0, 0]])
y_pred = np.array([[1, 1, 1], [1, 0, 1]])
msg1 = (r"Target is multilabel-indicator but average='binary'. "
r"Please choose another average setting, one of \[None, "
r"'micro', 'macro', 'weighted', 'samples'\].")
with pytest.raises(ValueError, match=msg1):
jaccard_score(y_true, y_pred, average='binary', pos_label=-1)
y_true = np.array([0, 1, 1, 0, 2])
y_pred = np.array([1, 1, 1, 1, 0])
msg2 = (r"Target is multiclass but average='binary'. Please choose "
r"another average setting, one of \[None, 'micro', 'macro', "
r"'weighted'\].")
with pytest.raises(ValueError, match=msg2):
jaccard_score(y_true, y_pred, average='binary')
msg3 = ("Samplewise metrics are not available outside of multilabel "
"classification.")
with pytest.raises(ValueError, match=msg3):
jaccard_score(y_true, y_pred, average='samples')
assert_warns_message(UserWarning,
"Note that pos_label (set to 3) is ignored when "
"average != 'binary' (got 'micro'). You may use "
"labels=[pos_label] to specify a single positive "
"class.", jaccard_score, y_true, y_pred,
average='micro', pos_label=3)
def test_multilabel_jaccard_score(recwarn):
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert jaccard_score(y1, y2, average='samples') == 0.75
assert jaccard_score(y1, y1, average='samples') == 1
assert jaccard_score(y2, y2, average='samples') == 1
assert jaccard_score(y2, np.logical_not(y2), average='samples') == 0
assert jaccard_score(y1, np.logical_not(y1), average='samples') == 0
assert jaccard_score(y1, np.zeros(y1.shape), average='samples') == 0
assert jaccard_score(y2, np.zeros(y1.shape), average='samples') == 0
y_true = np.array([[0, 1, 1], [1, 0, 0]])
y_pred = np.array([[1, 1, 1], [1, 0, 1]])
# average='macro'
assert_almost_equal(jaccard_score(y_true, y_pred,
average='macro'), 2. / 3)
# average='micro'
assert_almost_equal(jaccard_score(y_true, y_pred,
average='micro'), 3. / 5)
# average='samples'
assert_almost_equal(jaccard_score(y_true, y_pred, average='samples'),
7. / 12)
assert_almost_equal(jaccard_score(y_true, y_pred,
average='samples',
labels=[0, 2]), 1. / 2)
assert_almost_equal(jaccard_score(y_true, y_pred,
average='samples',
labels=[1, 2]), 1. / 2)
# average=None
assert_array_equal(jaccard_score(y_true, y_pred, average=None),
np.array([1. / 2, 1., 1. / 2]))
y_true = np.array([[0, 1, 1], [1, 0, 1]])
y_pred = np.array([[1, 1, 1], [1, 0, 1]])
assert_almost_equal(jaccard_score(y_true, y_pred,
average='macro'), 5. / 6)
# average='weighted'
assert_almost_equal(jaccard_score(y_true, y_pred,
average='weighted'), 7. / 8)
msg2 = 'Got 4 > 2'
with pytest.raises(ValueError, match=msg2):
jaccard_score(y_true, y_pred, labels=[4], average='macro')
msg3 = 'Got -1 < 0'
with pytest.raises(ValueError, match=msg3):
jaccard_score(y_true, y_pred, labels=[-1], average='macro')
msg = ('Jaccard is ill-defined and being set to 0.0 in labels '
'with no true or predicted samples.')
assert assert_warns_message(UndefinedMetricWarning, msg,
jaccard_score,
np.array([[0, 1]]),
np.array([[0, 1]]),
average='macro') == 0.5
msg = ('Jaccard is ill-defined and being set to 0.0 in samples '
'with no true or predicted labels.')
assert assert_warns_message(UndefinedMetricWarning, msg,
jaccard_score,
np.array([[0, 0], [1, 1]]),
np.array([[0, 0], [1, 1]]),
average='samples') == 0.5
assert not list(recwarn)
def test_multiclass_jaccard_score(recwarn):
y_true = ['ant', 'ant', 'cat', 'cat', 'ant', 'cat', 'bird', 'bird']
y_pred = ['cat', 'ant', 'cat', 'cat', 'ant', 'bird', 'bird', 'cat']
labels = ['ant', 'bird', 'cat']
lb = LabelBinarizer()
lb.fit(labels)
y_true_bin = lb.transform(y_true)
y_pred_bin = lb.transform(y_pred)
multi_jaccard_score = partial(jaccard_score, y_true,
y_pred)
bin_jaccard_score = partial(jaccard_score,
y_true_bin, y_pred_bin)
multi_labels_list = [['ant', 'bird'], ['ant', 'cat'], ['cat', 'bird'],
['ant'], ['bird'], ['cat'], None]
bin_labels_list = [[0, 1], [0, 2], [2, 1], [0], [1], [2], None]
# other than average='samples'/'none-samples', test everything else here
for average in ('macro', 'weighted', 'micro', None):
for m_label, b_label in zip(multi_labels_list, bin_labels_list):
assert_almost_equal(multi_jaccard_score(average=average,
labels=m_label),
bin_jaccard_score(average=average,
labels=b_label))
y_true = np.array([[0, 0], [0, 0], [0, 0]])
y_pred = np.array([[0, 0], [0, 0], [0, 0]])
with ignore_warnings():
assert (jaccard_score(y_true, y_pred, average='weighted')
== 0)
assert not list(recwarn)
def test_average_binary_jaccard_score(recwarn):
# tp=0, fp=0, fn=1, tn=0
assert jaccard_score([1], [0], average='binary') == 0.
# tp=0, fp=0, fn=0, tn=1
msg = ('Jaccard is ill-defined and being set to 0.0 due to '
'no true or predicted samples')
assert assert_warns_message(UndefinedMetricWarning,
msg,
jaccard_score,
[0, 0], [0, 0],
average='binary') == 0.
# tp=1, fp=0, fn=0, tn=0 (pos_label=0)
assert jaccard_score([0], [0], pos_label=0,
average='binary') == 1.
y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 1, 1, 1])
assert_almost_equal(jaccard_score(y_true, y_pred,
average='binary'), 3. / 4)
assert_almost_equal(jaccard_score(y_true, y_pred,
average='binary',
pos_label=0), 1. / 2)
assert not list(recwarn)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
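# Sketch (illustration only, reusing the crafted example above): with
# average='samples', precision and recall are computed per row of the
# indicator matrices, |y_pred_i & y_true_i| / |y_pred_i| and
# |y_pred_i & y_true_i| / |y_true_i|, and then averaged over the samples.
def _demo_samples_average_precision_recall():
    import numpy as np
    from sklearn.metrics import precision_score, recall_score
    y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
    y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
    inter = (y_true & y_pred).sum(axis=1)
    p_manual = np.mean(inter / y_pred.sum(axis=1))
    r_manual = np.mean(inter / y_true.sum(axis=1))
    assert np.isclose(precision_score(y_true, y_pred, average='samples'),
                      p_manual)
    assert np.isclose(recall_score(y_true, y_pred, average='samples'),
                      r_manual)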
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
@pytest.mark.parametrize('zero_division', ["warn", 0, 1])
def test_precision_recall_f1_score_with_an_empty_prediction(zero_division):
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
zero_division = 1.0 if zero_division == 1.0 else 0.0
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None,
zero_division=zero_division)
assert_array_almost_equal(p, [zero_division, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, zero_division], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None,
zero_division=zero_division)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro",
zero_division=zero_division)
assert_almost_equal(p, (2 + zero_division) / 4)
assert_almost_equal(r, (1.5 + zero_division) / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro",
zero_division=zero_division)
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro",
zero_division=zero_division),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted",
zero_division=zero_division)
assert_almost_equal(p, 3 / 4 if zero_division == 0 else 1.0)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted",
zero_division=zero_division),
np.average(f2, weights=support),
)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert s is None
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples",
zero_division=zero_division),
0.333, 2)
@pytest.mark.parametrize('beta', [1])
@pytest.mark.parametrize('average', ["macro", "micro", "weighted", "samples"])
@pytest.mark.parametrize('zero_division', [0, 1])
def test_precision_recall_f1_no_labels(beta, average, zero_division):
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
p, r, f, s = assert_no_warnings(precision_recall_fscore_support, y_true,
y_pred, average=average, beta=beta,
zero_division=zero_division)
fbeta = assert_no_warnings(fbeta_score, y_true, y_pred, beta=beta,
average=average, zero_division=zero_division)
zero_division = float(zero_division)
assert_almost_equal(p, zero_division)
assert_almost_equal(r, zero_division)
assert_almost_equal(f, zero_division)
assert s is None
assert_almost_equal(fbeta, float(zero_division))
@pytest.mark.parametrize('average', ["macro", "micro", "weighted", "samples"])
def test_precision_recall_f1_no_labels_check_warnings(average):
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
func = precision_recall_fscore_support
with pytest.warns(UndefinedMetricWarning):
p, r, f, s = func(y_true, y_pred, average=average, beta=1.0)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert s is None
with pytest.warns(UndefinedMetricWarning):
fbeta = fbeta_score(y_true, y_pred, average=average, beta=1.0)
assert_almost_equal(fbeta, 0)
@pytest.mark.parametrize('zero_division', [0, 1])
def test_precision_recall_f1_no_labels_average_none(zero_division):
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
p, r, f, s = assert_no_warnings(precision_recall_fscore_support,
y_true, y_pred,
average=None, beta=1.0,
zero_division=zero_division)
fbeta = assert_no_warnings(fbeta_score, y_true, y_pred, beta=1.0,
average=None, zero_division=zero_division)
zero_division = float(zero_division)
assert_array_almost_equal(
p, [zero_division, zero_division, zero_division], 2
)
assert_array_almost_equal(
r, [zero_division, zero_division, zero_division], 2
)
assert_array_almost_equal(
f, [zero_division, zero_division, zero_division], 2
)
assert_array_almost_equal(s, [0, 0, 0], 2)
assert_array_almost_equal(
fbeta, [zero_division, zero_division, zero_division], 2
)
def test_precision_recall_f1_no_labels_average_none_warn():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
with pytest.warns(UndefinedMetricWarning):
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, average=None, beta=1
)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
with pytest.warns(UndefinedMetricWarning):
fbeta = fbeta_score(y_true, y_pred, beta=1, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]), average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, [1, 1], [-1, -1], average='binary')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_warns_message(w, msg, f, [-1, -1], [1, 1], average='binary')
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_recall_fscore_support([0, 0], [0, 0], average="binary")
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert str(record.pop().message) == msg
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert str(record.pop().message) == msg
@pytest.mark.parametrize('zero_division', [0, 1])
def test_prf_no_warnings_if_zero_division_set(zero_division):
# average of per-label scores
f = precision_recall_fscore_support
for average in [None, 'weighted', 'macro']:
assert_no_warnings(f, [0, 1, 2], [1, 1, 2], average=average,
zero_division=zero_division)
assert_no_warnings(f, [1, 1, 2], [0, 1, 2], average=average,
zero_division=zero_division)
# average of per-sample scores
assert_no_warnings(f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples',
zero_division=zero_division)
assert_no_warnings(f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples', zero_division=zero_division)
# single score: micro-average
assert_no_warnings(f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro',
zero_division=zero_division)
assert_no_warnings(f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro',
zero_division=zero_division)
# single positive label
assert_no_warnings(f, [1, 1], [-1, -1], average='binary',
zero_division=zero_division)
assert_no_warnings(f, [-1, -1], [1, 1], average='binary',
zero_division=zero_division)
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_recall_fscore_support([0, 0], [0, 0], average="binary",
zero_division=zero_division)
assert len(record) == 0
@pytest.mark.parametrize('zero_division', ["warn", 0, 1])
def test_recall_warnings(zero_division):
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro', zero_division=zero_division)
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro', zero_division=zero_division)
if zero_division == "warn":
assert (str(record.pop().message) ==
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.'
' Use `zero_division` parameter to control'
' this behavior.')
else:
assert len(record) == 0
recall_score([0, 0], [0, 0])
if zero_division == "warn":
assert (str(record.pop().message) ==
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.'
' Use `zero_division` parameter to control'
' this behavior.')
@pytest.mark.parametrize('zero_division', ["warn", 0, 1])
def test_precision_warnings(zero_division):
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro', zero_division=zero_division)
if zero_division == "warn":
assert (str(record.pop().message) ==
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.'
' Use `zero_division` parameter to control'
' this behavior.')
else:
assert len(record) == 0
precision_score([0, 0], [0, 0])
if zero_division == "warn":
assert (str(record.pop().message) ==
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.'
' Use `zero_division` parameter to control'
' this behavior.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro', zero_division=zero_division)
@pytest.mark.parametrize('zero_division', ["warn", 0, 1])
def test_fscore_warnings(zero_division):
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro', zero_division=zero_division)
assert len(record) == 0
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro', zero_division=zero_division)
assert len(record) == 0
score(np.array([[0, 0], [0, 0]]),
np.array([[0, 0], [0, 0]]),
average='micro', zero_division=zero_division)
if zero_division == "warn":
assert (str(record.pop().message) ==
'F-score is ill-defined and '
'being set to 0.0 due to no true nor predicted '
'samples. Use `zero_division` parameter to '
'control this behavior.')
else:
assert len(record) == 0
def test_prf_average_binary_data_non_binary():
# Error if user does not explicitly set non-binary average mode
y_true_mc = [1, 2, 3, 3]
y_pred_mc = [1, 2, 3, 1]
msg_mc = (r"Target is multiclass but average='binary'. Please "
r"choose another average setting, one of \["
r"None, 'micro', 'macro', 'weighted'\].")
y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
msg_ind = (r"Target is multilabel-indicator but average='binary'. Please "
r"choose another average setting, one of \["
r"None, 'micro', 'macro', 'weighted', 'samples'\].")
for y_true, y_pred, msg in [
(y_true_mc, y_pred_mc, msg_mc),
(y_true_ind, y_pred_ind, msg_ind),
]:
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
with pytest.raises(ValueError, match=msg):
metric(y_true, y_pred)
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
with pytest.raises(ValueError):
_check_targets(y1, y2)
if type1 != type2:
err_msg = ("Classification metrics can't handle a mix "
"of {0} and {1} targets".format(type1, type2))
with pytest.raises(ValueError, match=err_msg):
_check_targets(y1, y2)
else:
if type1 not in (BIN, MC, IND):
err_msg = "{0} is not supported".format(type1)
with pytest.raises(ValueError, match=err_msg):
_check_targets(y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert merged_type == expected
if merged_type.startswith('multilabel'):
assert y1out.format == 'csr'
assert y2out.format == 'csr'
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
with pytest.raises(ValueError):
_check_targets(y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
with pytest.raises(ValueError, match=msg):
_check_targets(y1, y2)
def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary():
# https://github.com/scikit-learn/scikit-learn/issues/8098
y_true = [0, 1]
y_pred = [0, -1]
assert _check_targets(y_true, y_pred)[0] == 'multiclass'
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert hinge_loss(y_true, pred_decision) == 1.2 / 4
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert hinge_loss(y_true, pred_decision) == 1.2 / 4
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
np.clip(dummy_losses, 0, None, out=dummy_losses)
dummy_hinge_loss = np.mean(dummy_losses)
assert (hinge_loss(y_true, pred_decision) ==
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
with pytest.raises(ValueError, match=error_message):
hinge_loss(y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
np.clip(dummy_losses, 0, None, out=dummy_losses)
dummy_hinge_loss = np.mean(dummy_losses)
assert (hinge_loss(y_true, pred_decision, labels=labels) ==
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions is not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
np.clip(dummy_losses, 0, None, out=dummy_losses)
dummy_hinge_loss = np.mean(dummy_losses)
assert (hinge_loss(y_true, pred_decision) ==
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
with pytest.raises(ValueError):
log_loss(y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = (r'y_true contains only one label \(2\). Please provide '
r'the true labels explicitly through the labels argument.')
with pytest.raises(ValueError, match=error_str):
log_loss(y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
    error_str = (r'Found input variables with inconsistent numbers of samples: '
                 r'\[3, 2\]')
    with pytest.raises(ValueError, match=error_str):
        log_loss(y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
with pytest.raises(ValueError):
brier_score_loss(y_true, y_pred[1:])
with pytest.raises(ValueError):
brier_score_loss(y_true, y_pred + 1.)
with pytest.raises(ValueError):
brier_score_loss(y_true, y_pred - 1.)
# ensure to raise an error for multiclass y_true
y_true = np.array([0, 1, 2, 0])
y_pred = np.array([0.8, 0.6, 0.4, 0.2])
error_message = ("Only binary classification is supported. Labels "
"in y_true: {}".format(np.array([0, 1, 2])))
with pytest.raises(ValueError, match=re.escape(error_message)):
brier_score_loss(y_true, y_pred)
# calculate correctly when there's only one class in y_true
assert_almost_equal(brier_score_loss([-1], [0.4]), 0.16)
assert_almost_equal(brier_score_loss([0], [0.4]), 0.16)
assert_almost_equal(brier_score_loss([1], [0.4]), 0.36)
assert_almost_equal(
brier_score_loss(['foo'], [0.4], pos_label='bar'), 0.16)
assert_almost_equal(
brier_score_loss(['foo'], [0.4], pos_label='foo'), 0.36)
def test_balanced_accuracy_score_unseen():
assert_warns_message(UserWarning, 'y_pred contains classes not in y_true',
balanced_accuracy_score, [0, 0, 0], [0, 0, 1])
@pytest.mark.parametrize('y_true,y_pred',
[
(['a', 'b', 'a', 'b'], ['a', 'a', 'a', 'b']),
(['a', 'b', 'c', 'b'], ['a', 'a', 'a', 'b']),
(['a', 'a', 'a', 'b'], ['a', 'b', 'c', 'b']),
])
def test_balanced_accuracy_score(y_true, y_pred):
macro_recall = recall_score(y_true, y_pred, average='macro',
labels=np.unique(y_true))
with ignore_warnings():
# Warnings are tested in test_balanced_accuracy_score_unseen
balanced = balanced_accuracy_score(y_true, y_pred)
assert balanced == pytest.approx(macro_recall)
adjusted = balanced_accuracy_score(y_true, y_pred, adjusted=True)
chance = balanced_accuracy_score(y_true, np.full_like(y_true, y_true[0]))
assert adjusted == (balanced - chance) / (1 - chance)
def test_multilabel_jaccard_similarity_score_deprecation():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
jss = partial(assert_warns, FutureWarning,
jaccard_similarity_score)
assert jss(y1, y2) == 0.75
assert jss(y1, y1) == 1
assert jss(y2, y2) == 1
    assert jss(y2, np.logical_not(y2)) == 0
"""
precession. TODO: write me here
"""
import warnings
import numpy as np
import scipy.special
import scipy.integrate
from sympy import elliptic_pi
def roots_vec(p):
"""
Locate roots of polynomial using a vectorized version of numpy.roots. Equivalent to [np.roots(x) for x in p].
Credits: stackoverflow user `pv`, see https://stackoverflow.com/a/35853977
Call
----
roots = roots_vec(p)
Parameters
----------
p: array
Polynomial coefficients.
Returns
-------
roots: array
Polynomial roots.
"""
p = np.atleast_1d(p)
n = p.shape[-1]
A = np.zeros(p.shape[:1] + (n-1, n-1), float)
A[..., 1:, :-1] = np.eye(n-2)
A[..., 0, :] = -p[..., 1:]/p[..., None, 0]
return np.linalg.eigvals(A)
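# Minimal usage sketch (values chosen only for illustration). Each row of p
# holds the coefficients of one polynomial, highest degree first, and
# roots_vec returns the roots of all polynomials at once.
def _demo_roots_vec():
    p = np.array([[1., -3., 2.],   # x^2 - 3x + 2 -> roots 1, 2
                  [1., 0., -4.]])  # x^2 - 4      -> roots -2, 2
    sols = np.sort(np.real(roots_vec(p)), axis=1)
    assert np.allclose(sols, [[1., 2.], [-2., 2.]])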
def norm_nested(x):
"""
Norm of 2D array of shape (N,3) along last axis.
Call
----
    n = norm_nested(x)
Parameters
----------
x : array
Input array.
Returns
-------
    n : array
        Norm of each row of the input array.
"""
return np.linalg.norm(x, axis=1)
def normalize_nested(x):
"""
Normalize 2D array of shape (N,3) along last axis.
Call
----
x = normalize_nested(x)
Parameters
----------
x : array
Input array.
Returns
-------
x : array
Normalized array.
"""
return x/norm_nested(x)[:, None]
def dot_nested(x, y):
"""
Dot product between 2D arrays along last axis.
Call
----
z = dot_nested(x, y)
Parameters
----------
x : array
Input array.
y : array
Input array.
Returns
-------
z : array
Dot product array.
"""
return np.einsum('ij, ij->i', x, y)
def sample_unitsphere(N=1):
"""
Sample points uniformly on a sphere of unit radius. Returns array of shape (N,3).
Call
----
vec = sample_unitsphere(N = 1)
Parameters
----------
N: integer, optional (default: 1)
Number of samples.
Returns
-------
vec: array
        Vector in Cartesian components.
"""
vec = np.random.randn(3, N)
vec /= np.linalg.norm(vec, axis=0)
return vec.T
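# Sanity-check sketch (illustration only): the output has shape (N,3) and
# every sampled vector has unit norm.
def _demo_sample_unitsphere():
    vec = sample_unitsphere(N=1000)
    assert vec.shape == (1000, 3)
    assert np.allclose(norm_nested(vec), 1)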
def wraproots(coefficientfunction, *args, **kwargs):
"""
Find roots of a polynomial given coefficients, ordered according to their real part. Complex roots are masked with nans. This is essentially a wrapper of numpy.roots.
Call
----
sols = precession.wraproots(coefficientfunction, *args, **kwargs)
Parameters
----------
coefficientfunction: callable
Function returning the polynomial coefficients ordered from highest to lowest degree.
*args, **kwargs:
Parameters of `coefficientfunction`.
Returns
-------
sols: array
Roots of the polynomial.
"""
coeffs = coefficientfunction(*args, **kwargs)
sols = np.sort_complex(roots_vec(coeffs.T))
sols = np.real(np.where(np.isreal(sols), sols, np.nan))
return sols
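# Usage sketch (the toy coefficient function below is an assumption made for
# illustration): a quadratic with roots a and b has coefficients
# [1, -(a+b), a*b], and wraproots recovers those roots sorted by real part.
def _demo_wraproots():
    def quadratic_coefficients(a, b):
        a, b = np.atleast_1d(a), np.atleast_1d(b)
        return np.stack([np.ones_like(a), -(a + b), a * b])
    sols = wraproots(quadratic_coefficients, 1., 3.)
    assert np.allclose(sols, [[1., 3.]])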
@np.vectorize
def ellippi(n, phi, m):
"""
    Incomplete elliptic integral of the third kind. At the time of writing, this has not been implemented in scipy yet; here we wrap the sympy implementation. For the complete integral, set phi=np.pi/2.
Call
----
piintegral = precession.ellippi(n, phi, m)
Parameters
----------
    n: float
        Characteristic of the elliptic integral.
phi: float
Amplitude of the elliptic integral.
m: float
Parameter of the elliptic integral
Returns
-------
piintegral: float
Incomplete elliptic integral of the third kind
"""
return float(elliptic_pi(n, phi, m))
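# Consistency sketch (illustration only): for characteristic n=0 the integral
# of the third kind reduces to the incomplete integral of the first kind, so
# ellippi can be cross-checked against scipy.special.ellipkinc.
def _demo_ellippi():
    phi, m = 0.7, 0.3
    assert np.isclose(ellippi(0, phi, m), scipy.special.ellipkinc(phi, m))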
def rotate_zaxis(vec, angle):
"""
    Rotate a series of vectors about the z axis by a given angle. Input vec has shape (N,3) and input angle has shape (N,).
Call
----
newvec = rotate_zaxis(vec,angle)
Parameters
----------
vec: array
Input array.
angle: float
Rotation angle.
Returns
-------
newvec: array
Rotated array.
"""
newx = vec[:, 0]*np.cos(angle) - vec[:, 1]*np.sin(angle)
newy = vec[:, 0]*np.sin(angle) + vec[:, 1]*np.cos(angle)
newz = vec[:, 2]
newvec = np.transpose([newx, newy, newz])
return newvec
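# Usage sketch (illustration only): rotating the x axis by pi/2 about z gives
# the y axis, while the z component is left untouched.
def _demo_rotate_zaxis():
    vec = np.array([[1., 0., 0.5]])
    assert np.allclose(rotate_zaxis(vec, np.pi / 2), [[0., 1., 0.5]])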
def ismonotonic(vec, which):
"""
    Check if an array is monotonic. The parameter `which` can take the following values:
- `<` check array is strictly increasing.
- `<=` check array is increasing.
- `>` check array is strictly decreasing.
- `>=` check array is decreasing.
Call
----
check = ismonotonic(vec, which):
Parameters
----------
vec: array
Input array.
which: string
Select function behavior.
Returns
-------
check: boolean
Result
"""
if which == '<':
return np.all(vec[:-1] < vec[1:])
elif which == '<=':
return np.all(vec[:-1] <= vec[1:])
elif which == '>':
return np.all(vec[:-1] > vec[1:])
elif which == '>=':
return np.all(vec[:-1] >= vec[1:])
else:
raise ValueError("`which` needs to be one of the following: `>`, `>=`, `<`, `<=`.")
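# Usage sketch (illustration only).
def _demo_ismonotonic():
    assert ismonotonic(np.array([1, 2, 3]), '<')
    assert ismonotonic(np.array([1, 1, 2]), '<=')
    assert not ismonotonic(np.array([1, 1, 2]), '<')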
# Definitions
def eval_m1(q):
"""
Mass of the heavier black hole in units of the total mass.
Call
----
m1 = eval_m1(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
m1: float
Mass of the primary (heavier) black hole.
"""
q = np.atleast_1d(q)
m1 = 1/(1+q)
return m1
def eval_m2(q):
"""
Mass of the lighter black hole in units of the total mass.
Call
----
m2 = eval_m2(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
m2: float
Mass of the secondary (lighter) black hole.
"""
q = np.atleast_1d(q)
m2 = q/(1+q)
return m2
def masses(q):
"""
Masses of the two black holes in units of the total mass.
Call
----
m1,m2 = masses(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
m1: float
Mass of the primary (heavier) black hole.
m2: float
Mass of the secondary (lighter) black hole.
"""
m1 = eval_m1(q)
m2 = eval_m2(q)
return np.stack([m1, m2])
def eval_q(m1, m2):
"""
Mass ratio, 0 < q = m2/m1 < 1.
Call
----
q = eval_q(m1,m2)
Parameters
----------
m1: float
Mass of the primary (heavier) black hole.
m2: float
Mass of the secondary (lighter) black hole.
Returns
-------
q: float
Mass ratio: 0<=q<=1.
"""
m1 = np.atleast_1d(m1)
m2 = np.atleast_1d(m2)
q = m2/m1
assert (q < 1).all(), "The convention used in this code is q=m2/m1<1."
return q
def eval_eta(q):
"""
Symmetric mass ratio eta = m1*m2/(m1+m2)^2 = q/(1+q)^2.
Call
----
eta = eval_eta(q)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
Returns
-------
eta: float
Symmetric mass ratio 0<=eta<=1/4.
"""
q = np.atleast_1d(q)
eta = q/(1+q)**2
return eta
def eval_S1(q, chi1):
"""
Spin angular momentum of the heavier black hole.
Call
----
S1 = eval_S1(q,chi1)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
Returns
-------
S1: float
Magnitude of the primary spin.
"""
chi1 = np.atleast_1d(chi1)
S1 = chi1*(eval_m1(q))**2
return S1
def eval_S2(q, chi2):
"""
Spin angular momentum of the lighter black hole.
Call
----
S2 = eval_S2(q,chi2)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
S2: float
Magnitude of the secondary spin.
"""
chi2 = np.atleast_1d(chi2)
S2 = chi2*(eval_m2(q))**2
return S2
def spinmags(q, chi1, chi2):
"""
Spins of the black holes in units of the total mass.
Call
----
S1,S2 = spinmags(q,chi1,chi2)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
S1: float
Magnitude of the primary spin.
S2: float
Magnitude of the secondary spin.
"""
S1 = eval_S1(q, chi1)
S2 = eval_S2(q, chi2)
return np.stack([S1, S2])
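# A minimal usage sketch for the mass/spin helpers above (illustrative values; the
# helper name is not part of the module): component masses are in units of the
# total mass, so they sum to one, and each spin magnitude is chi_i*m_i**2.
def _example_masses_spins():
    q, chi1, chi2 = 0.8, 0.9, 0.5
    m1, m2 = masses(q)
    S1, S2 = spinmags(q, chi1, chi2)
    assert np.isclose(m1 + m2, 1)
    assert np.isclose(S1, chi1 * m1**2)
    assert np.isclose(S2, chi2 * m2**2)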
def eval_L(r, q):
"""
Newtonian angular momentum of the binary.
Call
----
L = eval_L(r,q)
Parameters
----------
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
Returns
-------
L: float
Magnitude of the Newtonian orbital angular momentum.
"""
r = np.atleast_1d(r)
L = eval_m1(q)*eval_m2(q)*r**0.5
return L
def eval_v(r):
"""
Newtonian orbital velocity of the binary.
Call
----
v = eval_v(r)
Parameters
----------
r: float
Binary separation.
Returns
-------
v: float
Newtonian orbital velocity.
"""
r = np.atleast_1d(r)
v = 1/r**0.5
return v
def eval_r(L=None, u=None, q=None):
"""
Orbital separation of the binary. Valid inputs are either (L,q) or (u,q).
Call
----
r = eval_r(L=None,u=None,q=None)
Parameters
----------
L: float, optional (default: None)
Magnitude of the Newtonian orbital angular momentum.
u: float, optional (default: None)
Compactified separation 1/(2L).
q: float, optional (default: None)
Mass ratio: 0<=q<=1.
Returns
-------
r: float
Binary separation.
"""
if L is not None and u is None and q is not None:
L = np.atleast_1d(L)
m1, m2 = masses(q)
r = (L / (m1 * m2))**2
elif L is None and u is not None and q is not None:
u = np.atleast_1d(u)
r = (2*eval_m1(q)*eval_m2(q)*u)**(-2)
else:
raise TypeError("Provide either (L,q) or (u,q).")
return r
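# A consistency sketch for the Newtonian relations above (illustrative values;
# helper name not part of the module): L = m1*m2*sqrt(r), v = 1/sqrt(r), and
# eval_r inverts eval_L at fixed mass ratio.
def _example_newtonian_relations():
    r, q = 10.0, 0.8
    L = eval_L(r, q)
    assert np.isclose(eval_v(r), r**-0.5)
    assert np.isclose(eval_r(L=L, q=q), r)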
# Limits
def Jlimits_LS1S2(r, q, chi1, chi2):
"""
Limits on the magnitude of the total angular momentum due to the vector relation J=L+S1+S2.
Call
----
Jmin,Jmax = Jlimits_LS1S2(r,q,chi1,chi2)
Parameters
----------
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
Jmin: float
Minimum value of the total angular momentum J.
Jmax: float
Maximum value of the total angular momentum J.
"""
S1, S2 = spinmags(q, chi1, chi2)
L = eval_L(r, q)
Jmin = np.maximum.reduce([np.zeros(L.shape), L-S1-S2, np.abs(S1-S2)-L])
Jmax = L+S1+S2
return np.stack([Jmin, Jmax])
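# Usage sketch (illustrative values; helper name not part of the module): the
# kinematic range of J allowed by the vector relation J = L + S1 + S2.
def _example_jlimits_kinematic():
    Jmin, Jmax = Jlimits_LS1S2(r=10.0, q=0.8, chi1=0.9, chi2=0.5)
    assert (Jmin >= 0).all() and (Jmax > Jmin).all()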
def kappadiscriminant_coefficients(u, chieff, q, chi1, chi2):
"""
Coefficients of the quintic equation in kappa that defines the spin-orbit resonances.
Call
----
coeff5,coeff4,coeff3,coeff2,coeff1,coeff0 = kappadiscriminant_coefficients(u,chieff,q,chi1,chi2)
Parameters
----------
u: float
Compactified separation 1/(2L).
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
coeff5: float
Coefficient to the x^5 term in polynomial.
coeff4: float
Coefficient to the x^4 term in polynomial.
coeff3: float
Coefficient to the x^3 term in polynomial.
coeff2: float
Coefficient to the x^2 term in polynomial.
coeff1: float
Coefficient to the x^1 term in polynomial.
coeff0: float
Coefficient to the x^0 term in polynomial.
"""
u = np.atleast_1d(u)
q = np.atleast_1d(q)
chieff = np.atleast_1d(chieff)
S1, S2 = spinmags(q, chi1, chi2)
# Machine generated with polycoefficients.nb
coeff5 = -256 * q**3 * ((1 + q))**6 * u
# Machine generated with polycoefficients.nb
coeff4 = 16 * q**2 * ((1 + q))**4 * (((-1 + q**2))**2 + (-16 * ((1 +
q))**2 * (q * (-5 + 3 * q) * S1**2 + (3 + -5 * q) * S2**2) * u**2 +
(40 * q * ((1 + q))**2 * u * chieff + 16 * q**2 * u**2 * chieff**2)))
# Machine generated with polycoefficients.nb
coeff3 = -32 * q * ((1 + q))**4 * (2 * q**6 * S1**2 * u * (-5 + 12 *
S1**2 * u**2) + (2 * S2**2 * u * (-5 + 12 * S2**2 * u**2) + (2 * q**2
* u * (40 * S1**4 * u**2 + (-44 * S2**4 * u**2 + (8 * chieff**2 +
(S1**2 * (-5 + (-8 * S2**2 * u**2 + 40 * u * chieff)) + -2 * S2**2 *
(-5 + 4 * u * chieff * (1 + u * chieff)))))) + (2 * q**3 * (32 *
S1**4 * u**3 + (32 * S2**4 * u**3 + (chieff * (-1 + 8 * u * chieff *
(3 + u * chieff)) + (2 * S2**2 * u * (-1 + u * chieff * (17 + 8 * u *
chieff)) + 2 * S1**2 * u * (-1 + (40 * S2**2 * u**2 + u * chieff *
(17 + 8 * u * chieff))))))) + (q * (chieff + 2 * u * (S1**2 * (1 +
-48 * S2**2 * u**2) + S2**2 * (1 + -2 * u * (12 * S2**2 * u +
chieff)))) + (q**5 * (chieff + 2 * u * (S2**2 + S1**2 * (1 + -2 * u *
(12 * (S1**2 + 2 * S2**2) * u + chieff)))) + -2 * q**4 * u * (5 *
S2**2 + (44 * S1**4 * u**2 + (-8 * (5 * S2**4 * u**2 + (5 * S2**2 * u
* chieff + chieff**2)) + 2 * S1**2 * (-5 + 4 * u * (chieff + u *
(S2**2 + chieff**2))))))))))))
# Machine generated with polycoefficients.nb
coeff2 = -16 * ((1 + q))**2 * (16 * (-1 + q) * q**3 * ((1 + q))**4 *
(10 + (-8 + q) * q) * S1**6 * u**4 + (-16 * ((-1 + q))**3 * ((1 +
q))**4 * S2**6 * u**4 + (-1 * ((-1 + q**2))**2 * S2**4 * u**2 * (((1
+ q))**2 * (-8 + (-20 + q) * q) + (8 * (-4 + q) * q * (1 + q) * u *
chieff + 16 * q**2 * u**2 * chieff**2)) + (-1 * q**2 * (((1 + q) *
S2**2 * u + q * chieff))**2 * ((-1 + q) * ((1 + q))**2 * (-1 + (q +
48 * S2**2 * u**2)) + (8 * q * (1 + q) * (5 + q) * u * chieff + 16 *
q**2 * u**2 * chieff**2)) + (2 * q**2 * ((1 + q))**2 * S1**4 * u**2 *
((-1 + q) * ((1 + q))**2 * ((-1 + q) * (-3 + (30 * q + 4 * q**2)) +
-72 * (2 + (-2 + q) * q) * S2**2 * u**2) + (4 * q * (1 + q) * (-30 +
q * (39 + q * (-19 + 4 * q))) * u * chieff + -8 * q**2 * (6 + (-6 +
q) * q) * u**2 * chieff**2)) + (-4 * q * (-1 * (1 + q) * S2**2 * u +
-1 * q * chieff) * (-1 * ((-1 + q))**2 * ((1 + q))**3 * S2**2 * u *
(-10 + (q + 24 * S2**2 * u**2)) + (-1 * (-1 + q) * q * ((1 + q))**2 *
(-1 + (q + 4 * (1 + 2 * q) * S2**2 * u**2)) * chieff + (-8 * q**2 *
(1 + q) * u * (2 + (q + 2 * (-1 + q) * S2**2 * u**2)) * chieff**2 +
-16 * q**3 * u**2 * chieff**3))) + (q * (1 + q) * S1**2 * ((-1 + q) *
((1 + q))**3 * (((-1 + q))**3 * q + (4 * (-1 + q) * (15 + q * (-29 +
15 * q)) * S2**2 * u**2 + 144 * (1 + 2 * (-1 + q) * q) * S2**4 *
u**4)) + (2 * q * ((1 + q))**2 * u * (((-1 + q))**2 * (-3 + q * (23 +
4 * q)) + 12 * (1 + q) * (1 + q**2) * S2**2 * u**2) * chieff + (8 *
q**2 * (1 + q) * u**2 * (-12 + (-2 * q + (-11 * q**2 + (q**3 + 4 * (3
+ q * (-5 + 3 * q)) * S2**2 * u**2)))) * chieff**2 + -32 * q**3 * (3
+ (-1 + q) * q) * u**3 * chieff**3))) + (S2**2 * (((-1 + q**2))**4 +
(2 * ((-1 + q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (8
* (-1 + q) * q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 +
32 * q**3 * (-1 + q**2) * u**3 * chieff**3))) + -1 * q**2 * chieff**2
* (1 + q * (8 * u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u
* chieff))**2))))))))))))
# Machine generated with polycoefficients.nb
coeff1 = -16 * (1 + q) * (-16 * ((-1 + q))**2 * q**3 * ((1 + q))**5 *
(-5 + 2 * q) * S1**8 * u**5 + (-4 * (-1 + q) * q**2 * ((1 + q))**3 *
S1**6 * u**3 * ((-1 + q) * ((1 + q))**2 * (-1 + (15 * q + (4 * q**2 +
8 * (6 + (-1 + q) * q) * S2**2 * u**2))) + (2 * q * (1 + q) * (20 + q
* (-29 + 12 * q)) * u * chieff + -8 * (-2 + q) * q**2 * u**2 *
chieff**2)) + (-2 * q * (((1 + q) * S2**2 * u + q * chieff))**2 * (-1
* ((-1 + q))**2 * ((1 + q))**3 * S2**2 * u * (-10 + (q + 24 * S2**2 *
u**2)) + (-1 * (-1 + q) * q * ((1 + q))**2 * (-1 + (q + 4 * (1 + 2 *
q) * S2**2 * u**2)) * chieff + (-8 * q**2 * (1 + q) * u * (2 + (q + 2
* (-1 + q) * S2**2 * u**2)) * chieff**2 + -16 * q**3 * u**2 *
chieff**3))) + (-2 * q * ((1 + q))**2 * S1**4 * u * (((-1 + q))**2 *
((1 + q))**3 * (((-1 + q))**2 * q + (2 * (15 + q * (-55 + 2 * q * (9
+ 2 * q))) * S2**2 * u**2 + -72 * (1 + q**2) * S2**4 * u**4)) + ((-1
+ q) * q * ((1 + q))**2 * u * (3 + (-52 * q + (33 * q**2 + (16 * q**3
+ 4 * (-3 + 2 * q**2 * (-7 + 4 * q)) * S2**2 * u**2)))) * chieff +
(-8 * q**2 * (1 + q) * u**2 * (6 + (-16 * q + (18 * q**2 + (-5 * q**3
+ 2 * (-1 + q) * (3 + (-1 + q) * q) * S2**2 * u**2)))) * chieff**2 +
-16 * q**3 * (3 + q * (-5 + 3 * q)) * u**3 * chieff**3))) + (S1**2 *
(-32 * ((-1 + q))**2 * ((1 + q))**5 * (1 + q * (-1 + 6 * q)) * S2**6
* u**5 + (-4 * (-1 + q) * ((1 + q))**3 * S2**4 * u**3 * ((-1 + q) *
((1 + q))**2 * (4 + q * (18 + 5 * q * (-11 + 3 * q))) + (2 * q * (1 +
q) * (-8 + (14 * q + 3 * q**3)) * u * chieff + 8 * q**2 * (1 + q *
(-1 + 3 * q)) * u**2 * chieff**2)) + (2 * ((1 + q))**3 * S2**2 * u *
(-1 * ((-1 + q))**4 * ((1 + q))**2 * (1 + (-12 + q) * q) + (-2 * q *
((-1 + q**2))**2 * (4 + q * (-7 + 4 * q)) * u * chieff + (-8 * q**2 *
(1 + q * (-8 + q * (20 + (-8 + q) * q))) * u**2 * chieff**2 + 16 *
(-2 + q) * q**3 * (-1 + 2 * q) * u**3 * chieff**3))) + 2 * q**2 *
chieff * (-1 * ((-1 + q**2))**4 + (-1 * ((-1 + q))**2 * ((1 + q))**3
* (-1 + q * (18 + 7 * q)) * u * chieff + (4 * q * ((1 + q))**2 * (2 +
q * (-5 + 19 * q)) * u**2 * chieff**2 + 16 * q**2 * (1 + q**2 * (2 +
3 * q)) * u**3 * chieff**3)))))) + -2 * (-1 * (1 + q) * S2**2 * u +
-1 * q * chieff) * (16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 * u**4
+ (((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20 + q)
* q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 * u**2 *
chieff**2)) + (S2**2 * (-1 * ((-1 + q**2))**4 + (-2 * ((-1 + q))**2 *
q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (-8 * (-1 + q) * q**2 *
((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + -32 * q**3 * (-1 +
q**2) * u**3 * chieff**3))) + q**2 * chieff**2 * (1 + q * (8 * u *
chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u *
chieff))**2))))))))))))
# Machine generated with polycoefficients.nb
coeff0 = -16 * (16 * ((-1 + q))**3 * q**3 * ((1 + q))**6 * S1**10 *
u**6 + (-1 * ((-1 + q))**2 * q**2 * ((1 + q))**4 * S1**8 * u**4 *
(((1 + q))**2 * (1 + (-20 * q + (-8 * q**2 + 16 * (-3 + (q + 2 *
q**2)) * S2**2 * u**2))) + (-8 * q * (1 + q) * (-5 + 8 * q) * u *
chieff + 16 * q**2 * u**2 * chieff**2)) + ((-1 + q) * q * ((1 +
q))**3 * S1**6 * u**2 * (q * ((-1 + q**2))**3 + (-4 * (-1 + q) * ((1
+ q))**3 * (-5 + q * (27 + q * (-3 + 8 * q))) * S2**2 * u**2 + (16 *
((-1 + q))**2 * ((1 + q))**3 * (3 + q * (6 + q)) * S2**4 * u**4 + (-2
* (-1 + q) * q * ((1 + q))**2 * u * (1 + (-25 * q + (-12 * q**2 + 4 *
(-1 + (q + 12 * q**2)) * S2**2 * u**2))) * chieff + (8 * q**2 * (1 +
q) * u**2 * (4 + (-18 * q + (11 * q**2 + 4 * (-1 + q**2) * S2**2 *
u**2))) * chieff**2 + 32 * (1 + -2 * q) * q**3 * u**3 *
chieff**3))))) + (((1 + q))**2 * S1**4 * u * (-16 * ((-1 + q))**3 *
((1 + q))**4 * (1 + 3 * q * (2 + q)) * S2**6 * u**5 + (2 * S2**4 *
u**3 * (((-1 + q))**2 * ((1 + q))**4 * (4 + q * (6 + q * (61 + (6 * q
+ 4 * q**2)))) + (4 * ((-1 + q))**2 * q * ((1 + q))**4 * (4 + (q + 4
* q**2)) * u * chieff + -8 * q**2 * ((-1 + q**2))**2 * (1 + q * (4 +
q)) * u**2 * chieff**2)) + (chieff * (2 * ((-1 + q))**4 * q**2 * ((1
+ q))**3 + (((q + -1 * q**3))**2 * (-1 + q * (40 + 23 * q)) * u *
chieff + (8 * q**3 * (1 + q) * (-1 + q * (14 + 5 * (-4 + q) * q)) *
u**2 * chieff**2 + -16 * q**4 * (1 + 6 * (-1 + q) * q) * u**3 *
chieff**3))) + (-1 + q) * (1 + q) * S2**2 * u * (-1 * ((-1 +
q**2))**3 * (-1 + 2 * q * (12 + 5 * q)) + (-2 * (-1 + q) * q * ((1 +
q))**2 * (-4 + q * (29 + q * (-21 + 32 * q))) * u * chieff + (-8 *
q**2 * (1 + q) * (1 + 2 * (-2 + q) * q * (1 + 4 * q)) * u**2 *
chieff**2 + 32 * q**3 * (1 + q * (-1 + 3 * q)) * u**3 *
chieff**3)))))) + ((1 + q) * S1**2 * (16 * ((-1 + q))**3 * ((1 +
q))**5 * (2 + 3 * q) * S2**8 * u**6 + (q**2 * chieff**2 * (((-1 +
q))**4 * ((1 + q))**3 + (2 * q * (5 + 3 * q) * ((-1 + q**2))**2 * u *
chieff + (-8 * q**2 * (1 + q) * (-4 + q * (7 + q)) * u**2 * chieff**2
+ 32 * (1 + -2 * q) * q**3 * u**3 * chieff**3))) + ((-1 + q) * ((1 +
q))**2 * S2**4 * u**2 * ((-10 + (-24 + q) * q) * ((-1 + q**2))**3 +
(2 * (-1 + q) * q * ((1 + q))**2 * (-32 + q * (21 + q * (-29 + 4 *
q))) * u * chieff + (8 * q**2 * (1 + q) * (8 + q * (-14 + (-4 + q) *
q)) * u**2 * chieff**2 + -32 * q**3 * (3 + (-1 + q) * q) * u**3 *
chieff**3))) + (S2**2 * (-1 * ((-1 + q))**6 * ((1 + q))**5 + (-10 *
((-1 + q))**4 * q * ((1 + q))**5 * u * chieff + (-2 * ((-1 + q))**2 *
q**2 * ((1 + q))**3 * (11 + q * (-24 + 11 * q)) * u**2 * chieff**2 +
(16 * q**3 * ((1 + q))**3 * (2 + q * (-3 + 2 * q)) * u**3 * chieff**3
+ 32 * q**4 * (1 + q) * (3 + q * (-5 + 3 * q)) * u**4 * chieff**4))))
+ 4 * ((-1 + q))**2 * ((1 + q))**4 * S2**6 * u**4 * (-8 + q * (-5 +
(-24 * q + (-22 * q**2 + (5 * q**3 + (2 * (-4 + q) * (3 + q) * u *
chieff + 8 * q * u**2 * chieff**2)))))))))) + -1 * (((1 + q) * S2**2
* u + q * chieff))**2 * (16 * ((-1 + q))**3 * ((1 + q))**4 * S2**6 *
u**4 + (((-1 + q**2))**2 * S2**4 * u**2 * (((1 + q))**2 * (-8 + (-20
+ q) * q) + (8 * (-4 + q) * q * (1 + q) * u * chieff + 16 * q**2 *
u**2 * chieff**2)) + (S2**2 * (-1 * ((-1 + q**2))**4 + (-2 * ((-1 +
q))**2 * q * ((1 + q))**3 * (4 + 5 * q) * u * chieff + (-8 * (-1 + q)
* q**2 * ((1 + q))**2 * (-1 + 4 * q) * u**2 * chieff**2 + -32 * q**3
* (-1 + q**2) * u**3 * chieff**3))) + q**2 * chieff**2 * (1 + q * (8
* u * chieff + q * (-2 + (16 * u * chieff + ((q + 4 * u *
chieff))**2))))))))))))
return np.stack([coeff5, coeff4, coeff3, coeff2, coeff1, coeff0])
def kapparesonances(u, chieff, q, chi1, chi2):
"""
Regularized angular momentum of the two spin-orbit resonances. The resonances minimize and maximize kappa for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.
Call
----
kappamin,kappamax = kapparesonances(u,chieff,q,chi1,chi2)
Parameters
----------
u: float
Compactified separation 1/(2L).
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
kappamin: float
Minimum value of the regularized angular momentum kappa.
kappamax: float
Maximum value of the regularized angular momentum kappa.
"""
u = np.atleast_1d(u)
chieff = np.atleast_1d(chieff)
q = np.atleast_1d(q)
chi1 = np.atleast_1d(chi1)
chi2 = np.atleast_1d(chi2)
kapparoots = wraproots(kappadiscriminant_coefficients, u, chieff, q, chi1, chi2)
# There are in principle five solutions, but only two are physical.
def _compute(kapparoots, u, chieff, q, chi1, chi2):
kapparoots = kapparoots[np.isfinite(kapparoots)]
Sroots = Satresonance(kappa=kapparoots, u=np.tile(u, kapparoots.shape), chieff=np.tile(chieff, kapparoots.shape), q=np.tile(q, kapparoots.shape), chi1=np.tile(chi1, kapparoots.shape), chi2=np.tile(chi2, kapparoots.shape))
Smin, Smax = Slimits_S1S2(np.tile(q, kapparoots.shape), np.tile(chi1, kapparoots.shape), np.tile(chi2, kapparoots.shape))
kappares = kapparoots[np.logical_and(Sroots > Smin, Sroots < Smax)]
assert len(kappares) <= 2, "I found more than two resonances, this should not be possible."
# If you didn't find enough solutions, append nans
kappares = np.concatenate([kappares, np.repeat(np.nan, 2-len(kappares))])
return kappares
kappamin, kappamax = np.array(list(map(_compute, kapparoots, u, chieff, q, chi1, chi2))).T
return np.stack([kappamin, kappamax])
def kappainfresonances(chieff, q, chi1, chi2):
"""
Asymptotic angular momentum of the two spin-orbit resonances at infinitely large separation. The resonances minimize and maximize kappainf for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.
Call
----
kappainfmin,kappainfmax = kappainfresonances(chieff,q,chi1,chi2)
Parameters
----------
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
kappainfmin: float
Minimum value of the asymptotic angular momentum kappainf.
kappainfmax: float
Maximum value of the asymptotic angular momentum kappainf.
"""
chieff = np.atleast_1d(chieff)
q = np.atleast_1d(q)
S1, S2 = spinmags(q, chi1, chi2)
kappainfmin = np.maximum((chieff - (q**-1-q)*S2)/(1+q), (chieff - (q**-1-q)*S1)/(1+q**-1))
kappainfmax = np.minimum((chieff + (q**-1-q)*S2)/(1+q), (chieff + (q**-1-q)*S1)/(1+q**-1))
return np.stack([kappainfmin, kappainfmax])
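# Usage sketch for the closed-form asymptotic resonances above (illustrative
# values; helper name not part of the module): for inputs within the allowed
# chieff range the returned pair is ordered, kappainfmin <= kappainfmax.
def _example_kappainf_resonances():
    kappainfmin, kappainfmax = kappainfresonances(chieff=0.1, q=0.8, chi1=0.9, chi2=0.5)
    assert (kappainfmin <= kappainfmax).all()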
def Jresonances(r, chieff, q, chi1, chi2):
"""
Total angular momentum of the two spin-orbit resonances. The resonances minimize and maximize J for a given value of chieff. The minimum corresponds to deltaphi=pi and the maximum corresponds to deltaphi=0.
Call
----
Jmin,Jmax = Jresonances(r,chieff,q,chi1,chi2)
Parameters
----------
r: float
Binary separation.
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
Jmin: float
Minimum value of the total angular momentum J.
Jmax: float
Maximum value of the total angular momentum J.
"""
u = eval_u(r, q)
kappamin, kappamax = kapparesonances(u, chieff, q, chi1, chi2)
Jmin = eval_J(kappa=kappamin, r=r, q=q)
Jmax = eval_J(kappa=kappamax, r=r, q=q)
return np.stack([Jmin, Jmax])
def Jlimits(r=None, chieff=None, q=None, chi1=None, chi2=None, enforce=False):
"""
Limits on the magnitude of the total angular momentum. The constraints considered depend on the inputs provided.
- If r, q, chi1, and chi2 are provided, the limits are given by J=L+S1+S2.
- If r, chieff, q, chi1, and chi2 are provided, the limits are given by the two spin-orbit resonances.
The boolean flag enforce allows raising an error in case the inputs are not compatible.
Call
----
Jmin,Jmax = Jlimits(r=None,chieff=None,q=None,chi1=None,chi2=None,enforce=False)
Parameters
----------
r: float, optional (default: None)
Binary separation.
chieff: float, optional (default: None)
Effective spin.
q: float, optional (default: None)
Mass ratio: 0<=q<=1.
chi1: float, optional (default: None)
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float, optional (default: None)
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
enforce: boolean, optional (default: False)
If True raise errors, if False raise warnings.
Returns
-------
Jmin: float
Minimum value of the total angular momentum J.
Jmax: float
Maximum value of the total angular momentum J.
"""
if r is not None and chieff is None and q is not None and chi1 is not None and chi2 is not None:
Jmin, Jmax = Jlimits_LS1S2(r, q, chi1, chi2)
elif r is not None and chieff is not None and q is not None and chi1 is not None and chi2 is not None:
Jmin, Jmax = Jresonances(r, chieff, q, chi1, chi2)
# Check precondition
Jmin_cond, Jmax_cond = Jlimits_LS1S2(r, q, chi1, chi2)
if (Jmin > Jmin_cond).all() and (Jmax < Jmax_cond).all():
pass
else:
if enforce:
raise ValueError("Input values are not compatible [Jlimits].")
else:
warnings.warn("Input values are not compatible [Jlimits].", Warning)
else:
raise TypeError("Provide either (r,q,chi1,chi2) or (r,chieff,q,chi1,chi2).")
return np.stack([Jmin, Jmax])
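# Usage sketch for the two call signatures above (illustrative values; helper name
# not part of the module; the resonance branch relies on Jresonances and hence on
# functions defined elsewhere in this module, such as Satresonance and wraproots).
# The resonant interval lies within the kinematic one, consistent with the
# precondition checked above.
def _example_jlimits_usage():
    r, q, chi1, chi2 = 10.0, 0.8, 0.9, 0.5
    Jmin_kin, Jmax_kin = Jlimits(r=r, q=q, chi1=chi1, chi2=chi2)
    Jmin_res, Jmax_res = Jlimits(r=r, chieff=0.1, q=q, chi1=chi1, chi2=chi2)
    assert (Jmin_res >= Jmin_kin).all() and (Jmax_res <= Jmax_kin).all()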
def kappainflimits(chieff=None, q=None, chi1=None, chi2=None, enforce=False):
"""
Limits on the asymptotic angular momentum. The constraints considered depend on the inputs provided.
- If q, chi1, and chi2 are provided, the limits are given by |kappainf| <= S1+S2.
- If chieff, q, chi1, and chi2 are provided, the limits are given by the two spin-orbit resonances.
The boolean flag enforce allows raising an error in case the inputs are not compatible.
Call
----
kappainfmin,kappainfmax = kappainflimits(chieff=None,q=None,chi1=None,chi2=None,enforce=False)
Parameters
----------
chieff: float, optional (default: None)
Effective spin.
q: float, optional (default: None)
Mass ratio: 0<=q<=1.
chi1: float, optional (default: None)
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float, optional (default: None)
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
enforce: boolean, optional (default: False)
If True raise errors, if False raise warnings.
Returns
-------
kappainfmin: float
Minimum value of the asymptotic angular momentum kappainf.
kappainfmax: float
Maximum value of the asymptotic angular momentum kappainf.
"""
if chieff is None and q is not None and chi1 is not None and chi2 is not None:
kappainflim = Slimits_S1S2(q, chi1, chi2)[1]
kappainfmin, kappainfmax = -kappainflim, kappainflim
elif chieff is not None and q is not None and chi1 is not None and chi2 is not None:
kappainfmin, kappainfmax = kappainfresonances(chieff, q, chi1, chi2)
# Check precondition
kappainflim = Slimits_S1S2(q, chi1, chi2)[1]
kappainfmin_cond, kappainfmax_cond = -kappainflim, kappainflim
if (kappainfmin > kappainfmin_cond).all() and (kappainfmax < kappainfmax_cond).all():
pass
else:
if enforce:
raise ValueError("Input values are not compatible [kappainflimits].")
else:
warnings.warn("Input values are not compatible [kappainflimits].", Warning)
else:
raise TypeError("Provide either (q,chi1,chi2) or (chieff,q,chi1,chi2).")
return np.stack([kappainfmin, kappainfmax])
def chiefflimits_definition(q, chi1, chi2):
"""
Limits on the effective spin based only on the definition chieff = (1+q)S1.L + (1+1/q)S2.L.
Call
----
chieffmin,chieffmax = chiefflimits_definition(q,chi1,chi2)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
chieffmin: float
Minimum value of the effective spin chieff.
chieffmax: float
Maximum value of the effective spin chieff.
"""
q = np.atleast_1d(q)
S1, S2 = spinmags(q, chi1, chi2)
chiefflim = (1+q)*S1 + (1+1/q)*S2
return np.stack([-chiefflim, chiefflim])
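# Usage sketch (illustrative values; helper name not part of the module): by the
# definition above the allowed chieff interval is symmetric about zero, with
# half-width (1+q)*S1 + (1+1/q)*S2.
def _example_chiefflimits_definition():
    chieffmin, chieffmax = chiefflimits_definition(q=0.8, chi1=0.9, chi2=0.5)
    assert np.isclose(chieffmin, -chieffmax)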
def chieffdiscriminant_coefficients(kappa, u, q, chi1, chi2):
"""
Coefficients of the sixth-degree equation in chieff that defines the spin-orbit resonances.
Call
----
coeff6,coeff5,coeff4,coeff3,coeff2,coeff1,coeff0 = chieffdiscriminant_coefficients(kappa,u,q,chi1,chi2)
Parameters
----------
kappa: float
Regularized angular momentum (J^2-L^2)/(2L).
u: float
Compactified separation 1/(2L).
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
coeff6: float
Coefficient to the x^6 term in polynomial.
coeff5: float
Coefficient to the x^5 term in polynomial.
coeff4: float
Coefficient to the x^4 term in polynomial.
coeff3: float
Coefficient to the x^3 term in polynomial.
coeff2: float
Coefficient to the x^2 term in polynomial.
coeff1: float
Coefficient to the x^1 term in polynomial.
coeff0: float
Coefficient to the x^0 term in polynomial.
"""
kappa = np.atleast_1d(kappa)
u = np.atleast_1d(u)
q = np.atleast_1d(q)
S1, S2 = spinmags(q, chi1, chi2)
# Machine generated with polycoefficients.nb
coeff6 = 256 * q**6 * u**2
# Machine generated with polycoefficients.nb
coeff5 = 128 * q**5 * (1 + q) * u * (1 + (q + (8 * q * S1**2 * u**2 +
(-4 * u * (S1**2 * u + (-2 * S2**2 * u + kappa)) + -4 * q * u *
(S2**2 * u + kappa)))))
# Machine generated with polycoefficients.nb
coeff4 = 16 * q**4 * ((1 + q))**2 * (1 + (-32 * S1**2 * u**2 + (8 * u
* (12 * S2**4 * u**3 + (-2 * kappa + (2 * u * ((-1 * S1**2 * u +
kappa))**2 + S2**2 * u * (1 + -12 * u * (S1**2 * u + kappa))))) + (q
* (-2 + (-96 * S1**4 * u**4 + (8 * S1**2 * u**2 * (7 + 4 * u * (5 *
S2**2 * u + kappa)) + 8 * u * (-12 * S2**4 * u**3 + (2 * kappa * (-3
+ 4 * u * kappa) + S2**2 * u * (7 + 4 * u * kappa)))))) + q**2 * (1 +
8 * u * (-2 * kappa + u * (-4 * S2**2 + (12 * S1**4 * u**2 + (2 *
((-1 * S2**2 * u + kappa))**2 + S1**2 * (1 + -12 * u * (S2**2 * u +
kappa)))))))))))
# Machine generated with polycoefficients.nb
coeff3 = 32 * q**3 * ((1 + q))**3 * (16 * (-1 + q) * q * (-1 + 2 * q)
* S1**6 * u**5 + (16 * (-2 + q) * (-1 + q) * S2**6 * u**5 + (4 *
S2**4 * u**3 * (-5 + (20 * q + (-14 * q**2 + (q**3 + -4 * (3 + q *
(-5 + 3 * q)) * u * kappa)))) + ((1 + q) * kappa * (-1 * ((-1 +
q))**2 + (4 * (1 + q * (8 + q)) * u * kappa + -16 * q * u**2 *
kappa**2)) + (S2**2 * u * (-1 * ((-1 + q))**2 * (3 + 5 * q) + (-4 * q
* (19 + q * (-5 + 2 * q)) * u * kappa + 16 * (1 + q * (-1 + 3 * q)) *
u**2 * kappa**2)) + (-4 * S1**4 * u**3 * (-1 + (-4 * S2**2 * u**2 + q
* (14 + (5 * (-4 + q) * q + (8 * S2**2 * u**2 + (4 * q * (-4 + 3 * q)
* S2**2 * u**2 + 4 * (3 + q * (-5 + 3 * q)) * u * kappa)))))) + S1**2
* u * (-5 + (-8 * u * (6 * S2**4 * u**3 + (kappa + 2 * S2**2 * u * (1
+ 2 * u * kappa))) + (q * (7 + (64 * S2**4 * u**4 + (8 * S2**2 * u**2
* (1 + 6 * u * kappa) + 4 * u * kappa * (5 + 12 * u * kappa)))) +
(q**3 * (-3 + 16 * u**2 * (S2**4 * u**2 + (kappa**2 + -1 * S2**2 * (1
+ 2 * u * kappa)))) + q**2 * (1 + -4 * u * (8 * S2**4 * u**3 + (kappa
* (19 + 4 * u * kappa) + -2 * S2**2 * u * (1 + 6 * u *
kappa))))))))))))))
# Machine generated with polycoefficients.nb
coeff2 = 16 * q**2 * ((1 + q))**4 * (16 * ((-1 + q))**2 * q**2 *
S1**8 * u**6 + (16 * ((-1 + q))**2 * S2**8 * u**6 + (kappa**2 * (((-1
+ q))**2 * (1 + q * (4 + q)) + (-32 * q * (1 + q * (3 + q)) * u *
kappa + 16 * q**2 * u**2 * kappa**2)) + (S2**4 * u**2 * (((-1 +
q))**2 * (-23 + (-40 + q) * q) + (16 * (5 + -2 * q * (9 + q * (-8 + 3
* q))) * u * kappa + 16 * (1 + 6 * (-1 + q) * q) * u**2 * kappa**2))
+ (S2**2 * (-1 * ((-1 + q))**4 + (-2 * ((-1 + q))**2 * (-7 + (-18 +
q) * q) * u * kappa + (8 * (-1 + q * (11 + 2 * q * (1 + 6 * q))) *
u**2 * kappa**2 + 32 * (1 + -2 * q) * q * u**3 * kappa**3))) + (8 *
(-1 + q) * S2**6 * u**4 * (11 + (4 * u * kappa + 2 * q * (-9 + (2 * q
+ -4 * u * kappa)))) + (-8 * (-1 + q) * q * S1**6 * u**4 * (4 + (-4 *
S2**2 * u**2 + q * (-18 + (-8 * u * kappa + q * (11 + 4 * u * (S2**2
* u + kappa)))))) + (S1**4 * u**2 * (((1 + -4 * S2**2 * u**2))**2 +
(q**4 * (-23 + 16 * u * (S2**4 * u**3 + (-2 * S2**2 * u * (-2 + u *
kappa) + kappa * (5 + u * kappa)))) + (2 * q**3 * (3 + 8 * u * (2 *
S2**4 * u**3 + (-6 * kappa * (3 + u * kappa) + S2**2 * u * (-11 + 4 *
u * kappa)))) + (q**2 * (58 + -16 * u * (6 * S2**4 * u**3 + (-2 *
kappa * (8 + 3 * u * kappa) + S2**2 * u * (-5 + 8 * u * kappa)))) + q
* (-42 + 8 * u * (-12 * kappa + S2**2 * u * (5 + 4 * u * (S2**2 * u +
3 * kappa)))))))) + S1**2 * (-1 + (4 * q**3 * (1 + (-8 * S2**6 * u**6
+ (2 * S2**4 * u**4 * (5 + 12 * u * kappa) + (-1 * S2**2 * u**2 * (23
+ 8 * u * kappa * (4 + 3 * u * kappa)) + 2 * u * kappa * (1 + u *
kappa * (11 + 4 * u * kappa)))))) + (q**4 * (-1 + 2 * u * (-4 * S2**4
* u**3 + (kappa * (7 + -4 * u * kappa) + S2**2 * u * (11 + 8 * u *
kappa)))) + (2 * q**2 * (-3 + 2 * u * (8 * S2**6 * u**5 + (4 * S2**4
* u**3 * (5 + -8 * u * kappa) + (kappa * (-15 + 4 * u * kappa * (1 +
-4 * u * kappa)) + 5 * S2**2 * u * (7 + 8 * u * kappa * (2 + u *
kappa)))))) + (4 * q * (1 + u * (8 * S2**6 * u**5 + (4 * S2**4 * u**3
* (-11 + 4 * u * kappa) + (2 * kappa * (5 + 12 * u * kappa) + -1 *
S2**2 * u * (23 + 8 * u * kappa * (4 + 3 * u * kappa)))))) + 2 * u *
(-1 * kappa + S2**2 * u * (11 + 8 * u * (kappa + -2 * S2**2 * u * (-2
+ u * (S2**2 * u + kappa))))))))))))))))))
# Machine generated with polycoefficients.nb
coeff1 = -32 * q * ((1 + q))**2 * (4 * ((-1 + q))**2 * q**2 * ((1 +
q))**3 * (-5 + 8 * q) * S1**8 * u**5 + (-1 * (-1 + q) * q * ((1 +
q))**3 * S1**6 * u**3 * (-1 + (26 * q + (-13 * q**2 + (-12 * q**3 +
(4 * (-1 + q) * (1 + 3 * q) * (-1 + 4 * q) * S2**2 * u**2 + 4 * q *
(20 + q * (-29 + 12 * q)) * u * kappa))))) + ((-1 * (1 + q) * S2**2 *
u + (kappa + q * kappa)) * (16 * ((-1 + q))**3 * ((1 + q))**2 * S2**6
* u**4 + (q**2 * ((1 + q))**2 * kappa**2 * (((-1 + q))**2 + -16 * q *
u * kappa) + (-1 * (-1 + q) * ((1 + q))**2 * S2**2 * (((-1 + q))**3 +
(2 * (-10 + q) * (-1 + q) * q * u * kappa + -48 * q**2 * u**2 *
kappa**2)) + ((-1 + q**2))**2 * S2**4 * u**2 * (-8 + q * (-20 + (q +
-48 * u * kappa)))))) + (-1 * (1 + q) * ((-1 * (1 + q) * S2**2 * u +
(kappa + q * kappa)))**2 * (4 * (-4 + q) * ((-1 + q))**2 * S2**4 *
u**3 + (q * kappa * (-1 * ((-1 + q))**2 + 4 * q * (5 + q) * u *
kappa) + -1 * (-1 + q) * S2**2 * u * (-4 + q * (-1 + (4 * u * kappa +
q * (5 + 8 * u * kappa)))))) + (((1 + q))**3 * S1**2 * (4 * (-4 + q)
* ((-1 + q))**2 * (3 + q) * S2**6 * u**5 + ((1 + q) * S2**2 * u * (-5
* ((-1 + q))**4 + (-2 * ((-1 + q))**2 * (4 + q * (-7 + 4 * q)) * u *
kappa + 12 * q * (1 + q**2) * u**2 * kappa**2)) + (q * kappa * (-1 *
((-1 + q))**4 + (((-1 + q))**2 * (-3 + q * (23 + 4 * q)) * u * kappa
+ -4 * q * (-20 + q * (3 + q)) * u**2 * kappa**2)) + (-1 + q) * S2**4
* u**3 * (32 * (1 + u * kappa) + q * (-53 + (-56 * u * kappa + q *
(50 + q * (-33 + (4 * q + -12 * u * kappa))))))))) + -1 * ((1 +
q))**2 * S1**4 * u * (-4 * ((-1 + q**2))**2 * (4 + (q + 4 * q**2)) *
S2**4 * u**4 + (q * (1 + q) * (-1 * ((-1 + q))**4 + (((-1 + q))**2 *
(-3 + q * (49 + 16 * q)) * u * kappa + 4 * q * (30 + q * (-39 + (19 +
-4 * q) * q)) * u**2 * kappa**2)) + (-1 + q**2) * S2**2 * u**2 * (4 +
q * (-3 * (11 + 4 * u * kappa) + q * (50 + q * (-53 + (32 * q + 8 *
(-7 + 4 * q) * u * kappa))))))))))))
# Machine generated with polycoefficients.nb
coeff0 = -16 * ((1 + q))**4 * (16 * ((-1 + q))**3 * q**3 * ((1 +
q))**2 * S1**10 * u**6 + ((-1 + q) * q * ((1 + q))**2 * S1**6 * u**2
* ((-1 + q) * (((-1 + q))**2 * q + (-4 * (-5 + q * (27 + q * (-3 + 8
* q))) * S2**2 * u**2 + 16 * (-1 + q) * (3 + q * (6 + q)) * S2**4 *
u**4)) + (-4 * (-1 + q) * q * u * (-1 + (15 * q + (4 * q**2 + 8 * (6
+ (-1 + q) * q) * S2**2 * u**2))) * kappa + 16 * q**2 * (10 + (-8 +
q) * q) * u**2 * kappa**2)) + (-1 * S1**4 * u * (16 * ((-1 + q))**3 *
((1 + q))**2 * (1 + 3 * q * (2 + q)) * S2**6 * u**5 + (-2 * ((-1 +
q**2))**2 * S2**4 * u**3 * (4 + (q * (6 + q * (61 + (6 * q + 4 *
q**2))) + 72 * (q + q**3) * u * kappa)) + (2 * kappa * (((-1 + q))**4
* q**2 * ((1 + q))**2 + (-1 * (-3 + (30 * q + 4 * q**2)) * ((q + -1 *
q**3))**2 * u * kappa + -8 * q**3 * ((1 + q))**2 * (10 + 3 * (-4 + q)
* q) * u**2 * kappa**2)) + (-1 + q) * ((1 + q))**2 * S2**2 * u *
(((-1 + q))**3 * (-1 + 2 * q * (12 + 5 * q)) + (4 * (-1 + q) * q *
(15 + q * (-55 + 2 * q * (9 + 2 * q))) * u * kappa + 144 * q**2 * (2
+ (-2 + q) * q) * u**2 * kappa**2))))) + (((1 + q))**2 * S1**2 * (16
* ((-1 + q))**3 * (2 + 3 * q) * S2**8 * u**6 + (4 * ((-1 + q))**2 *
S2**6 * u**4 * (-8 + (3 * q + (-27 * q**2 + (5 * q**3 + 8 * (-1 + (q
+ -6 * q**2)) * u * kappa)))) + (q**2 * kappa**2 * (((-1 + q))**4 +
(-4 * ((-1 + q))**2 * (-1 + 5 * q) * u * kappa + 16 * q * (-5 + 3 *
q) * u**2 * kappa**2)) + ((-1 + q) * S2**4 * u**2 * (((-1 + q))**3 *
(-10 + (-24 + q) * q) + (-4 * (-1 + q) * (4 + q * (18 + 5 * q * (-11
+ 3 * q))) * u * kappa + 144 * q * (1 + 2 * (-1 + q) * q) * u**2 *
kappa**2)) + S2**2 * (-1 * ((-1 + q))**6 + (-2 * ((-1 + q))**4 * (1 +
(-12 + q) * q) * u * kappa + (4 * ((-1 + q))**2 * q * (15 + q * (-29
+ 15 * q)) * u**2 * kappa**2 + -32 * q**2 * (6 + q * (-11 + 6 * q)) *
u**3 * kappa**3))))))) + (-1 * ((-1 * S2**2 * u + kappa))**2 * (16 *
((-1 + q))**3 * ((1 + q))**2 * S2**6 * u**4 + (q**2 * ((1 + q))**2 *
kappa**2 * (((-1 + q))**2 + -16 * q * u * kappa) + (-1 * (-1 + q) *
((1 + q))**2 * S2**2 * (((-1 + q))**3 + (2 * (-10 + q) * (-1 + q) * q
* u * kappa + -48 * q**2 * u**2 * kappa**2)) + ((-1 + q**2))**2 *
S2**4 * u**2 * (-8 + q * (-20 + (q + -48 * u * kappa)))))) + -1 * ((q
+ -1 * q**3))**2 * S1**8 * u**4 * (1 + (-48 * S2**2 * u**2 + 4 * q *
(-5 + (4 * u * (S2**2 * u + -5 * kappa) + q * (-2 + 8 * u * (S2**2 *
u + kappa)))))))))))
return np.stack([coeff6, coeff5, coeff4, coeff3, coeff2, coeff1, coeff0])
def chieffresonances(J, r, q, chi1, chi2):
"""
Effective spin of the two spin-orbit resonances. The resonances minimize and maximize chieff for a given value of J. The minimum corresponds to either deltaphi=0 or deltaphi=pi, while the maximum always corresponds to deltaphi=pi.
Call
----
chieffmin,chieffmax = chieffresonances(J,r,q,chi1,chi2)
Parameters
----------
J: float
Magnitude of the total angular momentum.
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
chieffmin: float
Minimum value of the effective spin chieff.
chieffmax: float
Maximum value of the effective spin chieff.
"""
# Although there are six solutions in general, we know that only two can lie between Smin and Smax.
J = np.atleast_1d(J)
r = np.atleast_1d(r)
q = np.atleast_1d(q)
chi1 = np.atleast_1d(chi1)
chi2 = np.atleast_1d(chi2)
kappa = eval_kappa(J, r, q)
u = eval_u(r, q)
Smin, Smax = Slimits_LJS1S2(J, r, q, chi1, chi2)
chieffroots = wraproots(chieffdiscriminant_coefficients, kappa, u, q, chi1, chi2)
def _compute(Smin, Smax, J, r, chieffroots, q, chi1, chi2):
chieffroots = chieffroots[np.isfinite(chieffroots)]
Sroots = Satresonance(J=np.tile(J, chieffroots.shape), r=np.tile(r, chieffroots.shape), chieff=chieffroots, q=np.tile(q, chieffroots.shape), chi1=np.tile(chi1, chieffroots.shape), chi2=np.tile(chi2, chieffroots.shape))
chieffres = chieffroots[np.logical_and(Sroots > Smin, Sroots < Smax)]
assert len(chieffres) <= 2, "I found more than two resonances, this should not be possible."
# If you didn't find enough solutions, append nans
chieffres = np.concatenate([chieffres, np.repeat(np.nan, 2-len(chieffres))])
return chieffres
chieffmin, chieffmax = np.array(list(map(_compute, Smin, Smax, J, r, chieffroots, q, chi1, chi2))).T
return np.stack([chieffmin, chieffmax])
def anglesresonances(J=None, r=None, chieff=None, q=None, chi1=None, chi2=None):
"""
Compute the values of the angles corresponding to the two spin-orbit resonances. Provide either J or chieff, not both.
Call
----
theta1atmin,theta2atmin,deltaphiatmin,theta1atmax,theta2atmax,deltaphiatmax = anglesresonances(J=None,r=None,chieff=None,q=None,chi1=None,chi2=None)
Parameters
----------
J: float, optional (default: None)
Magnitude of the total angular momentum.
r: float, optional (default: None)
Binary separation.
chieff: float, optional (default: None)
Effective spin.
q: float, optional (default: None)
Mass ratio: 0<=q<=1.
chi1: float, optional (default: None)
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float, optional (default: None)
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
theta1atmin: float
Value of the angle theta1 at the resonance that minimizes either J or chieff, depending on the input.
theta2atmin: float
Value of the angle theta2 at the resonance that minimizes either J or chieff, depending on the input.
deltaphiatmin: float
Value of the angle deltaphi at the resonance that minimizes either J or chieff, depending on the input.
theta1atmax: float
Value of the angle theta1 at the resonance that maximizes either J or chieff, depending on the input.
theta2atmax: float
Value of the angle theta2 at the resonance that maximizes either J or chieff, depending on the input.
deltaphiatmax: float
Value of the angle deltaphi at the resonance that maximizes either J or chieff, depending on the input.
"""
q = np.atleast_1d(q)
if J is None and r is not None and chieff is not None and q is not None and chi1 is not None and chi2 is not None:
Jmin, Jmax = Jresonances(r, chieff, q, chi1, chi2)
Satmin = Satresonance(J=Jmin, r=r, chieff=chieff, q=q, chi1=chi1, chi2=chi2)
theta1atmin = eval_theta1(Satmin, Jmin, r, chieff, q, chi1, chi2)
theta2atmin = eval_theta2(Satmin, Jmin, r, chieff, q, chi1, chi2)
deltaphiatmin = np.tile(np.pi, q.shape)
Satmax = Satresonance(J=Jmax, r=r, chieff=chieff, q=q, chi1=chi1, chi2=chi2)
theta1atmax = eval_theta1(Satmax, Jmax, r, chieff, q, chi1, chi2)
theta2atmax = eval_theta2(Satmax, Jmax, r, chieff, q, chi1, chi2)
deltaphiatmax = np.tile(0, q.shape)
elif J is not None and r is not None and chieff is None and q is not None and chi1 is not None and chi2 is not None:
chieffmin, chieffmax = chieffresonances(J, r, q, chi1, chi2)
Satmin = Satresonance(J=J, r=r, chieff=chieffmin, q=q, chi1=chi1, chi2=chi2)
theta1atmin = eval_theta1(Satmin, J, r, chieffmin, q, chi1, chi2)
theta2atmin = eval_theta2(Satmin, J, r, chieffmin, q, chi1, chi2)
# See Fig 5 in arxiv:1506.03492
J = np.atleast_1d(J)
S1, S2 = spinmags(q, chi1, chi2)
L = eval_L(r, q)
deltaphiatmin = np.where(J > np.abs(L-S1-S2), 0, np.pi)
Satmax = Satresonance(J=J, r=r, chieff=chieffmax, q=q, chi1=chi1, chi2=chi2)
theta1atmax = eval_theta1(Satmax, J, r, chieffmax, q, chi1, chi2)
theta2atmax = eval_theta2(Satmax, J, r, chieffmax, q, chi1, chi2)
deltaphiatmax = np.tile(np.pi, q.shape)
else:
raise TypeError("Provide either (r,chieff,q,chi1,chi2) or (J,r,q,chi1,chi2).")
return np.stack([theta1atmin, theta2atmin, deltaphiatmin, theta1atmax, theta2atmax, deltaphiatmax])
def chiefflimits(J=None, r=None, q=None, chi1=None, chi2=None, enforce=False):
"""
Limits on the projected effective spin. The constraints considered depend on the inputs provided.
- If q, chi1, and chi2 are provided, the limits are given by the definition chieff = (1+q)S1.L + (1+1/q)S2.L.
- If J, r, q, chi1, and chi2 are provided, the limits are given by the two spin-orbit resonances.
The boolean flag enforce allows raising an error in case the inputs are not compatible.
Call
----
chieffmin,chieffmax = chiefflimits(J=None,r=None,q=None,chi1=None,chi2=None,enforce=False)
Parameters
----------
J: float, optional (default: None)
Magnitude of the total angular momentum.
r: float, optional (default: None)
Binary separation.
q: float, optional (default: None)
Mass ratio: 0<=q<=1.
chi1: float, optional (default: None)
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float, optional (default: None)
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
enforce: boolean, optional (default: False)
If True raise errors, if False raise warnings.
Returns
-------
chieffmin: float
Minimum value of the effective spin chieff.
chieffmax: float
Maximum value of the effective spin chieff.
"""
if J is None and r is None and q is not None and chi1 is not None and chi2 is not None:
chieffmin, chieffmax = chiefflimits_definition(q, chi1, chi2)
elif J is not None and r is not None and q is not None and chi1 is not None and chi2 is not None:
chieffmin, chieffmax = chieffresonances(J, r, q, chi1, chi2)
# Check precondition
chieffmin_cond, chieffmax_cond = chiefflimits_definition(q, chi1, chi2)
if (chieffmin > chieffmin_cond).all() and (chieffmax < chieffmax_cond).all():
pass
else:
if enforce:
raise ValueError("Input values are not compatible [chiefflimits].")
else:
warnings.warn("Input values are not compatible [chiefflimits].", Warning)
else:
raise TypeError("Provide either (q,chi1,chi2) or (J,r,q,chi1,chi2).")
return np.stack([chieffmin, chieffmax])
def Slimits_S1S2(q, chi1, chi2):
"""
Limits on the total spin magnitude due to the vector relation S=S1+S2.
Call
----
Smin,Smax = Slimits_S1S2(q,chi1,chi2)
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
Smin: float
Minimum value of the total spin S.
Smax: float
Maximum value of the total spin S.
"""
S1, S2 = spinmags(q, chi1, chi2)
Smin = np.abs(S1-S2)
Smax = S1+S2
return np.stack([Smin, Smax])
def Slimits_LJ(J, r, q):
"""
Limits on the total spin magnitude due to the vector relation S=J-L.
Call
----
Smin,Smax = Slimits_LJ(J,r,q)
Parameters
----------
J: float
Magnitude of the total angular momentum.
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
Returns
-------
Smin: float
Minimum value of the total spin S.
Smax: float
Maximum value of the total spin S.
"""
L = eval_L(r, q)
Smin = np.abs(J-L)
Smax = J+L
return np.stack([Smin, Smax])
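# Usage sketch for the two S-limits above (illustrative values; helper name not
# part of the module): the triangle inequalities give |S1-S2| <= S <= S1+S2 and
# |J-L| <= S <= J+L.
def _example_slimits():
    q, chi1, chi2, r = 0.8, 0.9, 0.5, 10.0
    Smin12, Smax12 = Slimits_S1S2(q, chi1, chi2)
    L = eval_L(r, q)
    J = L + 0.5 * (Smax12 - Smin12)  # a value of J inside the allowed range
    SminLJ, SmaxLJ = Slimits_LJ(J, r, q)
    assert (Smin12 <= Smax12).all() and (SminLJ <= SmaxLJ).all()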
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..adapters import Adapter
from ..config import ConfigValidator, StringField
from ..representation import PoseEstimationPrediction
from ..utils import UnsupportedPackage
try:
from scipy.optimize import linear_sum_assignment
except ImportError as error:
linear_sum_assignment = UnsupportedPackage('scipy.optimize', error.msg)
class AssociativeEmbeddingAdapter(Adapter):
__provider__ = 'human_pose_estimation_ae'
prediction_types = (PoseEstimationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'heatmaps_out': StringField(
description="Name of output layer with keypoints heatmaps.",
),
'nms_heatmaps_out': StringField(
description="Name of output layer with keypoints heatmaps after NMS.",
),
'embeddings_out': StringField(
description="Name of output layer with associative embeddings.",
),
})
return parameters
@classmethod
def validate_config(cls, config, fetch_only=False, **kwargs):
return super().validate_config(
config, fetch_only=fetch_only, on_extra_argument=ConfigValidator.WARN_ON_EXTRA_ARGUMENT
)
def configure(self):
self.heatmaps = self.get_value_from_config('heatmaps_out')
self.nms_heatmaps = self.get_value_from_config('nms_heatmaps_out')
self.embeddings = self.get_value_from_config('embeddings_out')
if isinstance(linear_sum_assignment, UnsupportedPackage):
linear_sum_assignment.raise_error(self.__provider__)
self.decoder = AssociativeEmbeddingDecoder(
num_joints=17,
adjust=True,
refine=True,
dist_reweight=True,
delta=0.0,
max_num_people=30,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False)
self.outputs_verified = False
def select_output_blob(self, outputs):
self.heatmaps = self.check_output_name(self.heatmaps, outputs)
self.nms_heatmaps = self.check_output_name(self.nms_heatmaps, outputs)
self.embeddings = self.check_output_name(self.embeddings, outputs)
self.outputs_verified = True
def process(self, raw, identifiers, frame_meta):
result = []
raw_outputs = self._extract_predictions(raw, frame_meta)
if not self.outputs_verified:
self.select_output_blob(raw_outputs)
raw_output = zip(identifiers, raw_outputs[self.heatmaps][None],
raw_outputs[self.nms_heatmaps][None],
raw_outputs[self.embeddings][None], frame_meta)
for identifier, heatmap, nms_heatmap, embedding, meta in raw_output:
poses, scores = self.decoder(heatmap, embedding, nms_heatmaps=nms_heatmap)
if len(scores) == 0:
result.append(PoseEstimationPrediction(
identifier,
np.empty((0, 17), dtype=float),
np.empty((0, 17), dtype=float),
np.empty((0, 17), dtype=float),
np.empty((0, ), dtype=float)
))
continue
poses = poses.astype(float)
scores = np.asarray(scores).astype(float)
scale_x = meta['scale_x']
scale_y = meta['scale_y']
poses[:, :, 0] /= scale_x / 2
poses[:, :, 1] /= scale_y / 2
point_scores = poses[:, :, 2]
result.append(PoseEstimationPrediction(
identifier,
poses[:, :, 0],
poses[:, :, 1],
point_scores,
scores))
return result
class Pose:
def __init__(self, num_joints, tag_size=1):
self.num_joints = num_joints
self.tag_size = tag_size
self.pose = np.zeros((num_joints, 2 + 1 + tag_size), dtype=np.float32)
self.pose_tag = np.zeros(tag_size, dtype=np.float32)
self.valid_points_num = 0
self.c = np.zeros(2, dtype=np.float32)
def add(self, idx, joint, tag):
self.pose[idx] = joint
self.c = self.c * self.valid_points_num + joint[:2]
self.pose_tag = (self.pose_tag * self.valid_points_num) + tag
self.valid_points_num += 1
self.c /= self.valid_points_num
self.pose_tag /= self.valid_points_num
@property
def tag(self):
if self.valid_points_num > 0:
return self.pose_tag
return None
@property
def center(self):
if self.valid_points_num > 0:
return self.c
return None
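# A small self-contained sketch of how Pose accumulates keypoints (illustrative
# values; the helper name is not part of the adapter): center and tag are running
# means over the joints added so far. Each joint row is (x, y, score, tag).
def _example_pose_running_mean():
    pose = Pose(num_joints=3, tag_size=1)
    pose.add(0, np.array([0.0, 0.0, 1.0, 0.5], dtype=np.float32), np.array([0.5], dtype=np.float32))
    pose.add(1, np.array([2.0, 4.0, 1.0, 1.5], dtype=np.float32), np.array([1.5], dtype=np.float32))
    assert np.allclose(pose.center, [1.0, 2.0])
    assert np.allclose(pose.tag, [1.0])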
class AssociativeEmbeddingDecoder:
def __init__(self, num_joints, max_num_people, detection_threshold, use_detection_val,
ignore_too_much, tag_threshold,
adjust=True, refine=True, delta=0.0, joints_order=None,
dist_reweight=True):
self.num_joints = num_joints
self.max_num_people = max_num_people
self.detection_threshold = detection_threshold
self.tag_threshold = tag_threshold
self.use_detection_val = use_detection_val
self.ignore_too_much = ignore_too_much
if self.num_joints == 17 and joints_order is None:
self.joint_order = (0, 1, 2, 3, 4, 5, 6, 11, 12, 7, 8, 9, 10, 13, 14, 15, 16)
else:
self.joint_order = list(np.arange(self.num_joints))
self.do_adjust = adjust
self.do_refine = refine
self.dist_reweight = dist_reweight
self.delta = delta
def match(self, tag_k, loc_k, val_k):
return list(map(self._match_by_tag, zip(tag_k, loc_k, val_k)))
@staticmethod
def _max_match(scores):
r, c = linear_sum_assignment(scores)
tmp = np.stack((r, c), axis=1)
return tmp
def _match_by_tag(self, inp):
tag_k, loc_k, val_k = inp
embd_size = tag_k.shape[2]
all_joints = np.concatenate((loc_k, val_k[..., None], tag_k), -1)
poses = []
for idx in self.joint_order:
tags = tag_k[idx]
joints = all_joints[idx]
mask = joints[:, 2] > self.detection_threshold
tags = tags[mask]
joints = joints[mask]
if len(poses) == 0:
for tag, joint in zip(tags, joints):
pose = Pose(self.num_joints, embd_size)
pose.add(idx, joint, tag)
poses.append(pose)
continue
if joints.shape[0] == 0 or (self.ignore_too_much and len(poses) == self.max_num_people):
continue
poses_tags = np.stack([p.tag for p in poses], axis=0)
diff = tags[:, None] - poses_tags[None, :]
diff_normed = np.linalg.norm(diff, ord=2, axis=2)
diff_saved = np.copy(diff_normed)
if self.dist_reweight:
# Reweight cost matrix to prefer nearby points among all that are close enough in a tag space.
centers = np.stack([p.center for p in poses], axis=0)[None]
dists = np.linalg.norm(joints[:, :2][:, None, :] - centers, ord=2, axis=2)
close_tags_masks = diff_normed < self.tag_threshold
min_dists = np.min(dists, axis=0, keepdims=True)
dists /= min_dists + 1e-10
diff_normed[close_tags_masks] *= dists[close_tags_masks]
if self.use_detection_val:
diff_normed = np.round(diff_normed) * 100 - joints[:, 2:3]
num_added = diff.shape[0]
num_grouped = diff.shape[1]
if num_added > num_grouped:
diff_normed = np.pad(diff_normed, ((0, 0), (0, num_added - num_grouped)),
mode='constant', constant_values=1e10)
pairs = self._max_match(diff_normed)
for row, col in pairs:
if row < num_added and col < num_grouped and diff_saved[row][col] < self.tag_threshold:
poses[col].add(idx, joints[row], tags[row])
else:
pose = Pose(self.num_joints, embd_size)
pose.add(idx, joints[row], tags[row])
poses.append(pose)
ans = np.asarray([p.pose for p in poses], dtype=np.float32).reshape(-1, self.num_joints, 2 + 1 + embd_size)
tags = np.asarray([p.tag for p in poses], dtype=np.float32).reshape(-1, embd_size)
return ans, tags
def top_k(self, heatmaps, tags):
N, K, H, W = heatmaps.shape
heatmaps = heatmaps.reshape(N, K, -1)
ind = heatmaps.argpartition(-self.max_num_people, axis=2)[:, :, -self.max_num_people:]
val_k = np.take_along_axis(heatmaps, ind, axis=2)
subind = np.argsort(-val_k, axis=2)
ind = np.take_along_axis(ind, subind, axis=2)
val_k = np.take_along_axis(val_k, subind, axis=2)
tags = tags.reshape(N, K, W * H, -1)
tag_k = [np.take_along_axis(tags[..., i], ind, axis=2) for i in range(tags.shape[3])]
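# A toy sketch of the matching step used in _match_by_tag above (illustrative cost
# values; the helper name is not part of the adapter): candidate joints are
# assigned to existing poses with the Hungarian algorithm, and columns padded with
# a large constant absorb joints that should start a new pose.
def _example_tag_matching():
    from scipy.optimize import linear_sum_assignment
    cost = np.array([[0.1, 5.0], [4.0, 0.2], [3.0, 6.0]])  # 3 joints, 2 poses
    cost = np.pad(cost, ((0, 0), (0, 1)), mode='constant', constant_values=1e10)
    rows, cols = linear_sum_assignment(cost)
    # joints 0 and 1 join poses 0 and 1; joint 2 lands in the padded column.
    return list(zip(rows, cols))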
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
survival_sheet_loc_train = '/images/brainMRI/brats2018/train/survival_data.csv'
survival_sheet_loc_val = '/images/brainMRI/brats2018/validation/survival_evaluation.csv'
survival_sheet_loc_test = '/images/brainMRI/brats2018/test/survival_evaluation.csv'
########################################################################################################################
########################################################################################################################
def main():
survival_data = np.loadtxt(survival_sheet_loc_train, delimiter=',', skiprows=1,
dtype={'names': ('name', 'age', 'survival_days', 'resection'),
'formats': ('U20', 'f4', 'i4', 'U10')
})
gtr_data = []
for i in range(len(survival_data)):
name = survival_data[i]['name']
age = survival_data[i]['age']
survival = survival_data[i]['survival_days']
if survival_data[i]['resection'] == 'GTR':
gtr_data.append(np.array([name, age, survival]))
t1 = np.asarray(gtr_data)
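# A hedged sketch of one plausible next step, given the sklearn.linear_model
# import at the top of this script (an assumption, not necessarily what the
# original script goes on to do): regress survival days on age for the GTR cases
# collected above. The names ages, survivals and reg are illustrative.
def _example_age_survival_fit(gtr_cases):
    arr = np.asarray(gtr_cases)
    ages = arr[:, 1].astype(float).reshape(-1, 1)
    survivals = arr[:, 2].astype(float)
    reg = linear_model.LinearRegression().fit(ages, survivals)
    return reg.coef_, reg.intercept_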
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the discrete-variable quantum operations that do
not depend on any parameters.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access
import cmath
import numpy as np
from scipy.linalg import block_diag
import pennylane as qml
from pennylane.operation import AnyWires, DiagonalOperation, Observable, Operation
from pennylane.utils import pauli_eigs
from pennylane.wires import Wires
INV_SQRT2 = 1 / qml.math.sqrt(2)
class Hadamard(Observable, Operation):
r"""Hadamard(wires)
The Hadamard operator
.. math:: H = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1\\ 1 & -1\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
is_self_inverse = True
eigvals = pauli_eigs(1)
matrix = np.array([[INV_SQRT2, INV_SQRT2], [INV_SQRT2, -INV_SQRT2]])
def label(self, decimals=None, base_label=None):
return base_label or "H"
@classmethod
def _matrix(cls, *params):
return cls.matrix
@classmethod
def _eigvals(cls, *params):
return cls.eigvals
def diagonalizing_gates(self):
r"""Rotates the specified wires such that they
are in the eigenbasis of the Hadamard operator.
For the Hadamard operator,
.. math:: H = U^\dagger Z U
where :math:`U = R_y(-\pi/4)`.
Returns:
list(~.Operation): A list of gates that diagonalize Hadamard in
the computational basis.
"""
return [qml.RY(-np.pi / 4, wires=self.wires)]
@staticmethod
def decomposition(wires):
decomp_ops = [
qml.PhaseShift(np.pi / 2, wires=wires),
qml.RX(np.pi / 2, wires=wires),
qml.PhaseShift(np.pi / 2, wires=wires),
]
return decomp_ops
def adjoint(self):
return Hadamard(wires=self.wires)
def single_qubit_rot_angles(self):
# H = RZ(\pi) RY(\pi/2) RZ(0)
return [np.pi, np.pi / 2, 0.0]
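# A quick numerical sanity check of the decomposition above (a sketch using plain
# NumPy rather than PennyLane's own matrix utilities; the helper name is not part
# of the library): applying PhaseShift(pi/2), RX(pi/2), PhaseShift(pi/2) in
# sequence reproduces the Hadamard matrix exactly.
def _check_hadamard_decomposition():
    phase = np.diag([1, np.exp(1j * np.pi / 2)])  # PhaseShift(pi/2)
    rx = np.array([[np.cos(np.pi / 4), -1j * np.sin(np.pi / 4)],
                   [-1j * np.sin(np.pi / 4), np.cos(np.pi / 4)]])  # RX(pi/2)
    assert np.allclose(phase @ rx @ phase, Hadamard.matrix)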
class PauliX(Observable, Operation):
r"""PauliX(wires)
The Pauli X operator
.. math:: \sigma_x = \begin{bmatrix} 0 & 1 \\ 1 & 0\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
is_self_inverse = True
basis = "X"
eigvals = pauli_eigs(1)
matrix = np.array([[0, 1], [1, 0]])
def label(self, decimals=None, base_label=None):
return base_label or "X"
@classmethod
def _matrix(cls, *params):
return cls.matrix
@classmethod
def _eigvals(cls, *params):
return cls.eigvals
def diagonalizing_gates(self):
r"""Rotates the specified wires such that they
are in the eigenbasis of the Pauli-X operator.
For the Pauli-X operator,
.. math:: X = H^\dagger Z H.
Returns:
list(qml.Operation): A list of gates that diagonalize PauliX in the
computational basis.
"""
return [Hadamard(wires=self.wires)]
@staticmethod
def decomposition(wires):
decomp_ops = [
qml.PhaseShift(np.pi / 2, wires=wires),
qml.RX(np.pi, wires=wires),
qml.PhaseShift(np.pi / 2, wires=wires),
]
return decomp_ops
def adjoint(self):
return PauliX(wires=self.wires)
def _controlled(self, wire):
CNOT(wires=Wires(wire) + self.wires)
def single_qubit_rot_angles(self):
# X = RZ(-\pi/2) RY(\pi) RZ(\pi/2)
return [np.pi / 2, np.pi, -np.pi / 2]
class PauliY(Observable, Operation):
r"""PauliY(wires)
The Pauli Y operator
.. math:: \sigma_y = \begin{bmatrix} 0 & -i \\ i & 0\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
is_self_inverse = True
basis = "Y"
eigvals = pauli_eigs(1)
matrix = np.array([[0, -1j], [1j, 0]])
def label(self, decimals=None, base_label=None):
return base_label or "Y"
@classmethod
def _matrix(cls, *params):
return cls.matrix
@classmethod
def _eigvals(cls, *params):
return cls.eigvals
def diagonalizing_gates(self):
r"""Rotates the specified wires such that they
are in the eigenbasis of PauliY.
For the Pauli-Y observable,
.. math:: Y = U^\dagger Z U
where :math:`U=HSZ`.
Returns:
list(~.Operation): A list of gates that diagonalize PauliY in the
computational basis.
"""
return [
PauliZ(wires=self.wires),
S(wires=self.wires),
Hadamard(wires=self.wires),
]
@staticmethod
def decomposition(wires):
decomp_ops = [
qml.PhaseShift(np.pi / 2, wires=wires),
qml.RY(np.pi, wires=wires),
qml.PhaseShift(np.pi / 2, wires=wires),
]
return decomp_ops
def adjoint(self):
return PauliY(wires=self.wires)
def _controlled(self, wire):
CY(wires=Wires(wire) + self.wires)
def single_qubit_rot_angles(self):
# Y = RZ(0) RY(\pi) RZ(0)
return [0.0, np.pi, 0.0]
class PauliZ(Observable, DiagonalOperation):
r"""PauliZ(wires)
The Pauli Z operator
.. math:: \sigma_z = \begin{bmatrix} 1 & 0 \\ 0 & -1\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
is_self_inverse = True
basis = "Z"
eigvals = pauli_eigs(1)
matrix = np.array([[1, 0], [0, -1]])
def label(self, decimals=None, base_label=None):
return base_label or "Z"
@classmethod
def _matrix(cls, *params):
return cls.matrix
@classmethod
def _eigvals(cls, *params):
return cls.eigvals
def diagonalizing_gates(self):
return []
@staticmethod
def decomposition(wires):
decomp_ops = [qml.PhaseShift(np.pi, wires=wires)]
return decomp_ops
def adjoint(self):
return PauliZ(wires=self.wires)
def _controlled(self, wire):
CZ(wires=Wires(wire) + self.wires)
def single_qubit_rot_angles(self):
# Z = RZ(\pi) RY(0) RZ(0)
return [np.pi, 0.0, 0.0]
class S(DiagonalOperation):
r"""S(wires)
The single-qubit phase gate
.. math:: S = \begin{bmatrix}
1 & 0 \\
0 & i
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
basis = "Z"
op_eigvals = np.array([1, 1j])
op_matrix = np.array([[1, 0], [0, 1j]])
@classmethod
def _matrix(cls, *params):
return cls.op_matrix
@classmethod
def _eigvals(cls, *params):
return cls.op_eigvals
@staticmethod
def decomposition(wires):
decomp_ops = [qml.PhaseShift(np.pi / 2, wires=wires)]
return decomp_ops
def adjoint(self):
return S(wires=self.wires).inv()
def single_qubit_rot_angles(self):
# S = RZ(\pi/2) RY(0) RZ(0)
return [np.pi / 2, 0.0, 0.0]
class T(DiagonalOperation):
r"""T(wires)
The single-qubit T gate
.. math:: T = \begin{bmatrix}
1 & 0 \\
0 & e^{\frac{i\pi}{4}}
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
basis = "Z"
op_matrix = np.array([[1, 0], [0, cmath.exp(1j * np.pi / 4)]])
op_eigvals = np.array([1, cmath.exp(1j * np.pi / 4)])
@classmethod
def _matrix(cls, *params):
return cls.op_matrix
@classmethod
def _eigvals(cls, *params):
return cls.op_eigvals
@staticmethod
def decomposition(wires):
decomp_ops = [qml.PhaseShift(np.pi / 4, wires=wires)]
return decomp_ops
def adjoint(self):
return T(wires=self.wires).inv()
def single_qubit_rot_angles(self):
# T = RZ(\pi/4) RY(0) RZ(0)
return [np.pi / 4, 0.0, 0.0]
class SX(Operation):
r"""SX(wires)
The single-qubit Square-Root X operator.
.. math:: SX = \sqrt{X} = \frac{1}{2} \begin{bmatrix}
1+i & 1-i \\
1-i & 1+i \\
\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_params = 0
num_wires = 1
par_domain = None
basis = "X"
op_matrix = 0.5 * np.array([[1 + 1j, 1 - 1j], [1 - 1j, 1 + 1j]])
op_eigvals = np.array([1, 1j])
@classmethod
def _matrix(cls, *params):
return cls.op_matrix
@classmethod
def _eigvals(cls, *params):
return cls.op_eigvals
@staticmethod
def decomposition(wires):
decomp_ops = [
qml.RZ(np.pi / 2, wires=wires),
qml.RY(np.pi / 2, wires=wires),
qml.RZ(-np.pi, wires=wires),
qml.PhaseShift(np.pi / 2, wires=wires),
]
return decomp_ops
def adjoint(self):
return SX(wires=self.wires).inv()
def single_qubit_rot_angles(self):
# SX = RZ(-\pi/2) RY(\pi/2) RZ(\pi/2)
return [np.pi / 2, np.pi / 2, -np.pi / 2]
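# Hedged sanity check (illustrative only): squaring the SX matrix recovers Pauli X, which is the
# sense in which SX is the square root of X.
def _check_sx_squares_to_x():
    x = np.array([[0, 1], [1, 0]])
    assert np.allclose(SX.op_matrix @ SX.op_matrix, x)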
class CNOT(Operation):
r"""CNOT(wires)
The controlled-NOT operator
.. math:: CNOT = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & 0 & 1 & 0
\end{bmatrix}.
.. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 0
Args:
wires (Sequence[int]): the wires the operation acts on
"""
num_params = 0
num_wires = 2
par_domain = None
is_self_inverse = True
basis = "X"
matrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
def label(self, decimals=None, base_label=None):
return base_label or "⊕"
@classmethod
def _matrix(cls, *params):
return CNOT.matrix
def adjoint(self):
return CNOT(wires=self.wires)
def _controlled(self, wire):
Toffoli(wires=Wires(wire) + self.wires)
@property
def control_wires(self):
return Wires(self.wires[0])
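# Hedged sanity check (illustrative only): conjugating controlled-Z by a Hadamard on the target
# qubit yields CNOT, i.e. CNOT = (I ⊗ H) CZ (I ⊗ H).
def _check_cnot_from_cz():
    h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
    cz = np.diag([1, 1, 1, -1])
    i_kron_h = np.kron(np.eye(2), h)
    assert np.allclose(i_kron_h @ cz @ i_kron_h, CNOT.matrix)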
class CZ(DiagonalOperation):
r"""CZ(wires)
The controlled-Z operator
.. math:: CZ = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0\\
0 & 0 & 1 & 0\\
0 & 0 & 0 & -1
\end{bmatrix}.
.. note:: The first wire provided corresponds to the **control qubit**.
**Details:**
* Number of wires: 2
* Number of parameters: 0
Args:
wires (Sequence[int]): the wires the operation acts on
"""
num_params = 0
num_wires = 2
par_domain = None
is_self_inverse = True
is_symmetric_over_all_wires = True
basis = "Z"
eigvals = np.array([1, 1, 1, -1])
    matrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
from abc import ABCMeta, abstractmethod
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import pairwise_distances
from matplotlib.colors import ListedColormap
from ordinal_tsf.util import assert_sum_one, all_satisfy, frame_ts, is_univariate, frame_generator, frame_generator_list, gmm_marginal_pdf
import pickle
import numpy as np
import seaborn as sns
from scipy.stats import norm, multivariate_normal
from scipy.spatial.distance import euclidean
from functools import partial, reduce
from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
from sklearn.neighbors import KernelDensity
from sklearn.cluster import KMeans
from fastdtw import fastdtw
import copy
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
class Dataset(object):
"""Handler for a time series dataset and its different representations."""
train_ts = None
val_ts = None
test_ts = None
def __init__(self, raw_ts, frame_length, p_train=0.7, p_val=0.15, p_test=0.15, preprocessing_steps=[]):
assert assert_sum_one([p_train, p_val, p_test]) \
and all_satisfy([p_train, p_val, p_test], lambda x: x > 0.), \
"Please make sure p_train, p_val, p_test are positive and sum up to 1."
assert raw_ts.ndim == 2 and raw_ts.shape[0], 'Please provide a univariate time series as input to Dataset.'
self.optional_params = {}
self.raw_ts = raw_ts
self.frame_length = frame_length
self.__raw_n_train = int(self.raw_ts.shape[0] * p_train)
self.__raw_n_val = self.__raw_n_train + int(self.raw_ts.shape[0] * p_val)
self.train_ts, self.val_ts, self.test_ts = self.raw_train_ts, self.raw_val_ts, self.raw_test_ts
for step in preprocessing_steps:
self.train_ts = step.apply(self.train_ts)
self.val_ts = step.apply(self.val_ts)
self.test_ts = step.apply(self.test_ts)
self.optional_params.update(step.param_dict)
if self.optional_params.get('frame_generator', False):
self.train_frames = partial(frame_generator, ts=self.train_ts, frame_length=frame_length)
self.val_frames = partial(frame_generator, ts=self.val_ts, frame_length=frame_length)
self.test_frames = partial(frame_generator, ts=self.test_ts, frame_length=frame_length)
else:
self.train_frames = frame_ts(self.train_ts, frame_length)
self.val_frames = frame_ts(self.val_ts, frame_length)
self.test_frames = frame_ts(self.test_ts, frame_length)
def save(self, fname):
tmp = [self.train_frames, self.val_frames, self.test_frames]
with open(fname, 'wb') as f:
self.train_frames, self.val_frames, self.test_frames = None, None, None
pickle.dump(self, f)
self.train_frames, self.val_frames, self.test_frames = tmp
@staticmethod
def load(fname):
        with open(fname, 'rb') as f:
dataset = pickle.load(f)
dataset.train_frames = frame_ts(dataset.train_ts, dataset.frame_length)
dataset.val_frames = frame_ts(dataset.val_ts, dataset.frame_length)
dataset.test_frames = frame_ts(dataset.test_ts, dataset.frame_length)
return dataset
@property
def raw_train_ts(self):
# type: (...) -> np.ndarray
return self.raw_ts[:self.__raw_n_train]
@property
def raw_val_ts(self):
# type: (...) -> np.ndarray
return self.raw_ts[self.__raw_n_train:self.__raw_n_val]
@property
def raw_test_ts(self):
# type: (...) -> np.ndarray
return self.raw_ts[self.__raw_n_val:]
def __str__(self):
        props = reduce(lambda x, y: x + y, ['{}:{}\n'.format(k, v) for k, v in self.optional_params.items()], '')
return 'Dataset with properties:\n' + props
def get_default_fname(self, id):
"""Dataset's default name based on its own preprocessing pipeline."""
fname = id
if 'white_noise_level' in self.optional_params:
fname += '_sigma_{}'.format(self.optional_params['white_noise_level'])
if 'zero_mean_unit_var' in self.optional_params:
fname += '_standardised'
if 'is_ordinal' in self.optional_params:
fname += '_ordinal'
if 'is_attractor' in self.optional_params:
fname += '_attractor'
return fname
def apply_partial_preprocessing(self, mode, enabled_steps):
"""Queries a specific representation of the given dataset
Applies a pipeline of preprocessing steps to obtain a dataset representation."""
# type: (str, List[DatasetPreprocessingStep]) -> np.ndarray
assert mode in ['train', 'test', 'val'], "Mode must be one of [train, val, test]"
if mode == 'val':
ts = self.raw_val_ts.copy()
elif mode == 'test':
ts = self.raw_test_ts.copy()
else:
ts = self.raw_train_ts.copy()
for step in enabled_steps:
ts = step.apply(ts)
return ts
class TimeSeriesSetDataset(object):
"""This handler takes a list of time series and treats each of them as an individual subdataset"""
def __init__(self, ts_list, frame_length, p_train=0.7, p_val=0.15, p_test=0.15,
preprocessing_steps=[],
multichannel_preprocessing_steps = [],
frame_gen_func=frame_generator_list):
# type: (List[np.ndarray], int, float, float, float, List[DatasetPreprocessingStep]) -> TimeSeriesSetDataset
        assert all(raw_ts.ndim == 2 and raw_ts.shape[0] > frame_length for raw_ts in ts_list), \
            'Please provide only univariate time series as input to Dataset.'
self.raw_train_list = []
self.raw_val_list = []
self.raw_test_list = []
self.frame_length = frame_length
self.train_ts = []
self.val_ts = []
self.test_ts = []
self.optional_params = {'is_list': True}
self.optional_params_list = []
self.preprocessing_steps_list = []
self.frame_length = frame_length
for ts in ts_list:
n_train = int(p_train * ts.shape[0])
n_train_val = int((p_train + p_val) * ts.shape[0])
cur_train_ts = ts[:n_train]
cur_val_ts = ts[n_train:n_train_val]
cur_test_ts = ts[n_train_val:]
self.raw_train_list += [cur_train_ts.copy()]
self.raw_val_list += [cur_val_ts.copy()]
self.raw_test_list += [cur_test_ts.copy()]
current_optional_params = {}
current_preproc_steps = []
for step in preprocessing_steps:
this_step = copy.deepcopy(step)
cur_train_ts = this_step.apply(cur_train_ts)
cur_val_ts = this_step.apply(cur_val_ts)
cur_test_ts = this_step.apply(cur_test_ts)
current_preproc_steps += [this_step]
current_optional_params.update(this_step.param_dict)
self.optional_params_list += [current_optional_params]
self.train_ts += [cur_train_ts]
self.val_ts += [cur_val_ts]
self.test_ts += [cur_test_ts]
self.preprocessing_steps_list += [current_preproc_steps]
self.train_frames = partial(frame_gen_func, ts_list=self.train_ts, frame_length=frame_length)
self.val_frames = partial(frame_gen_func, ts_list=self.val_ts, frame_length=frame_length)
self.test_frames = partial(frame_gen_func, ts_list=self.test_ts, frame_length=frame_length)
def apply_partial_preprocessing(self, mode, enabled_steps):
"""Queries a specific representation of the given dataset
Applies a pipeline of preprocessing steps to obtain a dataset representation."""
# type: (str, List[DatasetPreprocessingStep]) -> np.ndarray
assert mode in ['train', 'test', 'val'], "Mode must be one of [train, val, test]"
if mode == 'val':
ts = self.raw_val_list.copy()
elif mode == 'test':
ts = self.raw_test_list.copy()
else:
ts = self.raw_train_list.copy()
for step in enabled_steps:
ts = step.apply(ts)
return ts
class DatasetPreprocessingStep(object):
"""Provides a common interface for the individual transformations of the dataset preprocessing pipeline
Attributes:
is_fitted (bool): All preprocessing steps have to be fitted to the input time series
param_dict (dict): Communication protocol from the step's attributes that must be known by the caller
"""
__metaclass__ = ABCMeta
is_fitted = False
param_dict = {}
@abstractmethod
def apply(self, ts):
"""Common interface to perform a transformation from a raw time series to its new representation"""
# type: (np.ndarray) -> np.ndarray
pass
class WhiteCorrupter(DatasetPreprocessingStep):
"""Adds white noise to a time series
Args:
sigma (float): noise standard deviation
"""
def __init__(self, sigma=1e-3):
self.noise_level = sigma
    def apply(self, ts):
        # Rebind rather than mutate the class-level param_dict shared through DatasetPreprocessingStep.
        self.param_dict = {'white_noise_level': self.noise_level}
        return ts + np.random.normal(scale=self.noise_level, size=ts.shape)
class FrameGenerator(DatasetPreprocessingStep):
    """Flags that time series frames should be produced lazily by a generator."""
    def __init__(self):
        # Rebind rather than mutate the class-level param_dict shared through DatasetPreprocessingStep.
        self.param_dict = {'frame_generator': True}
def apply(self, ts):
return ts
class Standardiser(DatasetPreprocessingStep):
"""Makes a time series zero-mean, unit-variance"""
def __init__(self):
self.mean = None
self.std = None
def apply(self, ts):
if not self.is_fitted: self.fit(ts)
return (ts - self.mean) / self.std
def fit(self, ts):
self.mean = np.nanmean(ts)
self.std = np.nanstd(ts)
self.param_dict = {'ts_mean': self.mean,
'ts_std': self.std,
'zero_mean_unit_var':True}
self.is_fitted = True
class MultivarStandardiser(DatasetPreprocessingStep):
"""Makes a time series zero-mean, unit-variance"""
def __init__(self):
self.mean = None
self.std = None
def apply(self, ts):
if isinstance(ts,(list,)):
ts = np.stack(ts, axis=-1)
if not self.is_fitted: self.fit(ts)
return (ts - self.mean) / self.std
def fit(self, ts):
if isinstance(ts,(list,)):
ts = np.stack(ts, axis=-1)
self.mean = np.nanmean(ts, axis=0)
self.std = np.nanstd(ts, axis=0)
self.param_dict = {'ts_mean': self.mean,
'ts_std': self.std,
'zero_mean_unit_var':True}
self.is_fitted = True
class Quantiser(DatasetPreprocessingStep):
"""Computes ordinal bins and allocates each observation in the time series."""
def __init__(self, n_bins=None, delta=1e-3, frame_generator=True):
self.n_bins = n_bins
self.delta = delta
self.bins = None
self.frame_generator = frame_generator
def apply(self, ts):
if not self.is_fitted:
self.fit(ts)
assert is_univariate(ts), 'Only univariate time series can be quantised. Current shape: {}'.format(ts.shape)
if (ts.max() > (self.bins[-1]+self.delta)) or (ts.min() < (self.bins[0]-self.delta)):
print("WARNING: You are trying to quantise a time series that has observations outside the quantisation "
"range. BE CAREFUL as This may lead to inaccurate results.")
na_mask = np.isnan(ts)
out = np.zeros((ts.shape[0], self.n_bins))
digits = np.searchsorted(self.bins[:-1], ts.squeeze())
for i, i_d in enumerate(digits):
if na_mask[i]:
out[i, :] = np.nan
else:
out[i, i_d] = 1.
return out
def fit(self, ts):
ts_max = np.nanmax(ts)
ts_min = np.nanmin(ts)
if self.n_bins is None:
self.n_bins = self.__find_n_bins(ts)
self.bins = np.linspace(ts_min, ts_max, self.n_bins)
self.param_dict = {'bins': self.bins,
'bin_delta':self.bins[1]-self.bins[0],
'is_ordinal': True,
'frame_generator': self.frame_generator}
self.is_fitted = True
def __find_n_bins(self, ts):
# type: (np.ndarray) -> int
MAX_ALLOWED = 300
MIN_ALLOWED = 10
n_bins = np.unique(ts.squeeze()).shape[0]
if n_bins < MAX_ALLOWED and n_bins > MIN_ALLOWED:
return n_bins
ts_max = np.nanmax(ts)
ts_min = np.nanmin(ts)
n_bins = int((ts_max - ts_min) / self.delta)
n_bins = max(min(MAX_ALLOWED, n_bins), MIN_ALLOWED)
return n_bins
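# Hedged usage sketch (added for illustration; the helper name is not part of the original API).
# It shows how Dataset and the preprocessing steps above are meant to compose: the raw series is
# standardised, then one-hot quantised, and the framed views are exposed through
# train_frames/val_frames/test_frames.
def _example_build_ordinal_dataset():
    toy_ts = np.sin(np.linspace(0, 20 * np.pi, 5000))[:, np.newaxis]   # univariate, shape (5000, 1)
    ds = Dataset(toy_ts, frame_length=128,
                 preprocessing_steps=[Standardiser(), Quantiser(n_bins=50)])
    return ds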
class QuantiserArray(DatasetPreprocessingStep):
"""Computes ordinal bins and allocates each observation in the time series."""
def __init__(self, n_bins=None, delta=1e-3, frame_generator=True):
self.n_bins = n_bins
self.delta = delta
self.bins = None
self.frame_generator = frame_generator
self.quantisers = []
def apply(self, ts):
if not self.is_fitted:
self.fit(ts)
return np.stack([q.apply(ts[:, i_q:i_q + 1]) for i_q, q in enumerate(self.quantisers)], axis=-1)
def fit(self, ts):
for i_q in range(ts.shape[-1]):
q = Quantiser(n_bins=self.n_bins, delta=self.delta, frame_generator=self.frame_generator)
q.fit(ts[:, i_q:i_q + 1])
self.quantisers += [q]
self.n_bins = [q.n_bins for q in self.quantisers]
self.bins = [q.bins for q in self.quantisers]
self.param_dict = {'is_ordinal': True,
'frame_generator': self.frame_generator,
'is_array': True,
'n_channels': ts.shape[-1],
'bins': self.bins
}
self.is_fitted = True
class KMeansQuantiser(DatasetPreprocessingStep):
"""Quantises a time series using the KMeans method"""
def __init__(self, n_clusters=150, n_init=5):
self.n_clusters = n_clusters
self.n_init = n_init
self.model = None
def fit(self, ts):
self.model = KMeans(n_clusters=self.n_clusters, n_init=self.n_init).fit(ts)
self.param_dict = {'centroids': self.model.cluster_centers_,
'n_centroids': self.n_clusters,
'frame_generator': True}
self.is_fitted = True
def apply(self, ts):
if not self.is_fitted:
self.fit(ts)
cluster_ids = self.model.predict(ts)
out = np.zeros((ts.shape[0], self.n_clusters))
for i, i_d in enumerate(cluster_ids):
out[i, i_d] = 1.
return out
def apply_decoder(self, weights, f_decoder=None):
if f_decoder is None:
return self.replace_with_centroids(weights, self.param_dict['centroids'])
return f_decoder(weights, self.param_dict['centroids'])
@staticmethod
def replace_with_centroids(draw, centroids):
return np.stack([centroids[k] for k in draw])
@staticmethod
def replace_with_weighted_mean(weights, centroids):
return weights.dot(centroids)
@staticmethod
def replace_with_weighted_mode(weights, centroids):
modes = weights.argmax(axis=-1)
return np.stack([centroids[i] for i in modes])
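# Note (added for clarity): the three static decoders in KMeansQuantiser above offer different
# read-outs of a categorical forecast over centroids -- hard draws are mapped back with
# replace_with_centroids, replace_with_weighted_mean returns the probability-weighted average of
# the centroids, and replace_with_weighted_mode snaps each step to its most probable centroid.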
class GMMQuantiser(DatasetPreprocessingStep):
"""Quantises a time series using the Variational GMM method"""
def __init__(self, n_clusters=150, n_init=5, weight_concentration_prior=500):
self.n_clusters = n_clusters
self.n_init = n_init
self.model = None
self.wcp = weight_concentration_prior
def fit(self, ts):
self.model = GaussianMixture(n_components=self.n_clusters, max_iter=200, n_init=self.n_init).fit(ts)
self.param_dict = {'centroids': self.model.means_,
'covariances': self.model.covariances_,
'n_centroids': self.n_clusters,
'frame_generator': True}
self.is_fitted = True
def apply(self, ts):
if not self.is_fitted:
self.fit(ts)
cluster_ids = self.model.predict(ts)
out = np.zeros((ts.shape[0], self.n_clusters))
for i, i_d in enumerate(cluster_ids):
out[i, i_d] = 1.
return out
def compute_bin_proba(self, samples):
return self.model.predict_proba(samples)
def apply_decoder(self, weights, f_decoder=None):
if f_decoder is None:
return self.replace_with_centroids(weights, self.param_dict['centroids'])
return f_decoder(weights, self.param_dict['centroids'])
@staticmethod
def replace_with_centroids(draw, centroids):
return np.stack([centroids[k] for k in draw])
@staticmethod
def replace_with_weighted_mean(weights, centroids):
return weights.dot(centroids)
class VBGMMQuantiser(DatasetPreprocessingStep):
"""Quantises a time series using the GMM method"""
def __init__(self, n_clusters=150, n_init=5, weight_concentration_prior=500):
self.n_clusters = n_clusters
self.n_init = n_init
self.model = None
self.wcp = weight_concentration_prior
def fit(self, ts):
self.model = BayesianGaussianMixture(n_components=self.n_clusters, max_iter=200, n_init=self.n_init,
weight_concentration_prior_type='dirichlet_distribution',
weight_concentration_prior=self.wcp).fit(ts)
self.param_dict = {'centroids': self.model.means_,
'covariances': self.model.covariances_,
'n_centroids': self.n_clusters,
'frame_generator': True}
self.is_fitted = True
def apply(self, ts):
if not self.is_fitted:
self.fit(ts)
cluster_ids = self.model.predict(ts)
out = np.zeros((ts.shape[0], self.n_clusters))
for i, i_d in enumerate(cluster_ids):
out[i, i_d] = 1.
return out
def apply_decoder(self, weights, f_decoder=None):
if f_decoder is None:
return self.replace_with_centroids(weights, self.param_dict['centroids'])
return f_decoder(weights, self.param_dict['centroids'])
@staticmethod
def replace_with_centroids(draw, centroids):
return np.stack([centroids[k] for k in draw])
@staticmethod
def replace_with_weighted_mean(weights, centroids):
return weights.dot(centroids)
class AttractorStacker(DatasetPreprocessingStep):
"""Stacks a time series with lagged representations of itself, in an attractor-like fashion."""
def __init__(self, lag):
self.lag = lag
def apply(self, ts):
self.is_fitted = True
self.param_dict = {'attractor_lag': self.lag,
'n_channels': 3,
'is_attractor': True}
return np.stack((ts[:-2*self.lag], ts[self.lag:-self.lag], ts[2*self.lag:]), axis=-1)
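# Illustrative note on AttractorStacker (added): for an input of shape (N, 1) the stacker returns
# an array of shape (N - 2*lag, 1, 3) whose channels are y[t], y[t+lag], y[t+2*lag], e.g.
# AttractorStacker(lag=10).apply(np.arange(100.)[:, None]).shape == (80, 1, 3).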
class Selector(DatasetPreprocessingStep):
"""Extracts a subsequence of length ``horizon`` from index ``start``"""
def __init__(self, start, horizon):
self.start = start
self.end = start + horizon
def apply(self, ts):
self.is_fitted = True
return ts[self.start:self.end]
class Prediction(object):
"""Provides a common interface for the output predictions of different forecasting strategies """
__metaclass__ = ABCMeta
type = 'deterministic'
@abstractmethod
def mse(self, ground_truth): pass
@abstractmethod
def nll(self, ground_truth): pass
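    # Note (added for clarity): the helper below returns the standard MASE denominator, i.e. the
    # in-sample mean absolute error of the seasonal-naive forecast with period m.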
@staticmethod
def get_mase_norm_constant(tr_ts, m):
n = tr_ts.shape[0]
return np.abs(tr_ts[m:] - tr_ts[:-m]).sum() / (n - m)
class StateDensity(object):
"""Provides a common interface for the output predictions of different forecasting strategies """
__metaclass__ = ABCMeta
@abstractmethod
def pdf(self, x): pass
class MultivariateOrdinalPrediction(Prediction):
type = 'multi_ordinal'
def __init__(self, quant, ordinal_pdf, draws, n_x=250):
self.ordinal_pdf = ordinal_pdf
self.quant = quant
self.mean = quant.apply_decoder(ordinal_pdf, quant.replace_with_weighted_mean)
bins = quant.param_dict['centroids']
self.draws = quant.apply_decoder(draws, quant.replace_with_centroids)
self.n_channels = bins.shape[-1]
self.channel_ranges = []
self.channel_densities = []
self.gmm_marginals = []
for i_channel in range(self.n_channels):
this_range = np.linspace(self.draws[:, :, i_channel].min(),
self.draws[:, :, i_channel].max(),
n_x)[:, np.newaxis]
this_delta = this_range[1] - this_range[0]
this_channel_marginals = [norm(loc=quant.model.means_[k, i_channel],
scale=np.sqrt(quant.model.covariances_[k, i_channel, i_channel]))
for k in range(ordinal_pdf.shape[1])]
this_channel_density = gmm_marginal_pdf(this_range, this_channel_marginals, ordinal_pdf, this_delta)
self.channel_ranges += [this_range]
self.channel_densities += [this_channel_density]
self.gmm_marginals += [this_channel_marginals]
def plot_channel_like(self, plt, y_true):
fig, axes = plt.subplots(self.n_channels)
for i_channel in range(self.n_channels):
im = axes[i_channel].imshow(self.channel_densities[i_channel].T, origin='lower',
extent=[0, self.channel_densities[i_channel].shape[0],
self.channel_ranges[i_channel][0],
self.channel_ranges[i_channel][-1]],
aspect='auto', cmap='Blues')
axes[i_channel].plot(y_true[:, i_channel], color='xkcd:green')
axes[i_channel].plot(self.mean[:, i_channel], color='xkcd:orange')
axes[i_channel].plot(self.get_ordinal_quantile(self.channel_densities[i_channel],
self.channel_ranges[i_channel], 0.5),
color='xkcd:crimson')
axes[i_channel].plot(self.get_ordinal_quantile(self.channel_densities[i_channel],
self.channel_ranges[i_channel], 0.025),
color='xkcd:crimson')
axes[i_channel].plot(self.get_ordinal_quantile(self.channel_densities[i_channel],
self.channel_ranges[i_channel], 0.975),
color='xkcd:crimson')
fig.colorbar(im, ax=axes[i_channel])
# plt.colorbar()
def plot_channel_cdf(self, plt, y_true):
fig, axes = plt.subplots(self.n_channels)
c_pal = sns.color_palette('Blues', n_colors=125).as_hex()
my_cmap = ListedColormap(c_pal + c_pal[::-1][1:])
for i_channel in range(self.n_channels):
im = axes[i_channel].imshow(self.channel_densities[i_channel].cumsum(axis=-1).T, origin='lower',
extent=[0, self.channel_densities[i_channel].shape[0],
self.channel_ranges[i_channel][0],
self.channel_ranges[i_channel][-1]],
aspect='auto', cmap=my_cmap)
axes[i_channel].plot(y_true[:, i_channel], color='xkcd:green')
axes[i_channel].plot(self.mean[:, i_channel], color='xkcd:orange')
axes[i_channel].plot(self.get_ordinal_quantile(self.channel_densities[i_channel],
self.channel_ranges[i_channel], 0.5),
color='xkcd:crimson')
axes[i_channel].plot(self.get_ordinal_quantile(self.channel_densities[i_channel],
self.channel_ranges[i_channel], 0.025),
color='xkcd:crimson')
axes[i_channel].plot(self.get_ordinal_quantile(self.channel_densities[i_channel],
self.channel_ranges[i_channel], 0.975),
color='xkcd:crimson')
fig.colorbar(im, ax=axes[i_channel])
def plot_decoded(self, plt, y_true):
fig = plt.figure()
if y_true.shape[1] == 3:
ax = fig.gca(projection='3d')
ax.plot(y_true[:, 0], y_true[:, 1], y_true[:, 2], '.', color='xkcd:crimson')
ax.plot(self.mean[:, 0], self.mean[:, 1], self.mean[:, 2], '.', color='xkcd:blue')
elif y_true.shape[1] == 2:
ax = fig.gca()
ax.plot(y_true[:, 0], y_true[:, 1], '.', color='xkcd:crimson')
ax.plot(self.mean[:, 0], self.mean[:, 1], '.', color='xkcd:blue')
else:
print('Incorrect number of channels')
def plot_channels(self, plt, y_true):
fig, axes = plt.subplots(y_true.shape[-1])
for i_ax, ax in enumerate(axes):
ax.plot(self.mean[:, i_ax], color='xkcd:blue')
ax.plot(y_true[:, i_ax], color='xkcd:crimson')
def get_ordinal_quantile(self, pdf, x_range, alpha):
cdf = pdf.cumsum(axis=-1)
quantile = np.array([x_range[j] for j in (cdf >= alpha).argmax(axis=-1)])
quantile[cdf[:, -1] < alpha] = x_range[-1]
return quantile
def rmse_mean(self, ground_truth):
return np.sqrt(mean_squared_error(ground_truth, self.mean.squeeze()))
def mse(self):
pass
def nll(self, y_true):
#bin_proba = self.quant.compute_bin_proba(y_true)
bin_proba = np.stack([multivariate_normal.pdf(y_true,
self.quant.model.means_[k_mix],
self.quant.model.covariances_[k_mix])
for k_mix in range(self.quant.model.means_.shape[0])], axis=1)
p_ground_truth = (bin_proba * self.ordinal_pdf).sum(axis=-1)
return (-np.log(p_ground_truth)).sum()
# This is used when you have independent models for each time series channel and then want to
# integrate into a single prediction
class PredictionList(Prediction):
def __init__(self, predictions):
self.n_ar_channels = len(predictions)
self.predictions = predictions
def mse(self, ground_truth): return 0.
def nll(self, ground_truth): return 0.
def rmse_mean(self, ground_truth):
# ground_truth \in (timesteps, channels)
mse = np.array([pred.rmse_mean(ground_truth[i_pred]) for i_pred, pred in enumerate(self.predictions)])
return mse.mean()
def plot_channel_like(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_ar_channels)
if self.n_ar_channels == 1:
axes = [axes]
for i_pred, pred in enumerate(self.predictions):
pred.plot_empirical(axes[i_pred], ground_truth[i_pred])
def plot_median_2std(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_ar_channels)
if self.n_ar_channels == 1:
axes = [axes]
for i_pred, pred in enumerate(self.predictions):
pred.plot_median_2std(axes[i_pred], ground_truth[i_pred])
def ordinal_marginal_nll(self, ordinal_ground_truth):
return np.array([pred.nll(ordinal_ground_truth[i]) for i, pred in enumerate(self.predictions)])
class OrdinalPrediction(Prediction):
"""Encapsulates a sequential ordinal predictive posterior distribution.
This implements the strategy to compute metrics and plots where the predictive distribution is assumed to be
ordinal/categorical at every timestep.
Args:
ordinal_pdf (np.ndarray): The ordinal output of the forecasting model
draws (np.ndarray): The draws obtained from the forecasting model
        bins (np.ndarray): The bins used to decode the sample trajectory draws
    Attributes:
        ordinal_pdf (np.ndarray): The ordinal output of the forecasting model
        draws (np.ndarray): The draws obtained from the forecasting model
        bins (np.ndarray): The bins used to decode the sample trajectory draws
delta (float): Bin width used to reinterpret ordinal pdf as a piecewise uniform pdf
"""
type = 'ordinal'
def __init__(self, ordinal_pdf, draws, bins):
self.ordinal_pdf = ordinal_pdf
self.draws = np.array([[bins[j] for j in draw] for draw in draws])
self.bins = bins
self.delta = self.bins[1] - self.bins[0]
def mse(self, ground_truth):
"""Computes MSE between two real-valued time series"""
# type: (np.ndarray) -> np.float
return np.mean([mean_squared_error(ground_truth, p) for p in self.draws])
def smape_mean(self, ground_truth):
this_mean = self.ordinal_pdf.dot(self.bins).squeeze()
k = ground_truth.shape[0]
y_true = ground_truth.squeeze()
smape_vector = np.abs(y_true - this_mean) / (np.abs(y_true) + np.abs(this_mean))
return smape_vector.sum() * (2. / k)
def smape_quantile(self, ground_truth, alpha=0.5):
k = ground_truth.shape[0]
median = self.get_quantile(alpha).squeeze()
y_true = ground_truth.squeeze()
smape_vector = np.abs(y_true - median) / (np.abs(y_true) + np.abs(median))
return smape_vector.sum() * (2. / k)
def mase_mean(self, ground_truth, mase_norm_constant):
k = ground_truth.shape[0]
#mase_norm_constant = self.get_mase_norm_constant(ground_truth, 1)
this_mean = self.ordinal_pdf.dot(self.bins).squeeze()
y_true = ground_truth.squeeze()
mase_vector = np.abs(y_true - this_mean).sum() / k
return mase_vector / mase_norm_constant
def mase_quantile(self, ground_truth, mase_norm_constant, alpha=0.5):
k = ground_truth.shape[0]
#mase_norm_constant = self.get_mase_norm_constant(ground_truth, 1)
median = self.get_quantile(alpha).squeeze()
y_true = ground_truth.squeeze()
mase_vector = np.abs(y_true - median).sum() / k
return mase_vector / mase_norm_constant
def rmse_quantile(self, ground_truth, alpha=0.5):
return np.sqrt(mean_squared_error(ground_truth, self.get_quantile(alpha).squeeze()))
def rmse_mean(self, ground_truth):
return np.sqrt(mean_squared_error(ground_truth, self.ordinal_pdf.dot(self.bins).squeeze()))
def mse_mean(self, ground_truth):
return mean_squared_error(ground_truth, self.ordinal_pdf.dot(self.bins).squeeze())
def quantile_mse(self, ground_truth, alpha=0.5):
return mean_squared_error(ground_truth, self.get_quantile(alpha).squeeze())
def mse_and_std(self, ground_truth):
"""Computes MSE +- STD between two real-valued time series"""
# type: (np.ndarray) -> np.float
all_mse = [mean_squared_error(ground_truth, prediction) for prediction in self.draws]
return np.mean(all_mse), np.std(all_mse)
def median_dtw_distance(self, ground_truth):
pred_median = self.get_quantile(0.5)
dist, path = fastdtw(pred_median, ground_truth, dist=euclidean)
return dist
def median_attractor_distance(self, ground_truth):
pred_median = self.get_quantile(0.5)
stacker = AttractorStacker(10)
pred_median_att = stacker.apply(pred_median).squeeze()
ground_truth_att = stacker.apply(ground_truth).squeeze()
d = pairwise_distances(pred_median_att, ground_truth_att)
return d.min(axis=0).sum()
def nll(self, binned_ground_truth):
"""Computes NLL of drawing a time series from a piecewise uniform sequential prediction"""
# type: (np.ndarray) -> np.float
p_ground_truth = (self.ordinal_pdf * binned_ground_truth / self.delta).max(axis=-1)
neg_log_p_ground_truth = -np.log(p_ground_truth)
return neg_log_p_ground_truth.sum()
def qq_dist(self, ordinal_ground_truth, up_to=1000):
qq_x = np.arange(0.01, 1., 0.01)
y_pred_idx = (self.ordinal_pdf[:up_to] * ordinal_ground_truth[:up_to]).argmax(axis=-1)
cdf_truth = np.array([self.ordinal_pdf[t, :idx].sum() for t, idx in enumerate(y_pred_idx)])
qq_ordinal = np.array([(cdf_truth <= alpha).mean() for alpha in qq_x])
return mean_squared_error(qq_x, qq_ordinal)
def cum_nll(self, binned_ground_truth):
"""Computes integral of NLL(t) of drawing a time series from a piecewise uniform sequential prediction"""
# type: (np.ndarray) -> np.float
p_ground_truth = (self.ordinal_pdf * binned_ground_truth / self.delta).max(axis=-1)
neg_log_p_ground_truth = -np.log(p_ground_truth)
return neg_log_p_ground_truth.cumsum().sum()
def get_quantile(self, alpha):
"""Computes \alpha-quantiles given the object's ordinal pdf"""
# type: (float) -> np.ndarray
cdf = self.ordinal_pdf.cumsum(axis=-1)
return np.array([self.bins[j] for j in (cdf >= alpha).argmax(axis=-1)])
def plot_median_2std(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
quantile_median = self.get_quantile(0.5)
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(quantile_median, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Median', 'True'])
def plot_mean_2std(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
pred_mean = self.ordinal_pdf.dot(self.bins)
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(pred_mean, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Mean', 'True'])
def plot_like(self, plt, ground_truth=None):
"""Plots the full ordinal pdf as a heatmap"""
if ground_truth is not None:
plt.plot(ground_truth, 'xkcd:orange')
plt.imshow(self.ordinal_pdf.T, origin='lower',
extent=[0, self.ordinal_pdf.shape[0], self.bins.min(), self.bins.max()],
aspect='auto', cmap='Blues')
plt.title('Predictive likelihood')
plt.colorbar()
def plot_cum_nll(self, plt, binned_ground_truth):
"""Plots the full ordinal pdf as a heatmap"""
p_ground_truth = (self.ordinal_pdf * binned_ground_truth / self.delta).max(axis=-1)
neg_log_p_ground_truth = -np.log(p_ground_truth)
cum_nll = neg_log_p_ground_truth.cumsum()
plt.plot(cum_nll)
plt.title('Cumulative negative log likelihood')
def plot_log_like(self, plt, ground_truth=None):
"""Plots the full log ordinal pdf as a heatmap"""
if ground_truth is not None:
plt.plot(ground_truth, 'xkcd:orange')
plt.imshow(np.ma.log(self.ordinal_pdf.T).data, origin='lower',
extent=[0, self.ordinal_pdf.shape[0], self.bins.min(), self.bins.max()],
aspect='auto', cmap='Blues')
plt.title('Predictive log likelihood')
plt.colorbar()
def plot_draws_quantiles(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
quantile_median = self.get_quantile(0.5)
[plt.plot(x, color='xkcd:blue', alpha=0.1) for x in self.draws.squeeze()]
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(quantile_median, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Median', 'True'], bbox_to_anchor=(1., 1.))
def plot_empirical(self, plt, ground_truth):
c_pal = sns.color_palette('Blues', n_colors=150).as_hex()
my_cmap = ListedColormap(c_pal + c_pal[::-1][1:])
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
plt.plot(quantile_025, 'xkcd:azure')
plt.plot(quantile_975, 'xkcd:azure')
plt.plot(ground_truth, 'xkcd:coral')
plt.imshow(self.ordinal_pdf.cumsum(axis=-1).T, origin='lower',
extent=[0, ground_truth.shape[0], self.bins.min(), self.bins.max()],
aspect='auto', cmap=my_cmap)
#plt.title('Empirical distribution function')
#plt.colorbar()
def plot_qq(self, plt, ordinal_ground_truth, up_to=1000, col='xkcd:blue'):
qq_x = np.arange(0.01, 1., 0.01)
y_pred_idx = (self.ordinal_pdf[:up_to] * ordinal_ground_truth[:up_to]).argmax(axis=-1)
cdf_truth = np.array([self.ordinal_pdf[t, :idx].sum() for t, idx in enumerate(y_pred_idx)])
qq_ordinal = np.array([(cdf_truth <= alpha).mean() for alpha in qq_x])
plt.plot(qq_x, qq_ordinal, col)
plt.plot(qq_x, qq_x, '--', color='xkcd:green')
plt.legend(['Ordinal prediction', 'Ideal'])
#plt.title('Uncertainty calibration plot for ordinal prediction')
def plot_median_dtw_alignment(self, plt, ground_truth):
pred_median = self.get_quantile(0.5)
dist, path = fastdtw(pred_median, ground_truth, dist=euclidean)
plt.plot(np.array([pred_median[j] for i, j in path]))
plt.plot(np.array([ground_truth[i] for i, j in path]))
@staticmethod
def compatibility(old_pred):
new_pred = OrdinalPrediction(old_pred.ordinal_pdf, [], old_pred.bins)
new_pred.draws = old_pred.draws
return new_pred
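# Hedged usage sketch (illustrative only; names are not part of the original API): builds a toy
# piecewise-uniform forecast over 5 bins and queries its median and its NLL against a one-hot
# ground truth of the kind produced by Quantiser.apply.
def _example_ordinal_prediction():
    rng = np.random.RandomState(0)
    horizon, n_bins = 20, 5
    bins = np.linspace(-1., 1., n_bins)
    pdf = rng.dirichlet(np.ones(n_bins), size=horizon)        # (horizon, n_bins), rows sum to 1
    draws = rng.randint(0, n_bins, size=(30, horizon))        # 30 trajectories of bin indices
    pred = OrdinalPrediction(pdf, draws, bins)
    binned_truth = np.eye(n_bins)[rng.randint(0, n_bins, size=horizon)]
    return pred.get_quantile(0.5), pred.nll(binned_truth)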
# This is used when you have a shared LSTM layer and only individual fully connected layers
# for each output channel
class OrdinalArrayPrediction(Prediction):
type = 'ordinal_array'
def __init__(self, ordinal_pdf, draws, bins, vbgmm_max_components=5):
self.predictions = []
self.n_channels = ordinal_pdf.shape[-1]
self.vbgmm_components = vbgmm_max_components
for i in range(ordinal_pdf.shape[-1]):
self.predictions += [OrdinalPrediction(ordinal_pdf[:, :, i],
draws[:, :, i],
bins[i])]
self.draws = np.stack([pred.draws for pred in self.predictions], axis=-1)
self.vbgmm = [BayesianGaussianMixture(vbgmm_max_components, n_init=3, max_iter=200).fit(self.draws[:, t])
for t in range(self.draws.shape[1])]
x_mins = [b[0] for b in bins]
x_max = [b[-1] for b in bins]
self.x_ranges = np.stack([np.linspace(xmi, xma, 1000) for xmi, xma in zip(x_mins, x_max)], axis=-1)
def get_quantile(self, alpha):
"""Computes \alpha-quantiles given the object's posterior mean and standard deviation"""
# type: (float) -> np.ndarray
all_quantiles = []
for i_ch in range(self.n_channels):
this_quantile = [self.x_ranges[q, i_ch]
for q in (self.all_ch_cdf[:, :, i_ch] >= alpha).argmax(axis=-1)] # shape: (n_ts, n_ts_range, n_channels)
this_quantile = np.array(this_quantile)
msk = (self.all_ch_cdf[:, -1, i_ch] < alpha)
this_quantile[msk] = self.x_ranges[-1, i_ch]
all_quantiles += [this_quantile]
return np.stack(all_quantiles, axis=-1)
def mse(self, ground_truth):
"""Computes MSE between two real-valued time series"""
# type: (np.ndarray) -> np.float
return np.mean([pred.mse(ground_truth[:, :, i]) for i, pred in enumerate(self.predictions)])
def smape_mean(self, ground_truth):
return -1.
def rmse_quantile(self, ground_truth, alpha=0.5):
return np.mean([pred.rmse_quantile(ground_truth[:, i], alpha) for i, pred in enumerate(self.predictions)])
def rmse_mean(self, ground_truth):
return np.mean([pred.rmse_mean(ground_truth[:, i]) for i, pred in enumerate(self.predictions)])
def nll(self, ground_truth):
"""Computes NLL of drawing a time series from a piecewise uniform sequential prediction"""
# type: (np.ndarray) -> np.float
return -np.sum([self.vbgmm[t].score(ground_truth[t:t+1]) for t in range(self.draws.shape[1])])
#return np.sum([pred.nll(binned_ground_truth[:, :, i])
# for i, pred in enumerate(self.predictions)])
def plot_median_2std(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_channels)
for i_pred, pred in enumerate(self.predictions):
pred.plot_median_2std(axes[i_pred], ground_truth[:, i_pred])
def plot_decoded(self, plt, ground_truth):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#[ax.plot(*d.T, linestyle='', marker='.', color='xkcd:blue', alpha=0.01) for d in self.draws]
ax.plot(*self.draws.mean(axis=0).T, linestyle='', marker='.', color='xkcd:blue')
ax.plot(*ground_truth.T, linestyle='-', marker='.', color='xkcd:crimson')
def plot_channel_like(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_channels)
for i_pred, pred in enumerate(self.predictions):
pred.plot_empirical(axes[i_pred], ground_truth[:, i_pred])
def plot_draws_quantiles(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_channels)
for i_pred, pred in enumerate(self.predictions):
pred.plot_draws_quantiles(axes[i_pred], ground_truth[:, i_pred])
def factorised_ordinal_joint_nll(self, ordinal_ground_truth):
return np.sum([pred.nll(ordinal_ground_truth[:, :, i]) for i, pred in enumerate(self.predictions)])
def ordinal_marginal_nll(self, ordinal_ground_truth):
return np.array([pred.nll(ordinal_ground_truth[:, :, i]) for i, pred in enumerate(self.predictions)])
def vbgmm_joint_nll(self, ground_truth):
return -np.sum([self.vbgmm[t].score(ground_truth[t:t + 1]) for t in range(self.draws.shape[1])])
def vbgmm_marginal_nll(self, ground_truth):
all_ch_like = []
for i_ch in range(self.n_channels):
ch_like = []
for t in range(ground_truth.shape[0]):
cur_ch_like = 0.
for k_mix in range(self.vbgmm[t].weights_.shape[0]):
cur_ch_like += self.vbgmm[t].weights_[k_mix] * norm.pdf(ground_truth[t:t + 1, i_ch],
loc=self.vbgmm[t].means_[k_mix, i_ch],
scale=np.sqrt(self.vbgmm[t].covariances_[k_mix,
i_ch,
i_ch]))
ch_like += [cur_ch_like]
all_ch_like += [-np.log(ch_like).sum()]
return all_ch_like
class StatePrediction(Prediction):
type = 'state'
class GaussianPrediction(Prediction):
"""Encapsulates a sequential Gaussian predictive posterior distribution.
    This implements the strategy to compute metrics and plots where the predictive distribution is assumed to be
Gaussian at every timestep.
Args:
draws (np.ndarray): The draws obtained from the forecasting model
Attributes:
posterior_mean (np.ndarray): Monte Carlo approximation of the posterior predictive mean
posterior_std (np.ndarray): Monte Carlo approximation of the posterior predictive standard deviation
"""
type = 'gaussian'
def __init__(self, draws, raw_pred=None):
if raw_pred is None:
self.posterior_mean = draws.mean(axis=0)
self.posterior_std = draws.std(axis=0)
self.draws = draws
else:
self.posterior_mean = raw_pred['posterior_mean'].squeeze()
self.posterior_std = raw_pred['posterior_std'].squeeze()
self.draws = np.stack([np.random.normal(self.posterior_mean[t],
self.posterior_std[t],
size = 100) for t in range(self.posterior_mean.shape[0])], axis=1)
def mse(self, ground_truth):
"""Computes MSE between two real-valued time series"""
# type: (np.ndarray) -> np.float
return np.mean([mean_squared_error(ground_truth, p) for p in self.draws])
def rmse_quantile(self, ground_truth, alpha=0.5):
return np.sqrt(mean_squared_error(ground_truth, self.get_quantile(alpha).squeeze()))
def rmse_mean(self, ground_truth):
return np.sqrt(mean_squared_error(ground_truth, self.posterior_mean))
def mse_mean(self, ground_truth):
return mean_squared_error(ground_truth, self.posterior_mean)
def quantile_mse(self, ground_truth, alpha=0.5):
return mean_squared_error(ground_truth, self.get_quantile(alpha).squeeze())
def median_dtw_distance(self, ground_truth):
pred_median = self.get_quantile(0.5)
dist, path = fastdtw(pred_median, ground_truth, dist=euclidean)
return dist
def median_attractor_distance(self, ground_truth):
pred_median = self.get_quantile(0.5)
stacker = AttractorStacker(10)
pred_median_att = stacker.apply(pred_median).squeeze()
ground_truth_att = stacker.apply(ground_truth).squeeze()
d = pairwise_distances(pred_median_att, ground_truth_att)
return d.min(axis=0).sum()
def smape_mean(self, ground_truth):
this_mean = self.posterior_mean.squeeze()
y_true = ground_truth.squeeze()
k = ground_truth.shape[0]
smape_vector = np.abs(y_true - this_mean) / (np.abs(y_true) + np.abs(this_mean))
return smape_vector.sum() * (2. / k)
def smape_quantile(self, ground_truth, alpha=0.5):
k = ground_truth.shape[0]
y_true = ground_truth.squeeze()
median = self.get_quantile(alpha).squeeze()
smape_vector = np.abs(y_true - median) / (np.abs(y_true) + np.abs(median))
return smape_vector.sum() * (2. / k)
def mase_mean(self, ground_truth, mase_norm_constant):
k = ground_truth.shape[0]
y_true = ground_truth.squeeze()
#mase_norm_constant = self.get_mase_norm_constant(ground_truth, 1)
this_mean = self.posterior_mean.squeeze()
mase_vector = np.abs(y_true - this_mean).sum() / k
return mase_vector / mase_norm_constant
def mase_quantile(self, ground_truth, mase_norm_constant, alpha=0.5):
k = ground_truth.shape[0]
y_true = ground_truth.squeeze()
#mase_norm_constant = self.get_mase_norm_constant(ground_truth, 1)
median = self.get_quantile(alpha).squeeze()
mase_vector = np.abs(y_true - median).sum() / k
return mase_vector / mase_norm_constant
def nll(self, ground_truth):
"""Computes NLL of drawing a time series from a GP sequential prediction"""
# type: (np.ndarray) -> np.float
horizon = self.posterior_mean.shape[0]
likelihood = np.array([norm(loc=self.posterior_mean[i], scale=self.posterior_std[i]).pdf(ground_truth[i])
for i in range(horizon)])
log_like = -np.log(likelihood)
if np.isinf(log_like).any():
likelihood += 1e-12
likelihood /= likelihood.sum()
log_like = -np.log(likelihood)
nll = log_like.sum()
#print 'NLL: {}'.format(nll)
return nll
def qq_dist(self, ground_truth, up_to=1000):
qq_x = np.arange(0.01, 1., 0.01)
qq_gp = [np.less_equal(ground_truth.squeeze()[:up_to], self.get_quantile(a)[:up_to]).mean() for a in qq_x]
return mean_squared_error(qq_x, qq_gp)
def get_quantile(self, alpha):
"""Computes \alpha-quantiles given the object's posterior mean and standard deviation"""
# type: (float) -> np.ndarray
return np.array([norm.ppf(alpha, mu, sigma) for mu, sigma in zip(self.posterior_mean, self.posterior_std)])
def cum_nll(self, ground_truth):
"""Computes compulative NLL of drawing a time series from a GP sequential prediction"""
# type: (np.ndarray) -> np.float
horizon = self.posterior_mean.shape[0]
likelihood = np.array([norm(loc=self.posterior_mean[i], scale=self.posterior_std[i]).pdf(ground_truth[i])
for i in range(horizon)])
nll = -np.log(likelihood).cumsum().sum()
#print 'Cum NLL: {}'.format(nll)
return nll
def plot_cum_nll(self, plt, ground_truth):
"""Computes compulative NLL of drawing a time series from a GP sequential prediction"""
# type: (np.ndarray) -> np.float
horizon = self.posterior_mean.shape[0]
likelihood = np.array([norm(loc=self.posterior_mean[i], scale=self.posterior_std[i]).pdf(ground_truth[i])
for i in range(horizon)])
nll = -np.log(likelihood).cumsum()
plt.plot(nll)
plt.title('Cumulative negative log likelihood')
def plot_median_2std(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_median = self.posterior_mean
quantile_025 = quantile_median - 2 * self.posterior_std
quantile_975 = quantile_median + 2 * self.posterior_std
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(quantile_median, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Median', 'True'])
def plot_draws_quantiles(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_median = self.posterior_mean
quantile_025 = quantile_median - 2 * self.posterior_std
quantile_975 = quantile_median + 2 * self.posterior_std
[plt.plot(x, color='xkcd:blue', alpha=0.1) for x in self.draws.squeeze()]
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(quantile_median, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Median', 'True'])
def plot_empirical(self, plt, ground_truth):
x_min = ground_truth.min()
x_max = ground_truth.max()
x = np.linspace(x_min, x_max, 300)
cdf = np.stack([norm.cdf(x, mu, sigma) for mu, sigma in zip(self.posterior_mean, self.posterior_std)], axis=0)
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
plt.plot(quantile_025, 'xkcd:azure')
plt.plot(quantile_975, 'xkcd:azure')
c_pal = sns.color_palette('Blues', n_colors=150).as_hex()
my_cmap = ListedColormap(c_pal + c_pal[::-1][1:])
plt.plot(ground_truth, 'xkcd:coral')
plt.imshow(cdf.T, origin='lower',
extent=[0, cdf.shape[0], x_min, x_max],
aspect='auto', cmap=my_cmap)
#plt.title('Empirical distribution function')
#plt.colorbar()
def plot_qq(self, plt, ground_truth, up_to=1000, col='xkcd:blue'):
qq_x = np.arange(0.01, 1., 0.01)
qq_gp = [np.less_equal(ground_truth.squeeze()[:up_to], self.get_quantile(a)[:up_to]).mean() for a in qq_x]
plt.plot(qq_x, qq_gp, color=col)
plt.plot(qq_x, qq_x, '--', color='xkcd:green')
plt.legend(['Continuous prediction', 'Ideal'])
#plt.title('Uncertainty calibration plot for continuous prediction')
@staticmethod
def compatibility(old_pred):
return GaussianPrediction(old_pred.draws)
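# Hedged usage sketch (illustrative only): a GaussianPrediction is summarised purely by the
# per-step mean and standard deviation of its Monte Carlo draws.
def _example_gaussian_prediction():
    rng = np.random.RandomState(0)
    draws = rng.normal(loc=0., scale=1., size=(100, 50))      # 100 sampled trajectories, horizon 50
    pred = GaussianPrediction(draws)
    ground_truth = np.zeros(50)
    return pred.rmse_mean(ground_truth), pred.nll(ground_truth)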
class GaussianMixturePrediction(Prediction):
type = 'gmm'
def __init__(self, draws, n_components, vbgmms=None):
draw_length = draws.shape[1]
self.n_components = n_components
self.draws = draws
overall_x_min = None
overall_x_max = None
if vbgmms is None:
self.vbgmms = []
for t in range(draw_length):
self.vbgmms += [BayesianGaussianMixture(self.n_components, n_init=3, max_iter=200).fit(self.draws[:, t, np.newaxis])]
else:
self.vbgmms = vbgmms
for vbgmm in self.vbgmms:
x_min = (vbgmm.means_.squeeze() - 3. * np.sqrt(vbgmm.covariances_).squeeze()).min()
x_max = (vbgmm.means_.squeeze() + 3. * np.sqrt(vbgmm.covariances_).squeeze()).max()
if overall_x_min is None or x_min < overall_x_min:
overall_x_min = x_min
if overall_x_max is None or x_max > overall_x_max:
overall_x_max = x_max
x = np.linspace(overall_x_min, overall_x_max, 300)
self.ts_range = x
self.cdf = self.eval_cdf(x)
def mse(self, ground_truth):
"""Computes MSE between two real-valued time series"""
# type: (np.ndarray) -> np.float
return np.mean([mean_squared_error(ground_truth, p) for p in self.draws])
def mse_mean(self, ground_truth):
return mean_squared_error(ground_truth, self.draws.mean(axis=0))
def rmse_quantile(self, ground_truth, alpha=0.5):
return np.sqrt(mean_squared_error(ground_truth, self.get_quantile(alpha).squeeze()))
def rmse_mean(self, ground_truth):
return np.sqrt(mean_squared_error(ground_truth, self.draws.mean(axis=0).squeeze()))
def median_dtw_distance(self, ground_truth):
pred_median = self.get_quantile(0.5)
dist, path = fastdtw(pred_median, ground_truth, dist=euclidean)
return dist
def median_attractor_distance(self, ground_truth):
pred_median = self.get_quantile(0.5)
stacker = AttractorStacker(10)
pred_median_att = stacker.apply(pred_median).squeeze()
ground_truth_att = stacker.apply(ground_truth).squeeze()
d = pairwise_distances(pred_median_att, ground_truth_att)
return d.min(axis=0).sum()
def smape_mean(self, ground_truth):
this_mean = self.draws.mean(axis=0).squeeze()
y_true = ground_truth.squeeze()
k = ground_truth.shape[0]
smape_vector = np.abs(y_true - this_mean) / (np.abs(y_true) + np.abs(this_mean))
return smape_vector.sum() * (2. / k)
def smape_quantile(self, ground_truth, alpha=0.5):
k = ground_truth.shape[0]
y_true = ground_truth.squeeze()
median = self.get_quantile(alpha).squeeze()
smape_vector = np.abs(y_true - median) / (np.abs(y_true) + np.abs(median))
return smape_vector.sum() * (2. / k)
def mase_mean(self, ground_truth, mase_norm_constant):
k = ground_truth.shape[0]
y_true = ground_truth.squeeze()
#mase_norm_constant = self.get_mase_norm_constant(ground_truth, 1)
this_mean = self.draws.mean(axis=0).squeeze()
mase_vector = np.abs(y_true - this_mean).sum() / k
return mase_vector / mase_norm_constant
def mase_quantile(self, ground_truth, mase_norm_constant, alpha=0.5):
k = ground_truth.shape[0]
#mase_norm_constant = self.get_mase_norm_constant(ground_truth, 1)
y_true = ground_truth.squeeze()
median = self.get_quantile(alpha).squeeze()
mase_vector = np.abs(y_true - median).sum() / k
return mase_vector / mase_norm_constant
def quantile_mse(self, ground_truth, alpha=0.5):
return mean_squared_error(ground_truth, self.get_quantile(alpha).squeeze())
def qq_dist(self, ground_truth, up_to=1000):
qq_x = np.arange(0.01, 1., 0.01)
qq_gp = [np.less_equal(ground_truth.squeeze()[:up_to], self.get_quantile(a)[:up_to]).mean() for a in qq_x]
return mean_squared_error(qq_x, qq_gp)
def nll(self, ground_truth):
"""Computes NLL of drawing a time series from a GP sequential prediction"""
# type: (np.ndarray) -> np.float
horizon = len(self.vbgmms)
likelihood = []
for t in range(horizon):
vbgmm = self.vbgmms[t]
p = 0.
for pi, mu, sigma_sq in zip(vbgmm.weights_.squeeze(), vbgmm.means_.squeeze(), vbgmm.covariances_.squeeze()):
sigma = np.sqrt(sigma_sq)
p += pi * norm.pdf(ground_truth[t], mu, sigma)
likelihood += [p]
likelihood = np.array(likelihood)
nll = -np.log(likelihood).sum()
#print 'NLL: {}'.format(nll)
return nll
    def get_quantile(self, alpha):
        """Computes alpha-quantiles from the mixture CDF evaluated over the stored range"""
        # type: (float) -> np.ndarray
        return np.array([self.ts_range[j] for j in (self.cdf >= alpha).argmax(axis=-1)])
def cum_nll(self, ground_truth):
"""Computes compulative NLL of drawing a time series from a GP sequential prediction"""
# type: (np.ndarray) -> np.float
horizon = len(self.vbgmms)
likelihood = []
for t in range(horizon):
vbgmm = self.vbgmms[t]
p = 0.
for pi, mu, sigma_sq in zip(vbgmm.weights_.squeeze(), vbgmm.means_.squeeze(), vbgmm.covariances_.squeeze()):
sigma = np.sqrt(sigma_sq)
p += pi * norm.pdf(ground_truth[t], mu, sigma)
likelihood += [p]
likelihood = np.array(likelihood)
nll = -np.log(likelihood).cumsum().sum()
#print 'Cum NLL: {}'.format(nll)
return nll
def plot_cum_nll(self, plt, ground_truth):
"""Computes compulative NLL of drawing a time series from a GP sequential prediction"""
# type: (np.ndarray) -> np.float
horizon = len(self.vbgmms)
likelihood = []
for t in range(horizon):
vbgmm = self.vbgmms[t]
p = 0.
for pi, mu, sigma_sq in zip(vbgmm.weights_.squeeze(), vbgmm.means_.squeeze(), vbgmm.covariances_.squeeze()):
sigma = np.sqrt(sigma_sq)
p += pi * norm.pdf(ground_truth[t], mu, sigma)
likelihood += [p]
likelihood = np.array(likelihood)
nll = -np.log(likelihood).cumsum()
plt.plot(nll)
plt.title('Cumulative negative log likelihood')
def plot_median_2std(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
quantile_median = self.get_quantile(0.5)
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(quantile_median, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Median', 'True'])
def eval_cdf(self, x):
cdf = []
for vbgmm in self.vbgmms:
P = 0.
for pi, mu, sigma_sq in zip(vbgmm.weights_.squeeze(), vbgmm.means_.squeeze(), vbgmm.covariances_.squeeze()):
sigma = np.sqrt(sigma_sq)
P += pi * norm.cdf(x, mu, sigma)
cdf += [P]
return np.stack(cdf, axis=0)
def plot_draws_quantiles(self, plt, ground_truth):
"""Plots a probabilistic forecast's median and 2.5, 97.5 quantiles alongside the corresponding ground truth"""
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
quantile_median = self.get_quantile(0.5)
[plt.plot(x, color='xkcd:blue', alpha=0.1) for x in self.draws.squeeze()]
plt.plot(quantile_025, 'xkcd:orange')
plt.plot(quantile_975, 'xkcd:orange')
plt.plot(quantile_median, 'xkcd:maroon')
plt.plot(ground_truth, 'xkcd:olive')
plt.legend(['Quantile 0.025', 'Quantile 0.975', 'Median', 'True'])
def plot_empirical(self, plt, ground_truth):
c_pal = sns.color_palette('Blues', n_colors=150).as_hex()
my_cmap = ListedColormap(c_pal + c_pal[::-1][1:])
quantile_025 = self.get_quantile(0.025)
quantile_975 = self.get_quantile(0.975)
plt.plot(quantile_025, 'xkcd:azure')
plt.plot(quantile_975, 'xkcd:azure')
plt.plot(ground_truth, 'xkcd:coral')
plt.imshow(self.cdf.T, origin='lower',
extent=[0, self.cdf.shape[0], self.ts_range.min(), self.ts_range.max()],
aspect='auto', cmap=my_cmap)
plt.title('Empirical distribution function')
#plt.colorbar()
def plot_qq(self, plt, ground_truth, up_to=1000, col='xkcd:blue'):
qq_x = np.arange(0.01, 1., 0.01)
qq_gp = [np.less_equal(ground_truth.squeeze()[:up_to], self.get_quantile(a)[:up_to]).mean() for a in qq_x]
plt.plot(qq_x, qq_gp, color=col)
plt.plot(qq_x, qq_x, '--', color='xkcd:green')
plt.legend(['Continuous prediction', 'Ideal'])
#plt.title('Uncertainty calibration plot for continuous prediction')
def plot_median_dtw_alignment(self, plt, ground_truth):
pred_median = self.get_quantile(0.5)
dist, path = fastdtw(pred_median, ground_truth, dist=euclidean)
plt.plot(np.array([pred_median[j] for i, j in path]))
plt.plot(np.array([ground_truth[i] for i, j in path]))
@staticmethod
def compatibility_univar_gaussian(old_pred, n_components):
return GaussianMixturePrediction(old_pred.draws, n_components)
@staticmethod
def compatibility(old_pred):
return GaussianMixturePrediction(old_pred.draws, old_pred.n_components, old_pred.vbgmms)
class MultivarVBGMMPrediction(Prediction):
type = 'multi_vbgmm'
def __init__(self, draws, x_ranges, vbgmms=None, n_components=5):
self.draws = draws
self.n_channels = draws.shape[-1]
self.predictive_horizon = draws.shape[1]
if vbgmms is not None:
self.vbgmm = vbgmms
else:
self.vbgmm = [BayesianGaussianMixture(n_components, n_init=5, max_iter=200).fit(draws[:, t])
for t in range(self.predictive_horizon)]
self.x_ranges = np.stack(x_ranges, axis=-1)
self.all_ch_like = self.eval_marginal_like(self.x_ranges)
self.all_ch_cdf = self.eval_marginal_cdf(self.x_ranges) # shape: (n_ts, n_ts_range, n_channels)
self.pred_mean = self.draws.mean(axis=0)
def get_quantile(self, alpha):
"""Computes \alpha-quantiles given the object's posterior mean and standard deviation"""
# type: (float) -> np.ndarray
all_quantiles = []
for i_ch in range(self.n_channels):
this_quantile = [self.x_ranges[q, i_ch]
for q in (self.all_ch_cdf[:, :, i_ch] >= alpha).argmax(axis=-1)] # shape: (n_ts, n_ts_range, n_channels)
this_quantile = np.array(this_quantile)
msk = (self.all_ch_cdf[:, -1, i_ch] < alpha)
this_quantile[msk] = self.x_ranges[-1, i_ch]
all_quantiles += [this_quantile]
return np.stack(all_quantiles, axis=-1)
def plot_decoded(self, plt, ground_truth):
if self.n_channels == 3:
ax = plt.figure(figsize=(12, 12)).gca(projection='3d')
elif self.n_channels == 2:
ax = plt.figure(figsize=(12, 12)).gca()
else:
print("plot_decoded only available for time series with n_channels < 4. Provided "
"time series has {} channels".format(ground_truth.shape[-1]))
return
[ax.plot(*p.T, '.', color='xkcd:blue', alpha=0.01) for p in self.draws]
ax.plot(*ground_truth.T, color='xkcd:orange', label='Ground truth', linewidth=3.0)
plt.legend(loc=7, prop={'size': 14})
plt.title('Sample predictions and ground truth', fontsize=24)
def plot_channel_cdf(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_channels, figsize=(15, 10))
fig.suptitle('Cumulative predictive posterior', fontsize=24)
c_pal = sns.color_palette('Blues', n_colors=150).as_hex()
my_cmap = ListedColormap(c_pal + c_pal[::-1][1:])
upper_quant = self.get_quantile(0.975)
lower_quant = self.get_quantile(0.025)
for i_ch in range(self.n_channels):
im=axes[i_ch].imshow(self.all_ch_cdf.T[i_ch],
origin='lower',
extent=[0, self.predictive_horizon,
self.x_ranges[0, i_ch], self.x_ranges[-1, i_ch]],
aspect='auto', cmap=my_cmap)
axes[i_ch].plot(lower_quant[:, i_ch], 'xkcd:azure', label='Quantiles')
axes[i_ch].plot(upper_quant[:, i_ch], 'xkcd:azure')
axes[i_ch].plot(ground_truth[:, i_ch], color='xkcd:orange', label='Ground truth')
axes[i_ch].legend(loc=1, prop={'size': 14})
divider = make_axes_locatable(axes[i_ch])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im, cax=cax)
def plot_channel_like(self, plt, ground_truth):
fig, axes = plt.subplots(self.n_channels, figsize=(15, 10))
upper_quant = self.get_quantile(0.975)
lower_quant = self.get_quantile(0.025)
fig.suptitle('Predictive posterior', fontsize=24)
for i_ch in range(self.n_channels):
im = axes[i_ch].imshow(self.all_ch_like.T[i_ch],
origin='lower',
extent=[0, self.predictive_horizon,
self.x_ranges[0, i_ch], self.x_ranges[-1, i_ch]],
aspect='auto', cmap='Blues', norm=LogNorm(vmin=0.001, vmax=1))
axes[i_ch].plot(lower_quant[:, i_ch], 'xkcd:azure', label='Quantiles')
axes[i_ch].plot(upper_quant[:, i_ch], 'xkcd:azure')
axes[i_ch].plot(ground_truth[:, i_ch], color='xkcd:orange', label='Ground truth')
axes[i_ch].legend(loc=1, prop={'size': 14})
divider = make_axes_locatable(axes[i_ch])
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im, cax=cax)
def plot_median_2std(self, plt, ground_truth, with_draws=True):
upper_quant = self.get_quantile(0.975)
lower_quant = self.get_quantile(0.025)
pred_median = self.get_quantile(0.5)
fig, axes = plt.subplots(self.n_channels, figsize=(15, 10))
fig.suptitle('Predictive median and quantiles 2.5 and 97.5', fontsize=24)
for i_ch in range(self.n_channels):
axes[i_ch].plot(lower_quant[:, i_ch], 'xkcd:azure', label='Quantiles')
axes[i_ch].plot(upper_quant[:, i_ch], 'xkcd:azure')
axes[i_ch].plot(pred_median[:, i_ch], 'xkcd:azure')
if with_draws:
[axes[i_ch].plot(d[:, i_ch], alpha=0.05, color='xkcd:blue') for d in self.draws]
axes[i_ch].plot(ground_truth[:, i_ch], color='xkcd:orange', label='Ground truth')
axes[i_ch].legend(loc=1, prop={'size': 14})
def plot_mean_2std(self, plt, ground_truth, with_draws=True):
upper_quant = self.get_quantile(0.975)
lower_quant = self.get_quantile(0.025)
fig, axes = plt.subplots(self.n_channels, figsize=(15, 10))
fig.suptitle('Predictive mean and quantiles 2.5 and 97.5', fontsize=24)
for i_ch in range(self.n_channels):
axes[i_ch].plot(lower_quant[:, i_ch], 'xkcd:azure', label='Quantiles')
axes[i_ch].plot(upper_quant[:, i_ch], 'xkcd:azure')
axes[i_ch].plot(self.pred_mean[:, i_ch], 'xkcd:azure')
if with_draws:
[axes[i_ch].plot(d[:, i_ch], alpha=0.05, color='xkcd:blue') for d in self.draws]
axes[i_ch].plot(ground_truth[:, i_ch], color='xkcd:orange', label='Ground truth')
axes[i_ch].legend(loc=1, prop={'size': 14})
def rmse_mean(self, ground_truth):
return np.sqrt(mean_squared_error(ground_truth, self.pred_mean))
def rmse_quantile(self, ground_truth, alpha=0.5):
return np.sqrt(mean_squared_error(ground_truth, self.get_quantile(alpha)))
def vbgmm_joint_nll(self, ground_truth):
return -np.sum([self.vbgmm[t].score(ground_truth[t:t + 1]) for t in range(self.draws.shape[1])])
def vbgmm_marginal_nll(self, ground_truth):
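        # Negative log-likelihood of the ground truth under each channel's marginal
        # mixture, computed independently for every time step and channel.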
all_ch_like = []
n_mix = self.vbgmm[0].weights_.shape[0]
for i_ch in range(self.n_channels):
ch_like = []
for t in range(self.draws.shape[1]):
cur_ch_like = 0.
this_vbgmm = self.vbgmm[t]
for k_mix in range(n_mix):
cur_ch_like += this_vbgmm.weights_[k_mix] * norm.pdf(ground_truth[t:t + 1, i_ch:i_ch + 1],
loc=this_vbgmm.means_[k_mix, i_ch],
scale=np.sqrt(this_vbgmm.covariances_[k_mix,
i_ch,
i_ch]))
ch_like += [cur_ch_like]
all_ch_like += [np.concatenate(ch_like, axis=0)]
return -np.log(np.concatenate(all_ch_like, axis=-1))
def eval_marginal_like(self, x_ranges):
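        # Per-channel marginal pdf of the fitted mixture at each time step:
        # p(x_i) = sum_k weights_[k] * Normal(x_i; means_[k, i], covariances_[k, i, i]),
        # evaluated on the supplied x_ranges grid.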
all_ch_like = []
n_mix = self.vbgmm[0].weights_.shape[0]
for i_ch in range(self.n_channels):
ch_like = []
for t in range(self.draws.shape[1]):
cur_ch_like = 0.
this_vbgmm = self.vbgmm[t]
for k_mix in range(n_mix):
cur_ch_like += this_vbgmm.weights_[k_mix] * norm.pdf(x_ranges[:, i_ch:i_ch+1],
loc=this_vbgmm.means_[k_mix, i_ch],
scale=np.sqrt(this_vbgmm.covariances_[k_mix,
i_ch,
i_ch]))
ch_like += [cur_ch_like]
all_ch_like += [np.stack(ch_like, axis=0)]
return np.concatenate(all_ch_like, axis=-1)
def eval_marginal_cdf(self, x_ranges):
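        # Same per-channel mixture as eval_marginal_like, but accumulating the Gaussian
        # CDF instead of the pdf, giving the marginal predictive CDF on x_ranges.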
all_ch_like = []
n_mix = self.vbgmm[0].weights_.shape[0]
for i_ch in range(self.n_channels):
ch_like = []
for t in range(self.draws.shape[1]):
cur_ch_like = 0.
this_vbgmm = self.vbgmm[t]
for k_mix in range(n_mix):
cur_ch_like += this_vbgmm.weights_[k_mix] * norm.cdf(x_ranges[:, i_ch:i_ch+1],
loc=this_vbgmm.means_[k_mix, i_ch],
scale=np.sqrt(this_vbgmm.covariances_[k_mix,
i_ch,
i_ch]))
ch_like += [cur_ch_like]
all_ch_like += [np.stack(ch_like, axis=0)]
return
|
np.concatenate(all_ch_like, axis=-1)
|
numpy.concatenate
|
from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import numpy as np
from numpy import pi, abs, fmod, modf
# Handling of branch cuts and angle conversions
_ANGLE_PERIOD = 360.
_ANGLE_BRANCH = -180.
def rerange(x, lo=_ANGLE_BRANCH, period=_ANGLE_PERIOD):
"""
Remove multiples of period to bring x into interval [lo, lo+period).
"""
return fmod(fmod(x - lo, period)+period,period) + lo
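# Minimal usage sketch (illustrative values, not from the original source):
# rerange(370.) maps to 10.0 and rerange(-190.) to 170.0, since both are reduced
# modulo 360 into the default interval [-180, 180).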
def is_within(x, lo, hi, period=_ANGLE_PERIOD):
"""
Returns true if some branch of x satisfies lo <= x < hi.
x can be an array.
"""
# Note the rerange numpifies the angle sufficiently that we can
# use np.array-style boolean operations (the '*' operator for
# AND).
x = rerange(x, lo, period=period)
return (lo <= x)*(x < hi)
def to_deg(x):
return 180.*x/pi
def to_rad(x):
return pi*x/180
def from_sexagesimal(x, hours=False):
"""
Given string of the form "dd:mm:ss.sss" or "hh:mm:ss.sss" (when
hours=True), convert to decimal degrees.
"""
w = x.split(':')
w[0] = w[0].strip()
s = 1.
if w[0][0]=='-':
s=-1.
w[0] = w[0][1:]
y = 0.
c = 1.
for yy in w:
y += float(yy) * c
c /= 60.
if hours: y*= 15.
return y*s
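# Worked example (inputs assumed for illustration only):
# from_sexagesimal("-12:30:00") -> -12.5 degrees, while
# from_sexagesimal("01:30:00", hours=True) -> 22.5 degrees (1.5 h * 15 deg/h).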
def to_sexagesimal(x, hours=False, hms=False):
"""
Given x in decimal degrees, convert to sexagesimal degrees or hours.
"""
s = ''
if x < 0:
s = '-'
x *= -1
if hours or hms:
x = x / 15.
di, i = modf(x)
dm, m = modf(
|
abs(di)
|
numpy.abs
|
import numpy as np
import math
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from local_error_local_picking import *
def func(x, a, c):
return a * x ** 2 + c ** 2
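# When fitted against squared travel times, func expresses the NMO hyperbola
# t(x)^2 = a*x^2 + t0^2, where a plays the role of 1/V_rms^2 and c of t0
# (under the usual small-offset hyperbolic-moveout assumption).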
#noisy travel time data generator with noisy link
#input args:
# peak: picking travel time per sensor
# SNR_noisy_link: SNR for noisy link
# receiver_number: number of receivers on one side
#output args:
# noisy_peak: noisy picking time after transmission
# DEVIATION_array: standard deviation on noisy picking time per sensor
def noisy_picking(peak, SNR_noisy_link, receiver_number):
peak = np.array(peak).T
noisy_peak = []
DEVIATION_array=[]
for j in peak:
#calculate standard deviation based on SNR
DEVIATION = np.sqrt( np.var(peak))/10**(SNR_noisy_link/20)
# noisy picking
noisy_picking = np.add(j, np.sqrt(abs(DEVIATION)) * np.random.randn(len(j), 1))
# append noisy picking and std array
noisy_peak.append(noisy_picking[0][0:receiver_number])
DEVIATION_array.append(DEVIATION)
return noisy_peak, DEVIATION_array
fig = plt.figure(figsize=(6, 6))
# define root-mean-square velocity solver
def rms_velocity(t0d, layer_velocity):
initial_time = 0.5 * np.array(t0d)
oneway = []
for i in range(len(initial_time)):
if i > 0:
oneway.append(initial_time[i] - initial_time[i - 1])
else:
oneway.append(initial_time[i])
oneway = np.array(oneway)
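    # Accumulate the RMS-velocity definition
    #   V_rms,n^2 = sum_{i<=n} v_i^2 * dt_i / sum_{i<=n} dt_i,
    # where dt_i are the one-way interval times recovered above from the cumulative t0 values.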
v_rms = []
k = 0
nu = 0
bu = 0
for j in range(len(np.array(t0d))):
while k <= j:
nu = nu + layer_velocity[j] ** 2 * oneway[j]
bu = bu + oneway[j]
k += 1
val = np.sqrt(abs(nu / bu))
v_rms.append(val)
return v_rms, oneway
# ground truth t0
# input args:
# test_depth: ground truth layer depth
# layer_velocity: ground truth layer velocity
#output args:
# t0d: ground truth t0
def t0_solver(test_depth, layer_velocity):
t0ground = []
for a in range(len(test_depth) - 1):
if a == 0:
t0ground.append(2 * (test_depth[a + 1]) / layer_velocity[a])
else:
t0ground.append((2 * (test_depth[a + 1] - test_depth[a]) / layer_velocity[a]))
t0d = []
for k in range(len(t0ground)):
t0d.append(np.array(t0ground[0:k]).sum())
return t0d
# t0 solver
def t0_withreceiver(offset, peak):
# estimate t0 from receiver measurement
t0coff = []
parameter = []
for j in range(len(peak)):
popt, pcov = curve_fit(func, np.array(offset[j]).flatten(), np.array(peak[j]).flatten() ** 2)
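        # popt = [a, c] from the fit t^2 ~ a*x^2 + c^2, so popt[1] acts as the
        # zero-offset time t0 and popt[0] as the moveout slope (~1/V_rms^2).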
t0coff.append(popt[1])
parameter.append(popt[0])
return t0coff, parameter
# velocity and depth estimator with NMO
#estimate layer velocity and depth from NMO
#input args:
# receiver_distance: receiver distance to source
# peak: picking arrival time of reflections at receivers
# layer_velocity: velocity of each layer
# test_depth: depth from ground for each layer
# receiver_number: number of receivers
# consensus_t0: final consensus estimated t0 for each layer after average consensus
# central_flag: flag to perform centralized NMO
# t0d:ground truth of t0
# optimal_flag: flag to perform optimum estimation with perfect picking and estimated m0 and t0
#output args:
# ground_depth: estimated layer depth (from ground surface)
# v_layer: estimated layer velocity
# t0coff: estimated t0
def vel_depth_estimator(pattern_analzye_flag, osicillation_pattern,receiver_distance, test_depth, layer_velocity, offset, peak, receiver_number, consensus_t0,
central_flag, optimal_flag):
# estimate t0 from receiver measurement
# if apply nmo in centralized manner
if central_flag == 1:
#estimated parameters via nonlinear least-square fitting
t0coff, para = t0_withreceiver(offset, peak)
if pattern_analzye_flag==0 and osicillation_pattern==0:
# recalculate peak in centralized case
peak=re_picking_arrival(t0coff,para,receiver_distance)
# if apply nmo in distributed manner with final consensus t0 and m0
elif central_flag == 0:
t0coff = consensus_t0
# apply nmo distributedly with local estimate t0 and m0
elif central_flag == 2:
t0coff = consensus_t0
t0ground = []
for a in range(len(test_depth) - 1):
if a == 0:
t0ground.append(2 * (test_depth[a + 1]) / layer_velocity[a])
else:
t0ground.append((2 * (test_depth[a + 1] - test_depth[a]) / layer_velocity[a]))
t0d = []
for k in range(len(t0ground)):
t0d.append(np.array(t0ground[0:k + 1]).sum())
t0 = t0d
# reconstruct velocity
# if its optimal estimate, use ground truth
if optimal_flag == 1 and central_flag == 1:
t0coff = t0
# calculate time difference
vel = []
for l in range(len(peak)):
time_diff = peak[l] - t0coff[l]
# velocity approximation with binomial expansion
# vel.append(np.array(offset[l]/np.sqrt(abs(2*time_diff*t0coff[l]))))
# velocity approximation
int_term = abs(np.array(peak[l]) ** 2 - np.array(t0coff[l]) ** 2)
vel.append(np.array(offset[l] / np.sqrt(int_term)))
# solve for velocity at each layer
v_layer = []
for r in range(len(vel)):
for p in range(len(vel[r])):
if r == 0:
v_layer.append(vel[0][p])
else:
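                # Dix-type inversion for the interval velocity of layer r:
                # v_r^2 = (V_r^2 * t0_r - V_{r-1}^2 * t0_{r-1}) / (t0_r - t0_{r-1})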
v_layer.append(np.sqrt(abs(
(vel[r][p] ** 2 * np.array(t0coff[r]) - vel[r - 1][p] ** 2 * np.array(t0coff[r - 1])) / (
np.array(t0coff[r]) - np.array(t0coff[r - 1])))))
# reconstruct depth
v_rms, oneway = rms_velocity(t0coff, layer_velocity)
l = 0
depth = []
v_layer = np.array(v_layer)
# solve for estimated oneway travel time
oneway_estimate = []
for j in range(len(t0coff)):
if j == 0:
oneway_estimate.append((np.array(t0coff[j])) / 2)
else:
oneway_estimate.append((np.array(t0coff[j]) - np.array(t0coff[j - 1])) / 2)
# reshape for processing
v_layer = v_layer.reshape(len(peak), receiver_number)
# deal with special case
a = 0
for j in v_layer:
if central_flag != 2:
if (np.array(j)).mean() - layer_velocity[a] > 1e3:
v_layer[a] = abs(v_layer[a] - (np.array(j[-1]) - layer_velocity[a]))
a += 1
for j in range(len(v_layer)):
depth.append(v_layer[j] * oneway[j])
# calculate depth from ground
ground_depth = []
ground_depthval = np.zeros([1, len(depth[0])])
for j in range(len(depth)):
ground_depthval = ground_depthval + depth[j]
ground_depth.append(ground_depthval)
return ground_depth, v_layer, t0coff
# function to investigate root mean square velocity estimation error
# input args;
# peak: picking arrival time
# t0coff: estimated t0
# receiver_distance: receiver offset
#output args:
# f_mean: calculated root mean square velocity per layer per receiver
def root_mean_vel_error(peak, t0coff, receiver_distance, synthetic_arriavl):
    # calculate the value of function f
f = []
for j in range(len(peak)):
#root mean-square velocity expression
term = np.sqrt(abs((np.array(peak[j]) ** 2 - np.array(t0coff[j]) ** 2)))
f.append(np.array(np.divide(receiver_distance, (term))))
# f.append(np.array((term)))
#
f_mean = np.array(f)
return f_mean
# function for normal moveout with picking arrival time and receiver offsets in centralized and distributed manner
# input args:
# vel_flag: 1: plot centralized Normal moveout estimation result
# vel_flag1: 1:plot distributed normal moveout velocity estimation results
# 0: plot distributed normal moveout depth estimation results
# pattern_analzye_flag: set 1: plot root-mean square velocity estimation curve analysis
# osicillation_pattern: set 1: plot how picking time deviation influences estimation in classic normal moveout
# without recalculating picking time
# finaltime: ground truth arrival time at receivers
# local_information: estimated parameter t0 at all iterations for all receivers
# local_information1: estimated parameter m0 at all iterations for all receivers
# local_information11: estimated parameter t0 at all iterations for all receivers with Dirls algorithm
# local_information111: estimated parameter m0 at all iterations for all receivers with Dirls algorithm
# receiver_distance: receiver offset for each receiver on one side
# percen: noisy link flag
# consensus_t0: final consensus estimated t0 per layer
# consenus_para: final consensus estimated m0 per layer
# peak: picking travel time
# layer_velocity: layer propagation velocity
# test_depth: depth of each layer calculated from ground
# layer_n: number of layers
# receiver_number: number of receivers on one side
#output args:
# peak: picking travel time at receivers
# optimal_time: optimal picking travel time at receivers
# ground_depth: estimated layer depth from centralized NMO
# v_layer: estimated layer velocity from centralized NMO
# t0coff: estimated t0
# t0coffop:optimal estimated t0 (ground truth)
# ground_depth_dis: final consensus estimated layer depth
# v_layer_dis: final consensus estimated layer velocity
def normal_moveout(vel_flag, vel_flag1, pattern_analzye_flag, osicillation_pattern, finaltime, local_information,
local_information1, local_information11, local_information111, receiver_distance, percen,
consensus_t0, consenus_para, peak, layer_velocity, test_depth, layer_n, receiver_number):
# generate ground truth receiver offset
synthetic_offset=[]
for i in range(layer_n):
synthetic_offset.append(receiver_distance)
synthetic_offset = np.array(synthetic_offset)
synthetic_arriavl = sorted(np.array(finaltime).flatten())
# use recalculated picking arrival time from distributed nmo
newpeak11 = re_picking_arrival(np.array(local_information11).T[-1].T, np.array(local_information111).T[-1].T, receiver_distance)
# use repicking from dirls
arrival = re_picking_arrival(
|
np.array(local_information)
|
numpy.array
|
import random
import time
from collections import defaultdict
from PIL import Image,ImageDraw,ImageFont
import cv2
import numpy as np
import base64
def add_chinese(img_,str_,id):
    pil_img = cv2.cvtColor(img_,cv2.COLOR_BGR2RGB)  # cv2 and PIL store color channels in different orders, so convert to RGB
    pilimg = Image.fromarray(pil_img)  # Image.fromarray() turns the array into a PIL image (the inverse of np.array())
    draw = ImageDraw.Draw(pilimg)  # draw Chinese characters onto the PIL image
    font = ImageFont.truetype("./cfg_font/simhei.ttf",20,encoding="utf-8")  # arg 1: font file path, arg 2: font size; on Windows, "simhei.ttf" usually lives in C:\Windows\Fonts
    draw.text((0,int(20*id)),str_,(255,0,0),font=font)
    img_ = cv2.cvtColor(np.array(pilimg),cv2.COLOR_RGB2BGR)  # convert back to a BGR array that cv2.imshow() can display
return img_
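# Minimal usage sketch (the image path and text are placeholders, not from the source):
#   frame = cv2.imread("example.jpg")
#   frame = add_chinese(frame, "示例文本", 0)  # draws the string ("sample text") on the first text row
#   cv2.imshow("frame", frame)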
def draw_person(img,data):
for bbox in data:
plot_box(bbox, img, color=(255,0,255), label="person", line_thickness=2)
def draw_bdudu_mask(img_,data):
img_fusion = None
if data['data'] is not None:
pass
res = data['data']['info']
labelmap = base64.b64decode(res['labelmap']) # res为通过接口获取的返回json
nparr =
|
np.fromstring(labelmap, np.uint8)
|
numpy.fromstring
|
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d
import numpy as np
import time
import pytest
@pytest.mark.parametrize(
"input_array, expect_exception",
[
# Empty case
(np.ones((0, 3), dtype=np.float64), False),
# Wrong shape
(np.ones((2, 4), dtype=np.float64), True),
# Non-numpy array
([[1, 2, 3], [4, 5, 6]], False),
([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], False),
# Datatypes
(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), False),
(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False),
(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False),
# Slice non-contiguous memory
(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]],
dtype=np.float64)[:, 0:6:2], False),
# Transpose view
(np.array([[1, 4], [2, 5], [3, 6]], dtype=np.float64).T, False),
# Fortran layout
(np.asfortranarray(np.array([[1, 2, 3], [4, 5, 6]],
dtype=np.float64)), False),
])
def test_Vector3dVector(input_array, expect_exception):
def run_test(input_array):
open3d_array = open3d.Vector3dVector(input_array)
output_array = np.asarray(open3d_array)
np.testing.assert_allclose(input_array, output_array)
if expect_exception:
with pytest.raises(Exception):
run_test(input_array)
else:
run_test(input_array)
@pytest.mark.parametrize(
"input_array, expect_exception",
[
# Empty case
(np.ones((0, 3), dtype=np.int32), False),
# Wrong shape
(np.ones((2, 4), dtype=np.int32), True),
# Non-numpy array
([[1, 2, 3], [4, 5, 6]], False),
([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], False),
# Datatypes
(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), False),
(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False),
(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False),
# Slice non-contiguous memory
(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]],
dtype=np.int32)[:, 0:6:2], False),
# Transpose view
(np.array([[1, 4], [2, 5], [3, 6]], dtype=np.int32).T, False),
# Fortran layout
(np.asfortranarray(np.array([[1, 2, 3], [4, 5, 6]],
dtype=np.int32)), False),
])
def test_Vector3iVector(input_array, expect_exception):
def run_test(input_array):
open3d_array = open3d.Vector3iVector(input_array)
output_array = np.asarray(open3d_array)
np.testing.assert_allclose(input_array, output_array)
if expect_exception:
with pytest.raises(Exception):
run_test(input_array)
else:
run_test(input_array)
@pytest.mark.parametrize(
"input_array, expect_exception",
[
# Empty case
(
|
np.ones((0, 2), dtype=np.int32)
|
numpy.ones
|