prompt stringlengths 15-655k | completion stringlengths 3-32.4k | api stringlengths 8-52 |
---|---|---|
import re, os
import numpy as np
from .normalized_hermite import hermval_weighted, hermval
def read_hermite_basis(fname='spectral_basis.desc'):
"""
Keyword Arguments:
fname -- (default 'spectral_basis.desc')
"""
pattern = r'H_([0-9]+),\s+fw_(.*)\s+H_([0-9]+),\s+fw_(.*)'
f = open(fname,'r')
elements = []
for l in f.readlines():
match = re.match(pattern, l)
if match:
k1 = int(match.group(1))
w1 = float(match.group(2))
k2 = int(match.group(3))
w2 = float(match.group(4))
elements.append( {'k1' : k1, 'w1' : w1, 'k2' : k2, 'w2' : w2} )
return elements
def hermite_evaluatez(C, Z, N, weighted=True):
"""
Evaluate 2-d Hermite series with weight exp(-|z|^2/2)
Keyword Arguments:
C -- coefficients as 1d array
Z -- evaluation points Z = X + 1j Y
N -- max degree in one direction
"""
X = np.real(Z)
Y = np.imag(Z)
degx = np.array([]).astype(int)
degy = np.array([]).astype(int)
for i in range(N+1):
t = np.arange(i+1).astype(int)
degx =
| np.hstack((degx,t)) | numpy.hstack |
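A side note on the truncated loop above: for each total degree i it appends index arrays to degx (and presumably degy, which is cut off in the prompt). A minimal standalone sketch of that enumeration; the degy pairing is an assumption, not taken from the original:

import numpy as np

# Hypothetical sketch: enumerate 2-D Hermite degree pairs (kx, ky) up to N = 2,
# mirroring the hstack pattern in hermite_evaluatez above.
N = 2
degx = np.array([], dtype=int)
degy = np.array([], dtype=int)
for i in range(N + 1):
    t = np.arange(i + 1, dtype=int)
    degx = np.hstack((degx, t))      # 0 | 0 1 | 0 1 2
    degy = np.hstack((degy, i - t))  # assumed pairing so that degx + degy == i

print([(int(a), int(b)) for a, b in zip(degx, degy)])
# [(0, 0), (0, 1), (1, 0), (0, 2), (1, 1), (2, 0)]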
# Wafer map pattern classification using CNN
import pickle
import os
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications.vgg16 import VGG16
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError as e:
print(e)
DIM = 64
BATCH_SIZE = 32
MAX_EPOCH = 1000
TRAIN_SIZE_LIST = [500, 5000, 50000, 162946]
LEARNING_RATE = 1e-4
early_stopping = tf.keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True)
with open('../data/X_CNN_64.pickle', 'rb') as f:
X_resize = pickle.load(f)
with open('../data/y.pickle', 'rb') as f:
y = pickle.load(f)
# Stack wafer maps as 3 channels to correspond with RGB channels.
X_resize = (X_resize - 0.5) * 2
X_resize_stacked = np.repeat(X_resize, 3, -1)
y_onehot = tf.keras.utils.to_categorical(y)
REP_ID = 0
RAN_NUM = 27407 + REP_ID
for TRAIN_SIZE_ID in range(4):
TRAIN_SIZE = TRAIN_SIZE_LIST[TRAIN_SIZE_ID]
X_trnval, X_tst, y_trnval, y_tst = train_test_split(X_resize_stacked, y_onehot,
test_size=10000, random_state=RAN_NUM)
# Randomly sample train set for evaluation at various training set size
if TRAIN_SIZE == X_trnval.shape[0]:
pass
else:
X_trnval,_ , y_trnval, _ = train_test_split(X_trnval, y_trnval,
train_size=TRAIN_SIZE, random_state=RAN_NUM)
# Get unique labels in training set. Some labels might not appear in small training set.
labels = np.unique(np.argmax(y_trnval, 1))
base_model = VGG16(weights=None, pooling='avg', include_top=False)
predictions = tf.keras.layers.Dense(9, activation='softmax')(base_model.output)
model = tf.keras.Model(inputs=base_model.input, outputs=predictions)
for layer in base_model.layers: layer.trainable = True
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy'])
log = model.fit(X_trnval, y_trnval, validation_split=0.2,
batch_size=BATCH_SIZE, epochs=MAX_EPOCH,
callbacks=[early_stopping], verbose=0)
y_trnval_hat= model.predict(X_trnval)
y_tst_hat= model.predict(X_tst)
macro = f1_score(np.argmax(y_tst, 1), np.argmax(y_tst_hat, 1), labels=labels, average='macro')
micro = f1_score(np.argmax(y_tst, 1),
| np.argmax(y_tst_hat, 1) | numpy.argmax |
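The script reports both macro- and micro-averaged F1 on the test set, which matters because the wafer-map defect classes are imbalanced. A small toy example (unrelated to the wafer data) showing how the two averages diverge when a rare class is predicted poorly:

import numpy as np
from sklearn.metrics import f1_score

# 90 samples of class 0, 10 of class 1; half of the rare class is missed.
y_true = np.array([0] * 90 + [1] * 10)
y_pred = np.array([0] * 95 + [1] * 5)

print(f1_score(y_true, y_pred, average='micro'))  # equals accuracy here, 0.95
print(f1_score(y_true, y_pred, average='macro'))  # ~0.82, pulled down by the rare class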
from .. import util
from .. import ComparisonGP, ComparisonGPEmukitWrapper
from .acquisition_function import AcquisitionFunction
import GPy
import numpy as np
import numpy.linalg as la
import scipy as sp
import collections
import time
from scipy.stats import norm, mvn, multivariate_normal
from GPy.util.univariate_Gaussian import std_norm_pdf, std_norm_cdf, derivLogCdfNormal, logCdfNormal, cdfNormal
from typing import Callable, List, Tuple, Dict
class SequentialThompsonSampler():
"""
A class for performing Thompson sampling from a GP. The sampler stores the
simulated draws and conditions each new draw on (part of) them.
:param model: The model whose posterior the samples are drawn from
:param seed: Random seed that specifies the random sample
:param delta: Spacing between the points used to take numerical derivatives
:param num_points: Number of points the samples are conditioned on
"""
def __init__(self, model: ComparisonGP, seed: float=None, delta: float=1e-5, num_points: int=None):
self.model = model #model
self.posterior = model.posterior
self.d = self.model.X.shape[1] #dimensionality of the input points
self.seeds = []
self.reset(seed=seed)
self.delta = delta
if num_points is None:
num_points = int(100 * round(np.sqrt(self.d)))
self.num_points = num_points
self.scaling = np.array([model.kern.lengthscale[:]]).flatten()
def reset(self, seed: float=None) -> None:
"""
Reset the sampler by forgetting the already drawn samples and resetting the seed.
:param seed: new seed after reset
"""
self.x_evaluations = np.empty((0,self.model.X.shape[1]))
self.f_evaluations =
| np.empty((0,1)) | numpy.empty |
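The docstring says new draws are conditioned on (part of) the draws already stored. The class itself is cut off here, but the underlying operation is plain Gaussian conditioning; a generic NumPy sketch of that step (the RBF kernel and the locations are placeholders, not the ComparisonGP API):

import numpy as np

def rbf(a, b, lengthscale=1.0):
    # Squared-exponential kernel between two 1-D point sets.
    d = a[:, None] - b[None, :]
    return np.exp(-0.5 * (d / lengthscale) ** 2)

rng = np.random.default_rng(0)
x_old = np.array([0.0, 1.0])          # locations of earlier draws
f_old = rng.standard_normal(2)        # function values already drawn there
x_new = np.array([0.5])               # new location, conditioned on the stored draws

K_oo = rbf(x_old, x_old) + 1e-9 * np.eye(len(x_old))
K_no = rbf(x_new, x_old)
K_nn = rbf(x_new, x_new)

mean = K_no @ np.linalg.solve(K_oo, f_old)
cov = K_nn - K_no @ np.linalg.solve(K_oo, K_no.T)
f_new = rng.multivariate_normal(mean, cov)   # draw consistent with the stored values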
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import patch
import pytest
import numpy as np
from metaspace.sm_annotation_utils import (
IsotopeImages,
SMDataset,
GraphQLClient,
SMInstance,
GraphQLException,
)
from metaspace.tests.utils import sm, my_ds_id, advanced_ds_id
EXPECTED_RESULTS_COLS = [
'msm',
'moc',
'rhoSpatial',
'rhoSpectral',
'fdr',
'mz',
'moleculeNames',
'moleculeIds',
'intensity',
'colocCoeff',
]
@pytest.fixture()
def dataset(sm, my_ds_id):
return sm.dataset(id=my_ds_id)
@pytest.fixture()
def advanced_dataset(sm, advanced_ds_id):
return sm.dataset(id=advanced_ds_id)
@pytest.fixture()
def downloadable_dataset_id(sm: SMInstance):
OLD_DATASET_FIELDS = GraphQLClient.DATASET_FIELDS
GraphQLClient.DATASET_FIELDS += ' canDownload'
datasets = sm.datasets()
GraphQLClient.DATASET_FIELDS = OLD_DATASET_FIELDS
for ds in datasets:
if ds._info['canDownload'] and ds._info['inputPath'].startswith('s3a:'):
return ds.id
def test_annotations(dataset: SMDataset):
annotations = dataset.annotations()
assert len(annotations) > 0
assert len(annotations[0]) == 2 # sf, adduct tuple
def test_results(dataset: SMDataset):
annotations = dataset.results(database=('HMDB', 'v4'), fdr=0.5)
assert len(annotations) > 0
assert all(col in annotations.columns for col in EXPECTED_RESULTS_COLS)
assert list(annotations.index.names) == ['formula', 'adduct']
def test_results_with_coloc(dataset: SMDataset):
coloc_with = dataset.results(database=('HMDB', 'v4'), fdr=0.5).ion[0]
coloc_annotations = dataset.results(database=('HMDB', 'v4'), fdr=0.5, coloc_with=coloc_with)
assert len(coloc_annotations) > 0
assert coloc_annotations.colocCoeff.all()
def test_results_with_int_database_id(dataset: SMDataset):
annotations = dataset.results(22, fdr=0.5)
assert len(annotations) > 0
def test_results_with_str_database_id(dataset: SMDataset):
try:
annotations = dataset.results('22', fdr=0.5)
# If the above code succeeds, it's time to start coercing the databaseId type to fit the API.
# See the comment in GraphQLClient.map_database_to_id for context.
assert False
except GraphQLException:
assert True
@patch(
'metaspace.sm_annotation_utils.GraphQLClient.get_databases',
return_value=[{'id': '22', 'name': 'HMDB', 'version': 'v4'}],
)
@patch('metaspace.sm_annotation_utils.GraphQLClient.getAnnotations', return_value=[])
def test_map_database_works_handles_strs_ids_from_api(
mock_getAnnotations, mock_get_databases, dataset: SMDataset
):
# This test is just to ensure that the forward-compatibility with string IDs has the correct behavior
dataset.results()
print(mock_getAnnotations.call_args)
annot_filter = mock_getAnnotations.call_args[1]['annotationFilter']
assert annot_filter['databaseId'] == '22'
def test_results_neutral_loss_chem_mod(advanced_dataset: SMDataset):
"""
Test setup: Create a dataset with a -H2O neutral loss and a -H+C chem mod.
"""
annotations = advanced_dataset.results(database=('HMDB', 'v4'), fdr=0.5)
annotations_cm = advanced_dataset.results(
database=('HMDB', 'v4'), fdr=0.5, include_chem_mods=True
)
annotations_nl = advanced_dataset.results(
database=('HMDB', 'v4'), fdr=0.5, include_neutral_losses=True
)
annotations_cm_nl = advanced_dataset.results(
database=('HMDB', 'v4'), fdr=0.5, include_chem_mods=True, include_neutral_losses=True
)
# Check expected columns
assert list(annotations_cm.index.names) == ['formula', 'adduct', 'chemMod']
assert list(annotations_nl.index.names) == ['formula', 'adduct', 'neutralLoss']
assert list(annotations_cm_nl.index.names) == ['formula', 'adduct', 'chemMod', 'neutralLoss']
# Check CMs / NLs are present when explicitly included
assert len(annotations_cm[annotations_cm.index.get_level_values('chemMod') != '']) > 0
assert len(annotations_nl[annotations_nl.index.get_level_values('neutralLoss') != '']) > 0
assert len(annotations_cm_nl[annotations_cm_nl.index.get_level_values('chemMod') != '']) > 0
assert len(annotations_cm_nl[annotations_cm_nl.index.get_level_values('neutralLoss') != '']) > 0
# Check CMs / NLs are excluded if they're not explicitly included
assert annotations.index.is_unique
assert annotations_cm.index.is_unique
assert annotations_nl.index.is_unique
assert annotations_cm_nl.index.is_unique
assert len(annotations) < len(annotations_cm) < len(annotations_cm_nl)
assert len(annotations) < len(annotations_nl) < len(annotations_cm_nl)
plain_annotations = set(
annotations_cm_nl.reset_index(['chemMod', 'neutralLoss'])[
lambda df: (df.chemMod == '') & (df.neutralLoss == '')
].index
)
assert set(annotations.index) == plain_annotations
def test_isotope_images(dataset: SMDataset):
sf, adduct = dataset.annotations(neutralLoss='', chemMod='')[0]
images = dataset.isotope_images(sf, adduct)
assert len(images) > 1
assert isinstance(images[0], np.ndarray)
def test_isotope_images_advanced(advanced_dataset: SMDataset):
sf, cm, nl, adduct = advanced_dataset.annotations(
return_vals=('sumFormula', 'chemMod', 'neutralLoss', 'adduct'),
neutralLoss='-H2O',
chemMod='-H+C',
)[0]
images = advanced_dataset.isotope_images(sf, adduct, chem_mod=cm, neutral_loss=nl)
assert len(images) > 1
assert isinstance(images[0], np.ndarray)
def test_isotope_images_scaling(dataset: SMDataset):
ann = dataset.results(neutralLoss='', chemMod='').iloc[0]
formula, adduct = ann.name
scaled_img = dataset.isotope_images(formula, adduct)[0]
unscaled_img = dataset.isotope_images(formula, adduct, scale_intensity=False)[0]
clipped_img = dataset.isotope_images(formula, adduct, hotspot_clipping=True)[0]
clipped_unscaled_img = dataset.isotope_images(
formula, adduct, scale_intensity=False, hotspot_clipping=True
)[0]
assert np.max(scaled_img) == pytest.approx(ann.intensity)
assert np.max(unscaled_img) == pytest.approx(1)
assert np.max(clipped_img) < ann.intensity
assert np.max(clipped_img) > ann.intensity / 2 # Somewhat arbitrary, but generally holds true
assert
| np.max(clipped_unscaled_img) | numpy.max |
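Outside of pytest, the client calls that these fixtures and tests exercise look roughly like the sketch below; the dataset id is a placeholder and the default-constructed SMInstance (public server, saved credentials) is an assumption:

from metaspace.sm_annotation_utils import SMInstance

sm = SMInstance()                          # assumed defaults for host / credentials
ds = sm.dataset(id='placeholder-ds-id')    # hypothetical dataset id

anns = ds.results(database=('HMDB', 'v4'), fdr=0.5)
formula, adduct = anns.index[0]            # index levels are ('formula', 'adduct'), per the tests
imgs = ds.isotope_images(formula, adduct, scale_intensity=False)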
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to test.
Notes
-----
Functions can have up to 3 scalar arguments with names a, b and c. If the
function has an optional integer argument it must be called n. Vector inputs
have the name x.
"""
from __future__ import division
import numpy as np
import tangent
from tangent import insert_grad_of
import tensorflow as tf
def id_(a):
return a
def unary_sub(a):
return -a
def sum_(x):
return np.sum(x)
def overwrite_call(x):
x = np.sum(x)
x = x * x
return x
def tanh(a):
return np.tanh(a)
def I_assign(a):
b = a
return b
def increment(a):
return a + 1
def saxpy(a, b, c):
return a * b + c
def mul(a, b):
return a * b
def add(a, b):
return a + b
def saxpy_call(a, b, c):
return add(mul(a, b), c)
def saxpy_anf(a, b, c):
t1 = a * b
t2 = t1 + c
return t2
def saxpy_overwrite(a, b, c):
t = a * b
t = t + c
return t
def gradname_aliasing(a):
da = a * a
return da
def overwrite_if(a):
if a > 0:
a = a * 3.0
else:
a = a * 2.0
return a
def test_binop_mul1(a):
return a * 3.0
def test_binop_mul2(a):
return 3.0 * a
def test_binop_mul3(a):
return a * a
def test_binop_div1(a):
return a / 3.0
def test_binop_div2(a):
return 3.0 / a
def test_binop_div3(a):
return a / a
def test_binop_sub1(a):
return a - 3.0
def test_binop_sub2(a):
return 3.0 - a
def test_binop_sub3(a):
return a - a
def test_binop_add1(a):
return a + 3.0
def test_binop_add2(a):
return 3.0 + a
def test_binop_add3(a):
return a + a
def nested_if(a):
if a > 0:
if a < 10:
a = a * a
else:
a = 3.0 * a
return a
def multiarg_if(a):
# A bunch of spammy nonsense to try to break our system
if a * a / 3.0 > 0:
b = 3.0
else:
b = 4.0
a = a * b
if a < b - a * b:
a = a * a
elif a > b + 3.0 * a:
if a / 2.0 < 0:
a = a / b
else:
a = a * b
else:
a = -a
return a
def fn_multiply(a):
return np.multiply(a, 3.0)
def fn_multiple_return(a):
return 2 * a, a
def test_anf_list(a):
b = [1, a * a, 2]
return np.sum(b)
def test_deep_anf_list(x):
b = [1, x[0] * x[1] * x[2], 3]
return np.sum(b)
# TODO: needs equivalent for all iterable collections
def test_subscript1(x):
a = x[0]
b = x[1]
return a * b
def test_subscript2(x):
a = x[0]
b = x[1]
x[0] = a * b
return np.sum(x)
def test_subscript3(x):
y = x ** 2.0
x[0] = y[1] * y[2]
return np.sum(x * y)
def test_list_and_subscript(a):
x = [1.0, a, 3.0]
return x[0] * x[1]
# TODO: needs a numpy equivalent, and all iterables collections too
# def test_subscript_overwrite(a):
# x = [1,a*a,3]
# x[1] = x[0]*x[1]
# return x[1]+x[1]
def serial_if(a):
if a > 0:
a = a * a
a = a + a
if a < 0:
a = 3.0 * a
return a
def multivar_if(a, b):
if a > b:
a = a * a
else:
a = a * b
return a
# TODO: split this into a bunch of tinier tests
def serial_for(a, n):
for i in range(n):
a = a * a
a = 2.0 * a
for i in range(n + 1):
a = a * 3.0
return a
def serial_ifthenfor(a, n):
if a > 0:
a = a * a
a = 2.0 * a
for i in range(n):
a = a * a
return a
def serial_forthenif(a, n):
for i in range(n):
a = a * a
a = 2.0 * a
if a > 0:
a = a * a
return a
def devilish_nested_if(a):
if a > 0:
a = a * 3.0
if a < 10:
a = a * a
else:
a = a * 2.0
return a
def overwrite_ifelse(a):
if a > 0:
a = a * 3.0
elif 0 > a:
a = a * 2.0
else:
a = 1.0 * a
return a
def overwrite_arg(a):
a = a * 3.0
return a
def overwrite_non_arg(a):
b = a
b = b * 3.0
return b
def third_pow(a):
return a * a * a
def direct_third_pow(a):
return a**3
def iter_third_pow1(a):
out = 1
for i in range(3):
out = out * a
return out
def iter_third_pow2(a):
out = a
for i in range(3 - 1):
out = out * a
return out
def iterpower_static(a):
for i in range(3):
a = a * a
return a
def iterpower(a, n):
for i in range(n):
a = a * a
return a
def cond_iterpower1(a):
for i in range(3):
if a < 20:
a = a * a
return a
def cond_iterpower2(a):
if a < 20:
for i in range(3):
a = a * a
return a
def superfor_iterpower(a):
for i in range(2):
for j in range(3):
a = a * a
return a
def super_iterpower(a):
# Tests ANF in a for loop
for i in range(3):
a = a * a * a
return a
# ================================================
# Numpy grads
# ================================================
def numpy_sum(x):
return np.sum(x)
def numpy_mean(x):
return np.mean(x)
def numpy_exp(a):
return np.exp(a)
def numpy_exp2(a):
return np.exp(np.exp(a))
def numpy_sqrt(a):
return
| np.sqrt(a) | numpy.sqrt |
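These functions are the inputs for tangent's source-to-source autodiff tests; the harness that differentiates them is not shown here. As a rough usage sketch, assuming the public tangent.grad entry point:

import tangent

def third_pow(a):
    return a * a * a

# tangent.grad generates and returns a new Python function computing d(third_pow)/da.
dthird_pow = tangent.grad(third_pow)
print(dthird_pow(2.0))  # expected 3 * 2.0**2 = 12.0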
import pytest
from SciDataTool import DataLinspace, DataTime, DataPattern
from numpy import meshgrid, linspace, array
from numpy.testing import assert_array_almost_equal
@pytest.mark.validation
# @pytest.mark.DEV
def test_slice():
"""Test slicing"""
X = DataLinspace(name="X", unit="m", initial=0, final=10, number=11)
Y = DataLinspace(name="Y", unit="m", initial=0, final=100, number=11)
y, x = meshgrid(Y.get_values(), X.get_values())
field = x + y
Field = DataTime(name="Example field", symbol="Z", axes=[X, Y], values=field)
# Extract data by axis value
# 'X=1'
result = Field.get_along("X=1", "Y")
assert_array_almost_equal(field[1, :], result["Z"])
# 'X=[0, 1]'
result = Field.get_along("X=[0, 1]", "Y")
expected = field[0:2, :]
assert_array_almost_equal(expected, result["Z"])
# 'X<2' # TODO: currently results in an error
result = Field.get_along("X<2", "Y")
expected = field[0:2, :]
# assert_array_almost_equal(expected, result["Z"])
# Extract data by operator
# mean value 'X=mean'
result = Field.get_along("X=mean", "Y")
expected = field.mean(axis=0)
assert_array_almost_equal(expected, result["Z"])
# sum 'X=sum'
result = Field.get_along("X=sum", "Y")
expected = field.sum(axis=0)
assert_array_almost_equal(expected, result["Z"])
# rms value 'X=rms'
result = Field.get_along("X=rms", "Y")
expected = (field ** 2).mean(axis=0) ** (1 / 2)
assert_array_almost_equal(expected, result["Z"])
# Extract data by indices
result = Field.get_along("X[1:5]", "Y[2:8]")
expected = field[1:5, 2:8]
assert_array_almost_equal(expected, result["Z"])
# Integral value 'X=integrate'
result = Field.get_along("X=integrate", "Y")
expected = (50 + 10 * linspace(0, 100, 11)) / 10
assert_array_almost_equal(expected, result["Z"])
# Step axis
X = DataPattern(
name="X",
unit="m",
values=array([-0.5, -0.3, -0.1, 0.1, 0.3]),
values_whole=array([-0.5, -0.3, -0.3, -0.1, -0.1, 0.1, 0.1, 0.3, 0.3, 0.5]),
rebuild_indices=[0, 0, 1, 1, 2, 2, 3, 3, 4, 4],
)
field =
| array([20, 40, 60, 80, 100]) | numpy.array |
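A quick note on the 'X=integrate' expectation above: the field is x + y on x in [0, 10], so the integral over x is 50 + 10*y, and the expected array divides that by the length of the X range (10). The closed form reduces to 5 + y:

import numpy as np

y = np.linspace(0, 100, 11)
expected = (50 + 10 * y) / 10          # same expression as in the test
assert np.allclose(expected, 5 + y)    # integral of (x + y) over x in [0, 10], per unit length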
"""This module calculates the Planck function in various forms.
Usually between a lower and upper limit and for a single temperature.
Two functions use weave.blitz code to speed up the functions.
"""
# planck module
import numpy as np
import dwell.constants as const
def planck_wavelength(wavelength, temp):
"""Calculate Black Body radiance at a single wavelength or wavelengths.
:param wavelength: wavelengths, microns
:type wavelength: numpy array
:param temp: temperature or array of temperatures, kelvin
:type temp: numpy array, or float
:returns: Planck function in units of W/m^2/ster/micron
calculated at wavelength and temperature
:rtype: numpy array
"""
metres_to_microns = 1.e6
c1 = 2 * const.h * const.c * const.c * metres_to_microns**4 # 1.191e8
c2 = (const.h * const.c / const.k_B) * metres_to_microns # 1.439e4
Blambda = c1 * (wavelength)**(-5)/(np.exp(c2/(wavelength*temp))-1)
return Blambda
def planck_wavenumber(wavenumber, temp):
"""Calculate Black Body radiance at given wavenumbers.
:param wavenumber: wavenumber, cm-1
:type wavenumber: numpy array
:param temp: temperature or array of temperatures, kelvin
:type temp: numpy array or float
:returns: Planck function in units of W/m^2/ster/cm-1
calculated at wavelength and temperature
:rtype: numpy array
"""
metres_to_centimetres = 1e2
c1 = 2 * const.h * const.c * const.c * metres_to_centimetres**4 # 1.e-8
c2 = const.h * const.c / (const.k_B) * metres_to_centimetres # 1.439
Bnu = c1 * (wavenumber**3) / (np.exp(wavenumber*c2/temp) - 1)
return Bnu
def cfst4_wavenumber(wl, wh, *args, **kwargs):
"""Calculate the integrated Planck function between two wavenumbers.
:param wl: low wavenumber, cm-1
:type wl: numpy array
:param wh: high wavenumber, cm-1
:type wh: numpy array
:param temp: temperature, kelvin
:type temp: float
:returns: Planck function in units of W/m^2/ster
at temperature integrated between wl and wh
:rtype: numpy array
"""
return cfst4_wavelength(10000. / wh, 10000. / wl, *args, **kwargs)
def cfst4_wavelength(wl, wh, temperature,
diff_planck=None, diff_planck_2=None):
"""Calculate the integrated Planck function between two wavelengths.
:param wl: low wavelength, micron
:type wl: numpy array
:param wh: high wavelength, micron
:type wh: numpy array
:param temp: temperature, kelvin
:type temp: float
:returns: Planck function in units of W/m^2/ster
at temperature integrated between wl and wh
:rtype: numpy array
"""
c = 0.153989733
c2 = 1.43883e4
sigma = const.sigma
wn = [wl, wh]
fac = 0.0000001
f = [0.0, 0.0]
for i in range(2):
f[i] = 0.0
v = c2 / (wn[i] * temperature)
for m in range(1, 1000):
wv = float(m) * v
ff = (float(m) ** (-4)) *\
(
| np.exp(-wv) | numpy.exp |
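As a sanity check on the wavelength form above, here is a standalone evaluation of B_lambda at 10 microns and 300 K with explicit SI constants (the module itself takes h, c and k_B from dwell.constants):

import numpy as np

h = 6.626e-34     # J s
c = 2.998e8       # m / s
k_B = 1.381e-23   # J / K

metres_to_microns = 1.e6
c1 = 2 * h * c * c * metres_to_microns**4   # ~1.191e8
c2 = (h * c / k_B) * metres_to_microns      # ~1.439e4 micron K

wavelength = 10.0   # microns
temp = 300.0        # kelvin
B = c1 * wavelength**-5 / (np.exp(c2 / (wavelength * temp)) - 1)
print(B)  # roughly 10 W/m^2/ster/micron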
#!/usr/bin/env python
import os
import argparse
import time
from pathlib import Path
from argparse import Namespace
from collections import OrderedDict
import numpy as np
from PIL import Image
from matplotlib import cm
import torch
import torch.nn.functional as F
import wandb
import yaml
from torch.utils.data import DataLoader
from torch.nn.parallel import DataParallel as DP
import mel2wav.modules
from mel2wav.dataset import AudioDataset
from mel2wav.modules import Audio2Mel, Discriminator, Generator
from mel2wav.utils import save_sample
from util import seed_everything
def load_state_dict_handleDP(model, filepath):
try:
model.load_state_dict(torch.load(filepath))
except RuntimeError as e:
print("RuntimeError", e)
print("Fixing model trained with DataParallel by removing .module prefix")
# state_dict = torch.load(filepath, map_location=device)
state_dict = torch.load(filepath)
# state_dict = state_dict["state_dict.model"]
# remove the DP() to load the model
state_dict = OrderedDict((k.split(".", 1)[1], v) for k, v in state_dict.items())
model.load_state_dict(state_dict)
return model
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--save_path", required=True)
parser.add_argument("--load_from_run_id", default=None)
parser.add_argument("--resume_run_id", default=None)
parser.add_argument("--n_mel_channels", type=int, default=80)
parser.add_argument("--ngf", type=int, default=32)
parser.add_argument("--n_residual_layers", type=int, default=3)
parser.add_argument("--ndf", type=int, default=16)
parser.add_argument("--num_D", type=int, default=3)
parser.add_argument("--n_layers_D", type=int, default=4)
parser.add_argument("--downsamp_factor", type=int, default=4)
parser.add_argument("--ratios", default=[8, 8, 2, 2])
parser.add_argument("--lambda_feat", type=float, default=10)
parser.add_argument("--cond_disc", action="store_true")
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument(
"--pad_mode", type=str, default="reflect", choices=["reflect", "replicate"]
)
parser.add_argument("--data_path", default=None, type=Path)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--seq_len", type=int, default=8192)
parser.add_argument("--sampling_rate", type=int, default=44100)
parser.add_argument("--epochs", type=int, default=3000)
parser.add_argument("--log_interval", type=int, default=100)
parser.add_argument("--save_interval", type=int, default=1000)
parser.add_argument("--n_test_samples", type=int, default=8)
parser.add_argument("--notes", type=str)
args = parser.parse_args()
return args
def main():
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seed_everything(7)
args = parse_args()
Path(args.save_path).mkdir(parents=True, exist_ok=True)
entity = "demiurge"
project = "melgan"
load_from_run_id = args.load_from_run_id
resume_run_id = args.resume_run_id
restore_run_id = load_from_run_id or resume_run_id
batch_size = args.batch_size
# Getting initial run steps and epoch
# if restore run, replace args
steps = None
if restore_run_id:
api = wandb.Api()
previous_run = api.run(f"{entity}/{project}/{restore_run_id}")
steps = previous_run.lastHistoryStep
prev_args = argparse.Namespace(**previous_run.config)
args = vars(args)
args.update(vars(prev_args))
args = Namespace(**args)
args.batch_size = batch_size
load_initial_weights = bool(restore_run_id)
sampling_rate = args.sampling_rate
ratios = args.ratios
if isinstance(ratios, str):
ratios = ratios.replace(" ", "")
ratios = ratios.strip("][").split(",")
ratios = [int(i) for i in ratios]
ratios = np.array(ratios)
if load_from_run_id and resume_run_id:
raise RuntimeError("Specify either --load_from_id or --resume_run_id.")
if resume_run_id:
print(f"Resuming run ID {resume_run_id}.")
elif load_from_run_id:
print(f"Starting new run with initial weights from run ID {load_from_run_id}.")
else:
print("Starting new run from scratch.")
# read 1 line in train files to log dataset location
train_files = Path(args.data_path) / "train_files.txt"
with open(train_files, encoding="utf-8", mode="r") as f:
file = f.readline()
args.train_file_sample = str(file)
wandb.init(
entity=entity,
project=project,
id=resume_run_id,
config=args,
resume=True if resume_run_id else False,
save_code=True,
dir=args.save_path,
notes=args.notes,
)
print("run id: " + str(wandb.run.id))
print("run name: " + str(wandb.run.name))
root = Path(wandb.run.dir)
root.mkdir(parents=True, exist_ok=True)
####################################
# Dump arguments and create logger #
####################################
with open(root / "args.yml", "w") as f:
yaml.dump(args, f)
wandb.save("args.yml")
###############################################
# The file modules.py is needed by the unagan #
###############################################
wandb.save(mel2wav.modules.__file__, base_path=".")
#######################
# Load PyTorch Models #
#######################
netG = Generator(
args.n_mel_channels, args.ngf, args.n_residual_layers, ratios=ratios
).to(device)
netD = Discriminator(
args.num_D, args.ndf, args.n_layers_D, args.downsamp_factor
).to(device)
fft = Audio2Mel(
n_mel_channels=args.n_mel_channels,
pad_mode=args.pad_mode,
sampling_rate=sampling_rate,
).to(device)
for model in [netG, netD, fft]:
wandb.watch(model)
#####################
# Create optimizers #
#####################
optG = torch.optim.Adam(netG.parameters(), lr=args.learning_rate, betas=(0.5, 0.9))
optD = torch.optim.Adam(netD.parameters(), lr=args.learning_rate, betas=(0.5, 0.9))
if load_initial_weights:
for model, filenames in [
(netG, ["netG.pt", "netG_prev.pt"]),
(optG, ["optG.pt", "optG_prev.pt"]),
(netD, ["netD.pt", "netD_prev.pt"]),
(optD, ["optD.pt", "optD_prev.pt"]),
]:
recover_model = False
filepath = None
for filename in filenames:
try:
run_path = f"{entity}/{project}/{restore_run_id}"
print(f"Restoring {filename} from run path {run_path}")
restored_file = wandb.restore(filename, run_path=run_path)
filepath = restored_file.name
model = load_state_dict_handleDP(model, filepath)
recover_model = True
break
except RuntimeError as e:
print("RuntimeError", e)
print(f"recover model weight file: '{filename}'' failed")
if not recover_model:
raise RuntimeError(
f"Cannot load model weight files for component {filenames[0]}."
)
else:
# store successfully recovered model weight file ("***_prev.pt")
path_parent = Path(filepath).parent
newfilepath = str(path_parent / filenames[1])
os.rename(filepath, newfilepath)
wandb.save(newfilepath)
if torch.cuda.device_count() > 1:
netG = DP(netG).to(device)
netD = DP(netD).to(device)
fft = DP(fft).to(device)
print(f"We have {torch.cuda.device_count()} gpus. Use data parallel.")
else:
print(f"We have {torch.cuda.device_count()} gpu.")
#######################
# Create data loaders #
#######################
train_set = AudioDataset(
Path(args.data_path) / "train_files.txt",
args.seq_len,
sampling_rate=sampling_rate,
)
test_set = AudioDataset(
Path(args.data_path) / "test_files.txt",
sampling_rate * 4,
sampling_rate=sampling_rate,
augment=False,
)
wandb.save(str(Path(args.data_path) / "train_files.txt"))
wandb.save(str(Path(args.data_path) / "test_files.txt"))
train_loader = DataLoader(train_set, batch_size=args.batch_size, num_workers=4)
test_loader = DataLoader(test_set, batch_size=1)
if len(train_loader) == 0:
raise RuntimeError("Train dataset is empty.")
if len(test_loader) == 0:
raise RuntimeError("Test dataset is empty.")
if not restore_run_id:
steps = wandb.run.step
start_epoch = steps // len(train_loader)
print(f"Starting with epoch {start_epoch} and step {steps}.")
##########################
# Dumping original audio #
##########################
test_voc = []
test_audio = []
samples = []
melImages = []
num_fix_samples = args.n_test_samples - (args.n_test_samples // 2)
cmap = cm.get_cmap("inferno")
for i, x_t in enumerate(test_loader):
x_t = x_t.to(device)
s_t = fft(x_t).detach()
test_voc.append(s_t.to(device))
test_audio.append(x_t)
audio = x_t.squeeze().cpu()
save_sample(root / ("original_%d.wav" % i), sampling_rate, audio)
samples.append(
wandb.Audio(audio, caption=f"sample {i}", sample_rate=sampling_rate)
)
melImage = s_t.squeeze().detach().cpu().numpy()
melImage = (melImage - np.amin(melImage)) / (
np.amax(melImage) - np.amin(melImage)
)
# melImage = Image.fromarray(np.uint8(cmap(melImage)) * 255)
# melImage = melImage.resize((melImage.width * 4, melImage.height * 4))
melImages.append(wandb.Image(cmap(melImage), caption=f"sample {i}"))
if i == num_fix_samples - 1:
break
# if not resume_run_id:
wandb.log({"audio/original": samples}, step=start_epoch)
wandb.log({"mel/original": melImages}, step=start_epoch)
# else:
# print("We are resuming, skipping logging of original audio.")
costs = []
start = time.time()
# enable cudnn autotuner to speed up training
torch.backends.cudnn.benchmark = True
best_mel_reconst = 1000000
for epoch in range(start_epoch, start_epoch + args.epochs + 1):
for iterno, x_t in enumerate(train_loader):
x_t = x_t.to(device)
s_t = fft(x_t).detach()
x_pred_t = netG(s_t.to(device))
with torch.no_grad():
s_pred_t = fft(x_pred_t.detach())
s_error = F.l1_loss(s_t, s_pred_t).item()
#######################
# Train Discriminator #
#######################
D_fake_det = netD(x_pred_t.to(device).detach())
D_real = netD(x_t.to(device))
loss_D = 0
for scale in D_fake_det:
loss_D += F.relu(1 + scale[-1]).mean()
for scale in D_real:
loss_D += F.relu(1 - scale[-1]).mean()
netD.zero_grad()
loss_D.backward()
optD.step()
###################
# Train Generator #
###################
D_fake = netD(x_pred_t.to(device))
loss_G = 0
for scale in D_fake:
loss_G += -scale[-1].mean()
loss_feat = 0
feat_weights = 4.0 / (args.n_layers_D + 1)
D_weights = 1.0 / args.num_D
wt = D_weights * feat_weights
for i in range(args.num_D):
for j in range(len(D_fake[i]) - 1):
loss_feat += wt * F.l1_loss(D_fake[i][j], D_real[i][j].detach())
netG.zero_grad()
(loss_G + args.lambda_feat * loss_feat).backward()
optG.step()
costs.append([loss_D.item(), loss_G.item(), loss_feat.item(), s_error])
wandb.log(
{
"loss/discriminator": costs[-1][0],
"loss/generator": costs[-1][1],
"loss/feature_matching": costs[-1][2],
"loss/mel_reconstruction": costs[-1][3],
},
step=steps,
)
steps += 1
if steps % args.save_interval == 0:
st = time.time()
with torch.no_grad():
samples = []
melImages = []
# fix samples
for i, (voc, _) in enumerate(zip(test_voc, test_audio)):
pred_audio = netG(voc)
pred_audio = pred_audio.squeeze().cpu()
save_sample(
root / ("generated_%d.wav" % i), sampling_rate, pred_audio
)
samples.append(
wandb.Audio(
pred_audio,
caption=f"sample {i}",
sample_rate=sampling_rate,
)
)
melImage = voc.squeeze().detach().cpu().numpy()
melImage = (melImage - np.amin(melImage)) / (
np.amax(melImage) - np.amin(melImage)
)
# melImage = Image.fromarray(np.uint8(cmap(melImage)) * 255)
# melImage = melImage.resize(
# (melImage.width * 4, melImage.height * 4)
# )
melImages.append(
wandb.Image(cmap(melImage), caption=f"sample {i}")
)
wandb.log(
{
"audio/generated": samples,
"mel/generated": melImages,
"epoch": epoch,
},
step=steps,
)
# var samples
source = []
pred = []
pred_mel = []
num_var_samples = args.n_test_samples - num_fix_samples
for i, x_t in enumerate(test_loader):
# source
x_t = x_t.to(device)
audio = x_t.squeeze().cpu()
source.append(
wandb.Audio(
audio, caption=f"sample {i}", sample_rate=sampling_rate
)
)
# pred
s_t = fft(x_t).detach()
voc = s_t.to(device)
pred_audio = netG(voc)
pred_audio = pred_audio.squeeze().cpu()
pred.append(
wandb.Audio(
pred_audio,
caption=f"sample {i}",
sample_rate=sampling_rate,
)
)
melImage = voc.squeeze().detach().cpu().numpy()
melImage = (melImage - np.amin(melImage)) / (
np.amax(melImage) - np.amin(melImage)
)
# melImage = Image.fromarray(np.uint8(cmap(melImage)) * 255)
# melImage = melImage.resize(
# (melImage.width * 4, melImage.height * 4)
# )
pred_mel.append(
wandb.Image(cmap(melImage), caption=f"sample {i}")
)
# stop when reach log sample
if i == num_var_samples - 1:
break
wandb.log(
{
"audio/var_original": source,
"audio/var_generated": pred,
"mel/var_generated": pred_mel,
},
step=steps,
)
print("Saving models ...")
torch.save(netG.state_dict(), root / "netG.pt")
torch.save(optG.state_dict(), root / "optG.pt")
wandb.save(str(root / "netG.pt"))
wandb.save(str(root / "optG.pt"))
torch.save(netD.state_dict(), root / "netD.pt")
torch.save(optD.state_dict(), root / "optD.pt")
wandb.save(str(root / "netD.pt"))
wandb.save(str(root / "optD.pt"))
if np.asarray(costs).mean(0)[-1] < best_mel_reconst:
best_mel_reconst = np.asarray(costs).mean(0)[-1]
torch.save(netD.state_dict(), root / "best_netD.pt")
torch.save(netG.state_dict(), root / "best_netG.pt")
wandb.save(str(root / "best_netD.pt"))
wandb.save(str(root / "best_netG.pt"))
print("Took %5.4fs to generate samples" % (time.time() - st))
print("-" * 100)
if steps % args.log_interval == 0:
print(
"Epoch {} | Iters {} / {} | ms/batch {:5.2f} | loss {}".format(
epoch,
iterno,
len(train_loader),
1000 * (time.time() - start) / args.log_interval,
| np.asarray(costs) | numpy.asarray |
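The discriminator update above is the multi-scale hinge loss, and the generator combines the adversarial term with a feature-matching term weighted by lambda_feat. A self-contained sketch of just the hinge terms on dummy multi-scale outputs (the list-of-feature-lists shapes are assumptions chosen to mirror the loops above):

import torch
import torch.nn.functional as F

# Dummy outputs for 3 discriminator scales: each scale is a list of feature maps,
# and the last entry of each list is the score map, as in the training loop above.
D_real = [[torch.randn(4, 16, 32), torch.randn(4, 1, 32)] for _ in range(3)]
D_fake = [[torch.randn(4, 16, 32), torch.randn(4, 1, 32)] for _ in range(3)]

loss_D = 0.0
for real_scale, fake_scale in zip(D_real, D_fake):
    loss_D = loss_D + F.relu(1 - real_scale[-1]).mean()  # push real scores above +1
    loss_D = loss_D + F.relu(1 + fake_scale[-1]).mean()  # push fake scores below -1

loss_G = sum(-fake_scale[-1].mean() for fake_scale in D_fake)  # generator raises fake scores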
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import json
import pdb
import numpy as np
import tensorflow as tf
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.io import loadmat
from datetime import datetime
from model.vaegan import VAEGAN
from PIL import Image
from iohandler.datareader import find_files
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'datadir', './data/TWKai98_32x32', 'data dir')
tf.app.flags.DEFINE_string(
'architecture', None, 'network architecture')
tf.app.flags.DEFINE_string('logdir', 'logdir', 'log dir')
tf.app.flags.DEFINE_string('checkpoint', None, 'model checkpoint')
def SingleFileReader(filename, shape, rtype='tanh', ext='jpg'):
n, h, w, c = shape
if ext == 'jpg' or ext == 'jpeg':
decoder = tf.image.decode_jpeg
elif ext == 'png':
decoder = tf.image.decode_png
else:
raise ValueError('Unsupported file type: {:s}.'.format(ext) +
' (only *.png and *.jpg are supported)')
filename_queue = tf.train.string_input_producer(filename, shuffle=False)
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)
img = decoder(value, channels=c)
img = tf.image.crop_to_bounding_box(img, 0, 0, h, w)
img = tf.to_float(img)
if rtype == 'tanh':
img = tf.div(img, 127.5) - 1.
imgs = tf.train.batch(
[img],
batch_size=n,
capacity=1)
return imgs, key
def fit_the_shape(x_, shape):
n, h, w, c = shape
x_ = np.reshape(
| np.transpose(x_, [1, 0, 2, 3]) | numpy.transpose |
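fit_the_shape is cut off above, but the transpose-then-reshape idiom it starts is the usual way to lay a batch of images side by side; the target shape below is my assumption, not the original's:

import numpy as np

# Dummy batch of n images, each h x w with c channels.
n, h, w, c = 4, 8, 8, 3
x_ = np.random.rand(n, h, w, c)

# (n, h, w, c) -> (h, n, w, c), then merge the (n, w) axes so the images
# sit next to each other in a single h x (n*w) strip.
strip = np.reshape(np.transpose(x_, [1, 0, 2, 3]), [h, n * w, c])
assert strip.shape == (h, n * w, c)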
import os, pickle, numpy as np
import keras
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.models import Sequential, Model, Input
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import ImageDataGenerator
# import matplotlib.pyplot as plt
final = {}
def read_data(link):
files = os.listdir(link)
files.sort()
idx = 0
for file1 in files:
now = link + file1
final[idx] = file1.split(".")[0]
if idx == 0:
train = np.load(now)
m_lbl = np.array([idx] * train.shape[0])
else:
temp1 = np.load(now)
temp3 = np.array([idx] * temp1.shape[0])
train = np.vstack([train, temp1])
m_lbl = np.hstack([m_lbl, temp3])
idx += 1
print(final)
print(train.shape)
return train, m_lbl
train, m_lbl = read_data("../../col-774-spring-2018/train/")
test = np.load("../../col-774-spring-2018/test/test.npy")
train_x, m_lbl = shuffle(train, m_lbl, random_state=0)
train_y = to_categorical(m_lbl, num_classes=20)
# train_x -= 255
# test -= 255
train_x = np.divide(train_x, 255)
test_x = np.divide(test, 255)
train_x.resize(train_x.shape[0], 28, 28, 1)
test_x.resize(test_x.shape[0], 28, 28, 1)
# Striving for Simplicity: The All Convolutional Net
def conv_pool_cnn(model_input):
x = Conv2D(96, kernel_size=(3, 3), activation='relu', padding='same')(model_input)
x = Conv2D(96, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(96, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
x = Conv2D(192, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(192, (1, 1), activation='relu')(x)
x = Conv2D(20, (1, 1))(x)
x = GlobalAveragePooling2D()(x)
x = Activation(activation='softmax')(x)
model = Model(model_input, x, name='conv_pool_cnn')
return model
model = conv_pool_cnn(Input(shape=(28,28,1)))
data_generator = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
data_generator.fit(train_x)
opt = optimizers.Adam(lr=0.001)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
model.fit_generator(data_generator.flow(train_x, train_y, batch_size=100), steps_per_epoch=1000, epochs=100)
# Save whole model to HDF5
model.save("model_conv_pool100.h5")
print("Saved model to disk")
y_classes = model.predict(test_x)
y_classes =
| np.argmax(y_classes, axis=-1) | numpy.argmax |
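The final predict returns per-class probabilities, and np.argmax over the last axis turns them into integer class indices that can be mapped back to the names collected in final during read_data. A tiny self-contained illustration with dummy names (not the real classes):

import numpy as np

probs = np.array([[0.1, 0.7, 0.2],
                  [0.8, 0.1, 0.1]])
final = {0: 'airplane', 1: 'banana', 2: 'cat'}   # placeholder index -> name mapping

idx = np.argmax(probs, axis=-1)
print([final[i] for i in idx])  # ['banana', 'airplane']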
"""
Functions to run likelihoods, automatically looping over numbers of angular bins.
"""
import glob
import os.path
import time
import warnings
import gaussian_cl_likelihood.python.simulation # https://github.com/robinupham/gaussian_cl_likelihood
import numpy as np
import angular_binning.like_bp_gauss as like_bp
import angular_binning.like_bp_gauss_mix as like_bp_mix
import angular_binning.like_cf_gauss as like_cf
warnings.filterwarnings('error') # terminate on warning
def like_bp_gauss_loop_nbin(grid_dir, n_bps, n_zbin, lmax, lmin_like, lmin_in, fid_pos_pos_dir, fid_she_she_dir,
fid_pos_she_dir, pos_nl_path, she_nl_path, noise_ell_path, pbl_save_dir, obs_bp_save_dir,
inv_cov_save_dir, varied_params, like_save_dir, cov_fsky=1.0):
"""
Run the like_bp_gauss likelihood module over a CosmoSIS grid repeatedly for different numbers of bandpowers, saving
a separate likelihood file for each number of bandpowers.
Args:
grid_dir (str): Path to CosmoSIS grid.
n_bps (list): List of numbers of bandpowers.
n_zbin (int): Number of redshift bins.
lmax (int): Maximum l to use in the likelihood.
lmin_like (int): Minimum l to use in the likelihood.
lmin_in (int): Minimum l supplied in theory power spectra.
fid_pos_pos_dir (str): Path to fiducial position-position power spectra.
fid_she_she_dir (str): Path to fiducial shear-shear power spectra.
fid_pos_she_dir (str): Path to fiducial position-shear power spectra.
pos_nl_path (str): Path to text file containing position noise power spectrum.
she_nl_path (str): Path to text file containing shear noise power spectrum.
noise_ell_path (str): Path to text file containing ells for the noise power spectra.
pbl_save_dir (str): Path to directory into which to save bandpower binning matrices, which are then loaded
inside the likelihood module.
obs_bp_save_dir (str): Path to directory into which to save binned 'observed' (fiducial) power spectra, which
are then loaded inside the likelihood module.
inv_cov_save_dir (str): Path to directory into which to save precomputed inverse bandpower covariance matrices,
which are then loaded inside the likelihood module.
varied_params (list): List of CosmoSIS parameter names whose values are varied across the grid.
like_save_dir (str): Path to directory into which to save likelihood files, one for each number of bandpowers.
cov_fsky (float, optional): If supplied, covariance will be multiplied by 1 / cov_fsky. (Default 1.0.)
"""
print(f'Starting at {time.strftime("%c")}')
# Calculate some useful quantities
n_field = 2 * n_zbin
n_spec = n_field * (n_field + 1) // 2
n_ell_like = lmax - lmin_like + 1
ell_like = np.arange(lmin_like, lmax + 1)
n_ell_in = lmax - lmin_in + 1
assert lmin_in <= lmin_like
# Load fiducial Cls
print(f'Loading fiducial Cls at {time.strftime("%c")}')
fid_cl = like_bp.load_spectra(n_zbin, fid_pos_pos_dir, fid_she_she_dir, fid_pos_she_dir, lmax=lmax,
lmin=lmin_in)[:, lmin_like:]
assert fid_cl.shape == (n_spec, n_ell_like)
# Add noise
print(f'Adding noise at {time.strftime("%c")}')
pos_nl = np.loadtxt(pos_nl_path, max_rows=n_ell_in)[(lmin_like - lmin_in):]
she_nl = np.loadtxt(she_nl_path, max_rows=n_ell_in)[(lmin_like - lmin_in):]
fid_cl[:n_field:2, :] += pos_nl
fid_cl[1:n_field:2, :] += she_nl
# Obs = fid
obs_cl = fid_cl
# Precompute unbinned covariance
cl_covs = np.full((n_ell_like, n_spec, n_spec), np.nan)
for l in range(lmin_like, lmax + 1):
print(f'Calculating Cl covariance l = {l} / {lmax} at {time.strftime("%c")}')
cl_covs[l - lmin_like, :, :] = like_cf.calculate_cl_cov_l(fid_cl[:, l - lmin_like], l, n_field)
assert np.all(
| np.isfinite(cl_covs) | numpy.isfinite |
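The "useful quantities" block maps n_zbin redshift bins to 2 * n_zbin fields (a position and a shear field per bin) and n_field * (n_field + 1) / 2 distinct auto/cross spectra. A quick worked check with hypothetical values, not taken from any particular configuration:

n_zbin = 5
n_field = 2 * n_zbin                    # 10 fields
n_spec = n_field * (n_field + 1) // 2   # 55 spectra

lmin_like, lmax = 2, 2000               # hypothetical ell range
n_ell_like = lmax - lmin_like + 1       # 1999 multipoles in the likelihood
print(n_field, n_spec, n_ell_like)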
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal, assert_equal)
from nose.tools import assert_raises
from pystruct.models import EdgeFeatureGraphCRF
from pystruct.inference.linear_programming import lp_general_graph
from pystruct.inference import compute_energy, get_installed
from pystruct.utils import make_grid_edges, edge_list_to_features
from pystruct.datasets import generate_blocks_multinomial
def test_initialization():
X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
x, y = X[0], Y[0]
n_states = x.shape[-1]
edge_list = make_grid_edges(x, 4, return_lists=True)
edges = np.vstack(edge_list)
edge_features = edge_list_to_features(edge_list)
x = (x.reshape(-1, n_states), edges, edge_features)
y = y.ravel()
crf = EdgeFeatureGraphCRF()
crf.initialize([x], [y])
assert_equal(crf.n_edge_features, 2)
assert_equal(crf.n_features, 3)
| assert_equal(crf.n_states, 3) | numpy.testing.assert_equal |
#####################################################
#####################################################
#####################################################
### Dynamic Relaxation
###
### Written by <NAME> and <NAME>, Last Edited on 10/09/2018
###
### Thanks to Ir. <NAME> for introducing the following reference:
### <NAME>., <NAME>, <NAME>, and <NAME> (2014). Shell Structures for Architecture: Form Finding and Optimization. New York: Routledge.
### "Chapter 2: Review of Dynamic Relaxation with an extension to six degrees of freedom theory" by Adriaenssens
#####################################################
###
### OBJ Reader
###
#####################################################
import copy
# Input data:
# -----------
# filename : the path of the starting obj
filename = 'OBJ_Files/BasicRhinoOutput.obj'
# opening the file
with open(filename, 'r') as myfile:
data = myfile.read().split('\n')
# initiating the lists
xyz = [] # xyz coordinates
vertices = [] # unique xyz coordinates
points = [] # references to vertices
lines = [] # pairs of references to vertices
# parsing the obj file, filling the data lists
for line in data:
# splitting the string line by ' '
parts = line.split()
# checking if it contains data
if not parts:
continue
# assigning the 1st element to head and the rest to tail (for the obj file structure, see: http://paulbourke.net/dataformats/obj/)
head = parts[0]
tail = parts[1:]
# appending vertex coordinates to (xyz) and assigning an index to them in (points)
if head == 'v':
ftail = [float(x) for x in tail]
xyz.append(ftail)
points.append(len(xyz)-1)
# iterating through the edges of the faces and appending them in (lines)
elif head == 'f':
ftail = [float(x) for x in tail]
for i in range(len(ftail)):
sp = ftail[i%len(ftail)]
ep = ftail[(i+1)%len(ftail)]
lines.append((int(sp)-1, int(ep)-1))
# points to vertices (welding similar vertices)
x2v = {}
tol = 0.001 ** 2
# initiating the bounding box max and min point
bbmax = [0,0,0]
bbmin = [0,0,0]
for i, x in enumerate(iter(xyz)):
found = False
for j, v in enumerate(iter(vertices)):
if (x[0] - v[0]) ** 2 < tol \
and (x[1] - v[1]) ** 2 < tol \
and (x[2] - v[2]) ** 2 < tol:
found = True
x2v[i] = j
break
if not found:
x2v[i] = len(vertices)
vertices.append(x)
#finding the bounding box
for i in range(3):
if x[i] > bbmax[i]: bbmax[i] = x[i]
if x[i] < bbmin[i]: bbmin[i] = x[i]
# redefining the indexes
points[:] = [x2v[index] for index in points]
# redefining the edges by the refined indexes
edges = [(x2v[u[0]], x2v[u[1]]) for u in lines]
# preserving the initial state of the system
xyzIS = copy.deepcopy(vertices)
# Output data:
# -----------
# points : List of points
# edges : List of edges
# vertices : List of vertices
# xyzIS : Initial state of the system
# bbmax, bbmin : Bounding Box
#####################################################
###
### Dynamic Relaxation
###
#####################################################
from numpy import array, zeros, float64, append, sqrt, sign
# Input data:
# -----------
# vertices: a list of unique xyz coordinates
# edges: a list of pairs of vertex indices
# points: index list of fixed vertices
# bbmax, bbmin : Bounding Box
m = len(edges)
n = len(vertices)
# fixed points here are set to be the boundary of the rectangle
fixed = []
for i, pnt in enumerate(iter(xyz)):
if abs(pnt[0] - bbmin[0]) < 0.01 or abs(pnt[0] - bbmax[0]) < 0.01:
fixed.append(i)
elif abs(pnt[1] - bbmin[1]) < 0.01 or abs(pnt[1] - bbmax[1]) < 0.01:
fixed.append(i)
free = list(set(range(n)) - set(fixed))
xyz =
| array(vertices) | numpy.array |
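The welding loop above compares every xyz point against every accepted vertex, which is quadratic in the number of points. A common alternative (not what this script does) is to round coordinates to the tolerance and use the rounded tuple as a dictionary key; a small sketch:

# Alternative vertex welding by rounding to a grid of size tol. Not identical to the
# pairwise test at grid boundaries, but linear-time and usually adequate for meshes
# exported with small numerical jitter.
tol = 0.001
xyz_demo = [[0.0, 0.0, 0.0], [0.0004, 0.0, 0.0], [1.0, 2.0, 3.0]]

key_to_index = {}
verts = []
remap = {}
for i, p in enumerate(xyz_demo):
    key = tuple(round(c / tol) for c in p)
    if key not in key_to_index:
        key_to_index[key] = len(verts)
        verts.append(p)
    remap[i] = key_to_index[key]

print(remap)  # {0: 0, 1: 0, 2: 1}
print(verts)  # [[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]]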
import numbers
from typing import Any, List, Sequence
import numpy as np
import torch
from PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION
try:
import accimage
except ImportError:
accimage = None
@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
@torch.jit.unused
def _get_image_size(img: Any) -> List[int]:
if _is_pil_image(img):
return img.size
raise TypeError("Unexpected type {}".format(type(img)))
@torch.jit.unused
def _get_image_num_channels(img: Any) -> int:
if _is_pil_image(img):
return 1 if img.mode == 'L' else 3
raise TypeError("Unexpected type {}".format(type(img)))
@torch.jit.unused
def hflip(img):
"""PRIVATE METHOD. Horizontally flip the given PIL Image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Horizontally flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_LEFT_RIGHT)
@torch.jit.unused
def vflip(img):
"""PRIVATE METHOD. Vertically flip the given PIL Image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Vertically flipped image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
return img.transpose(Image.FLIP_TOP_BOTTOM)
@torch.jit.unused
def adjust_brightness(img, brightness_factor):
"""PRIVATE METHOD. Adjust brightness of an RGB image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (PIL Image): Image to be adjusted.
brightness_factor (float): How much to adjust the brightness. Can be
any non negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image: Brightness adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
@torch.jit.unused
def adjust_contrast(img, contrast_factor):
"""PRIVATE METHOD. Adjust contrast of an Image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (PIL Image): PIL Image to be adjusted.
contrast_factor (float): How much to adjust the contrast. Can be any
non negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image: Contrast adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
@torch.jit.unused
def adjust_saturation(img, saturation_factor):
"""PRIVATE METHOD. Adjust color saturation of an image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (PIL Image): PIL Image to be adjusted.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image: Saturation adjusted image.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
@torch.jit.unused
def adjust_hue(img, hue_factor):
"""PRIVATE METHOD. Adjust hue of an image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (PIL Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image: Hue adjusted image.
"""
if not(-0.5 <= hue_factor <= 0.5):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
input_mode = img.mode
if input_mode in {'L', '1', 'I', 'F'}:
return img
h, s, v = img.convert('HSV').split()
np_h = np.array(h, dtype=np.uint8)
# uint8 addition take cares of rotation across boundaries
with np.errstate(over='ignore'):
np_h += np.uint8(hue_factor * 255)
h = Image.fromarray(np_h, 'L')
img = Image.merge('HSV', (h, s, v)).convert(input_mode)
return img
@torch.jit.unused
def adjust_gamma(img, gamma, gain=1):
r"""PRIVATE METHOD. Perform gamma correction on an image.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (PIL Image): PIL Image to be adjusted.
gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
gamma larger than 1 make the shadows darker,
while gamma smaller than 1 make dark regions lighter.
gain (float): The constant multiplier.
"""
if not _is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if gamma < 0:
raise ValueError('Gamma should be a non-negative real number')
input_mode = img.mode
img = img.convert('RGB')
gamma_map = [(255 + 1 - 1e-3) * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
img = img.point(gamma_map) # use PIL's point-function to accelerate this part
img = img.convert(input_mode)
return img
@torch.jit.unused
def pad(img, padding, fill=0, padding_mode="constant"):
r"""PRIVATE METHOD. Pad the given PIL.Image on all sides with the given "pad" value.
.. warning::
Module ``transforms.functional_pil`` is private and should not be used in user application.
Please, consider instead using methods from `transforms.functional` module.
Args:
img (PIL Image): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If a tuple or list of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple or list of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively. For compatibility reasons
with ``functional_tensor.pad``, if a tuple or list of length 1 is provided, it is interpreted as
a single int.
fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image: Padded image.
"""
if not _is_pil_image(img):
raise TypeError("img should be PIL Image. Got {}".format(type(img)))
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, list):
padding = tuple(padding)
if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
if isinstance(padding, tuple) and len(padding) == 1:
# Compatibility with `functional_tensor.pad`
padding = padding[0]
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if padding_mode == "constant":
opts = _parse_fill(fill, img, "2.3.0", name="fill")
if img.mode == "P":
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, **opts)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, **opts)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, tuple) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, tuple) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
p = [pad_left, pad_top, pad_right, pad_bottom]
cropping = -
| np.minimum(p, 0) | numpy.minimum |
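The reflect and symmetric examples in the pad docstring match NumPy's padding modes of the same names, so they are easy to verify directly:

import numpy as np

x = np.array([1, 2, 3, 4])
print(np.pad(x, 2, mode='reflect'))    # [3 2 1 2 3 4 3 2]
print(np.pad(x, 2, mode='symmetric'))  # [2 1 1 2 3 4 4 3]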
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# NOTES:
# * COULD NOT IMPLEMENT integrator_function in paramClassDefaults (see notes below)
# * NOW THAT NOISE AND INTEGRATION_RATE ARE PROPERTIES THAT DIRECTLY REFERENCE integrator_function,
# SHOULD THEY NOW BE VALIDATED ONLY THERE (AND NOT IN TransferMechanism)??
# * ARE THOSE THE ONLY TWO integrator PARAMS THAT SHOULD BE PROPERTIES??
# ******************************************** TransferMechanism ******************************************************
"""
Contents
--------
* `TransferMechanism_Overview`
* `TransferMechanism_Creation`
* `TransferMechanism_Structure`
- `TransferMechanism_InputPorts`
- `TransferMechanism_Function`
- `TransferMechanism_OutputPorts`
* `TransferMechanism_Execution`
- `TransferMechanism_Integration`
- `TransferMechanism_Termination`
- `TransferMechanism_Reinitialization`
* `TransferMechanism_Class_Reference`
.. _TransferMechanism_Overview:
Overview
--------
A TransferMechanism is a subclass of `ProcessingMechanism` that adds the ability to integrate its input.
Like a ProcessingMechanism, it transforms its input using a simple mathematical function, that maintains the form
(dimensionality) of its input. The input can be a single scalar value, a multidimensional array (list or numpy
array), or several independent ones. The function used to carry out the transformation can be selected from the
following PsyNeuLink `Functions <Function>`: `Linear`, `Exponential`, `Logistic`, or `SoftMax`.
Its **integrator_mode** argument can switch the transformation from an "instantaneous" to a "time averaged"
(integrated) manner of execution. When `integrator_mode <TransferMechanism.integrator_mode>` is set to True, the
mechanism's input is first transformed by its `integrator_function <TransferMechanism.integrator_function>` (
`AdaptiveIntegrator`). That result is then transformed by the mechanism's `function <Mechanism_Base.function>`.
.. _TransferMechanism_Creation:
Creating a TransferMechanism
-----------------------------
A TransferMechanism is created by calling its constructor.
Its `function <Mechanism_Base.function>` is specified in the **function** argument, which can be the name of a
`Function <Function>` class:
>>> import psyneulink as pnl
>>> my_linear_transfer_mechanism = pnl.TransferMechanism(function=pnl.Linear)
in which case all of the function's parameters will be set to their default values. Alternatively, the **function**
argument can be a call to a Function constructor, in which case values may be specified for the Function's parameters:
>>> my_logistic_transfer_mechanism = pnl.TransferMechanism(function=pnl.Logistic(gain=1.0, bias=-4))
.. _TransferMechanism_Integrator_Mode:
Next, the **integrator_mode** argument allows the TransferMechanism to operate in either an "instantaneous" or
"time averaged" manner. By default, `integrator_mode <TransferMechanism.integrator_mode>` is set to False, meaning
execution is instantaneous. In order to switch to time averaging, the **integrator_mode** argument of the constructor
must be set to True.
>>> my_logistic_transfer_mechanism = pnl.TransferMechanism(function=pnl.Logistic(gain=1.0, bias=-4),
... integrator_mode=True)
When `integrator_mode <TransferMechanism.integrator_mode>` is True, the TransferMechanism uses its `integrator_function
<TransferMechanism.integrator_function>` to integrate its variable on each execution. The output of the
`integrator_function <TransferMechanism.integrator_function>` is then used as the input to `function
<Mechanism_Base.function>`.
By default, the `integrator_function <TransferMechanism.integrator_function>` of a TransferMechanism is
`AdaptiveIntegrator`. However, any `IntegratorFunction` can be assigned. A TransferMechanism has three
parameters that
are used by most IntegratorFunctions: `initial_value <TransferMechanism.initial_value>`, `integration_rate
<TransferMechanism.integration_rate>`, and `noise <TransferMechanism.noise>`. If any of these are specified in the
TransferMechanism's constructor, their value is used to specify the corresponding parameter of its `integrator_function
<TransferMechanism.integrator_function>`. In the following example::
>>> my_logistic_transfer_mechanism = pnl.TransferMechanism(function=pnl.Logistic(gain=1.0, bias=-4),
... integrator_mode=True,
... initial_value=np.array([[0.2]]),
... integration_rate=0.1)
``my_logistic_transfer_mechanism`` will be assigned an `AdaptiveIntegrator` (the default) as its
`integrator_function
<TransferMechanism.integrator_function>`, with ``0.2`` as its `initializer <AdaptiveIntegrator.initializer>`
parameter,
and ``0.1`` as its `rate <AdaptiveIntegrator.rate>` parameter. However, in this example::
>>> my_logistic_transfer_mechanism = pnl.TransferMechanism(function=pnl.Logistic(gain=1.0, bias=-4),
... integrator_mode=True,
... integrator_function=AdaptiveIntegrator(rate=0.3),
... initial_value=np.array([[0.2]]),
... integration_rate=0.1)
the AdaptiveIntegrator's `rate <AdaptiveIntegrator.rate>` parameter will be assigned ``0.3``,
and this will also
be assigned to the TransferMechanism's `integration_rate <TransferMechanism.integration_rate>` parameter, overriding
the specified value of ``0.1``.
.. note::
If `integrator_mode <TransferMechanism.integrator_mode>` is False, then the arguments **integration_rate** and
**initial_value** are ignored, as its `integrator_function <TransferMechanism.integrator_function>` is not executed.
When switching between `integrator_mode <TransferMechanism.integrator_mode>` = True and `integrator_mode
<TransferMechanism.integrator_mode>` = False, the behavior of the `integrator_function
<TransferMechanism.integrator_function>` is determined by `on_resume_integrator_mode
<TransferMechanism.on_resume_integrator_mode>`. There are three options for how the `integrator_function
<TransferMechanism.integrator_function>` may resume accumulating when the Mechanism returns to `integrator_mode
<TransferMechanism.integrator_mode>` = True.
* *INSTANTANEOUS_MODE_VALUE* - reinitialize the Mechanism with its own current value,
so that the value computed by the Mechanism during "Instantaneous Mode" is where the
`integrator_function <TransferMechanism.integrator_function>` begins accumulating.
* *INTEGRATOR_MODE_VALUE* - resume accumulation wherever the `integrator_function
<TransferMechanism.integrator_function>` left off the last time `integrator_mode
<TransferMechanism.integrator_mode>` was True.
* *REINITIALIZE* - call the `integrator_function <TransferMechanism.integrator_function>`\\s
`reinitialize <AdaptiveIntegrator.reinitialize>` method, so that accumulation begins at
`initial_value <TransferMechanism.initial_value>`
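For example, the following sketch (with illustrative values, and assuming the keyword is available on the ``pnl``
namespace) configures a Mechanism to resume accumulation wherever its `integrator_function
<TransferMechanism.integrator_function>` left off whenever `integrator_mode <TransferMechanism.integrator_mode>`
is switched back to True::
>>> my_switching_mechanism = pnl.TransferMechanism(integrator_mode=True,                              #doctest: +SKIP
...                                                on_resume_integrator_mode=pnl.INTEGRATOR_MODE_VALUE) #doctest: +SKIP
>>> my_switching_mechanism.integrator_mode = False   # switch to "Instantaneous Mode"                 #doctest: +SKIP
>>> my_switching_mechanism.integrator_mode = True    # resume accumulation where it left off          #doctest: +SKIP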
Finally, the TransferMechanism has two arguments that can adjust the final result of the mechanism: **clip** and
**noise**. If `integrator_mode <TransferMechanism.integrator_mode>` is False, `clip <TransferMechanism.clip>` and
`noise <TransferMechanism.noise>` modify the value returned by the mechanism's `function <Mechanism_Base.function>`
before setting it as the mechanism's value. If `integrator_mode <TransferMechanism.integrator_mode>` is True,
**noise** is assigned to the TransferMechanism's `integrator_function <TransferMechanism.integrator_function>`
(as its `noise <IntegratorFunction.noise>` parameter -- in the same manner as `integration_rate
<TransferMechanism.integration_rate>` and `initial_value <TransferMechanism.intial_value>`), whereas `clip
<TransferMechanism.clip>` modifies the value returned by the mechanism's `function <Mechanism_Base.function>`
before setting it as the TransferMechanism's `value <Mechanism_Base.value>`.
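For example, the following sketch (with illustrative values) adds a fixed offset to the result of the `function
<Mechanism_Base.function>` and bounds it to the interval [0, 1]::
>>> my_bounded_transfer_mechanism = pnl.TransferMechanism(function=pnl.Logistic(),  #doctest: +SKIP
...                                                       noise=0.05,               #doctest: +SKIP
...                                                       clip=[0.0, 1.0])          #doctest: +SKIP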
.. _TransferMechanism_Structure:
Structure
---------
.. _TransferMechanism_InputPorts:
*InputPorts*
~~~~~~~~~~~~~
By default, a TransferMechanism has a single `InputPort`; however, more than one can be specified
using the **default_variable** or **size** arguments of its constructor (see `Mechanism`). The `value
<InputPort.value>` of each InputPort is used as a separate item of the Mechanism's `variable
<Mechanism_Base.variable>`, and transformed independently by its `function <Mechanism_Base.function>`.
Like any InputPorts, the `value <OutputPort.value>` of any or all of the TransferMechanism's InputPorts can be
modulated by one or more `GatingSignals <GatingSignal_Modulation>` prior to transformation by its `function
<Mechanism_Base.function>`.
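For example, the following sketch (with illustrative values) uses **default_variable** to create a TransferMechanism
with two InputPorts, each of which is transformed independently by its `function <Mechanism_Base.function>`::
>>> my_multi_input_mechanism = pnl.TransferMechanism(default_variable=[[0.0, 0.0],   #doctest: +SKIP
...                                                                    [0.0, 0.0]])  #doctest: +SKIP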
.. _TransferMechanism_Function:
*Function*
~~~~~~~~~~
*Function*. The default function for a TransferMechanism is `Linear`. A custom function can be specified in the
**function** argument of the constructor. This can be any PsyNeuLink `Function <Function>` that is a subtype of
either `TransferFunction` or `NormalizationFunction`. It can also be any Python function or method, with the constraint
that it returns an output that is identical in shape to its input; the function or method is "wrapped" as
`UserDefinedFunction`, and assigned as the TransferMechanism's `function <Mechanism_Base.function>` attribute.
The result of the `function <Mechanism_Base.function>` applied to the `value <InputPort.value>` of each InputPort
is:
- appended to an array that represents the TransferMechanism's `value <Mechanism_Base.value>`
- assigned as the `value <OutputPort.value>` of the TransferMechanism's corresponding `OutputPort <OutputPort>`
.. _TransferMechanism_OutputPorts:
*OutputPorts*
~~~~~~~~~~~~~
By default, or if the **output_ports** argument is specified using the keyword *RESULTS*, a TransferMechanism generates
one `OutputPort` for each item in the outer dimension (axis 0) of its `value <Mechanism_Base.value>` (each of which is
the result of the Mechanism's `function <Mechanism_Base.function>` applied to the `value <InputPort.value>` of the
corresponding `InputPort`). If there is only one OutputPort (i.e., the case in which there is only one InputPort and
therefore only one item in Mechanism's `value <Mechanism_Base.value>`), the OutputPort is named *RESULT*. If there is
more than one item in `value <Mechanism_Base.value>`, then an OutputPort is assigned for each; the name of the first
is *RESULT-0*, and the names of the subsequent ones are suffixed with an integer that is incremented for each successive
one (e.g., *RESULT-1*, *RESULT-2*, etc.). Additional OutputPorts can be assigned using the TransferMechanism's
`standard_output_ports <TransferMechanism.standard_output_ports>` (see `OutputPort_Standard`) or by creating `custom
OutputPorts <OutputPort_Customization>` (but see note below). Like any OutputPorts, the `value <OutputPort.value>` of
any or all of these can be modulated by one or more `ControlSignals <ControlSignal_Modulation>` or `GatingSignals
<GatingSignal_Modulation>`.
.. _TransferMechanism_OutputPorts_Note:
.. note::
If any OutputPorts are specified in the **output_ports** argument of the TransferMechanism's constructor,
then, `as with any Mechanism <Mechanism_Default_Port_Suppression_Note>`, its default OutputPorts are not
automatically generated. Therefore, an OutputPort with the appropriate `index <OutputPort.index>` must be
explicitly specified for each and every item of the Mechanism's `value <Mechanism_Base.value>` (corresponding
to each InputPort) for which an OutputPort is needed.
.. _TransferMechanism_Execution:
Execution
---------
COMMENT:
DESCRIBE AS TWO MODES (AKIN TO DDM): INSTANTANEOUS AND TIME-AVERAGED
INSTANTANEOUS:
input transformed in a single `execution <TransferMechanism_Execution>` of the Mechanism)
TIME-AVERAGED:
input transformed using `step-wise` integration, in which each execution returns the result of a subsequent step of the
integration process).
COMMENT
When a TransferMechanism is executed, it transforms its input using its `function <Mechanism_Base.function>` and
the following parameters (in addition to any specified for the `function <Mechanism_Base.function>`):
* `integrator_mode <TransferMechanism.integrator_mode>`: determines whether the input is time-averaged before
passing through the function of the mechanism. When `integrator_mode <TransferMechanism.integrator_mode>` is set
to True, the TransferMechanism integrates its input, by executing its `integrator_function
<TransferMechanism.integrator_function>`, before executing its `function <Mechanism_Base.function>`. When
`integrator_mode <TransferMechanism.integrator_mode>` is False, the `integrator_function
<TransferMechanism.integrator_function>` is ignored, and time-averaging does not occur.
* `integration_rate <TransferMechanism.integration_rate>`: if the `integrator_mode
<TransferMechanism.integrator_mode>`
attribute is set to True, the `integration_rate <TransferMechanism.integration_rate>` attribute is the rate of
integration (a higher value specifies a faster rate); if `integrator_mode <TransferMechanism.integrator_mode>`
is False,
`integration_rate <TransferMechanism.integration_rate>` is ignored and time-averaging does not occur.
* `noise <TransferMechanism.noise>`: applied element-wise to the output of its `integrator_function
<TransferMechanism.integrator_function>` or its `function <Mechanism_Base.function>`, depending on whether
`integrator_mode <TransferMechanism.integrator_mode>` is True or False.
* `clip <TransferMechanism.clip>`: caps all elements of the `function <Mechanism_Base.function>` result by the
lower and upper values specified by clip.
After each execution, the TransferMechanism's `function <Mechanism_Base.function>` -- applied to the `value
<InputPort.value>` of each of its `input_ports <Mechanism_Base.input_ports>` -- generates a corresponding set of
values, each of which is assigned as an item of the Mechanism's `value <Mechanism_Base.value>` attribute, and the
`value <OutputPort.value>` of the corresponding `OutputPort` in its `output_ports <Mechanism_Base.output_ports>`.
.. _TransferMechanism_Integration:
*Integration*
~~~~~~~~~~~~~
If `integrator_mode <TransferMechanism.integrator_mode>` is False (the default), then the TransferMechanism updates its
`value <Mechanism_Base.value>` and the `value <OutputPort.value>` of its `output_ports <Mechanism_Base.output_ports>`
without using its `integrator_function <TransferMechanism.integrator_function>`, as in the following example::
>>> my_mech = pnl.TransferMechanism(size=2)
>>> my_mech.execute([0.5, 1])
array([[0.5, 1. ]])
Notice that the result is the full linear transfer of the input (i.e., no integration occurred).
If `integrator_mode <TransferMechanism.integrator_mode>` is True, then it can be configured to conduct a single
step of integration per execution, or to continue to integrate until its termination condition is met.
A single step of integration is executed if no `termination_threshold <TransferMechanism.termination_threshold>` is
specified (or it is None, the default), as in the following example::
>>> my_mech = pnl.TransferMechanism(size=2,
... integrator_mode=True)
>>> my_mech.execute([0.5, 1])
array([[0.25, 0.5 ]])
>>> my_mech.execute([0.5, 1])
array([[0.375, 0.75 ]])
>>> my_mech.execute([0.5, 1])
array([[0.4375, 0.875 ]])
Notice that every call to ``my_mech.execute`` produces a single step of integration (at the default `integration_rate
<TransferMechanism.integration_rate>` of 0.5). A single step is also executed if the Mechanism's `execute_until_finished
<Component.execute_until_finished>` attribute is set to False, even if **termination_threshold** is specified.
.. _TransferMechanism_Termination:
*Termination*
~~~~~~~~~~~~~
If `integrator_mode <TransferMechanism.integrator_mode>` is True, and **termination_threshold** is specified, then the
TransferMechanism continues to integrate (using its current input) until its termination condition is met, or the number
of executions reaches `max_executions_before_finished <Component.max_executions_before_finished>`. The number of
executions that took place is contained in `num_executions_before_finished <Component.num_executions_before_finished>`.
By default, a TransferMechanism uses a convergence criterion to terminate integration, as in the following example::
>>> my_mech = pnl.TransferMechanism(size=2,
... integrator_mode=True,
... termination_threshold=0.1)
>>> my_mech.execute([0.5, 1])
array([[0.46875, 0.9375 ]])
>>> my_mech.num_executions_before_finished
4
In this case, the single call to ``my_mech.execute`` caused the Mechanism to integrate for 4 steps, until the
difference between its current `value <Mechanism_Base.value>` and its `previous value
<Mechanism_Base.previous_value>` is less than the specified **termination_threshold**. However,
the **termination_measure** and **termination_comparison_op** arguments can be used to configure other termination
conditions. There are two broad types of termination condition.
*Convergence* -- updating terminates based on the difference between the TransferMechanism's current `value
<Mechanism_Base.value>` and its `previous_value <Mechanism_Base.previous_value>` (as in the example above).
This is implemented by specifying **termination_measure** with a function that accepts a 2d array with *two items*
(1d arrays) as its argument, and returns a scalar (the default for a TransferMechanism is the `Distance` Function with
`MAX_ABS_DIFF` as its metric). After each execution, the function is passed the Mechanism's current `value
<Mechanism_Base.value>` as well as its `previous_value <Mechanism_Base.previous_value>`, and the scalar returned is
compared to **termination_threshold** using the comparison operator specified by **termination_comparison_op** (which
is *LESS_THAN_OR_EQUAL* by default). Execution continues until this returns True. Thus, in the example above,
execution continued until the difference between the Mechanism's current `value <Mechanism_Base.value>` and
`previous_value <Mechanism_Base.previous_value>` was less than or equal to 0.1. A `Distance` Function with other
metrics (e.g., *ENERGY* or *ENTROPY*) can be specified as the **termination_measure**, as can any other function that
accepts a single argument that is a 2d array with two entries.
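For example, the following sketch (with an illustrative threshold, and assuming the *ENERGY* keyword is available on
the ``pnl`` namespace) uses the *ENERGY* metric of the `Distance` Function as the convergence measure::
>>> my_energy_mech = pnl.TransferMechanism(size=2,                                              #doctest: +SKIP
...                                        integrator_mode=True,                                #doctest: +SKIP
...                                        termination_measure=pnl.Distance(metric=pnl.ENERGY), #doctest: +SKIP
...                                        termination_threshold=0.01)                          #doctest: +SKIP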
*Boundary termination* -- updating terminates when the TransferMechanism's current `value <Mechanism_Base.value>`
meets the condition specified by the **termination_measure**, **termination_comparison_op** and
**termination_threshold** arguments, without considering its `previous_value <Mechanism_Base.previous_value>`.
This is implemented by specifying **termination_measure** with a function that accepts a 2d array with a *single
entry* as its argument and returns a scalar. After each execution, the function is passed the Mechanism's
current `value <Mechanism_Base.value>`, and the scalar returned is compared to **termination_threshold** using the
comparison operator specified by **termination_comparison_op**. Execution continues until this returns True, as in the
following example::
>>> my_mech = pnl.TransferMechanism(size=2,
... integrator_mode=True,
... termination_measure=max,
... termination_threshold=0.9,
... termination_comparison_op=pnl.GREATER_THAN_OR_EQUAL)
>>> my_mech.execute([0.5, 1])
array([[0.46875, 0.9375 ]])
>>> my_mech.num_executions_before_finished
4
Here, ``my_mech`` continued to execute ``4`` times, until the element of the Mechanism's `value
<Mechanism_Base.value>` with the greatest value exceeded ``0.9``. Note that GREATER_THAN_OR_EQUAL is a keyword for the
string ">=", which is a key in the `comparison_operators` dict for the Python ``operator.ge``; any of these can be
used to specify **termination_comparison_op**).
The values specified for **termination_threshold**, **termination_measure**, and **termination_comparison_op** are
assigned to the TransferMechanism's `termination_threshold <TransferMechanism.termination_threshold>`,
`termination_measure <TransferMechanism.termination_measure>`, and `termination_comparison_op
<TransferMechanism.termination_comparison_op>` attributes, respectively.
.. _TransferMechanism_Reinitialization:
*Reinitialization*
~~~~~~~~~~~~~~~~~~
In some cases, it may be useful to reset the accumulation of a Mechanism back to its original starting point, or a new
starting point. This is done using the `reinitialize <AdaptiveIntegrator.reinitialize>` method on the
mechanism's `integrator_function <TransferMechanism.integrator_function>`, or the mechanisms's own `reinitialize
<Mechanism_Base.reinitialize>` method.
The `reinitialize <AdaptiveIntegrator.reinitialize>` method of the `integrator_function
<TransferMechanism.integrator_function>` sets:
- the integrator_function's `previous_value <AdaptiveIntegrator.previous_value>` attribute and
- the integrator_function's `value <AdaptiveIntegrator.value>` attribute
to the specified value.
The `reinitialize <Mechanism_Base.reinitialize>` method of the `TransferMechanism` first sets:
- the Mechanismn's `previous_value <Mechanism_Base.previous_value>` attribute,
- the integrator_function's `previous_value <AdaptiveIntegrator.previous_value>` attribute, and
- the integrator_function's `value <AdaptiveIntegrator.value>` attribute
to the specified value. Then:
- the specified value is passed into the mechanism's `function <Mechanism_Base.function>` and the function is
executed
- the TransferMechanism's `value <Mechanism_Base.value>` attribute is set to the output of the function
- the TransferMechanism updates its `output_ports <Mechanism_Base.output_ports>`
A use case for `reinitialize <AdaptiveIntegrator.reinitialize>` is demonstrated in the following example:
Create a `System` with a TransferMechanism in integrator_mode:
>>> my_time_averaged_transfer_mechanism = pnl.TransferMechanism(function=pnl.Linear, #doctest: +SKIP
... integrator_mode=True, #doctest: +SKIP
... integration_rate=0.1, #doctest: +SKIP
... initial_value=np.array([[0.2]])) #doctest: +SKIP
>>> my_process = pnl.Process(pathway=[my_time_averaged_transfer_mechanism]) #doctest: +SKIP
>>> my_system = pnl.System(processes=[my_process]) #doctest: +SKIP
Then run the system for 5 trials:
>>> # RUN 1:
>>> my_system.run(inputs={my_time_averaged_transfer_mechanism: [1.0]}, #doctest: +SKIP
... num_trials=5) #doctest: +SKIP
>>> assert np.allclose(my_time_averaged_transfer_mechanism.value, 0.527608) #doctest: +SKIP
After RUN 1, my_time_averaged_transfer_mechanism's integrator_function will preserve its state (its position along its
path of integration).
Run the system again to observe that my_time_averaged_transfer_mechanism's integrator_function continues accumulating
where it left off:
>>> # RUN 2:
>>> my_system.run(inputs={my_time_averaged_transfer_mechanism: [1.0]}, #doctest: +SKIP
... num_trials=5) #doctest: +SKIP
>>> assert np.allclose(my_time_averaged_transfer_mechanism.value, 0.72105725) #doctest: +SKIP
The integrator_function's `reinitialize <AdaptiveIntegrator.reinitialize>` method and the TransferMechanism's
`reinitialize <TransferMechanism.reinitialize>` method are useful in cases when the integration should instead start
over at the original initial value, or a new one.
Use `reinitialize <AdaptiveIntegrator.reinitialize>` to re-start the integrator_function's accumulation at 0.2:
>>> my_time_averaged_transfer_mechanism.integrator_function.reinitialize(np.array([[0.2]])) #doctest: +SKIP
Run the system again to observe that my_time_averaged_transfer_mechanism's integrator_function will begin accumulating
at 0.2, following the exact same trajectory as in RUN 1:
>>> # RUN 3
>>> my_system.run(inputs={my_time_averaged_transfer_mechanism: [1.0]}, #doctest: +SKIP
... num_trials=5) #doctest: +SKIP
>>> assert np.allclose(my_time_averaged_transfer_mechanism.value, 0.527608) #doctest: +SKIP
Because `reinitialize <AdaptiveIntegrator.reinitialize>` was set to 0.2 (its original initial_value),
my_time_averaged_transfer_mechanism's integrator_function effectively started RUN 3 in the same state as it began RUN 1.
As a result, it arrived at the exact same value after 5 trials (with identical inputs).
In the examples above, `reinitialize <AdaptiveIntegrator.reinitialize>` was applied directly to the
integrator function. The key difference between the `integrator_function's reinitialize
<AdaptiveIntegrator.reinitialize>` and the `TransferMechanism's reinitialize <TransferMechanism.reinitialize>` is
that the latter will also execute the mechanism's function and update its output ports. This is useful if the
mechanism's value or any of its OutputPort values will be used or checked *before* the mechanism's next execution. (
This may be true if, for example, the mechanism is `recurrent <RecurrentTransferMechanism>`, the mechanism is
responsible for `modulating <ModulatorySignal_Modulation>` other components, or if a `Scheduler` condition depends on
the mechanism's activity.)
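For example, the same re-start shown above could instead be done at the level of the Mechanism itself (a sketch,
using the same illustrative value), which also re-executes its `function <Mechanism_Base.function>` and updates its
`output_ports <Mechanism_Base.output_ports>`:
>>> my_time_averaged_transfer_mechanism.reinitialize(np.array([[0.2]])) #doctest: +SKIP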
COMMENT:
.. _TransferMechanism_Examples:
Examples
--------
EXAMPLES HERE
COMMENT
.. _TransferMechanism_Class_Reference:
Class Reference
---------------
"""
import copy
import inspect
import numbers
import warnings
import logging
import operator
from collections.abc import Iterable
import numpy as np
import typecheck as tc
from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import function_type, method_type
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import AdaptiveIntegrator
from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import IntegratorFunction
from psyneulink.core.components.functions.function import Function, is_function_type
from psyneulink.core.components.functions.objectivefunctions import Distance
from psyneulink.core.components.functions.selectionfunctions import SelectionFunction
from psyneulink.core.components.functions.transferfunctions import Linear, Logistic, TransferFunction
from psyneulink.core.components.functions.combinationfunctions import LinearCombination, SUM
from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction
from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import _is_control_spec
from psyneulink.core.components.mechanisms.mechanism import Mechanism, MechanismError
from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base
from psyneulink.core.components.ports.inputport import InputPort
from psyneulink.core.components.ports.outputport import OutputPort
from psyneulink.core.globals.context import ContextFlags, handle_external_context
from psyneulink.core.globals.keywords import \
COMBINE, comparison_operators, FUNCTION, INITIALIZER, INSTANTANEOUS_MODE_VALUE, LESS_THAN_OR_EQUAL, \
MAX_ABS_DIFF, NAME, NOISE, OWNER_VALUE, RATE, REINITIALIZE, RESULT, RESULTS, SELECTION_FUNCTION_TYPE, \
TRANSFER_FUNCTION_TYPE, TRANSFER_MECHANISM, VARIABLE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel
from psyneulink.core.globals.utilities import \
all_within_range, append_type_to_name, iscompatible, is_comparison_operator
from psyneulink.core.scheduling.condition import Never
from psyneulink.core.globals.registry import remove_instance_from_registry, register_instance
__all__ = [
'INITIAL_VALUE', 'CLIP', 'INTEGRATOR_FUNCTION', 'INTEGRATION_RATE',
'TERMINATION_THRESHOLD', 'TERMINATION_MEASURE', 'TERMINATION_MEASURE_VALUE',
'Transfer_DEFAULT_BIAS', 'Transfer_DEFAULT_GAIN', 'Transfer_DEFAULT_LENGTH', 'Transfer_DEFAULT_OFFSET',
'TransferError', 'TransferMechanism',
]
# TransferMechanism parameter keywords:
CLIP = "clip"
INTEGRATOR_FUNCTION = 'integrator_function'
INTEGRATION_RATE = "integration_rate"
INITIAL_VALUE = 'initial_value'
TERMINATION_THRESHOLD = 'termination_threshold'
TERMINATION_MEASURE = 'termination_measure'
TERMINATION_MEASURE_VALUE = 'termination_measure_value'
# TransferMechanism default parameter values:
Transfer_DEFAULT_LENGTH = 1
Transfer_DEFAULT_GAIN = 1
Transfer_DEFAULT_BIAS = 0
Transfer_DEFAULT_OFFSET = 0
# Transfer_DEFAULT_RANGE = np.array([])
logger = logging.getLogger(__name__)
class TransferError(Exception):
def __init__(self, error_value):
self.error_value = error_value
def __str__(self):
return repr(self.error_value)
def _integrator_mode_setter(value, owning_component=None, context=None):
if value is True:
if (
not owning_component.parameters.integrator_mode._get(context)
and owning_component.parameters.has_integrated._get(context)
):
if owning_component.integrator_function is not None:
if owning_component.on_resume_integrator_mode == INSTANTANEOUS_MODE_VALUE:
owning_component.reinitialize(owning_component.parameters.value._get(context), context=context)
elif owning_component.on_resume_integrator_mode == REINITIALIZE:
owning_component.reinitialize(context=context)
owning_component._parameter_components.add(owning_component.integrator_function)
owning_component.parameters.has_initializers._set(True, context)
if (
not isinstance(
owning_component.integrator_function,
IntegratorFunction
)
):
owning_component._needs_integrator_function_init = True
elif value is False:
owning_component.parameters.has_initializers._set(False, context)
if not hasattr(owning_component, "reinitialize_when"):
owning_component.reinitialize_when = Never()
return value
# IMPLEMENTATION NOTE: IMPLEMENTS OFFSET PARAM BUT IT IS NOT CURRENTLY BEING USED
class TransferMechanism(ProcessingMechanism_Base):
"""
TransferMechanism( \
integrator_mode=False, \
integrator_function=AdaptiveIntegrator, \
on_resume_integrator_mode=INSTANTANEOUS_MODE_VALUE, \
initial_value=None, \
integration_rate=0.5, \
noise=0.0, \
clip=[float:min, float:max], \
termination_measure=Distance(metric=MAX_ABS_DIFF), \
termination_threshold=None, \
termination_comparison_op=LESS_THAN_OR_EQUAL, \
output_ports=RESULTS \
)
Subclass of `ProcessingMechanism <ProcessingMechanism>` that performs a simple transform of its input.
See `Mechanism <Mechanism_Class_Reference>` for additional arguments and attributes.
Arguments
---------
integrator_mode : bool : False
specifies whether or not the TransferMechanism should be executed using its `integrator_function
<TransferMechanism.integrator_function>` to integrate its `variable <Mechanism_Base.variable>` (
when set to `True`), or simply report the asymptotic value of the output of its `function
<Mechanism_Base.function>` (when set to `False`).
integrator_function : IntegratorFunction : default AdaptiveIntegrator
specifies `IntegratorFunction` to use in `integrator_mode <TransferMechanism.integrator_mode>`.
initial_value : value, list or np.ndarray : default Transfer_DEFAULT_BIAS
specifies the starting value for time-averaged input (only relevant if `integrator_mode
<TransferMechanism.integrator_mode>` is True).
COMMENT:
Transfer_DEFAULT_BIAS SHOULD RESOLVE TO A VALUE
COMMENT
integration_rate : float : default 0.5
specifies the rate of integration of `variable <Mechanism_Base.variable>` when the TransferMechanism is
executed with `integrator_mode` set to `True`.
on_resume_integrator_mode : keyword : default INSTANTANEOUS_MODE_VALUE
specifies how the `integrator_function <TransferMechanism.integrator_function>` should resume its accumulation
when the Mechanism was most recently in "Instantaneous Mode" (`integrator_mode
<TransferMechanism.integrator_mode>` = False) and has just switched to "IntegratorFunction Mode"
(`integrator_mode <TransferMechanism.integrator_mode>` = True); can be one of the following keywords:
* *INSTANTANEOUS_MODE_VALUE* - reinitialize the Mechanism with its own current value,
so that the value computed by the Mechanism during "Instantaneous Mode" is where the
`integrator_function <TransferMechanism.integrator_function>` begins accumulating.
* *INTEGRATOR_MODE_VALUE* - resume accumulation wherever the `integrator_function
<TransferMechanism.integrator_function>` left off the last time `integrator_mode
<TransferMechanism.integrator_mode>` was True.
* *REINITIALIZE* - call the `integrator_function <TransferMechanism.integrator_function>`\\s
`reinitialize <AdaptiveIntegrator.reinitialize>` method, so that accumulation begins at
`initial_value <TransferMechanism.initial_value>`
noise : float or function : default 0.0
specifies a value to be added to the result of the TransferMechanism's `function <Mechanism_Base.function>`
or its `integrator_function <TransferMechanism.integrator_function>`, depending on whether `integrator_mode
<TransferMechanism.integrator_mode>` is `True` or `False`. See `noise <TransferMechanism.noise>` for details.
clip : list [float, float] : default None (Optional)
specifies the allowable range for the result of `function <Mechanism_Base.function>`. The item in index 0
specifies the minimum allowable value of the result, and the item in index 1 specifies the maximum allowable
value; any element of the result that exceeds the specified minimum or maximum value is set to the value of
`clip <TransferMechanism.clip>` that it exceeds.
termination_measure : function : default Distance(metric=MAX_ABS_DIFF)
specifies metric used to determine when execution of TransferMechanism is complete if `execute_until_finished
<Component.execute_until_finished>` is True. Must take at least one argument, and optionally a second,
both of which must be arrays, and must return either another array or a scalar; see `termination_measure
<TransferMechanism.termination_measure>` for additional details.
termination_threshold : None or float : default None
specifies value against which `termination_measure_value <TransferMechanism.termination_measure_value>` is
compared to determine when execution of TransferMechanism is complete; see `termination_measure
<TransferMechanism.termination_measure>` for additional details.
termination_comparison_op : comparator keyword : default LESS_THAN_OR_EQUAL
specifies how `termination_measure_value <TransferMechanism.termination_measure_value>` is compared with
`termination_threshold <TransferMechanism.termination_threshold>` to determine when execution of
TransferMechanism is complete; see `termination_measure <TransferMechanism.termination_measure>`
for additional details.
output_ports : str, list or np.ndarray : default RESULTS
specifies the OutputPorts for the TransferMechanism; the keyword **RESULTS** (the default) specifies that
one OutputPort be generated for each InputPort specified in the **input_ports** argument (see
`TransferMechanism_OutputPorts` for additional details, and note <TransferMechanism_OutputPorts_Note>` in
particular).
Attributes
----------
integrator_mode : bool
determines whether the TransferMechanism uses its `integrator_function <TransferMechanism.integrator_function>`
to integrate its `variable <Mechanism_Base.variable>` when it executes.
**If integrator_mode is set to** `True`:
the TransferMechanism's `variable <TransferMechanism>` is first passed to its `integrator_function
<TransferMechanism.integrator_function>`, and then the result is passed to the TransferMechanism's
`function <Mechanism_Base.function>` which computes the TransferMechanism's `value
<Mechanism_Base.value>`.
.. note::
The TransferMechanism's `integration_rate <TransferMechanism.integration_rate>`, `noise
<TransferMechanism.noise>`, and `initial_value <TransferMechanism.initial_value>` parameters
specify the respective parameters of its `integrator_function <TransferMechanism.integrator_function>`
(with `initial_value <TransferMechanism.initial_value>` corresponding to `initializer
<IntegratorFunction.initializer>` and `integration_rate <TransferMechanism.integration_rate>`
corresponding to `rate <IntegratorFunction.rate>` of `integrator_function
<TransferMechanism.integrator_function>`). However, if there are any disagreements between these
(e.g., any of these parameters is specified in the constructor for an `IntegratorFunction` assigned
as the **integrator_function** arg of the TransferMechanism), the values specified for the
`integrator_function <TransferMechanism.integrator_function>` take precedence, and their value(s) are
assigned as those of the corresponding parameters on the TransferMechanism.
**If integrator_mode is set to** `False`:
if `noise <TransferMechanism.noise>` is non-zero, it is applied to the TransferMechanism's `variable
<TransferMechanism>` which is then passed directly to its `function <Mechanism_Base.function>`
-- that is, its `integrator_function <TransferMechanism.integrator_function>` is bypassed,
and its related attributes (`initial_value <TransferMechanism.initial_value>` and
`integration_rate <TransferMechanism.integration_rate>`) are ignored.
integrator_function : IntegratorFunction
the `IntegratorFunction` used when `integrator_mode <TransferMechanism.integrator_mode>` is set to
`True` (see `integrator_mode <TransferMechanism.integrator_mode>` for details).
initial_value : value, list or np.ndarray
determines the starting value for the `integrator_function <TransferMechanism.integrator_function>`; only
relevant if `integrator_mode <TransferMechanism.integrator_mode>` is `True` and `integration_rate
<TransferMechanism.integration_rate>` is not 1.0 (see `integrator_mode <TransferMechanism.integrator_mode>`
for additional details).
integration_rate : float
the rate at which the TransferMechanism's `variable <TransferMechanism>` is integrated when it is executed with
`integrator_mode <TransferMechanism.integrator_mode>` set to `True` (see `integrator_mode
<TransferMechanism.integrator_mode>` for additional details).
on_resume_integrator_mode : keyword
specifies how the `integrator_function <TransferMechanism.integrator_function>` should resume its accumulation
when the Mechanism was most recently in "Instantaneous Mode" (integrator_mode = False) and has just switched to
"IntegratorFunction Mode" (integrator_mode = True). There are three options:
(1) INSTANTANEOUS_MODE_VALUE - reinitialize the Mechanism with its own current value, so that the value computed by
the Mechanism during "Instantaneous Mode" is where the `integrator_function
<TransferMechanism.integrator_function>` begins accumulating.
(2) INTEGRATOR_MODE_VALUE - resume accumulation wherever the `integrator_function
<TransferMechanism.integrator_function>` left off the last time `integrator_mode
<TransferMechanism.integrator_mode>` was True.
(3) REINITIALIZE - call the `integrator_function's <TransferMechanism.integrator_function>` `reinitialize method
<AdaptiveIntegrator.reinitialize>` so that the Mechanism's accumulation begins at `initial_value
<TransferMechanism.initial_value>`
noise : float or function
When `integrator_mode <TransferMechanism.integrator_mode>` is set to `True`, `noise <TransferMechanism.noise>`
is passed into the `integrator_function <TransferMechanism.integrator_function>` (see `integrator_mode
<TransferMechanism.integrator_mode>` for additional details). Otherwise, noise is added to the output of the
`function <Mechanism_Base.function>`. If `noise <TransferMechanism.noise>` is a list or array,
it must be the same length as `variable <TransferMechanism.default_variable>`. If `noise
<TransferMechanism.noise>` is specified as a single float or function, while `variable
<Mechanism_Base.variable>` is a list or array, `noise <TransferMechanism.noise>` will be applied to each
element of `variable <Mechanism_Base.variable>`. In the case that `noise <TransferMechanism.noise>` is
specified as a function, the function will be executed separately for each element of `variable
<Mechanism_Base.variable>`.
.. note::
In order to generate random noise, a probability distribution function should be used (see `Distribution
Functions <DistributionFunction>` for details), that will generate a new noise value from its
distribution on each execution. If `noise <TransferMechanism.noise>` is specified as a float or as a
function with a fixed output, then the noise will simply be an offset that remains the same across all
executions.
clip : list [float, float]
specifies the allowable range for the result of `function <Mechanism_Base.function>`. The 1st item (index
0) specifies the minimum allowable value of the result, and the 2nd item (index 1) specifies the maximum
allowable value; any element of the result that exceeds the specified minimum or maximum value is set to
the value of `clip <TransferMechanism.clip>` that it exceeds.
termination_measure : function
used to determine when execution of the TransferMechanism is complete (i.e., `is_finished` is True), if
`execute_until_finished <Component.execute_until_finished>` is True. It is passed the `value
<Mechanism_Base.value>` and `previous_value <Mechanism_Base.previous_value>` of the TransferMechanism;
its result (`termination_measure_value <TransferMechanism.termination_measure_value>`) is compared with
`termination_threshold <TransferMechanism.termination_threshold>` using
`TransferMechanism.termination_comparison_op`, the result of which is used as the value of `is_finished`.
.. note::
A Mechanism's `previous_value` attribute is distinct from the `previous_value
<AdaptiveIntegrator.previous_value>` attribute of its `integrator_function
<Mechanism_Base.integrator_function>`.
termination_measure_value : array or scalar
value returned by `termination_measure <TransferMechanism.termination_measure>`; used to determine when
`is_finished` is True.
termination_threshold : None or float
value with which `termination_measure_value <TransferMechanism.termination_measure_value>` is compared to
determine when execution of TransferMechanism is complete if `execute_until_finished
<Component.execute_until_finished>` is True.
termination_comparison_op : Comparator
used to compare `termination_measure_value <TransferMechanism.termination_measure_value>` with
`termination_threshold <TransferMechanism.termination_threshold>` to determine when execution of
TransferMechanism is complete if `execute_until_finished <Component.execute_until_finished>` is True.
standard_output_ports : list[dict]
list of `Standard OutputPort <OutputPort_Standard>` that includes the following in addition to the
`standard_output_ports <ProcessingMechanism.standard_output_ports>` of a
`ProcessingMechanism <ProcessingMechanism>`:
.. _COMBINE:
*COMBINE* : 1d array
Element-wise (Hadamard) sum of all items of the TransferMechanism's `value <Mechanism_Base.value>`
(requires that they all have the same dimensionality).
Returns
-------
instance of TransferMechanism : TransferMechanism
"""
componentType = TRANSFER_MECHANISM
classPreferenceLevel = PreferenceLevel.SUBTYPE
# These will override those specified in TYPE_DEFAULT_PREFERENCES
# classPreferences = {
# PREFERENCE_SET_NAME: 'TransferCustomClassPreferences',
# # REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE),
# }
# TransferMechanism parameter and control signal assignments):
paramClassDefaults = ProcessingMechanism_Base.paramClassDefaults.copy()
paramClassDefaults.update({NOISE: None})
standard_output_ports = ProcessingMechanism_Base.standard_output_ports.copy()
standard_output_ports.extend([{NAME: COMBINE,
VARIABLE: OWNER_VALUE,
FUNCTION: LinearCombination(operation=SUM).function}])
standard_output_port_names = ProcessingMechanism_Base.standard_output_port_names.copy()
standard_output_port_names.extend([COMBINE])
class Parameters(ProcessingMechanism_Base.Parameters):
"""
Attributes
----------
clip
see `clip <TransferMechanism.clip>`
:default value: None
:type:
initial_value
see `initial_value <TransferMechanism.initial_value>`
:default value: None
:type:
integration_rate
see `integration_rate <TransferMechanism.integration_rate>`
:default value: 0.5
:type: float
integrator_function
see `integrator_function <TransferMechanism.integrator_function>`
:default value: `AdaptiveIntegrator`
:type: `Function`
integrator_function_value
see `integrator_function_value <TransferMechanism.integrator_function_value>`
:default value: [[0]]
:type: list
:read only: True
integrator_mode
see `integrator_mode <TransferMechanism.integrator_mode>`
:default value: False
:type: bool
noise
see `noise <TransferMechanism.noise>`
:default value: 0.0
:type: float
on_resume_integrator_mode
see `on_resume_integrator_mode <TransferMechanism.on_resume_integrator_mode>`
:default value: `INSTANTANEOUS_MODE_VALUE`
:type: str
termination_measure
see `termination_measure <TransferMechanism.termination_measure>`
:default value: Distance(metric=MAX_ABS_DIFF))
:type: function
termination_measure_value
see `termination_measure_value <TransferMechanism.termination_measure_value>`
:default value: None
:type: float or array
termination_threshold
see `termination_threshold <TransferMechanism.termination_threshold>`
:default value: None
:type: float or int
termination_comparison_op
see `termination_comparison_op <TransferMechanism.termination_comparison_op>`
:default value: LESS_THAN_OR_EQUAL
:type: str
"""
integrator_mode = Parameter(False, setter=_integrator_mode_setter)
integration_rate = Parameter(0.5, modulable=True)
initial_value = None
integrator_function = Parameter(AdaptiveIntegrator, stateful=False, loggable=False)
integrator_function_value = Parameter([[0]], read_only=True)
has_integrated = Parameter(False, user=False)
on_resume_integrator_mode = Parameter(INSTANTANEOUS_MODE_VALUE, stateful=False, loggable=False)
clip = None
noise = Parameter(0.0, modulable=True)
termination_measure = Parameter(Distance(metric=MAX_ABS_DIFF), modulable=False, stateful=False, loggable=False)
termination_threshold = Parameter(None, modulable=True)
termination_comparison_op = Parameter(operator.le, modulable=False, loggable=False)
termination_measure_value = Parameter(0.0, modulable=False, read_only=True)
output_ports = Parameter(
[RESULTS],
stateful=False,
loggable=False,
read_only=True,
structural=True,
)
def _validate_integrator_mode(self, integrator_mode):
if not isinstance(integrator_mode, bool):
return 'may only be True or False.'
def _validate_termination_metric(self, termination_measure):
if not is_function_type(termination_measure):
return 'must be a function.'
def _parse_termination_metric(self, termination_measure):
if isinstance(termination_measure, type):
return termination_measure()
def _validate_termination_threshold(self, termination_threshold):
if (termination_threshold is not None
and not isinstance(termination_threshold, (int, float))):
return 'must be a float or int.'
def _validate_termination_comparison_op(self, termination_comparison_op):
if (not termination_comparison_op in comparison_operators.keys()
and not termination_comparison_op in comparison_operators.values()):
return f"must be boolean comparison operator or one of the following strings:" \
f" {','.join(comparison_operators.keys())}."
@tc.typecheck
def __init__(self,
default_variable=None,
size=None,
input_ports:tc.optional(tc.any(Iterable, Mechanism, OutputPort, InputPort))=None,
function=Linear,
integrator_mode=False,
integrator_function=AdaptiveIntegrator,
initial_value=None,
integration_rate=0.5,
on_resume_integrator_mode=INSTANTANEOUS_MODE_VALUE,
noise=0.0,
clip=None,
termination_measure=Distance(metric=MAX_ABS_DIFF),
termination_threshold:tc.optional(float)=None,
termination_comparison_op:tc.any(str, is_comparison_operator)=LESS_THAN_OR_EQUAL,
output_ports:tc.optional(tc.any(str, Iterable))=None,
params=None,
name=None,
prefs:is_pref_set=None,
**kwargs):
"""Assign type-level preferences and call super.__init__
"""
# Default output_ports is specified in constructor as a string rather than a list
# to avoid "gotcha" associated with mutable default arguments
# (see: bit.ly/2uID3s3 and http://docs.python-guide.org/en/latest/writing/gotchas/)
if output_ports is None or output_ports is RESULTS:
output_ports = [RESULTS]
initial_value = self._parse_arg_initial_value(initial_value)
self.integrator_function = integrator_function or AdaptiveIntegrator # In case any subclass set it to None
params = self._assign_args_to_param_dicts(function=function,
initial_value=initial_value,
noise=noise,
integration_rate=integration_rate,
integrator_mode=integrator_mode,
clip=clip,
termination_measure=termination_measure,
termination_threshold=termination_threshold,
termination_comparison_op=termination_comparison_op,
integrator_function=integrator_function,
params=params)
self.on_resume_integrator_mode = on_resume_integrator_mode
# self.integrator_function = None
self.has_integrated = False
self._current_variable_index = 0
# this is checked during execution to see if integrator_mode was set
# to True after initialization
self._needs_integrator_function_init = False
super(TransferMechanism, self).__init__(default_variable=default_variable,
size=size,
input_ports=input_ports,
output_ports=output_ports,
function=function,
params=params,
name=name,
prefs=prefs,
**kwargs)
def _parse_arg_initial_value(self, initial_value):
return self._parse_arg_variable(initial_value)
def _validate_params(self, request_set, target_set=None, context=None):
"""Validate FUNCTION and Mechanism params
"""
super()._validate_params(request_set=request_set, target_set=target_set, context=context)
# Validate FUNCTION
if FUNCTION in target_set:
transfer_function = target_set[FUNCTION]
transfer_function_class = None
# FUNCTION is a Function
if isinstance(transfer_function, Function):
transfer_function_class = transfer_function.__class__
# FUNCTION is a class
elif inspect.isclass(transfer_function):
transfer_function_class = transfer_function
if issubclass(transfer_function_class, Function):
if not issubclass(transfer_function_class, (TransferFunction, SelectionFunction, UserDefinedFunction)):
raise TransferError("Function specified as {} param of {} ({}) must be a {}".
format(repr(FUNCTION), self.name, transfer_function_class.__name__,
" or ".join([TRANSFER_FUNCTION_TYPE, SELECTION_FUNCTION_TYPE])))
elif not isinstance(transfer_function, (function_type, method_type)):
raise TransferError("Unrecognized specification for {} param of {} ({})".
format(repr(FUNCTION), self.name, transfer_function))
# FUNCTION is a function or method, so test that shape of output = shape of input
if isinstance(transfer_function, (function_type, method_type, UserDefinedFunction)):
var_shape = self.defaults.variable.shape
if isinstance(transfer_function, UserDefinedFunction):
val_shape = transfer_function._execute(self.defaults.variable, context=context).shape
else:
val_shape = np.array(transfer_function(self.defaults.variable, context=context)).shape
if val_shape != var_shape:
raise TransferError("The shape ({}) of the value returned by the Python function, method, or UDF "
"specified as the {} param of {} must be the same shape ({}) as its {}".
format(val_shape, repr(FUNCTION), self.name, var_shape, repr(VARIABLE)))
# Validate INITIAL_VALUE
if INITIAL_VALUE in target_set and target_set[INITIAL_VALUE] is not None:
initial_value = np.array(target_set[INITIAL_VALUE])
# Need to compare with variable, since default for initial_value on Class is None
if initial_value.dtype != object:
initial_value = np.atleast_2d(initial_value)
if not iscompatible(initial_value, self.defaults.variable):
raise TransferError(
"The format of the initial_value parameter for {} ({}) must match its variable ({})".
format(append_type_to_name(self), initial_value, self.defaults.variable,
)
)
# FIX: SHOULD THIS (AND INTEGRATION_RATE) JUST BE VALIDATED BY INTEGRATOR FUNCTION NOW THAT THEY ARE PROPERTIES??
# Validate NOISE:
if NOISE in target_set:
noise = target_set[NOISE]
# If assigned as a Function, set TransferMechanism as its owner, and assign its actual function to noise
if isinstance(noise, DistributionFunction):
noise.owner = self
target_set[NOISE] = noise.execute
self._validate_noise(target_set[NOISE])
# Validate INTEGRATOR_FUNCTION:
if INTEGRATOR_FUNCTION in target_set:
integtr_fct = target_set[INTEGRATOR_FUNCTION]
if not (isinstance(integtr_fct, IntegratorFunction)
or (isinstance(integtr_fct, type) and issubclass(integtr_fct, IntegratorFunction))):
raise TransferError("The function specified for the {} arg of {} ({}) must be an {}".
format(repr(INTEGRATOR_FUNCTION), self.name, integtr_fct),
IntegratorFunction.__class__.__name__)
# Validate INTEGRATION_RATE:
if INTEGRATION_RATE in target_set and target_set[INTEGRATION_RATE] is not None:
integration_rate = np.array(target_set[INTEGRATION_RATE])
if not all_within_range(integration_rate, 0, 1):
raise TransferError("Value(s) in {} arg for {} ({}) must be an int or float in the interval [0,1]".
format(repr(INTEGRATION_RATE), self.name, integration_rate, ))
if (not np.isscalar(integration_rate.tolist())
and integration_rate.shape != self.defaults.variable.squeeze().shape):
raise TransferError("{} arg for {} ({}) must be either an int or float, "
"or have the same shape as its {} ({})".
format(repr(INTEGRATION_RATE), self.name, integration_rate,
VARIABLE, self.defaults.variable))
# Validate CLIP:
if CLIP in target_set and target_set[CLIP] is not None:
clip = target_set[CLIP]
if clip:
if not (isinstance(clip, (list,tuple)) and len(clip)==2 and all(isinstance(i, numbers.Number)
for i in clip)):
raise TransferError("clip parameter ({}) for {} must be a tuple with two numbers".
format(clip, self.name))
if not clip[0] < clip[1]:
raise TransferError("The first item of the clip parameter ({}) must be less than the second".
format(clip, self.name))
target_set[CLIP] = list(clip)
def _validate_noise(self, noise):
# Noise is a list or array
if isinstance(noise, (np.ndarray, list)):
if len(noise) == 1:
pass
# Variable is a list/array
elif not iscompatible(np.atleast_2d(noise), self.defaults.variable) and len(noise) > 1:
raise MechanismError(
"Noise parameter ({}) does not match default variable ({}). Noise parameter of {} must be specified"
" as a float, a function, or an array of the appropriate shape ({})."
.format(noise, self.defaults.variable, self.name, np.shape(
|
np.array(self.defaults.variable)
|
numpy.array
|
import numpy as np
from . import ReadData as data
from sympy import *
def ElasticConstants(stresses,strains):
stresses = data.ReadElastic(stresses)
strains = data.ReadElastic(strains)
stresses = stresses.get_tensor()
strains = strains.get_tensor()
stresses = stresses * 2 #Ha to Ry
C11, C12, C13, C14, C15, C16 = symbols('C11 C12 C13 C14 C15 C16')
C22, C23, C24, C25, C26 = symbols('C22 C23 C24 C25 C26')
C33, C34, C35, C36 = symbols('C33 C34 C35 C36')
C44, C45, C46 = symbols('C44 C45 C46')
C55, C56 = symbols('C55 C56')
C66 = symbols('C66')
EC_matrix = Matrix([[C11, C12, C13, C14, C15, C16],[C12, C22, C23, C24, C25, C26], [C13, C23, C33, C34, C35, C36],[C14, C24, C34, C44, C45, C46],[C15, C25, C35, C45, C55, C56],[C16, C26, C36, C46, C56, C66]])
e_vec = []
t_vec = []
constants = []
#Initialize stress const arrays
C11_ = []
C12_ = []
C13_ = []
C14_ = []
C15_ = []
C16_ = []
C22_ = []
C23_ = []
C24_ = []
C25_ = []
C26_ = []
C33_ = []
C34_ = []
C35_ = []
C36_ = []
C44_ = []
C45_ = []
C46_ = []
C55_ = []
C56_ = []
C66_ = []
bohr2ang = 0.529177  # Bohr radius in Angstrom
ang2m = 1E10
ry2joule = 2.179872E-18
pa2kbar = 1E8
fit_x = []
fit_y1 = []
fit_y2 = []
fit_y3 = []
fit_y4 = []
fit_y5 = []
fit_y6 = []
#Get range of stress/strains to fit over
e_range = np.arange(np.min(strains) - 0.0005,np.max(strains) + 0.0005 ,0.0005)
t_range1 = []
t_range2 = []
t_range3 = []
t_range4 = []
t_range5 = []
t_range6 = []
new_strain = []
new_stress = []
#This will loop to create unique fits based on the strain being applied
for i in range(np.size(strains,0)):
tmp = stresses[i,:]
t_vec = np.append(t_vec,tmp)
tmp2 = strains[i,:]
e_vec = np.append(e_vec,tmp2)
if np.mod(i+1,3) == 0:
e_vec = np.reshape(e_vec,(3,3))
t_vec = np.reshape(t_vec,(3,3))
tag1 = sum(np.concatenate(np.nonzero(e_vec)))
tag2 = np.size(np.nonzero(e_vec))
#STRAIN 1
if tag1 == 0 and tag2 == 2:
fit_x = np.append(fit_x, e_vec[0,0])
fit_y1 = np.append(fit_y1, t_vec[0,0]) #C11
fit_y2 = np.append(fit_y2, t_vec[1,1]) #C21
fit_y3 = np.append(fit_y3, t_vec[2,2]) #C31
fit_y4 = np.append(fit_y4, t_vec[2,1]) #C41
fit_y5 = np.append(fit_y5, t_vec[0,2]) #C51
fit_y6 = np.append(fit_y6, t_vec[0,1]) #C61
if len(fit_x) == 4:
p1 = np.polyfit(fit_x, fit_y1,2)
p2 = np.polyfit(fit_x, fit_y2,2)
p3 = np.polyfit(fit_x, fit_y3,2)
p4 =
|
np.polyfit(fit_x, fit_y4,2)
|
numpy.polyfit
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import chumpy as ch
from lib.geometry import visible_boundary_edge_verts
from vendor.smplify.robustifiers import GMOf
def plucker(rays):
p = rays[:, 0]
n = rays[:, 1] - rays[:, 0]
n /= np.linalg.norm(n, axis=1).reshape(-1, 1)
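# Plücker line representation: together with the unit direction n, the moment m = p x n
# (cross product of a point on the line with its direction) identifies each ray's line.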
m =
|
np.cross(p, n, axisa=1, axisb=1)
|
numpy.cross
|
import matplotlib
from matplotlib.patches import Rectangle, Patch
import numpy as np
import matplotlib.pyplot as plt
import csv
import cv2
import os
from matplotlib.colors import LinearSegmentedColormap
import scipy.stats as stats
import seaborn as sns
from matplotlib import cm
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
import subprocess
def filenames_from_folder(folder, filename_starts_with = None, filename_contains = None, filename_ends_with = None, filename_does_not_contain = None):
'''
Function that returns the filenames contained inside a folder.
The function can be provided with arguments to specify which files to look for. This includes what the filenames start and end with, as well as if something is contained in the filename.
'''
# Return the names of all the files in the folder
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in os.listdir(folder)]
# Check if filename_starts_with was given
if filename_starts_with != None:
# Return the names of all the files that start with ...
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in filenames if filename.startswith(filename_starts_with)]
# Check if filename_contains was given
if filename_contains != None:
if isinstance(filename_contains, list):
for item in filename_contains:
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in filenames if item in filename]
else:
# Return the names of all the files that contain ...
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in filenames if filename_contains in filename]
# Check if filename_ends_with was given
if filename_ends_with != None:
# Return the names of all the files that end with ...
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in filenames if filename.endswith(filename_ends_with)]
# Check if filename_does_not_contain was given
if filename_does_not_contain != None:
if isinstance(filename_does_not_contain, list):
for item in filename_does_not_contain:
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in filenames if item not in os.path.basename(filename)]
else:
# Return the names of all the files that do not contain ...
filenames = ['{0}\{1}'.format(folder, os.path.basename(filename)) for filename in filenames if filename_does_not_contain not in filename]
return filenames
def convert_video(video_filename):
ffmpeg_command = f"ffmpeg -y -i \"{video_filename}\" -vcodec mjpeg \"{video_filename[:-4]}_converted.avi\""
return subprocess.call(ffmpeg_command)
def extract_tracking_points_from_csv(csv_file):
with open(csv_file) as f:
tracking_points = [[column for column in row if len(column) > 0] for row in csv.reader(f)]
tracking_points = np.array([np.array([np.array([point.split(', ') for point in fish_data[3:-3].split(")', '(")]).astype(float) for fish_data in data_point]) for data_point in tracking_points])
return tracking_points
def extract_tail_curvature_from_csv(csv_file):
with open(csv_file) as f:
tail_curvature = [[column for column in row if len(column) > 0] for row in csv.reader(f)]
tail_curvature = np.array([np.array([fish_data[1:-1].split(", ") for fish_data in data_point]).astype(float) for data_point in tail_curvature])
return tail_curvature
def reorder_data_for_identities_tail_curvature(tracking_data, tail_curvature, n_fish, n_tracking_points):
new_tracking_data = np.zeros((len(tracking_data), n_fish, n_tracking_points, 2))
new_tail_curvature = np.zeros((len(tail_curvature), n_fish, n_tracking_points - 1))
for i in range(len(tracking_data) - 1):
if i == 0:
first_tracking_data_arr = tracking_data[0]
new_tracking_data[0] = tracking_data[0]
new_tail_curvature[0] = tail_curvature[0]
second_tracking_data_arr = tracking_data[i + 1]
tail_curvature_arr = tail_curvature[i + 1]
new_tracking_data_arr, new_tail_curvature_arr, new_order = find_identities_tail_curvature(first_tracking_data_arr, second_tracking_data_arr, tail_curvature_arr, n_fish, n_tracking_points)
new_tracking_data[i + 1] = new_tracking_data_arr[new_order[1]]
new_tail_curvature[i + 1] = new_tail_curvature_arr[new_order[1]]
first_tracking_data_arr = new_tracking_data[i + 1]
return [new_tracking_data, new_tail_curvature]
def find_identities_tail_curvature(first_tracking_data_arr, second_tracking_data_arr, tail_curvature_arr, n_fish, n_tracking_points):
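    # Match fish between consecutive frames by solving the assignment problem
    # (Hungarian algorithm) on pairwise distances between the head points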
cost = cdist(first_tracking_data_arr[:, 0], second_tracking_data_arr[:, 0])
result = linear_sum_assignment(cost)
if second_tracking_data_arr.shape[0] < n_fish:
missed_index = [i for i in range(len(first_tracking_data_arr)) if i not in result[0]][0]
merged_index = np.where(cost[missed_index] == np.min(cost[missed_index]))[0][0]
second_tracking_data_arr = np.append(second_tracking_data_arr, second_tracking_data_arr[merged_index]).reshape(-1, n_tracking_points, 2)
tail_curvature_arr = np.append(tail_curvature_arr, tail_curvature_arr[merged_index]).reshape(-1, n_tracking_points - 1)
second_tracking_data_arr, tail_curvature_arr, result = find_identities_tail_curvature(first_tracking_data_arr, second_tracking_data_arr, tail_curvature_arr, n_fish, n_tracking_points)
return second_tracking_data_arr, tail_curvature_arr, result
def reorder_data_for_identities_tail_points(tracking_data, n_fish, n_tracking_points, start_index = 0):
new_tracking_data = np.zeros((len(tracking_data), n_fish, n_tracking_points, 2))
for i in range(len(tracking_data) - 1):
if i == 0:
first_tracking_data_arr = tracking_data[start_index]
new_tracking_data[0] = tracking_data[start_index]
second_tracking_data_arr = tracking_data[i + 1]
new_tracking_data_arr, new_order = find_identities_tail_points(first_tracking_data_arr, second_tracking_data_arr, n_fish, n_tracking_points)
new_tracking_data[i + 1] = new_tracking_data_arr[new_order[1]]
first_tracking_data_arr = new_tracking_data[i + 1]
return new_tracking_data
def find_identities_tail_points(first_tracking_data_arr, second_tracking_data_arr, n_fish, n_tracking_points):
cost = cdist(first_tracking_data_arr[:, 0], second_tracking_data_arr[:, 0])
result = linear_sum_assignment(cost)
if second_tracking_data_arr.shape[0] < n_fish:
missed_index = [i for i in range(len(first_tracking_data_arr)) if i not in result[0]][0]
merged_index = np.where(cost[missed_index] == np.min(cost[missed_index]))[0][0]
second_tracking_data_arr = np.append(second_tracking_data_arr, second_tracking_data_arr[merged_index]).reshape(-1, n_tracking_points, 2)
second_tracking_data_arr, result = find_identities_tail_points(first_tracking_data_arr, second_tracking_data_arr, n_fish, n_tracking_points)
return second_tracking_data_arr, result
def find_tracking_errors(tracking_data, window = None):
tracking_errors = np.zeros((tracking_data.shape[:2]))
if window is None:
for time_index, fish_data in enumerate(tracking_data):
dupes = np.unique(fish_data[:, 0], axis = 0, return_counts = True)
dupe_vals = dupes[0][dupes[1] > 1]
for fish_index, fish_val in enumerate(fish_data[:, 0]):
for dupe_val in dupe_vals:
if np.array_equal(fish_val, dupe_val):
tracking_errors[time_index, fish_index] = 1
else:
for time_index, fish_data in enumerate(tracking_data[int(window / 2) : -int(window / 2)]):
dupes = np.unique(fish_data[:, 0], axis = 0, return_counts = True)
dupe_vals = dupes[0][dupes[1] > 1]
for fish_index, fish_val in enumerate(fish_data[:, 0]):
for dupe_val in dupe_vals:
if np.array_equal(fish_val, dupe_val):
tracking_errors[time_index : time_index + int(window), fish_index] = 1
return tracking_errors
def remove_tracking_errors_from_tail_curvature(tail_curvature, tracking_errors):
processed_tail_curvature = tail_curvature.copy()
processed_tail_curvature[tracking_errors == 1] = 0
return processed_tail_curvature
def load_tracking_data(folder, prefix, n_fish, n_tracking_points):
tracking_data_csv = filenames_from_folder(folder, filename_contains = [prefix, "tracking-results"], filename_ends_with = ".csv")[0]
tail_curvature_csv = filenames_from_folder(folder, filename_contains = [prefix, "tail-curvature"], filename_ends_with = ".csv")[0]
tracking_data = extract_tracking_points_from_csv(tracking_data_csv)
tail_curvature = extract_tail_curvature_from_csv(tail_curvature_csv)
    tracking_data, tail_curvature = reorder_data_for_identities_tail_curvature(tracking_data, tail_curvature, n_fish, n_tracking_points)
return tracking_data, tail_curvature
def load_image(folder, prefix, example_index):
video_path = filenames_from_folder(folder, filename_contains = [prefix, "video"], filename_ends_with = ".avi")[0]
cap = cv2.VideoCapture(video_path)
cap.set(cv2.CAP_PROP_POS_FRAMES, example_index)
image = cap.read()[1]
cap.release()
return image
def plot_image_with_tracking_overlay(tracking_data, image, save_path = None, example_index = 0, index_factor = 1):
tracking_colours = np.array([cm.get_cmap("hsv")(plt.Normalize(0, tracking_data.shape[1])(i)) for i in range(tracking_data.shape[1])])
new_image = image.copy()
for fish_index, fish_tracking_points in enumerate(tracking_data[int(example_index * index_factor)]):
print("Fish {0} - Colour {1}".format(fish_index + 1, tracking_colours[fish_index]*255))
cv2.circle(new_image, (int(float(fish_tracking_points[0, 0])), int(float(fish_tracking_points[0,1]))), 3, tracking_colours[fish_index] * 255, 1, cv2.LINE_AA)
cv2.putText(new_image, "Fish {0}".format(fish_index + 1), (int(float(fish_tracking_points[0, 0])) + 10, int(float(fish_tracking_points[0,1])) + 10), cv2.FONT_HERSHEY_SIMPLEX, 1, tracking_colours[fish_index] * 255)
fig = plt.figure(figsize = (10, 10), dpi = 300, constrained_layout = False)
im_ax = fig.add_subplot(1, 1, 1)
im_ax.imshow(new_image, aspect = "equal")
im_ax.set_axis_off()
if save_path is not None:
plt.savefig(save_path)
plt.show()
def calculate_paths(tracking_data, paths, linewidth):
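    # Rasterize each fish's head trajectory into its own image plane, encoding
    # time as intensity (later frames are drawn brighter)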
for time_index in range(tracking_data.shape[0] - 1):
for fish_index in range(tracking_data[time_index].shape[0]):
point1 = (int(tracking_data[time_index, fish_index, 0, 0]), int(tracking_data[time_index, fish_index, 0, 1]))
point2 = (int(tracking_data[time_index + 1, fish_index, 0, 0]), int(tracking_data[time_index + 1, fish_index, 0, 1]))
if point1 != point2:
paths[fish_index] = cv2.line(paths[fish_index], point1, point2, (time_index + 1) / tracking_data.shape[0] * 255, linewidth)
return paths
def plot_paths(tracking_data, image, linewidth = 1, save_path = None):
tracking_colours = np.array([cm.get_cmap("hsv")(plt.Normalize(0, tracking_data.shape[1])(i)) for i in range(tracking_data.shape[1])])
path_colours = [LinearSegmentedColormap.from_list("cmap_{0}".format(fish_index + 1), [[np.min([1, tracking_colour[0] * 1.6 + 0.8]), np.min([1, tracking_colour[1] * 1.6 + 0.8]), np.min([1, tracking_colour[2] * 1.6 + 0.8]), 1], tracking_colour, [np.max([0, tracking_colour[0] * 0.6 - 0.2]), np.max([0, tracking_colour[1] * 0.6 - 0.2]), np.max([0, tracking_colour[2] * 0.6 - 0.2]), 1]]) for fish_index, tracking_colour in enumerate(tracking_colours)]
[path_colour.set_under(color = [1, 1, 1, 0]) for path_colour in path_colours]
paths = np.zeros((tracking_colours.shape[0], image.shape[0], image.shape[1]))
paths = calculate_paths(tracking_data, paths, linewidth)
fig = plt.figure(figsize = (10, 10), dpi = 300, constrained_layout = False)
path_axes = [fig.add_subplot(1, 1, 1, label = "{0}".format(index)) for index, path in enumerate(paths)]
[path_ax.imshow(path, cmap = path_colour, origin = "upper", vmin = 0.000000000001, vmax = 255, aspect = "equal") for path_ax, path, path_colour in zip(path_axes, paths, path_colours)]
[path_ax.set_facecolor([1, 1, 1, 0]) for path_ax in path_axes]
[path_ax.set_axis_off() for path_ax in path_axes]
if save_path is not None:
plt.savefig(save_path)
plt.show()
def plot_colorbar(tracking_data, save_path = None):
colorbar_data = np.linspace(0, 255, tracking_data.shape[0])[:, np.newaxis]
fig = plt.figure(figsize = (0.5, 10), dpi = 300, constrained_layout = False)
colorbar_ax = fig.add_subplot(1, 1, 1)
colorbar_ax.imshow(colorbar_data, cmap = "gray", aspect = "auto")
colorbar_ax.set_axis_off()
if save_path is not None:
plt.savefig(save_path)
plt.show()
def plot_tail_curvature(tail_curvature, save_path = None, imaging_FPS = 332, tracking_errors = None):
fig = plt.figure(figsize = (30, 5), dpi = 300, constrained_layout = False)
gs = fig.add_gridspec(ncols = 1, nrows = tail_curvature.shape[1], hspace = -0.3)
tc_axes = [fig.add_subplot(gs[i, 0]) for i in range(tail_curvature.shape[1])]
x_vals = np.linspace(0, tail_curvature.shape[0]/imaging_FPS, tail_curvature.shape[0])
y_vals_baseline = np.zeros((x_vals.shape[0]))
    tracking_colours = np.array([cm.get_cmap("hsv")(plt.Normalize(0, tail_curvature.shape[1])(i)) for i in range(tail_curvature.shape[1])])
for tc_index, tc_ax in enumerate(tc_axes):
tc_ax.plot(x_vals, np.mean(tail_curvature[:, tc_index, -3:], axis = 1), color = tracking_colours[tc_index], linewidth = 1, rasterized = True)
tc_ax.plot(x_vals, y_vals_baseline, color = tracking_colours[tc_index], linewidth = 1, rasterized = True, alpha = 0.5, ls = "--")
tc_ax.set_ylim(-150, 150)
tc_ax.set_xlim(-0.01, x_vals[-1])
tc_ax.spines['top'].set_visible(False)
tc_ax.spines['right'].set_visible(False)
tc_ax.set_yticks([])
tc_ax.set_xticks([])
tc_ax.set_facecolor([1, 1, 1, 0])
if tc_index == len(tc_axes) - 1:
tc_ax.spines['bottom'].set_bounds(0, 1)
tc_ax.spines['bottom'].set_linewidth(5)
tc_ax.spines['left'].set_bounds(0, 100)
tc_ax.spines['left'].set_linewidth(5)
else:
tc_ax.spines['bottom'].set_visible(False)
tc_ax.spines['left'].set_visible(False)
if tracking_errors is not None:
tc_ax.fill_between(x_vals, -150, 150, where = tracking_errors[tc_index] == 1, color = "red", alpha = 0.5, edgecolor = [1, 1, 1, 0])
# Save the plot and show
if save_path is not None:
plt.savefig(save_path)
plt.show()
def extract_stimulus_data(csv_file):
with open(csv_file) as f:
stimulus_data = [[column for column in row if len(column) > 0] for row in csv.reader(f)]
stimulus_data = np.array([np.array([prey_data.split(",") for prey_data in data_point]).astype(float) for data_point in stimulus_data])
return stimulus_data
def load_stimulus_data(folder, prefix):
stimulus_data_csv = filenames_from_folder(folder, filename_contains = [prefix, "stimulus-data"], filename_ends_with = ".csv")[0]
stimulus_data = extract_stimulus_data(stimulus_data_csv)
return stimulus_data
def load_data_from_filenames(filenames, dtype = float):
return [np.loadtxt(filename, dtype = dtype) for filename in filenames]
def calculate_stimulus(stimulus_data, stimulus_image, example_index = 0, index_factor = 1):
stimulus_sizes = stimulus_data[:, :, 2]
    stimulus_positions = np.moveaxis(np.array([stimulus_data[:, :, 0] * stimulus_image.shape[0] / 2 + stimulus_image.shape[0] / 2, stimulus_data[:, :, 1] * stimulus_image.shape[1] / 2 + stimulus_image.shape[1] / 2]), 0, -1)
for stimulus_position, stimulus_size in zip(stimulus_positions[example_index * index_factor], stimulus_sizes[example_index * index_factor]):
stimulus_image = cv2.circle(stimulus_image, (int(stimulus_position[0]), int(stimulus_position[1])), int(stimulus_size), [255, 255, 255], -1, cv2.LINE_AA)
return stimulus_image
def plot_stimulus(stimulus_data, stimulus_image, save_path = None, example_index = 0, index_factor = 1):
stimulus_image = calculate_stimulus(stimulus_data, stimulus_image, example_index, index_factor)
fig = plt.figure(figsize = (10, 10), dpi = 300, constrained_layout = False)
im_ax = fig.add_subplot(1, 1, 1)
im_ax.imshow(stimulus_image, cmap = "gray", vmin = 0, vmax = 255, aspect = "equal")
im_ax.set_axis_off()
if save_path is not None:
plt.savefig(save_path)
plt.show()
def interpolate_NaNs(data, skip_start = False, skip_end = False):
if not skip_start and np.isnan(data[0]):
data[0] = 0
if not skip_end and np.isnan(data[-1]):
data[-1] = 0
if np.isnan(data).any():
nans = np.isnan(data)
nan_indices = np.where(nans)[0]
good_indices = np.where(nans == False)[0]
data[nan_indices] = np.interp(nan_indices, good_indices, data[good_indices])
return data
def calculate_prey_yaw_angle(time, moving_prey_speed = 0.6):
return [np.arctan2(np.sin((-np.pi / 3) * np.sin(t * moving_prey_speed)), np.cos((-np.pi / 3) * np.sin(t * moving_prey_speed))) for t in time]
def calculate_velocity(pos_x, pos_y, size_of_FOV_cm = 6, image_width = 1088, imaging_FPS = 332):
return [np.hypot(np.diff(x[:np.min([len(x), len(y)])]), np.diff(y[:np.min([len(x), len(y)])])) * size_of_FOV_cm * imaging_FPS / image_width for x, y in zip(pos_x, pos_y)]
def calculate_paths2(pos_x, pos_y, image, colour, linewidth = 1, radius_threshold = 100):
for x, y in zip(pos_x, pos_y):
for i in range(len(x) - 1):
point1 = (int(x[i]), int(y[i]))
point2 = (int(x[i+1]), int(y[i+1]))
if np.sqrt((point2[0] - (image.shape[0] / 2))**2 + (point2[1] - (image.shape[1] / 2))**2) > radius_threshold:
continue
if point1 != point2:
image = cv2.line(image, point1, point2, colour, linewidth)
return image
def calculate_trajectory(x, y, image, colour, linewidth = 1, radius_threshold = 100):
for i in range(len(x) - 1):
point1 = (int(x[i]), int(y[i]))
point2 = (int(x[i+1]), int(y[i+1]))
if np.sqrt((point2[0] - (image.shape[0] / 2))**2 + (point2[1] - (image.shape[1] / 2))**2) > radius_threshold:
continue
if point1 != point2:
image = cv2.line(image, point1, point2, colour, linewidth)
return image
def threshold_trajectory(pos_x, pos_y, image, radius_threshold = 100):
thresh = [np.ones(x.shape).astype(bool) for x in pos_x]
for i, (x, y) in enumerate(zip(pos_x, pos_y)):
for j in range(len(x) - 1):
point1 = (int(x[j]), int(y[j]))
point2 = (int(x[j+1]), int(y[j+1]))
if np.sqrt((point2[0] - (image.shape[0] / 2))**2 + (point2[1] - (image.shape[1] / 2))**2) > radius_threshold:
thresh[i][j:] = False
break
return thresh
def normalize_path_data(pos_x, pos_y, heading_angle, offset_x = 200, offset_y = 200):
new_pos_x = []
new_pos_y = []
for x, y, ha in zip(pos_x, pos_y, heading_angle):
x = x - x[0]
y = y - y[0]
xnew = (x * np.cos(ha[0] + (np.pi/2)) - y * np.sin(ha[0] + (np.pi/2))) + offset_x
ynew = (x * np.sin(ha[0] + (np.pi/2)) + y * np.cos(ha[0] + (np.pi/2))) + offset_y
new_pos_x.append(xnew)
new_pos_y.append(ynew)
return new_pos_x, new_pos_y
def calculate_max_heading_angle(heading_angle):
max_heading_angle = []
for ha in heading_angle:
max_i = np.argmax(ha)
min_i = np.argmin(ha)
if np.abs(ha[max_i]) < np.abs(ha[min_i]):
max_heading_angle.append(ha[min_i])
else:
max_heading_angle.append(ha[max_i])
return max_heading_angle
def detect_peaks(data, delta, x = None):
maxtab = []
mintab = []
if x is None:
x = np.arange(len(data))
data =
|
np.asarray(data)
|
numpy.asarray
|
import numpy as np
import pytest
from inverse_covariance.profiling import metrics
class TestMetrics(object):
@pytest.mark.parametrize(
"m, m_hat, expected",
[
(
np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]),
np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]),
(6, 6, 6),
),
(
np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]),
np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]),
(4, 2, 2),
),
(
np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]),
np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]),
(4, 2, 2),
),
],
)
def test__nonzero_intersection(self, m, m_hat, expected):
result = metrics._nonzero_intersection(m, m_hat)
print(result)
assert result == expected
@pytest.mark.parametrize(
"m, m_hat, expected",
[
(
np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]),
np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]),
0,
),
(
np.array([[2, 1, 0], [1, 2, 3], [0, 5, 6]]),
np.array([[1, 1, 0], [1, 2, 0], [0, 0, 3]]),
0,
),
(
np.array([[0, 1, 0], [1, 0, 3], [0, 5, 0]]),
|
np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]])
|
numpy.array
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest, f_regression
import matplotlib.pyplot as plt
def setMonths2(data):
"""
data: pandas DataFrame
Replaces the month feature by 12 features, one for each month, which can only be 1 or 0.
"""
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
for month in months:
new_column = pd.Series(0, index=data.index)
new_column[data.month == month] = 1
data[month] = new_column
data.drop('month', axis=1, inplace=True)
def setDays2(data):
"""
data: pandas DataFrame
    Replaces the day feature by 7 features, one for each day of the week, which can only be 1 or 0.
"""
days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
for day in days:
new_column = pd.Series(0, index=data.index)
new_column[data.day == day] = 1
data[day] = new_column
data.drop('day', axis=1, inplace=True)
def setWeekend(data, deleteDay=True):
"""
data: pandas DataFrame
Replaces the day feature by a weekend feature, whose value is 1 if the day is Saturday or Sunday, 0 otherwise.
"""
new_col = pd.Series(0, index=data.index)
new_col[data['day'].isin(['sat', 'sun'])] = 1
data['weekend'] = new_col
if deleteDay:
data.drop('day', axis=1, inplace=True)
def setDays(data):
"""
data : pandas DataFrame
Replaces the name of the day of the week by a number
"""
days = {'sun':7, 'mon':1, 'tue':2, 'wed':3, 'thu':4, 'fri':5, 'sat':6}
a = pd.Series(index=data.index)
for d in days.keys():
a[data.day == d] = days[d]
data.day = a
def setMonths(data):
"""
data : pandas DataFrame
Replaces the name of the month by a number
"""
months = {'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12}
a = pd.Series(index=data.index)
for m in months.keys():
a[data.month == m] = months[m]
data.month = a
def scaleData(X):
"""
X : pandas DataFrame with only numerical values
This function linearly scales the features of X, to make each value lie between 0 and 1
"""
M, m = X.max(), X.min()
for col in X.columns:
if M[col] != m[col]:
X[col] = (X[col] - m[col])/(M[col] - m[col])
def normalizeData(X, columns='all'):
"""
X : pandas DataFrame with only numerical values
    This function standardizes the features of X so that each one has zero mean and unit standard deviation
"""
M, S = X.mean(), X.std()
if columns == 'all':
colList = X.columns
else:
colList = columns
for col in colList:
if S[col] == 0:
X[col] = 0
else:
X[col] = (X[col] - M[col])/S[col]
def PCA(A, y, k):
"""
A : Numpy Array, k : integer
Performs PCA on A
"""
M = np.tile(np.average(A, axis=0), (A.shape[0], 1)) # Mean of the columns
C = A - M
W = np.dot(np.transpose(C), C)
_, eigvec = np.linalg.eigh(W)
eigvec = eigvec[:,::-1] # eigenvalues in ascending order : colums of U must be reversed
Uk = eigvec[:,:k]
return np.dot(A, Uk)
def featureSelect(X, y, j):
Xnew = SelectKBest(f_regression, k=j).fit_transform(X, y)
return Xnew
def plotData(X, y, features=None, sepFig=False, log=False):
a = np.array(y)
if features == None:
colList = X.columns
else:
colList = features
for col in colList:
if sepFig:
plt.figure()
plt.title(col)
A =
|
np.array(X[col])
|
numpy.array
|
#! /usr/bin/env python
"""
Module for evaluating the POD-RBF
reduced order model at new time-query
points during the online stage
"""
import numpy as np
import scipy
from scipy.spatial.distance import cdist
from numpy.lib.scimath import sqrt as csqrt
from scipy import interpolate
import pod as pod
import rbf as rbf
import greedy as gdy
import plotting as plo
# ---- Save initial conditions from snapshots
def init_cond_onl(snap_data, nt_online, n_pod, U, Smean, t0_ind=0):
soln_names = n_pod.keys()
uh0, zh0 = {}, {}
uh_save, zh_save = {}, {}
for ii, key in enumerate(n_pod.keys()):
uh0[key] = snap_data[key][:, t0_ind].copy()
zh0[key] = U[ii].T.dot(uh0[key]-Smean[key])
for key in soln_names:
zh_save[key] = np.zeros((n_pod[key], nt_online), 'd')
uh_save[key] = np.zeros((snap_data[key].shape[0], nt_online), 'd')
return uh_save, zh_save, uh0, zh0
def rbf_online(Zcent, wts, snap_data, times_online, epsn, U_trunc, Smn, kernel,
time_disc='None', beta=2.5, t0_ind=0):
"""
Compute RBF ROM solutions at online evaluation time points
"""
soln_names = wts.keys()
n_pod = {}
nw = np.zeros(len(wts.keys()), 'i')
U_incr = []
for ii, key in enumerate(wts.keys()):
n_pod[key] = wts[key].shape[0]
nw[ii] = n_pod[key]
U_incr.append(U_trunc[key])
nt_online = times_online.shape[0]
if t0_ind == 0:
uh_save, zh_save, uh0, zh0 = init_cond_onl(snap_data, nt_online,
n_pod, U_incr, Smn)
else:
uh_save, zh_save, uh0, zh0 = init_cond_onl(snap_data, nt_online,
n_pod, U_incr, Smn, t0_ind=t0_ind)
uh, zh, uhn, zhn = {}, {}, {}, {}
for key in soln_names:
uh[key] = uh0[key].copy()
zh[key] = zh0[key].copy()
uhn[key] = uh[key].copy()
zhn[key] = zh[key].copy()
zh_save[key][:, 0] = zh[key].copy()
uh_save[key][:, 0] = uh[key].copy()
#
zeval = np.zeros((nw.sum(),), 'd')
for istep, tn in enumerate(times_online[:-1]):
if istep % 500 == 0:
print("Computing solutions for time step %d" % (istep+1))
t = times_online[istep+1]
dt = t-tn
offset = 0
for key in soln_names:
zeval[offset:offset+n_pod[key]] = zhn[key]
offset += n_pod[key]
for ii, key in enumerate(soln_names):
# FOR HIGHER ORDER TIME STEPPING METHODS
zh[key] = time_march_multistep(zeval, Zcent, wts, zh_save, key, istep,
times_online, kernel, epsn, beta, time_disc)
# ## FOR ORDER 1 TIME STEPPING METHOD
# dtmp = rbf.rbf_evaluate(zeval[:,np.newaxis],Zcent,wts[key],
# epsilon=epsn, kernel=kernel,beta=beta)
# dtmp = np.squeeze(dtmp)
# zh[key] = zhn[key]+dt*dtmp
# update u = \bar{u} + \Phiw . w
for ii, key in enumerate(soln_names):
uh[key][:] = Smn[key]
uh[key][:] += np.dot(U_incr[ii], zh[key])
# update reduced solution (ie generalized coordinates)
for key in soln_names:
# update zh and uh history
zhn[key][:] = zh[key][:]
uhn[key][:] = uh[key][:]
# save for evaluation later
for key in soln_names:
zh_save[key][:, istep+1] = zh[key].copy()
uh_save[key][:, istep+1] = uh[key].copy()
return uh_save, zh_save
def time_march_multistep(zeval, Zcent, wts, zh_save, key, istep, times_online, kernel,
epsn=0.01, beta=2.5, time_disc='None'):
"""
Compute modal time steps for a chosen solution component
using a selected multistep method
Available routines:
1) Explicit midpoint or LeapFrog scheme,
2) 2nd & 3rd order Adams Bashforth
3) Explicit 3rd order Nystrom method
4) 2nd and 3rd order extrapolated BDF methods
======
Input-
zeval: Snapshot vector at time "n" (vertically stacked for all components)
Zcent: Matrix of RBF centers (vertically stacked), size = [nw_total x Nt_pod-1]
wts: Dictionary of computed weights for RBF interpolation
zh_save: Dictionary of all computed snapshots until time step "n"
istep: index of current time point
times_online: Array of normalized time points for online computation of snapshots
nw: Dictionary of number of POD modes per component
time_disc: Denotes the selected time discretization scheme
======
Output-
zh: Computed snapshot vector at time "n+1" (vertically stacked for all components)
"""
n_pod = {}
nw_total = 0
soln_names = wts.keys()
dt = times_online[istep+1] - times_online[istep]
for ky in wts.keys():
n_pod[ky] = wts[ky].shape[0]
nw_total += n_pod[ky]
if time_disc == 'LF':
dtmp = rbf.rbf_evaluate_modal(
zeval[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp = np.squeeze(dtmp)
if istep == 0:
zh = zh_save[key][:, istep]+dt*dtmp
else:
zh = zh_save[key][:, istep-1] + \
(times_online[istep+1]-times_online[istep-1])*dtmp
elif time_disc == 'AB2':
dtmp_1 = rbf.rbf_evaluate_modal(
zeval[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_1 = np.squeeze(dtmp_1)
if istep == 0:
dtmp_2 = dtmp_1
else:
offset = 0
zeval2 = np.zeros((nw_total,), 'd')
for ky in soln_names:
zeval2[offset:offset+n_pod[ky]] = zh_save[ky][:, istep-1]
offset += n_pod[ky]
dtmp_2 = rbf.rbf_evaluate_modal(
zeval2[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_2 = np.squeeze(dtmp_2)
zh = zh_save[key][:, istep] + dt*(1.5*dtmp_1 - 0.5*dtmp_2)
elif time_disc == 'AB3':
dtmp_1 = rbf.rbf_evaluate_modal(
zeval[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_1 = np.squeeze(dtmp_1)
if istep <= 1:
dtmp_2 = dtmp_1
dtmp_3 = dtmp_1
else:
offset = 0
zeval2 = np.zeros((nw_total,), 'd')
zeval3 = np.zeros((nw_total,), 'd')
for ky in soln_names:
zeval2[offset:offset+n_pod[ky]] = zh_save[ky][:, istep-1]
zeval3[offset:offset+n_pod[ky]] = zh_save[ky][:, istep-2]
offset += n_pod[ky]
dtmp_2 = rbf.rbf_evaluate_modal(
zeval2[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_3 = rbf.rbf_evaluate_modal(
zeval3[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_2 = np.squeeze(dtmp_2)
dtmp_3 = np.squeeze(dtmp_3)
zh = zh_save[key][:, istep] + dt * \
((23./12.)*dtmp_1 - (4./3.)*dtmp_2 + (5./12.)*dtmp_3)
elif time_disc == 'NY3':
dtmp_1 = rbf.rbf_evaluate_modal(
zeval[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_1 = np.squeeze(dtmp_1)
if istep <= 1:
zh = zh_save[key][:, istep]+dt*dtmp_1
else:
offset = 0
zeval2 = np.zeros((nw_total,), 'd')
zeval3 = np.zeros((nw_total,), 'd')
for ky in soln_names:
zeval2[offset:offset+n_pod[ky]] = zh_save[ky][:, istep-1]
zeval3[offset:offset+n_pod[ky]] = zh_save[ky][:, istep-2]
offset += n_pod[ky]
dtmp_2 = rbf.rbf_evaluate_modal(
zeval2[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_3 = rbf.rbf_evaluate_modal(
zeval3[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_2 = np.squeeze(dtmp_2)
dtmp_3 = np.squeeze(dtmp_3)
zh = zh_save[key][:, istep-1] + dt * \
(7.*dtmp_1 - 2.*dtmp_2 + dtmp_3)/3.
elif time_disc == 'BDF-EP2':
dtmp_1 = rbf.rbf_evaluate_modal(
zeval[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_1 = np.squeeze(dtmp_1)
if istep == 0:
zh = zh_save[key][:, istep]+dt*dtmp_1
else:
offset = 0
zeval2 = np.zeros((nw_total,), 'd')
for ky in soln_names:
zeval2[offset:offset+n_pod[ky]] = zh_save[ky][:, istep-1]
offset += n_pod[ky]
dtmp_2 = rbf.rbf_evaluate_modal(
zeval2[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_2 = np.squeeze(dtmp_2)
zh = 4. * zh_save[key][:, istep]/3. - zh_save[key][:, istep-1]/3. \
+ dt*((4./3.)*dtmp_1 - (2./3.)*dtmp_2)
elif time_disc == 'BDF-EP3':
dtmp_1 = rbf.rbf_evaluate_modal(
zeval[:, np.newaxis], Zcent, wts[key], epsn, kernel)
dtmp_1 =
|
np.squeeze(dtmp_1)
|
numpy.squeeze
|
import pickle
import scipy
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn import preprocessing
from laplotter import LossAccPlotter
ds_loc="../dataset/cifar-10-batches-py/"
plot_loc="./plots/"
show_not_save=False
def show_save(name):
if show_not_save:
plt.show()
else:
plt.savefig(plot_loc + name.replace(' ', '') + ".png")
plt.close()
class CIFAR:
def __init__(self):
self.out_size = 10
self.in_size = 32 * 32 * 3
self.loaded_batches = {}
self.load_labels()
self.label_encoder = preprocessing.LabelBinarizer()
self.label_encoder.fit([x for x in range(self.out_size)])
def load_labels(self):
with open(ds_loc + 'batches.meta', 'rb') as f:
data = pickle.load(f, encoding='bytes')
self.labels = [x.decode('ascii') for x in data[b'label_names']]
def get_batch(self, batch_name):
if not batch_name in self.loaded_batches:
with open(ds_loc + batch_name, 'rb') as f:
data = pickle.load(f, encoding='bytes')
data[b'labels'] = np.array(data[b'labels'])
self.loaded_batches[batch_name] = {
'batch_name': data[b'batch_label'],
'images': np.divide(data[b'data'], 255),
'one_hot': self.label_encoder.transform(data[b'labels']),
'labels': data[b'labels']
}
return self.loaded_batches[batch_name]
def get_batches(self, *args):
batches = [self.get_batch(name) for name in args]
return {
'batch_name': ", ".join(args),
'images': np.vstack([b['images'] for b in batches]),
'one_hot': np.vstack([b['one_hot'] for b in batches]),
'labels': np.hstack([b['labels'] for b in batches])
}
def barPlotLabels(dataset, labels, name):
n = dataset['labels'].size
y = [(dataset['labels'] == l).sum() / dataset['labels'].size for l in np.unique(dataset['labels'])]
index = np.arange(len(labels))
plt.bar(index, y)
plt.xlabel('Label', fontsize=10)
plt.ylabel('Frequency', fontsize=10)
plt.xticks(index, labels, fontsize=9, rotation=30)
plt.title('Label Distribution, n=' + str(n))
show_save(name)
def show_image(img, label='', interpolation='gaussian'):
squared_image = np.rot90(np.reshape(img, (32, 32, 3), order='F'), k=3)
plt.imshow(squared_image, interpolation=interpolation)
plt.axis('off')
plt.title(label)
def plotImages(dataset, name):
for plot_i, img_i in enumerate(np.random.choice(dataset['images'].shape[0], 10, replace=False)):
plt.subplot(2, 5, plot_i+1)
show_image(dataset['images'][img_i], cifar.labels[dataset['labels'][img_i]])
plt.suptitle('Sample images from ' + name, fontsize=20)
show_save(name)
# Time to make a network
class Net():
def __init__(self, in_size, output_size, lam=0.1):
self.in_size = in_size
self.out_size = output_size
        # Gaussian normal dist as sensible random initialization
# Weights
self.W = np.random.normal(loc=0.0, scale=0.01, size=(output_size, in_size))
# Bias
self.b = np.random.normal(loc=0.0, scale=0.01, size=(output_size, 1))
# Lambda term
self.lam = lam
def softmax(self, x):
try:
            # subtract the max for numerical stability (prevents overflow in exp)
e = np.exp(x - x.max())
return e / np.sum(e, axis=0)
except FloatingPointError:
# Gradient explosion scenario
print("jesus take the wheel")
            return np.ones(x.shape)
def evaluate(self, x):
"""
x: input of size [in_size, N]
returns:
prob: probabilities of each class [out_size, N]
pred: integer value of the most probable class: [1, N]
"""
x = np.reshape(x, (self.in_size, -1))
prob = self.softmax(np.dot(self.W, x) + self.b)
pred = np.argmax(prob, axis=0)
return prob, pred
def cost(self, prob, truth):
"""
prob: probabilities of each class [out_size, N]
(ground) truth: one hot encodings, one per image [out_size, N]
returns:
cost: cross entropy plus L2 regularization term to minimize
equivalent to (5), (6) in Assignment 1
"""
N = prob.shape[1]
prob = np.reshape(prob, (self.out_size, -1))
truth = np.reshape(truth, (self.out_size, -1))
Py = np.multiply(truth, prob).sum(axis=0)
        Py[Py == 0] = np.finfo(float).eps # avoid log(0) by clamping zero probabilities
return - np.log(Py).sum() / N + self.lam * np.power(self.W, 2).sum()
def accuracy(self, pred, truth):
"""
        pred: predicted class index for each image [1, N]
        (ground) truth: one hot encodings, one per image [out_size, N]
        returns:
percentage of correct predictions given the (ground) truth
"""
N = pred.shape[0]
truth = np.argmax(truth, axis=0)
return np.sum(pred == truth) / N
def slides_gradient(self, x, prob, truth):
N = prob.shape[1]
gW = np.zeros(self.W.shape)
gB = np.zeros(self.b.shape)
for i in range(N):
p = prob[:,i]
t = truth[:,i]
# Jacobian according for formulas in Ass1
a =
|
np.outer(p,p)
|
numpy.outer
|
import numpy as np
import matplotlib.pyplot as plt
import warnings
from sklearn.datasets import load_boston
warnings.filterwarnings('ignore')
'''
__init__ arguments:
    Model_name  model name: supports basic linear regression, ridge regression and Lasso regression
    lr: learning rate
    max_epoch: maximum number of iterations
    threshold: convergence criterion on the residual; optimization stops once the difference
               between two consecutive loss values is smaller than this threshold
    Lambda: penalty (regularization) coefficient
    coef_: fitted parameters
    fit_intercept_: whether a bias term is fitted
    verbose: whether to print progress information
fit
    X: ndarray [epoch_size,dim] # 2-D
    y: ndarray [epoch_size,] # 1-D
predict:
    x: 1-D or 2-D array whose feature dimension is dim
'''
'''
Note: on the Boston housing dataset the predictions may blow up once lr is set to 1e-5 or larger,
so the data may need to be normalized. Gradient descent is used here, while sklearn uses
coordinate descent, so there will be differences.
'''
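# Minimal usage sketch (illustrative only, not part of the original script):
#   X, y = load_boston(return_X_y=True)
#   model = LinearRegression(Model_name='Ridge', lr=1e-6, verbose=True)
#   model.fit(X, y)
#   y_hat = model.predict(X[:5])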
class LinearRegression:
def __init__(self,Model_name='Ridge',lr=1e-6,
max_epoch=8000,
threshold=1e-5,
Lambda=0.001,
fit_intercept_=True,
verbose=False):
self.name=Model_name
self.lr=lr
self.epoch=max_epoch
self.threshhold=threshold
self.Lambda=Lambda
self.verbose=verbose
self.fit_intercept_=fit_intercept_
self.w=None
def fit(self,X,y):
size=X.shape[0] # 样本个数
if self.fit_intercept_:
X=np.concatenate((
|
np.ones((size,1))
|
numpy.ones
|
# <NAME> and <NAME>
# January 18, 2021
# v2.08
import numpy as np
import numpy.ma as ma
import random
import xarray as xr
import scipy.stats as stats
import random
import sys
import h5py
from sklearn import preprocessing
import matplotlib.pyplot as plt
import scipy.io as sio
#----------------------------------------------------------------
#----------------------------------------------------------------
def is_tranquil(N,inoise):
tr = np.ones((N,))
i = np.intersect1d(np.arange(0,N),inoise)
tr[i] = 0
return tr
#----------------------------------------------------------------
def make_categories(y,n,y_perc=np.nan):
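    # Bin the continuous target y into n categories; bin edges are percentiles of
    # y unless explicit (low, high) pairs are supplied via y_perc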
p_vec = np.linspace(0,1,n+1)
ycat = np.empty(np.shape(y))*np.nan
y_p = np.empty((len(p_vec)-1,2))*np.nan
if(y_perc is not np.nan):
assert len(p_vec)==len(y_perc)+1
for i in np.arange(0,n):
if(y_perc is np.nan):
low = np.percentile(y,p_vec[i]*100)
high = np.percentile(y,p_vec[i+1]*100)
else:
low,high = y_perc[i]
y_p[i,:] = low, high
k = np.where(np.logical_and(y>=low, y<=high))[0]
ycat[k] = i
return ycat, y_p
#----------------------------------------------------------------
def undersample(x,y,tr):
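    # Balance the classes by truncating every class to the size of the smallest
    # one (keeping the first mincount samples of each)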
cats = np.unique(y)
mincount = len(y)
for c in cats:
num = np.count_nonzero(y==c)
if(num < mincount):
mincount = num
# print results
for c in cats:
print('number originally in each class '
+ str(c)
+ ' = '
+ str(np.count_nonzero(y==c)))
yout = y[:1]
xout = x[:1]
trout = tr[:1]
for c in cats:
i = np.where(y==c)[0]
i = i[:mincount]
xout = np.append(xout,x[i],axis=0)
yout = np.append(yout,y[i],axis=0)
trout = np.append(trout,tr[i],axis=0)
yout = np.asarray(yout[1:])
xout = np.asarray(xout[1:])
trout = np.asarray(trout[1:])
# print results
for c in cats:
print('number in undersampled class '
+ str(c)
+ ' = '
+ str(np.count_nonzero(yout==c)))
return xout, yout, trout
#----------------------------------------------------------------
def split_data(X, y_cat, tranquil, corrupt):
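    # Sequential split: the first n_train samples are used for training and the
    # last n_val + n_test samples are held out for validation and testing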
NSAMPLES = np.shape(X)[0]
n_test = 5000
n_val = 5000
n_train = NSAMPLES - n_val - n_test
# make training data
X_train = X[:n_train,:,:]
y_train_cat = y_cat[:n_train]
tr_train = tranquil[:n_train]
corrupt_train = corrupt[:n_train]
print('----Training----')
print(np.shape(X_train))
print(np.shape(y_train_cat))
print(np.shape(tr_train))
# make validation data
X_val = X[n_train:n_train+n_val,:,:]
y_val_cat = y_cat[n_train:n_train+n_val]
tr_val = tranquil[n_train:n_train+n_val]
corrupt_val = corrupt[n_train:n_train+n_val]
print('----Validation----')
print(np.shape(X_val))
print(
|
np.shape(y_val_cat)
|
numpy.shape
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import random
from unittest.mock import Mock
from endochrone.clustering import KMeans
from endochrone.utils import lazy_test_runner as ltr
__author__ = "nickwood"
__copyright__ = "nickwood"
__license__ = "mit"
def test_given_initials():
X = np.transpose([[1, 2, 3, 7, 8, 9, 10], [3, 4, 5, 1, 2, 3, 4]])
init_centroids = np.array([[1, 3], [7, 1]])
km_test = KMeans(k=2)
km_test.forgy_centroids_ = Mock(wraps=km_test.forgy_centroids_)
km_test.fit(features=X, initial_centroids=init_centroids)
assert not km_test.forgy_centroids_.called
act = km_test.centroids
exp = np.array([[2, 4], [8.5, 2.5]])
assert np.all(act == pytest.approx(exp))
def test_calculate_step():
X = np.transpose([[1, 2, 3, 7, 8, 9], [3, 4, 5, 1, 2, 3]])
km_test = KMeans(k=2)
km_test.centroids = np.array([[1, 7], [9, 3]])
act = km_test.calculate_step(features=X)
exp = np.array([[2, 4], [8, 2]])
assert np.all(act == pytest.approx(exp))
def test_forgy_initialisation():
X = np.transpose([random.sample(range(200), 20),
random.sample(range(200), 20)])
km_test = KMeans()
km_test.forgy_centroids_ = Mock(wraps=km_test.forgy_centroids_)
# check defaults
assert km_test.n_centroids_ == 3
km_test.fit(features=X)
km_test.forgy_centroids_.assert_called()
centroids = km_test.forgy_centroids_(features=X)
assert centroids.shape == (3, 2)
assert np.unique(centroids, axis=0).shape == (3, 2)
for i in range(3):
assert centroids[i] in X
# tests with k = 5, 12
for N in [5, 12]:
km_test2 = KMeans(k=N)
km_test2.forgy_centroids_ = Mock(wraps=km_test2.forgy_centroids_)
assert km_test2.n_centroids_ == N
km_test2.fit(features=X)
km_test2.forgy_centroids_.assert_called()
centroids = km_test2.forgy_centroids_(features=X)
assert centroids.shape == (N, 2)
assert
|
np.unique(centroids, axis=0)
|
numpy.unique
|
'''
script for testing the output of fiber assign
'''
import os
import glob
import numpy as np
# ---
import fitsio
from astropy.io import fits
# -- desitarget --
from desitarget.sv1.sv1_targetmask import bgs_mask
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
if 'NERSC_HOST' not in os.environ:
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dir_dat = '/global/cscratch1/sd/chahah/feasibgs/survey_validation'
def new_SVfields(seed=1):
''' compare SV fields from Sep 2019 to newly proposed SV regions from
Anand and Christophe Dec 2019.
radius of tile is 1.6275 deg
'''
import matplotlib.patches as patches
np.random.seed(seed)
sv_old = fitsio.read(os.path.join(dir_dat, 'BGS_SV_30_3x_superset60_Sep2019.fits'))
sv_new = sv_old.copy()
svdict = {}
svdict['01_s82'] = '30,40,-7,2'
svdict['02_egs'] = '210,220,50,55'
svdict['03_gama12'] = '175,185,-3,2'
svdict['04_gama15'] = '212,222,-2,3'
svdict['05_overlap'] = '135,160,30,35'
svdict['06_refnorth'] = '215,230,41,46'
svdict['07_ages'] = '215,220,30,40'
svdict['08_sagittarius'] = '200,210,5,10'
svdict['09_highebv_n'] = '140,150,65,70'
svdict['10_highebv_s'] = '240,245,20,25'
svdict['11_highstardens_n'] = '273,283,40,45'
svdict['12_highstardens_s'] = '260,270,15,20'
# regions that were omitted
omitted = {}
omitted['g09'] = '129,141,-1,3'
omitted['PRIMUS-COSMOS'] = '149.6,150.7,1.8,2.9'
omitted['PRIMUS-CDFS-SWIRE'] = '51.8,54.4,-29.7,-28.0'
# formulate new SV fields
# keep fields that already fall into regions
keep = np.zeros(len(sv_old['RA'])).astype(bool)
for reg in svdict.keys():
# get RA, Dec range of new SV regions
ra_min, ra_max = float(svdict[reg].split(',')[0]), float(svdict[reg].split(',')[1])
dec_min, dec_max = float(svdict[reg].split(',')[2]), float(svdict[reg].split(',')[3])
inkeep = (
(sv_old['RA'] > ra_min) & (sv_old['RA'] < ra_max) &
(sv_old['DEC'] > dec_min) & (sv_old['DEC'] < dec_max)
)
keep = keep | inkeep
print('keeping %i previous fields in new region' % np.sum(keep))
for reg in omitted.keys():
ra_min, ra_max = float(omitted[reg].split(',')[0]), float(omitted[reg].split(',')[1])
dec_min, dec_max = float(omitted[reg].split(',')[2]), float(omitted[reg].split(',')[3])
inkeep = (
(sv_old['RA'] > ra_min) &
(sv_old['RA'] < ra_max) &
(sv_old['DEC'] > dec_min) &
(sv_old['DEC'] < dec_max)
)
keep = keep | inkeep
print('keeping %i previous fields including omitted regions' % np.sum(keep))
# put SV tiles on high EBV regions and high star density
ra_new, dec_new = [], []
for reg in ['01_s82', '02_egs', '05_overlap', '06_refnorth', '08_sagittarius', '09_highebv_n', '10_highebv_s', '11_highstardens_n', '12_highstardens_s']:
ra_min, ra_max = float(svdict[reg].split(',')[0]), float(svdict[reg].split(',')[1])
dec_min, dec_max = float(svdict[reg].split(',')[2]), float(svdict[reg].split(',')[3])
if reg == '10_highebv_s':
ra_new.append([0.5*(ra_min + ra_max)])
dec_new.append([0.5*(dec_min + dec_max)])
elif reg == '06_refnorth':
ra_new.append([0.5*(ra_min + ra_max)-4.89, 0.5*(ra_min + ra_max)-1.63, 0.5*(ra_min + ra_max)+1.63, 0.5*(ra_min + ra_max)+4.89])
dec_new.append([0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max)])
elif reg == '05_overlap':
ra_new.append([0.5*(ra_min + ra_max)-6.52, 0.5*(ra_min + ra_max)-3.26, 0.5*(ra_min + ra_max), 0.5*(ra_min + ra_max)+3.26, 0.5*(ra_min + ra_max)+6.52])
dec_new.append([0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max)])
elif reg == '01_s82':
ra_new.append([0.5*(ra_min + ra_max)-1.63, 0.5*(ra_min + ra_max)+1.63])
dec_new.append([0.25*dec_min + 0.75*dec_max, 0.25*dec_min + 0.75*dec_max])
else:
ra_new.append([0.5*(ra_min + ra_max)-1.63, 0.5*(ra_min + ra_max)+1.63])
dec_new.append([0.5*(dec_min + dec_max), 0.5*(dec_min + dec_max)])
ra_new = np.concatenate(ra_new)
dec_new = np.concatenate(dec_new)
i_new = len(ra_new)
print('%i new SV fields in new regions' % i_new)
for i in range(i_new):
sv_new['RA'][np.arange(len(sv_old['RA']))[~keep][i]] = ra_new[i]
sv_new['DEC'][np.arange(len(sv_old['RA']))[~keep][i]] = dec_new[i]
ra_new = np.concatenate([sv_old['RA'][keep], ra_new])
dec_new = np.concatenate([sv_old['DEC'][keep], dec_new])
# pick the rest of the tiles from desi-tiles.fits far from the selected tiles
desi_tiles = fitsio.read(os.path.join(dir_dat, 'desi-tiles.fits'))
keeptile = np.ones(desi_tiles.shape[0]).astype(bool)
for ra, dec in zip(ra_new, dec_new):
_keep = (
(desi_tiles['IN_DESI'] == 1) &
(desi_tiles['PASS'] == 5) &
((desi_tiles['RA'] - ra)**2 + (desi_tiles['DEC'] - dec)**2 > 100.)
)
keeptile = keeptile & _keep
for i in range(60 - i_new - np.sum(keep)):
new_pick = np.random.choice(np.arange(np.sum(keeptile)), 1, replace=False)
_ra, _dec = desi_tiles['RA'][keeptile][new_pick], desi_tiles['DEC'][keeptile][new_pick]
ra_new = np.concatenate([ra_new, _ra])
dec_new = np.concatenate([dec_new, _dec])
for k in sv_new.dtype.names:
if k in desi_tiles.dtype.names:
sv_new[k][np.arange(len(sv_old['RA']))[~keep][i_new+i]] = desi_tiles[k][keeptile][new_pick][0]
keeptile = keeptile & ((desi_tiles['RA'] - _ra)**2 + (desi_tiles['DEC'] - _dec)**2 > 100.)
print('%i random new fields' % (60 - i_new - np.sum(keep)))
# write out the tile centers
np.savetxt('updated_BGSSV_tile_centers.seed%i.dat' % seed, np.vstack([ra_new, dec_new]).T, fmt='%.2f %.2f')
# write out
fitsio.write(os.path.join(dir_dat, 'BGS_SV_30_3x_superset60_Jan2020.fits'), sv_new, clobber=True)
# --- plot everything ---
fig = plt.figure(figsize=(20,10))
sub = fig.add_subplot(111)
for i, reg in enumerate(svdict.keys()):
# get RA, Dec range of new SV regions
ra_min, ra_max = float(svdict[reg].split(',')[0]), float(svdict[reg].split(',')[1])
dec_min, dec_max = float(svdict[reg].split(',')[2]), float(svdict[reg].split(',')[3])
name = reg.split('_')[1]
region = patches.Rectangle((ra_min, dec_min), (ra_max - ra_min), (dec_max - dec_min),
linewidth=1, edgecolor='C%i' % i, facecolor='none', label=name)
sub.add_patch(region)
for i, reg in enumerate(omitted.keys()):
ra_min, ra_max = float(omitted[reg].split(',')[0]), float(omitted[reg].split(',')[1])
dec_min, dec_max = float(omitted[reg].split(',')[2]), float(omitted[reg].split(',')[3])
if i == 0:
region = patches.Rectangle((ra_min, dec_min), (ra_max - ra_min), (dec_max - dec_min),
linewidth=1, edgecolor='r', facecolor='none', linestyle='--', label='omitted')
else:
region = patches.Rectangle((ra_min, dec_min), (ra_max - ra_min), (dec_max - dec_min),
linewidth=1, edgecolor='r', facecolor='none', linestyle='--')
sub.add_patch(region)
if reg != 'g09':
sub.annotate(reg, xy=(0.5*(ra_min+ra_max), 0.5*(dec_min+dec_max)), xycoords='data',
xytext=(ra_max-50., dec_max+20.), textcoords='data',
arrowprops=dict(facecolor='black', width=0.1, headwidth=3),
horizontalalignment='right', verticalalignment='top')
else:
sub.annotate(reg, xy=(0.5*(ra_min+ra_max), 0.5*(dec_min+dec_max)), xycoords='data',
xytext=(ra_max-30., dec_max+10.), textcoords='data',
arrowprops=dict(facecolor='black', width=0.1, headwidth=3),
horizontalalignment='right', verticalalignment='top')
for i in range(len(sv_old['RA'])):
circ = patches.Circle((sv_old['RA'][i], sv_old['DEC'][i]),
radius=1.6275, edgecolor='k', facecolor='none', linestyle=':')
sub.add_patch(circ)
#sub.scatter(sv_old['RA'], sv_old['DEC'], c='k', s=20, label='BGS SV')
for i in range(len(sv_new['RA'])):
circ = patches.Circle((sv_new['RA'][i], sv_new['DEC'][i]), radius=1.6275, edgecolor='C1', facecolor='none')
sub.add_patch(circ)
sub.legend(loc='upper right', ncol=2, markerscale=2, handletextpad=0.2, fontsize=10)
sub.set_xlabel('RA', fontsize=20)
sub.set_xlim(360, 0)
sub.set_ylabel('Dec', fontsize=20)
sub.set_ylim(-40, 90)
fig.savefig('new_SVregion.png', bbox_inches='tight')
return None
def testFA_dr9sv():
''' test the fiberassign output of DR9SV
'''
# all the fiberassign output files
f_fbas = glob.glob(os.path.join(dir_dat, 'fba_dr9sv.spec_truth.Mar2020',
'fiberassign*.fits'))
n_zero = 0
n_nosky = 0
__n_bgs_bright, __n_bgs_faint, __n_bgs_extfaint, __n_bgs_fibmag, __n_bgs_lowq = [], [], [], [], []
for i, f in enumerate(f_fbas):
# read in tile
tile_i = fitsio.read(f)
if i == 0:
tile = tile_i
else:
tile = np.concatenate([tile, tile_i])
_n_bgs, _n_bgs_bright, _n_bgs_faint, _n_bgs_extfaint, _n_bgs_fibmag, _n_bgs_lowq = \
bgs_targetclass(tile_i['SV1_BGS_TARGET'])
_n_sky = np.sum(tile_i['OBJTYPE'] == 'SKY')
__n_bgs_bright.append(_n_bgs_bright/_n_bgs)
__n_bgs_faint.append(_n_bgs_faint/_n_bgs)
__n_bgs_extfaint.append(_n_bgs_extfaint/_n_bgs)
__n_bgs_fibmag.append(_n_bgs_fibmag/_n_bgs)
__n_bgs_lowq.append(_n_bgs_lowq/_n_bgs)
print('---------------------------------')
print('tiles: %s' % os.path.basename(f))
print('total n_bgs = %i' % _n_bgs)
print(' nobj frac (expected frac)')
print(' ------------------------------------')
print(' BGS Bright %i %.3f (0.45)' % (_n_bgs_bright, _n_bgs_bright/_n_bgs))
print(' BGS Faint %i %.3f (0.25)' % (_n_bgs_faint, _n_bgs_faint/_n_bgs))
print(' BGS Ext.Faint %i %.3f (0.125)' % (_n_bgs_extfaint, _n_bgs_extfaint/_n_bgs))
print(' BGS Fib.Mag %i %.3f (0.125)' % (_n_bgs_fibmag, _n_bgs_fibmag/_n_bgs))
print(' BGS Low Q. %i %.3f (0.05)' % (_n_bgs_lowq, _n_bgs_lowq/_n_bgs))
print(' SKY %i' % (_n_sky))
# tiles with no sky targets
if _n_sky == 0: n_nosky += 1
# tiles with no BGS targets
if _n_bgs == 0: n_zero += 1
print('---------------------------------')
print('%i tiles with zero BGS targets' % n_zero)
print('%i tiles with zero SKY targets' % n_nosky)
n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq = \
bgs_targetclass(tile['SV1_BGS_TARGET'])
print('---------------------------------')
print('total n_bgs = %i' % n_bgs)
print(' nobj frac (expected frac)')
print(' ------------------------------------')
print(' BGS Bright %i %.3f, %.3f-%.3f (0.45)' % (n_bgs_bright, n_bgs_bright/n_bgs, np.min(__n_bgs_bright), np.max(__n_bgs_bright)))
print(' BGS Faint %i %.3f, %.3f-%.3f (0.25)' % (n_bgs_faint, n_bgs_faint/n_bgs, np.min(__n_bgs_faint), np.max(__n_bgs_faint)))
print(' BGS Ext.Faint %i %.3f, %.3f-%.3f (0.125)' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs, np.min(__n_bgs_extfaint), np.max(__n_bgs_extfaint)))
print(' BGS Fib.Mag %i %.3f, %.3f-%.3f (0.125)' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs, np.min(__n_bgs_fibmag), np.max(__n_bgs_fibmag)))
print(' BGS Low Q. %i %.3f, %.3f-%.3f (0.05)' % (n_bgs_lowq, n_bgs_lowq/n_bgs, np.min(__n_bgs_lowq), np.max(__n_bgs_lowq)))
#fig = plt.figure(figsize=(10,5))
#sub = fig.add_subplot(111)
#sub.scatter(tile['TARGET_RA'], tile['TARGET_DEC'], c='k', s=2)
#sub.scatter(tile['TARGET_RA'][_flags], tile['TARGET_DEC'][_flags], c='C1', s=2)
##sub.legend(loc='upper right', handletextpad=0.2, markerscale=5, fontsize=15)
#sub.set_xlabel('RA', fontsize=20)
#sub.set_xlim(360., 0.)#tile['TARGET_RA'].min(), tile['TARGET_RA'].max())
#sub.set_ylabel('Dec', fontsize=20)
##sub.set_ylim(22., 26)
#sub.set_ylim(-30., 80.)#tile['TARGET_DEC'].min(), tile['TARGET_DEC'].max())
#fig.savefig(os.path.join(dir_dat, 'fba_dr9sv.spec_truth.Mar2020', 'fiberassign_outliers.png'), bbox_inches='tight')
return None
def _dr9sv_nosky():
''' examine why some tiles have no sky fibers
'''
# all the fiberassign output files
f_fbas = glob.glob(os.path.join(dir_dat, 'fba_dr9sv.spec_truth.Mar2020',
'fiberassign*.fits'))
# all the sky targets
f_skies = [
"/global/cfs/cdirs/desi/target/catalogs/dr8/0.37.0/skies/skies-dr8-hp-0.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr8/0.37.0/skies/skies-dr8-hp-15.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr8/0.37.0/skies/skies-dr8-hp-19.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr8/0.37.0/skies/skies-dr8-hp-47.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-4.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-5.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-7.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-8.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-9.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-10.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-11.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-14.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-17.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-21.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-24.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-25.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-26.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-27.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-31.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-35.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-39.fits",
"/global/cfs/cdirs/desi/target/catalogs/dr9sv/0.37.0/skies/skies-dr9-hp-43.fits"]
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
for f in f_skies:
sky = fitsio.read(f)
_plt_sky = sub.scatter(sky['RA'][::100], sky['DEC'][::100], c='k', s=1)
dr8sky = fitsio.read(os.path.join(dir_dat, 'mtl', 'dr8_skies_cutout.fits'))
_plt_sky2 = sub.scatter(dr8sky['RA'][::100], dr8sky['DEC'][::100], c='C1', s=1)
for f in f_fbas:
# read in tile
tile_i = fitsio.read(f)
_n_sky = np.sum(tile_i['OBJTYPE'] == 'SKY')
if _n_sky == 0:
_plt_nosky = sub.scatter(tile_i['TARGET_RA'][::10], tile_i['TARGET_DEC'][::10],
c='r', s=1, label='No Sky Fibers')
else:
_plt_bgs = sub.scatter(tile_i['TARGET_RA'][::10], tile_i['TARGET_DEC'][::10],
c='C0', s=1, label='BGS SV tile')
sub.legend([_plt_sky, _plt_sky2, _plt_nosky, _plt_bgs],
['Sky Targets', 'DR8 cut out', 'No Sky Fibers', 'BGS SV tiles'],
loc='upper right', handletextpad=0.2, markerscale=5, fontsize=15)
sub.set_xlabel('RA', fontsize=20)
sub.set_xlim(360., 0.)#tile['TARGET_RA'].min(), tile['TARGET_RA'].max())
sub.set_ylabel('Dec', fontsize=20)
sub.set_ylim(-40., 90.)#tile['TARGET_DEC'].min(), tile['TARGET_DEC'].max())
fig.savefig('fba_dr9sv_nosky.png', bbox_inches='tight')
return None
def testFA_singletile(ftile):
''' examine the different target classes for a single tile
'''
# read in tile
tile = fits.open(ftile)[1].data
# bgs bitmasks
bitmask_bgs = tile['SV1_BGS_TARGET']
bgs_bright = (bitmask_bgs & bgs_mask.mask('BGS_BRIGHT')).astype(bool)
bgs_faint = (bitmask_bgs & bgs_mask.mask('BGS_FAINT')).astype(bool)
bgs_extfaint = (bitmask_bgs & bgs_mask.mask('BGS_FAINT_EXT')).astype(bool) # extended faint
bgs_fibmag = (bitmask_bgs & bgs_mask.mask('BGS_FIBMAG')).astype(bool) # fiber magnitude limited
bgs_lowq = (bitmask_bgs & bgs_mask.mask('BGS_LOWQ')).astype(bool) # low quality
#2203 Bright
#787 Faint
#597 Ext.Faint
#218 Fib.Mag.
#67 Low Q.
n_bgs, n_bgs_bright, n_bgs_faint, n_bgs_extfaint, n_bgs_fibmag, n_bgs_lowq = \
bgs_targetclass(tile['SV1_BGS_TARGET'])
print('---------------------------------')
print('total n_bgs = %i' % n_bgs)
print('%i' % (n_bgs_bright + n_bgs_faint + n_bgs_extfaint + n_bgs_fibmag + n_bgs_lowq))
print('BGS Bright %i %.3f' % (n_bgs_bright, n_bgs_bright/n_bgs))
print('BGS Faint %i %.3f' % (n_bgs_faint, n_bgs_faint/n_bgs))
print('BGS Ext.Faint %i %.3f' % (n_bgs_extfaint, n_bgs_extfaint/n_bgs))
print('BGS Fib.Mag %i %.3f' % (n_bgs_fibmag, n_bgs_fibmag/n_bgs))
print('BGS Low Q. %i %.3f' % (n_bgs_lowq, n_bgs_lowq/n_bgs))
fig = plt.figure(figsize=(10,10))
sub = fig.add_subplot(111)
sub.scatter(tile['TARGET_RA'], tile['TARGET_DEC'], c='k', s=1)
# BGS BRIGHT
sub.scatter(tile['TARGET_RA'][bgs_bright], tile['TARGET_DEC'][bgs_bright], c='C0', s=3,
label='Bright %i (%.2f)' % (np.sum(bgs_bright), np.float(np.sum(bgs_bright))/n_bgs))
# BGS FAINT
sub.scatter(tile['TARGET_RA'][bgs_faint], tile['TARGET_DEC'][bgs_faint], c='C1', s=3,
label='Faint %i (%.2f)' % (np.sum(bgs_faint), np.float(np.sum(bgs_faint))/n_bgs))
# BGS EXTFAINT
sub.scatter(tile['TARGET_RA'][bgs_extfaint], tile['TARGET_DEC'][bgs_extfaint], c='C4', s=5,
label='Ext.Faint %i (%.2f)' % (
|
np.sum(bgs_extfaint)
|
numpy.sum
|
#BSD 3-Clause License
#=======
#
#Copyright (c) 2017, Xilinx Inc.
#All rights reserved.
#
#Based on <NAME>'s MNIST example code
#Copyright (c) 2015-2016, <NAME>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
#EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import sys
import os
import time
from argparse import ArgumentParser
import numpy as np
|
np.random.seed(1234)
|
numpy.random.seed
|
from __future__ import absolute_import
import xarray as xr
import h5py
import numpy as np
import pandas as pd
import datetime
import scipy
import scipy.interpolate
import os
#turn off warnings so i can use the progressbar
import warnings
warnings.filterwarnings('ignore')
class GPMDPR():
"""
Author: <NAME>. This class is intended to help with the efficient processing of GPM-DPR radar files.
Currently, xarray cannot read NASA's HDF files directly (2A.GPM.DPR*). So here is an attempt to do so.
    Once in xarray format, the efficient search functions can be used.
    **NOTE 1: Currently, I do not have this function pass all variables through (there is quite the list of them).
    Maybe in the future I will generalize it to do so. But right now it's a bit tedious to code up all the units and such.
**NOTE 2: Outerswath code not ready yet. Do not turn the flag on
Feel free to reach out to me on twitter (@dopplerchase) or email <EMAIL>
For your reference, please check out the ATBD: https://pps.gsfc.nasa.gov/GPMprelimdocs.html
"""
def __init__(self,filename=[],boundingbox=None,outer_swath=False,auto_run=True):
""" Initializes things.
filename: str, path to GPM-DPR file
boundingbox: list of floats, if you would like to cut the gpm to a lat lon box
send in a list of [lon_min,lon_mat,lat_min,lat_max]
"""
self.filename = filename
self.xrds = None
self.datestr=None
self.height= None
self.corners = boundingbox
self.retrieval_flag = 0
self.interp_flag = 0
self.outer_swath = outer_swath
#determine if you have to use the file variable name changes
if (filename.find('X') >= 0):
self.legacy = False
self.v07 = False
elif (filename.find('V9') >= 0):
self.legacy = False
self.v07 = True
else:
self.legacy = True
if auto_run:
#this reads the hdf5 file
self.read()
#this calculates the range height for the 2D cross-sections
self.calc_heights()
#this will convert the hdf to an xarray dataset
self.toxr()
def read(self):
"""
This method simply reads the HDF file and gives it to the class.
"""
self.hdf = h5py.File(self.filename,'r')
if self.legacy:
###set some global parameters
#whats the common shape of the DPR files
if self.outer_swath:
shape = self.hdf['NS']['PRE']['zFactorMeasured'][:,:,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
else:
shape = self.hdf['NS']['PRE']['zFactorMeasured'][:,12:37,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
else:
shape = self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
def calc_heights(self):
""" Here we calculate the atitude above mean sea level. Surprisingly this was not
provided in version 6, but is included in the new version. Please not there is a
difference between this method and the supplied heights in the new version. It
seems to be less than 200 m error. Just keep that in mind!"""
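        # Geometry of the calculation below: the 49 beams span -17 to +17 degrees off
        # nadir; for each beam angle theta, 'a' accounts for Earth curvature (spherical
        # geometry with a 407 km orbit and 6378 km Earth radius), and each of the 176
        # gates is 0.125 km long along the beam, projected onto the vertical with
        # cos(theta + a).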
        x2 = 2. * 17 #full scan is 34 degrees wide (from -17 to +17); it is split into 48 intervals for the 49 beams below
re = 6378. #radius of the earth km
theta = -1 *(x2/2.) + (x2/48.)*np.arange(0,49) #break the -17 to 17 into equal degrees
theta2 = np.zeros(theta.shape[0]+1)
theta = theta - 0.70833333/2. #shift thing to get left edge for pcolors
theta2[:-1] = theta
theta2[-1] = theta[-1] + 0.70833333
theta = theta2 * (np.pi/180.) #convert to radians
prh = np.zeros([49,176]) #set up matrix
for i in np.arange(0,176): #loop over num range gates
for j in np.arange(0,49): #loop over scans
a = np.arcsin(((re+407)/re)*np.sin(theta[j]))-theta[j] #407 km is the orbit height, re radius of earth,
prh[j,i] = (176-(i))*0.125*np.cos(theta[j]+a) #more geometry
da = xr.DataArray(prh[:,:], dims=['cross_track','range'])
da.to_netcdf('./HEIGHTS_full.nc')
da = xr.DataArray(prh[12:37,:], dims=['cross_track','range'])
da.to_netcdf('./HEIGHTS.nc')
def toxr(self,ptype=None,clutter=False,echotop=False,precipflag=10):
"""
This is the main method of the package. It directly creates the xarray dataset from the HDF file.
        To save computational time, it first checks to see if you set a box of interest.
        Then it uses xarray's efficient searching to make sure there are some profiles in that box.
"""
#set the precip type of interest. If none, give back all data...
self.ptype= ptype
self.snow = False
self.precip = False
if (self.ptype=='precip') or (self.ptype=='Precip') or \
(self.ptype=='PRECIP') or (self.ptype=='snow') or \
(self.ptype=='Snow') or (self.ptype=='SNOW'):
self.precip=True
if (self.ptype=='snow') or (self.ptype=='Snow') or (self.ptype=='SNOW'):
self.snow=True
#set the killflag to false. If this is True at the end, it means no points in the box were found.
self.killflag = False
        #first things first, check to make sure there are points in the bounding box.
        #cut points to make sure there are points in your box. This should save you time.
if self.corners is not None:
#load data out of hdf
if self.outer_swath:
if self.legacy:
lons = self.hdf['NS']['Longitude'][:,:]
lats = self.hdf['NS']['Latitude'][:,:]
else:
lons = self.hdf['FS']['Longitude'][:,:]
lats = self.hdf['FS']['Latitude'][:,:]
else:
lons = self.hdf['NS']['Longitude'][:,12:37]
lats = self.hdf['NS']['Latitude'][:,12:37]
#shove it into a dataarray
da = xr.DataArray(np.zeros(lons.shape), dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats)})
#cut the the edges of the box
da = da.where((da.lons >= self.corners[0]) & \
(da.lons <= self.corners[1]) & \
(da.lats >= self.corners[2]) & \
(da.lats <= self.corners[3]),drop=False)
#okay, now drop nans
da = da.dropna(dim='along_track',how='all')
#if there are no profiles, the len is 0, and we will set the kill flag
if da.along_track.shape[0]==0:
self.killflag = True
#if there were no points it will not waste time with processing or io stuff
if self.killflag:
pass
else:
if self.datestr is None:
self.parse_dtime()
if self.height is None:
if self.legacy:
if self.outer_swath:
height = xr.open_dataarray('./HEIGHTS_full.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['NS']['Longitude'].shape[0],1,1))
self.height = height
else:
height = xr.open_dataarray('./HEIGHTS.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['NS']['Longitude'].shape[0],1,1))
self.height = height
else:
height = xr.open_dataarray('./HEIGHTS_full.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['FS']['Longitude'].shape[0],1,1))
self.height = height
if self.corners is None:
if self.legacy:
if self.outer_swath:
lons = self.hdf['NS']['Longitude'][:,:]
lats = self.hdf['NS']['Latitude'][:,:]
else:
lons = self.hdf['NS']['Longitude'][:,12:37]
lats = self.hdf['NS']['Latitude'][:,12:37]
else:
lons = self.hdf['FS']['Longitude'][:,:]
lats = self.hdf['FS']['Latitude'][:,:]
if self.legacy:
if self.outer_swath:
                    #need to fill the outer swath with the 255 fill value
flagSurfaceSnowfall = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*255
flagSurfaceSnowfall[:,12:37] = self.hdf['MS']['Experimental']['flagSurfaceSnowfall'][:,:]
da = xr.DataArray(flagSurfaceSnowfall,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['NS']['CSF']['binBBTop'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['NS']['CSF']['binBBBottom'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                    da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
flagPrecip = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
flagPrecip[:,12:37] = self.hdf['MS']['PRE']['flagPrecip'][:,:]
da = xr.DataArray(flagPrecip,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose precip at surface' + \
                    '. 11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
typePrecip[:,12:37] = self.hdf['MS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                    #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain; 2.55 is missing)
phaseNearSurface = self.hdf['NS']['SLV']['phaseNearSurface'][:,:]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['NS']['SLV']['precipRateNearSurface'][:,:]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrectedNearSurface'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
kanearsurf = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
kanearsurf[:,12:37] = self.hdf['MS']['SLV']['zFactorCorrectedNearSurface'][:,:]
da = xr.DataArray(kanearsurf,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrected'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['epsilon'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
MSKa_c = np.ones([len(self.along_track),len(self.cross_track),len(self.range)],dtype=float)*-9999
MSKa_c[:,12:37,:] = self.hdf['MS']['SLV']['zFactorCorrected'][:,:,:]
da = xr.DataArray(MSKa_c,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['NS']['PRE']['zFactorMeasured'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
MSKa = np.ones([len(self.along_track),len(self.cross_track),len(self.range)],dtype=float)*-9999
MSKa[:,12:37,:] = self.hdf['MS']['PRE']['zFactorMeasured'][:,:,:]
da = xr.DataArray(MSKa,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['precipRate'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                        #change this to 10 if you want to relax the conditions, because the Ka band has bad sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
if self.corners is not None:
self.setboxcoords()
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
else:
da = xr.DataArray(self.hdf['MS']['Experimental']['flagSurfaceSnowfall'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['NS']['CSF']['binBBTop'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['NS']['CSF']['binBBBottom'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                    da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
da = xr.DataArray(self.hdf['MS']['PRE']['flagPrecip'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose precip at surface' + \
                    '. 11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = self.hdf['MS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                    #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain; 2.55 is missing)
phaseNearSurface = self.hdf['NS']['SLV']['phaseNearSurface'][:,12:37]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['NS']['SLV']['precipRateNearSurface'][:,12:37]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrectedNearSurface'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
da = xr.DataArray(self.hdf['MS']['SLV']['zFactorCorrectedNearSurface'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrected'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['epsilon'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
da = xr.DataArray(self.hdf['MS']['SLV']['zFactorCorrected'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['NS']['PRE']['zFactorMeasured'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
da = xr.DataArray(self.hdf['MS']['PRE']['zFactorMeasured'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['precipRate'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,12:37,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,12:37,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                        #change this to 10 if you want to relax the conditions, because the Ka band has bad sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
# if self.snow:
# self.xrds = self.xrds.where(self.xrds.flagSurfaceSnow==1)
if self.corners is not None:
self.setboxcoords()
#to reduce size of data, drop empty cross-track sections
# self.xrds = self.xrds.dropna(dim='along_track',how='all')
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
else:
da = xr.DataArray(self.hdf['FS']['Experimental']['flagSurfaceSnowfall'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['FS']['CSF']['binBBTop'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['FS']['CSF']['binBBBottom'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['flagPrecip'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose precip at surface' + \
                '. 11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = self.hdf['FS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain; 2.55 is missing)
phaseNearSurface = self.hdf['FS']['SLV']['phaseNearSurface'][:,:]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['FS']['SLV']['precipRateNearSurface'][:,:]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
#note, the v07 files use zFactorFinalNearSurf... have to adjust the key here
if self.v07:
temp_key = 'zFactorFinalNearSurface'
else:
temp_key = 'zFactorCorrectedNearSurface'
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,0],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,1],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
#note, the v07 files use zFactorFinal.. have to adjust the key here
if self.v07:
temp_key = 'zFactorFinal'
else:
temp_key = 'zFactorCorrected'
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['epsilon'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['precipRate'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['paramDSD'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['paramDSD'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                    #change this to 10 if you want to relax the conditions, because the Ka band has bad sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
# if self.snow:
# self.xrds = self.xrds.where(self.xrds.flagSurfaceSnow==1)
if self.corners is not None:
self.setboxcoords()
#to reduce size of data, drop empty cross-track sections
# self.xrds = self.xrds.dropna(dim='along_track',how='all')
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
def get_highest_clutter_bin(self):
"""
        This method keeps us conservative about ground clutter by supplying a clutter mask to apply to the fields.
        It is based on the algorithm output of 'binClutterFreeBottom', which can be a bit conservative (~ 1 km).
"""
if self.legacy:
if self.outer_swath:
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,:]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,:]
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
else:
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,12:37]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,12:37]
ka = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
else:
ku = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,:]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,12:37]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,:]
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,12:37]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
def echotop(self):
"""
This method takes the already clutter filtered data for the corrected reflectivity and cuts the
noisy uncorrected reflectivity to the same height. Again, the method is a bit conservative, but is
a good place to start.
"""
if self.legacy:
if self.outer_swath:
#HEADS UP, will default to using Ku in the outerswath because there is no Ka
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
else:
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.MSKa_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.MSKa_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
else:
#HEADS UP, will default to using Ku in the outerswath because there is no Ka
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
def setboxcoords(self):
"""
This method sets all points outside the box to nan.
"""
if len(self.corners) > 0:
self.ll_lon = self.corners[0]
self.ur_lon = self.corners[1]
self.ll_lat = self.corners[2]
self.ur_lat = self.corners[3]
self.xrds = self.xrds.where((self.xrds.lons >= self.ll_lon) & (self.xrds.lons <= self.ur_lon) & (self.xrds.lats >= self.ll_lat) & (self.xrds.lats <= self.ur_lat),drop=False)
else:
            print('ERROR, no box coords set... did you mean to do this?')
def parse_dtime(self):
"""
        This method creates datetime objects from the hdf file in a timely manner.
Typically run this after you already filtered for precip/snow to save additional time.
"""
if self.legacy:
if self.outer_swath:
year = self.hdf['NS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['NS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['NS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['NS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['NS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['NS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,49))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
else:
year = self.hdf['MS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['MS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['MS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['MS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['MS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['MS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,25))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
else:
year = self.hdf['FS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['FS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['FS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['FS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['FS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['FS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,49))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
def run_retrieval(self,path_to_models=None,old=False,notebook=False):
"""
        This method is a way to run our neural network trained retrieval to get Dm in snowfall.
Please see this AMS presentation until the paper comes out: *LINK HERE*.
This method requires the use of tensorflow. So go install that.
"""
#load scalers
from pickle import load
import tensorflow as tf
from tensorflow.python.keras import losses
#set number of threads = 1, this was crashing my parallel code, If in notebook comment this
if notebook:
pass
else:
tf.config.threading.set_inter_op_parallelism_threads(1)
# tf.config.threading.set_intra_op_parallelism_threads(1)
# print('Number of threads set to {}'.format(tf.config.threading.get_inter_op_parallelism_threads()))
if old:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y.pkl', 'rb'))
else:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X_V2.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y_V2.pkl', 'rb'))
        #suppress warnings; they screw up my progress bar when running in parallel
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
if path_to_models is None:
print('Please insert path to NN models')
else:
if old:
model = tf.keras.models.load_model(path_to_models + 'NN_4by8.h5',custom_objects=None,compile=True)
else:
model = tf.keras.models.load_model(path_to_models + 'NN_6by8.h5',custom_objects=None,compile=True)
#now we have to reshape things to make sure they are in the right shape for the NN model [n_samples,n_features]
Ku = self.xrds.NSKu.values
shape_step1 = Ku.shape
Ku = Ku.reshape([Ku.shape[0],Ku.shape[1]*Ku.shape[2]])
shape_step2 = Ku.shape
Ku = Ku.reshape([Ku.shape[0]*Ku.shape[1]])
Ka = self.xrds.MSKa.values
Ka = Ka.reshape([Ka.shape[0],Ka.shape[1]*Ka.shape[2]])
Ka = Ka.reshape([Ka.shape[0]*Ka.shape[1]])
T = self.xrds['T'].values - 273.15 #expects in degC
T = T.reshape([T.shape[0],T.shape[1]*T.shape[2]])
T = T.reshape([T.shape[0]*T.shape[1]])
#Make sure we only run in on non-nan values.
ind_masked = np.isnan(Ku)
ind_masked2 = np.isnan(Ka)
Ku_nomask = np.zeros(Ku.shape)
Ka_nomask = np.zeros(Ka.shape)
T_nomask = np.zeros(T.shape)
Ku_nomask[~ind_masked] = Ku[~ind_masked]
Ka_nomask[~ind_masked] = Ka[~ind_masked]
T_nomask[~ind_masked] = T[~ind_masked]
ind = np.where(Ku_nomask!=0)[0]
#scale the input vectors by the mean that it was trained with
X = np.zeros([Ku_nomask.shape[0],3])
X[:,0] = (Ku_nomask - scaler_X.mean_[0])/scaler_X.scale_[0] #ku
X[:,1] = ((Ku_nomask - Ka_nomask)- scaler_X.mean_[1])/scaler_X.scale_[1] #dfr
X[:,2] = (T_nomask - scaler_X.mean_[2])/scaler_X.scale_[2] #T
#
yhat = model.predict(X[ind,0:3],batch_size=len(X[ind,0]))
yhat = scaler_y.inverse_transform(yhat)
yhat[:,1] = 10**yhat[:,1] #unlog Dm liquid
yhat[:,2] = 10**yhat[:,2] #unlog Dm solid
ind = np.where(Ku_nomask!=0)[0]
Nw = np.zeros(Ku_nomask.shape)
Nw[ind] = np.squeeze(yhat[:,0])
Nw = Nw.reshape([shape_step2[0],shape_step2[1]])
Nw = Nw.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Nw = np.ma.masked_where(Nw==0.0,Nw)
da = xr.DataArray(Nw, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'log(m^-4)'
da.attrs['standard_name'] = 'retrieved Nw from the NN (Chase et al. 2020)'
da = da.where(da > 0.)
self.xrds['Nw'] = da
Dm = np.zeros(Ku_nomask.shape)
Dm[ind] = np.squeeze(yhat[:,1])
Dm = Dm.reshape([shape_step2[0],shape_step2[1]])
Dm = Dm.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Dm = np.ma.masked_where(Dm==0.0,Dm)
da = xr.DataArray(Dm, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Liquid Eq. Dm from the NN (Chase et al. 2020)'
self.xrds['Dm'] = da
Dm_frozen = np.zeros(Ku_nomask.shape)
Dm_frozen[ind] = np.squeeze(yhat[:,2])
Dm_frozen = Dm_frozen.reshape([shape_step2[0],shape_step2[1]])
Dm_frozen = Dm_frozen.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Dm_frozen = np.ma.masked_where(Dm_frozen==0.0,Dm_frozen)
da = xr.DataArray(Dm_frozen, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Frozen Dm from the NN (Chase et al. 2020)'
self.xrds['Dm_frozen'] = da
Nw = 10**Nw #undo log, should be in m^-4
            Dm = Dm/1000. # convert from mm to m
IWC = (Nw*(Dm)**4*1000*np.pi)/4**(4) # the 1000 is density of water (kg/m^3)
IWC = IWC*1000 #convert to g/m^3
da = xr.DataArray(IWC, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^{-3}'
da.attrs['standard_name'] = 'Calc IWC from retrieved Nw and Dm from the NN (Chase et al. 2020)'
self.xrds['IWC'] = da
self.retrieval_flag = 1
def get_ENV(self,ENVFILENAME=None):
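        # Reads the matching 2A-ENV file (path supplied by the caller) and attaches its
        # air temperature profiles for the inner MS swath to the dataset as 'T', which
        # run_retrieval expects.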
hdf_env = h5py.File(ENVFILENAME)
temperature = hdf_env['NS']['VERENV']['airTemperature'][:,12:37,:]
da = xr.DataArray(temperature, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.where(da > 0)
da.attrs['units'] = 'K'
da.attrs['standard_name'] = 'GPM-DPR ENV data'
self.xrds["T"] = da
def get_merra(self,interp1=True,interp2=False,getsurf=True):
"""
This method matches up the *closest* MERRA-2 profiles.
To do so it uses the xarray.sel command.
        Please note this is not generalized. The file structure of my MERRA-2 files is a bit particular.
In theory you could point this into your own directory where those files are. Or even use a different
reanalysis (e.g., ERA)
"""
time = self.xrds.time.values
orig_shape = time.shape
time = np.reshape(time,[orig_shape[0]*orig_shape[1]])
dates = pd.to_datetime(time,infer_datetime_format=True)
dates = dates.to_pydatetime()
dates = np.reshape(dates,[orig_shape[0],orig_shape[1]])
year = dates[0,0].year
month = dates[0,0].month
day = dates[0,0].day
if month < 10:
month = '0'+ str(month)
else:
month = str(month)
if day <10:
day = '0' + str(day)
else:
day = str(day)
ds_url = '/data/accp/a/snesbitt/merra-2/PROFILE/'+ str(year) + '/' + 'MERRA2_400.inst6_3d_ana_Np.'+ str(year) + month + day+ '.nc4'
###load file
merra = xr.open_dataset(ds_url,chunks={'lat': 361, 'lon': 576})
###
#select the closest profile to the lat, lon, time
sounding = merra.sel(lon=self.xrds.lons,lat=self.xrds.lats,time=self.xrds.time,method='nearest')
sounding.load()
self.sounding = sounding
if interp1:
self.interp_MERRA(keyname='T')
self.interp_MERRA(keyname='U')
self.interp_MERRA(keyname='V')
self.interp_MERRA(keyname='QV')
self.interp_flag = 1
elif interp2:
self.interp_MERRA_V2(keyname='T')
self.interp_MERRA_V2(keyname='U')
self.interp_MERRA_V2(keyname='V')
self.interp_MERRA_V2(keyname='QV')
self.interp_flag = 1
if getsurf:
ds_url ='/data/accp/a/snesbitt/merra-2/SURFACE/'+ str(year) + '/' + 'MERRA2_400.tavg1_2d_slv_Nx.'+str(year) + month + day +'.nc4'
###load file
merra = xr.open_dataset(ds_url)
###
#select the closest profile to the lat, lon, time
gpmcoords = merra.sel(lon=self.xrds.lons,lat=self.xrds.lats,time=self.xrds.time,method='nearest')
da = xr.DataArray(gpmcoords.T2M.values, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = gpmcoords.T2M.units
da.attrs['standard_name'] = gpmcoords.T2M.standard_name
self.xrds['T2M'] = da
da = xr.DataArray(gpmcoords.T2MWET.values, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = gpmcoords.T2MWET.units
da.attrs['standard_name'] = gpmcoords.T2MWET.standard_name
self.xrds['T2MWET'] = da
da = xr.DataArray(gpmcoords.T2MDEW.values, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                          'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = gpmcoords.T2MDEW.units
da.attrs['standard_name'] = gpmcoords.T2MDEW.standard_name
self.xrds['T2MDEW'] = da
if self.snow:
self.xrds = self.xrds.where(self.xrds.T2MWET-273.15 <= 0)
#to reduce size of data, drop empty cross-track sections
self.xrds = self.xrds.dropna(dim='along_track',how='all')
def interp_MERRA(self,keyname=None):
"""
This interpolates the MERRA data from the self.get_merra method to the same vertical levels as the GPM-DPR.
NOTE: this loops over every profile, so it is not fast; use interp_MERRA_V2 or turn interpolation off if speed matters.
"""
H_Merra = self.sounding.H.values
H_gpm = self.xrds.alt.values
new_variable = np.zeros(H_gpm.shape)
for i in self.sounding.along_track.values:
for j in self.sounding.cross_track.values:
#fit func
da = xr.DataArray(self.sounding[keyname].values[i,j,:], [('height', H_Merra[i,j,:]/1000)])
da = da.interp(height=H_gpm[i,j,:])
new_variable[i,j,:] = da.values
da = xr.DataArray(new_variable, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.attrs['units'] = self.sounding[keyname].units
da.attrs['standard_name'] = 'Interpolated ' + self.sounding[keyname].standard_name + ' to GPM height coord'
self.xrds[keyname] = da
return da
def interp_MERRA_V2(self,keyname=None):
"""This is an effcient way of doing linear interpolation of the MERRA soundings """
x = self.sounding['H'].values
y = self.sounding[keyname].values
z = self.xrds.alt.values*1000 #convert to m
interped = np.zeros(self.xrds.alt.values.shape)
for i in np.arange(0,len(self.cross_track)):
interped[:,i,:] = interp_2(x[:,i,:],y[:,i,:],z[0,i,:])
da = xr.DataArray(interped, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.attrs['units'] = self.sounding[keyname].units
da.attrs['standard_name'] = 'Interpolated ' + self.sounding[keyname].standard_name + ' to GPM height coord'
self.xrds[keyname] = da
return da
def extract_nearsurf(self):
"""
Since we are often concerned with what's happening at the surface, this will extract the variables just above
the clutter.
"""
if self.legacy:
if self.outer_swath:
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = -9999
inds_to_pick = np.argmax(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note: for all-nan columns the index defaults to 0, i.e. the top of the GPM column, which should always be nan anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.lowest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=True)
self.grab_variable(keyname='NSKu_c',nearsurf=True)
self.grab_variable(keyname='MSKa',nearsurf=True)
self.grab_variable(keyname='MSKa_c',nearsurf=True)
self.grab_variable(keyname='R',nearsurf=True)
self.grab_variable(keyname='Dm_dpr',nearsurf=True)
self.grab_variable(keyname='alt',nearsurf=True)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=True)
self.grab_variable(keyname='IWC',nearsurf=True)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=True)
self.grab_variable(keyname='U',nearsurf=True)
self.grab_variable(keyname='V',nearsurf=True)
self.grab_variable(keyname='QV',nearsurf=True)
else:
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = -9999
inds_to_pick = np.argmax(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note: for all-nan columns the index defaults to 0, i.e. the top of the GPM column, which should always be nan anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.lowest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=True)
self.grab_variable(keyname='NSKu_c',nearsurf=True)
self.grab_variable(keyname='MSKa',nearsurf=True)
self.grab_variable(keyname='MSKa_c',nearsurf=True)
self.grab_variable(keyname='R',nearsurf=True)
self.grab_variable(keyname='Dm_dpr',nearsurf=True)
self.grab_variable(keyname='alt',nearsurf=True)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=True)
self.grab_variable(keyname='IWC',nearsurf=True)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=True)
self.grab_variable(keyname='U',nearsurf=True)
self.grab_variable(keyname='V',nearsurf=True)
self.grab_variable(keyname='QV',nearsurf=True)
else:
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = -9999
inds_to_pick = np.argmax(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note: for all-nan columns the index defaults to 0, i.e. the top of the GPM column, which should always be nan anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.lowest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=True)
self.grab_variable(keyname='NSKu_c',nearsurf=True)
self.grab_variable(keyname='MSKa',nearsurf=True)
self.grab_variable(keyname='MSKa_c',nearsurf=True)
self.grab_variable(keyname='R',nearsurf=True)
self.grab_variable(keyname='Dm_dpr',nearsurf=True)
self.grab_variable(keyname='alt',nearsurf=True)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=True)
self.grab_variable(keyname='IWC',nearsurf=True)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=True)
self.grab_variable(keyname='U',nearsurf=True)
self.grab_variable(keyname='V',nearsurf=True)
self.grab_variable(keyname='QV',nearsurf=True)
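# Minimal standalone sketch of the lowest-valid-gate selection used above: fill a 'keeper'
# array with the range index, set nan gates to -9999, and argmax returns the deepest gate
# that still has an echo (range index increases toward the surface in these files).
# The toy profile below is made up.
import numpy as np
toy_column = np.array([np.nan, 12.0, 15.0, 18.0, np.nan, np.nan])
keeper_1d = np.arange(toy_column.shape[0]).astype(float)
keeper_1d[np.isnan(toy_column)] = -9999
lowest_valid = np.argmax(keeper_1d)
print(lowest_valid, toy_column[lowest_valid]) #3 18.0, the last gate with a valid echo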
def extract_echotop(self):
"""
Extract the various parameters found at the echo top. Make sure you ran the echo-top code first.
"""
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
#i'm using the Ka band as the echotop to ensure we have a retrieved param at echotop, not nan
keeper = np.tile(keeper,(self.xrds.MSKa.values.shape[0],1,1))
keeper[np.isnan(self.xrds.MSKa.values)] = +9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note: for all-nan columns the index defaults to 0, i.e. the top of the GPM column, which should always be nan anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.highest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=False)
self.grab_variable(keyname='NSKu_c',nearsurf=False)
self.grab_variable(keyname='MSKa',nearsurf=False)
self.grab_variable(keyname='MSKa_c',nearsurf=False)
self.grab_variable(keyname='R',nearsurf=False)
self.grab_variable(keyname='Dm_dpr',nearsurf=False)
self.grab_variable(keyname='alt',nearsurf=False)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=False)
self.grab_variable(keyname='IWC',nearsurf=False)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=False)
self.grab_variable(keyname='U',nearsurf=False)
self.grab_variable(keyname='V',nearsurf=False)
self.grab_variable(keyname='QV',nearsurf=False)
def extract_echotopheight_ku(self):
"""
Extract the various parameters found at the echo top. Make sure you ran the echo-top code first.
"""
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = +9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note: for all-nan columns the index defaults to 0, i.e. the top of the GPM column, which should always be nan anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
ind = np.where(dummy_matrix == 0)
variable = np.zeros([self.xrds.along_track.shape[0],self.xrds.cross_track.values.shape[0]])
variable2 = np.zeros([self.xrds.along_track.shape[0],self.xrds.cross_track.values.shape[0]])
variable[ind[0],ind[1]] = np.nan
variable2[ind[0],ind[1]] = np.nan
ind = np.where(dummy_matrix == 1)
variable[ind[0],ind[1]] = self.xrds['alt'].values[ind[0],ind[1],ind[2]]
variable2[ind[0],ind[1]] = self.xrds['NSKu'].values[ind[0],ind[1],ind[2]]
da = xr.DataArray(variable, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'altitude of the KuPR echo top'
self.xrds['alt_echoTopKuPR'] = da
da = xr.DataArray(variable2, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'KuPR reflectivity at its echo top'
self.xrds['NSKu_echoTopKuPR'] = da
def grab_variable(self,keyname=None,nearsurf=True,):
"""
This goes along with the self.extract_nearsurf() or self.extract_echotop()
"""
if keyname is None:
print('please supply keyname')
else:
variable = np.zeros([self.xrds.along_track.shape[0],self.xrds.cross_track.values.shape[0]])
if nearsurf:
ind = np.where(self.lowest_gate_index == 0)
else:
ind = np.where(self.highest_gate_index == 0)
variable[ind[0],ind[1]] = np.nan
if nearsurf:
ind = np.where(self.lowest_gate_index == 1)
else:
ind = np.where(self.highest_gate_index == 1)
variable[ind[0],ind[1]] = self.xrds[keyname].values[ind[0],ind[1],ind[2]]
da = xr.DataArray(variable, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
if nearsurf:
if keyname=='alt':
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'altitude of the near-surface bin'
self.xrds[keyname+'_nearSurf'] = da
else:
da.attrs['units'] = self.xrds[keyname].units
da.attrs['standard_name'] = 'near-surface' + self.xrds[keyname].standard_name
self.xrds[keyname+'_nearSurf'] = da
else:
if keyname=='alt':
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'altitude of the echoTop'
self.xrds[keyname+'_echoTop'] = da
else:
da.attrs['units'] = self.xrds[keyname].units
da.attrs['standard_name'] = 'echo-top' + self.xrds[keyname].standard_name
self.xrds[keyname+'_echoTop'] = da
def get_physcial_distance(self,reference_point = None):
"""
This method uses pyproj to calculate distances between lats and lons.
reference_point is a list or array consisting of two entries, [Longitude, Latitude].
Please note that this intentionally uses an older version of pyproj (< version 2.0; I used 1.9.5.1)
because it preserves how the function is called.
"""
if reference_point is None and self.reference_point is None:
print('Error, no reference point found...please enter one')
else:
#this invokes the pyproj package. Please note this must be an old version (< 2.0)
from pyproj import Proj
p = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=reference_point[1], lon_0=reference_point[0])
#double check to make sure this returns 0 meters
x,y = p(reference_point[0],reference_point[1])
if np.sqrt(x**2 + y**2) != 0:
print('something is not right with the projection. investigate')
else:
ind = np.isnan(self.xrds.NSKu_nearSurf.values)
x = np.zeros(self.xrds.lons.values.shape)
y = np.zeros(self.xrds.lats.values.shape)
x[~ind],y[~ind] = p(self.xrds.lons.values[~ind],self.xrds.lats.values[~ind])
x[ind] = np.nan
y[ind] = np.nan
da = xr.DataArray(np.sqrt(x**2 + y**2)/1000, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'distance, as the crow flies (i.e. direct), to the reference point'
self.xrds['distance'] = da
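# Minimal standalone sketch of the azimuthal-equidistant distance calculation above; the
# reference point and test coordinates are made up, and this call style should also work on
# pyproj >= 2.0 even though the method above pins the older API.
from pyproj import Proj
import numpy as np
ref_lon, ref_lat = -88.0, 40.0
p_sketch = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=ref_lat, lon_0=ref_lon)
x_m, y_m = p_sketch(np.array([-87.5, -88.2]), np.array([40.1, 39.7])) #forward: lon/lat -> meters
print(np.sqrt(x_m**2 + y_m**2)/1000.) #direct distance from the reference point in km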
def get_TMAX(self):
#get the column max temperature in each profile
ind_tmax = (self.xrds['T'].argmax(axis=2)).values
#make the index the right shape
shp = np.array(self.xrds['T'].shape)
dim_idx = list(np.ix_(*[np.arange(i) for i in shp[:-1]]))
dim_idx.append(ind_tmax)
#grab the temperature
tmax = self.xrds['T'].values[tuple(dim_idx)]
#grab its altitude
alt = self.xrds['T'].alt.values
alt_tmax = alt[tuple(dim_idx)]
#determine the lapse rate across the echo
lapse = np.zeros(self.xrds.NSKu.shape)
#gate 0 has no preceding gate to difference against, so set it to nan
lapse[:,:,0] = np.nan
#grab the temps
T_nan = np.copy(self.xrds['T'].values)
#nan out values outside the echo
T_nan[np.isnan(self.xrds.NSKu.values)] = np.nan
#calc the lapse rate across the echo, convert to per km
lapse[:,:,1:] = np.diff(T_nan,axis=2)/0.125
#change sign
lapse = lapse*-1
#take mean
lapse2d = np.nanmean(lapse,axis=2)
#okay make stability flag
orig_shape = lapse2d.shape
lapse2d = np.reshape(lapse2d,[orig_shape[0]*orig_shape[1]])
stability_flag = np.ones(lapse2d.shape,dtype=int)*-9999
abs_unstable = np.where(lapse2d <= -10)
stability_flag[abs_unstable] = 0
ind_cond = np.where(lapse2d > -10)
ind_cond2 = np.where(lapse2d <= -6)
cond_unstable = np.intersect1d(ind_cond,ind_cond2)
stability_flag[cond_unstable] = 1
abs_stable = np.where(lapse2d > -6)
stability_flag[abs_stable] = 2
#make them 2d to put back into the dataset
stability_flag = np.reshape(stability_flag,[orig_shape[0],orig_shape[1]])
lapse2d = np.reshape(lapse2d,[orig_shape[0],orig_shape[1]])
da = xr.DataArray(tmax, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'K'
da.attrs['standard_name'] = 'Max temperature in the column'
self.xrds['TMAX'] = da
da = xr.DataArray(alt_tmax, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'Alt of max temperature'
self.xrds['TMAX_alt'] = da
da = xr.DataArray(lapse2d, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'K/km'
da.attrs['standard_name'] = 'Mean lapse rate across the NSKu echo'
self.xrds['lapse2d'] = da
da = xr.DataArray(stability_flag, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Stability flag, 0 is absolutely unstable, 1 is cond. unstable, 2 is stable,-9999 means no precip echo'
self.xrds['stability_flag'] = da
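# Minimal standalone sketch of the stability classification above: column-mean lapse rates
# <= -10 K/km are absolutely unstable (0), between -10 and -6 conditionally unstable (1),
# > -6 stable (2), and columns with no echo stay -9999. The sample values are made up.
import numpy as np
lapse_sketch = np.array([-12.0, -8.0, -4.0, np.nan])
flag_sketch = np.full(lapse_sketch.shape, -9999, dtype=int)
flag_sketch[lapse_sketch <= -10] = 0
flag_sketch[(lapse_sketch > -10) & (lapse_sketch <= -6)] = 1
flag_sketch[lapse_sketch > -6] = 2
print(flag_sketch) #[0 1 2 -9999]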
def interp_2(x, y, z):
""" This is from this discussion: https://stackoverflow.com/questions/14559687/scipy-fast-1-d-interpolation-without-any-loop"""
rows, cols = x.shape
row_idx = np.arange(rows).reshape((rows,) + (1,) * z.ndim)
col_idx = np.argmax(x.reshape(x.shape + (1,) * z.ndim) > z, axis=1) - 1
ret = y[row_idx, col_idx + 1] - y[row_idx, col_idx]
ret /= x[row_idx, col_idx + 1] - x[row_idx, col_idx]
ret *= z - x[row_idx, col_idx]
ret += y[row_idx, col_idx]
return ret
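# Minimal standalone sketch checking interp_2 against np.interp for one row; x must be
# ascending within each row and the targets z should fall inside the sampled range.
import numpy as np
x_s = np.tile(np.linspace(0., 10., 6), (2, 1)) #two profiles of ascending heights
y_s = x_s**2
z_s = np.array([1.5, 4.2, 7.7])
print(np.allclose(interp_2(x_s, y_s, z_s)[0], np.interp(z_s, x_s[0], y_s[0]))) #True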
###############################################################################################################################
###############################################################################################################################
###############################################################################################################################
class APR():
"""
Author: <NAME>. This class is intended to help with APR-2/APR-3 files.
Currently supported campaigns: gcpex, olympex, camp2ex
Feel free to reach out to me on twitter (@dopplerchase) or email <EMAIL>
"""
def __init__(self):
self.initialized = True
self.T3d = False
def read(self,filename,campaign='gcpex'):
"""
===========
This is for reading in apr3 hdf files from OLYMPEX and return them all in one dictionary
===========
filename = filename of the apr3 file
"""
if campaign=='gcpex':
self.campaign = campaign
from pyhdf.SD import SD, SDC
apr = {}
flag = 0
##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'zhh95' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
hdf = SD(filename, SDC.READ)
#There used to be a search here for a W-band variable; since there is no W band in GCPEX, it is just filled with a masked placeholder below.
alt = hdf.select('alt3D')
lat = hdf.select('lat')
lon = hdf.select('lon')
roll = hdf.select('roll').get()
time = hdf.select('scantime').get()
surf = hdf.select('surface_index').get()
isurf = hdf.select('isurf').get()
plane = hdf.select('alt_nav').get()
radar = hdf.select(radar_freq) #ku
radar2 = hdf.select(radar_freq2) #ka
radar4 = hdf.select(radar_freq4) #ldr
vel = hdf.select(vel_str)
lon3d = hdf.select('lon3D')
lat3d = hdf.select('lat3D')
alt3d = hdf.select('alt3D')
lat3d_scale = hdf.select('lat3D_scale').get()[0][0]
lon3d_scale = hdf.select('lon3D_scale').get()[0][0]
alt3d_scale = hdf.select('alt3D_scale').get()[0][0]
lat3d_offset = hdf.select('lat3D_offset').get()[0][0]
lon3d_offset = hdf.select('lon3D_offset').get()[0][0]
alt3d_offset = hdf.select('alt3D_offset').get()[0][0]
alt = alt.get()
ngates = alt.shape[0]
lat = lat.get()
lon = lon.get()
lat3d = lat3d.get()
lat3d = (lat3d/lat3d_scale) + lat3d_offset
lon3d = lon3d.get()
lon3d = (lon3d/lon3d_scale) + lon3d_offset
alt3d = alt3d.get()
alt3d = (alt3d/alt3d_scale) + alt3d_offset
radar_n = radar.get()
radar_n = radar_n/100.
radar_n2 = radar2.get()
radar_n2 = radar_n2/100.
radar_n4 = radar4.get()
radar_n4 = radar_n4/100.
vel_n = vel.get()
vel_n = vel_n/100.
#Quality control (mask invalid values)
radar_n = np.ma.masked_where(radar_n<=-99,radar_n)
radar_n2 = np.ma.masked_where(radar_n2<=-99,radar_n2)
radar_n4 = np.ma.masked_where(radar_n4<=-99,radar_n4)
vel = np.ma.masked_where(vel_n <= -99,vel_n)
#Get rid of NaNs; the newer HDF files have them built in
radar_n = np.ma.masked_where(np.isnan(radar_n),radar_n)
radar_n2 = np.ma.masked_where(np.isnan(radar_n2),radar_n2)
radar_n4 = np.ma.masked_where(np.isnan(radar_n4),radar_n4)
vel = np.ma.masked_where(np.isnan(vel),vel)
radar_n3 = np.ma.zeros(radar_n.shape)
radar_n3 = np.ma.masked_where(radar_n3 == 0,radar_n3)
if campaign == 'olympex':
self.campaign = campaign
##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'z95s' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
import h5py
hdf = h5py.File(filename,"r")
listofkeys = hdf['lores'].keys()
alt = hdf['lores']['alt3D'][:]
lat = hdf['lores']['lat'][:]
lon = hdf['lores']['lon'][:]
time = hdf['lores']['scantime'][:]
surf = hdf['lores']['surface_index'][:]
isurf = hdf['lores']['isurf'][:]
plane = hdf['lores']['alt_nav'][:]
radar = hdf['lores'][radar_freq][:]
radar2 = hdf['lores'][radar_freq2][:]
radar4 = hdf['lores'][radar_freq4][:]
vel = hdf['lores']['vel14c'][:]
lon3d = hdf['lores']['lon3D'][:]
lat3d = hdf['lores']['lat3D'][:]
alt3d = hdf['lores']['alt3D'][:]
roll = hdf['lores']['roll'][:]
#see if there is W band
if 'z95s' in listofkeys:
if 'z95n' in listofkeys:
radar_nadir = hdf['lores']['z95n'][:]
radar_scanning = hdf['lores']['z95s'][:]
radar3 = radar_scanning
w_flag = 1
##uncomment if you want the high-sensitivity beam as the nadir scan (WARNING: CALIBRATION)
#radar3[:,12,:] = radar_nadir[:,12,:]
else:
radar3 = hdf['lores']['z95s'][:]
print('No vv, using hh')
else:
radar3 = np.ma.zeros(radar.shape)
radar3 = np.ma.masked_where(radar3==0,radar3)
w_flag = 1
print('No W band')
#Quality control (masked where invalid)
radar_n = np.ma.masked_where(radar <= -99,radar)
radar_n2 = np.ma.masked_where(radar2 <= -99,radar2)
radar_n3 = np.ma.masked_where(radar3 <= -99,radar3)
radar_n4 = np.ma.masked_where(radar4 <= -99,radar4)
vel = np.ma.masked_where(vel <= -99,vel)
#Get rid of NaNs; the newer HDF files have them built in
radar_n = np.ma.masked_where(np.isnan(radar_n),radar_n)
radar_n2 = np.ma.masked_where(np.isnan(radar_n2),radar_n2)
radar_n3 = np.ma.masked_where(np.isnan(radar_n3),radar_n3)
radar_n4 = np.ma.masked_where(np.isnan(radar_n4),radar_n4)
vel = np.ma.masked_where(np.isnan(vel),vel)
if campaign == 'camp2ex':
##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'zhh95' #W
radar_freq4 = 'ldrhh14' #LDR
vel_str = 'vel14' #Doppler
##
import h5py
hdf = h5py.File(filename,"r")
listofkeys = hdf['lores'].keys()
alt = hdf['lores']['alt3D'][:]
lat = hdf['lores']['lat'][:]
lon = hdf['lores']['lon'][:]
time = hdf['lores']['scantime'][:]
surf = hdf['lores']['surface_index'][:]
isurf = hdf['lores']['isurf'][:]
plane = hdf['lores']['alt_nav'][:]
radar = hdf['lores'][radar_freq][:]
radar2 = hdf['lores'][radar_freq2][:]
radar4 = hdf['lores'][radar_freq4][:]
vel = hdf['lores'][vel_str][:]
lon3d = hdf['lores']['lon3D'][:]
lat3d = hdf['lores']['lat3D'][:]
alt3d = hdf['lores']['alt3D'][:]
roll = hdf['lores']['roll'][:]
#see if there is W band
if 'z95s' in listofkeys:
if 'z95n' in listofkeys:
radar_nadir = hdf['lores']['z95n'][:]
radar_scanning = hdf['lores']['z95s'][:]
radar3 = radar_scanning
w_flag = 1
##uncomment if you want the high-sensitivity beam as the nadir scan (WARNING: CALIBRATION)
#radar3[:,12,:] = radar_nadir[:,12,:]
else:
radar3 = hdf['lores']['z95s'][:]
print('No vv, using hh')
else:
radar3 = np.ma.zeros(radar.shape)
radar3 = np.ma.masked_where(radar3==0,radar3)
w_flag = 1
print('No W band')
#Quality control (masked where invalid)
radar_n = np.ma.masked_where(radar <= -99,radar)
radar_n2 = np.ma.masked_where(radar2 <= -99,radar2)
radar_n3 = np.ma.masked_where(radar3 <= -99,radar3)
radar_n4 = np.ma.masked_where(radar4 <= -99,radar4)
vel = np.ma.masked_where(vel <= -99,vel)
#Get rid of NaNs; the newer HDF files have them built in
radar_n = np.ma.masked_where(np.isnan(radar_n),radar_n)
radar_n2 = np.ma.masked_where(np.isnan(radar_n2),radar_n2)
radar_n3 = np.ma.masked_where(np.isnan(radar_n3),radar_n3)
radar_n4 = np.ma.masked_where(np.isnan(radar_n4),radar_n4)
vel = np.ma.masked_where(np.isnan(vel),vel)
##convert time to datetimes
time_dates = np.empty(time.shape,dtype=object)
for i in np.arange(0,time.shape[0]):
for j in np.arange(0,time.shape[1]):
tmp = datetime.datetime.utcfromtimestamp(time[i,j])
time_dates[i,j] = tmp
#Create a time at each gate (assuming it is the same down each ray, there is a better way to do this)
time_gate = np.empty(lat3d.shape,dtype=object)
for k in np.arange(0,lat3d.shape[0]):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,time_dates.shape[1]):
time_gate[k,i,j] = time_dates[i,j]
time3d = np.copy(time_gate)
da = xr.DataArray(radar_n,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ku-band Reflectivity'
#make xr dataset
self.xrds = da.to_dataset(name = 'Ku')
#
da = xr.DataArray(radar_n2,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ka-band Reflectivity'
#add to xr dataset
self.xrds['Ka'] = da
da = xr.DataArray(vel,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'm/s'
da.attrs['standard_name'] = 'Ku-band Doppler Velocity'
#add to xr dataset
self.xrds['DopKu'] = da
#
da = xr.DataArray(radar_n3,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'W-band Reflectivity'
#add to xr dataset
self.xrds['W'] = da
#
da = xr.DataArray(radar_n4,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dB'
da.attrs['standard_name'] = 'LDR at Ku-band'
#add to xr dataset
self.xrds['LDR'] = da
#
da = xr.DataArray(roll,dims={'cross_track':np.arange(0,24),'along_track':np.arange(radar_n.shape[2])})
da.attrs['units'] = 'degrees'
da.attrs['standard_name'] = 'Left/Right Plane Roll'
#add to xr dataset
self.xrds['Roll'] = da
#
da = xr.DataArray(isurf,dims={'cross_track':np.arange(0,24),'along_track':np.arange(radar_n.shape[2])})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'index of best guess for location of surface'
#add to xr dataset
self.xrds['surf'] = da
#
def determine_ground_lon_lat(self,near_surf_Z=True):
mean_alt = self.xrds.alt3d.mean(axis=(1,2))
ind_sealevel = find_nearest(mean_alt,0)
g_lon = self.xrds.lon3d[ind_sealevel,:,:]
g_lat = self.xrds.lat3d[ind_sealevel,:,:]
da = xr.DataArray(g_lon,dims={'cross_track':g_lon.shape[0],'along_track':np.arange(g_lon.shape[1])})
da.attrs['units'] = 'degrees longitude'
da.attrs['standard_name'] = 'Surface Longitude'
self.xrds['g_lon'] = da
da = xr.DataArray(g_lat,dims={'cross_track':g_lat.shape[0],'along_track':np.arange(g_lat.shape[1])})
da.attrs['units'] = 'degrees latitude'
da.attrs['standard_name'] = 'Surface latitude'
self.xrds['g_lat'] = da
if near_surf_Z:
ind_nearsurf = find_nearest(mean_alt,1100)
g_Ku = self.xrds.Ku[ind_nearsurf,:,:]
da = xr.DataArray(g_Ku,dims={'cross_track':g_lon.shape[0],'along_track':np.arange(g_lon.shape[1])})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Near_surf Z'
self.xrds['Ku_nearsurf'] = da
g_Ka = self.xrds.Ka[ind_nearsurf,:,:]
da = xr.DataArray(g_Ka,dims={'cross_track':g_lon.shape[0],'along_track':np.arange(g_lon.shape[1])})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Near_surf Z'
self.xrds['Ka_nearsurf'] = da
def setboxcoords(self,nanout=True):
"""
This method sets all points outside the box to nan.
"""
if len(self.corners) > 0:
self.ll_lon = self.corners[0]
self.ur_lon = self.corners[1]
self.ll_lat = self.corners[2]
self.ur_lat = self.corners[3]
if nanout:
self.xrds = self.xrds.where((self.xrds.g_lon >= self.ll_lon) & (self.xrds.g_lon <= self.ur_lon) & (self.xrds.g_lat >= self.ll_lat) & (self.xrds.g_lat <= self.ur_lat),drop=False)
else:
print('ERROR, no box coords set...did you mean to do this?')
def cit_temp_to_apr(self,fl,time_inds,insidebox=True):
self.T3d = True
cit_lon = fl['longitude']['data'][time_inds]
cit_lat = fl['latitude']['data'][time_inds]
cit_alt = fl['altitude']['data'][time_inds]
cit_twc = fl['twc']['data'][time_inds]
cit_T = fl['temperature']['data'][time_inds]
if insidebox:
ind_inbox = np.where((cit_lon >= self.ll_lon) & (cit_lon <= self.ur_lon) & (cit_lat >= self.ll_lat) & (cit_lat <= self.ur_lat))
else:
ind_inbox = np.arange(0,len(fl['temperature']['data'][time_inds]))
bins = np.arange(0,6000,500)
binind = np.digitize(cit_alt[ind_inbox],bins=bins)
df = pd.DataFrame({'Temperature':cit_T[ind_inbox],'Alt':cit_alt[ind_inbox],'binind':binind})
df = df.groupby('binind').mean()
f_T = scipy.interpolate.interp1d(df.Alt.values,df.Temperature.values,fill_value='extrapolate',kind='linear')
T3d = f_T(self.xrds.alt3d.values)
da = xr.DataArray(T3d,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'degC'
da.attrs['standard_name'] = 'Temperature, inferred from Citation Spiral'
#add to xr dataset
self.xrds['T3d'] = da
#
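# Minimal standalone sketch of the spiral-profile fit above: bin aircraft altitudes into
# 500 m layers, average the temperature per bin, then linearly interpolate/extrapolate onto
# arbitrary gate altitudes. All numbers here are made up.
import numpy as np
import pandas as pd
import scipy.interpolate
alt_s = np.array([400., 900., 1400., 1900., 2400.]) #aircraft altitudes (m)
temp_s = np.array([5.0, 2.5, 0.0, -2.5, -5.0]) #temperatures (degC)
binind_s = np.digitize(alt_s, bins=np.arange(0, 6000, 500))
df_s = pd.DataFrame({'Temperature': temp_s, 'Alt': alt_s, 'binind': binind_s}).groupby('binind').mean()
f_T_s = scipy.interpolate.interp1d(df_s.Alt.values, df_s.Temperature.values, fill_value='extrapolate', kind='linear')
print(f_T_s(np.array([250., 3000.]))) #gates below/above the spiral are linearly extrapolated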
def correct_gaseous(self,filepathtogasploutput=None,Wband=False):
""" This is a method to correct for 02 and H20 attenuation at Ku and Ka band, it requires you to run the gaspl package in matlab. If you wish to learn about this, please email me randyjc2 at illinois.edu """
if filepathtogasploutput is None:
print('Please supply filepath to gaspl output')
return
import scipy.io
import scipy.interpolate
d = scipy.io.loadmat(filepathtogasploutput)
#create interp funcs so we can plug in the apr gate structure
ka_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L'].ravel(),kind='cubic',bounds_error=False)
ku_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L2'].ravel(),kind='cubic',bounds_error=False)
if Wband:
w_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L3'].ravel(),kind='cubic',bounds_error=False)
k_ku = ku_func(self.xrds.alt3d.values)
k_ka = ka_func(self.xrds.alt3d.values)
k_ku = k_ku*0.03 #convert to dB per gate (0.03 km gate spacing)
k_ka = k_ka*0.03 #convert to dB per gate (0.03 km gate spacing)
k_ku[np.isnan(k_ku)] = 0
k_ka[np.isnan(k_ka)] = 0
k_ku = 2*np.cumsum(k_ku,axis=(0))
k_ka = 2*np.cumsum(k_ka,axis=(0))
ku_new = self.xrds.Ku.values + k_ku
da = xr.DataArray(ku_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ku-band Reflectivity'
self.xrds['Ku'] = da
ka_new = self.xrds.Ka.values + k_ka
da = xr.DataArray(ka_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ka-band Reflectivity'
self.xrds['Ka'] = da
if Wband:
k_w = w_func(self.xrds.alt3d.values)
k_w = k_w*0.03
k_w[np.isnan(k_w)] = 0
k_w = 2*np.cumsum(k_w,axis=(0))
w_new = self.xrds.W.values + k_w
da = xr.DataArray(w_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da = da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'W-band Reflectivity'
self.xrds['W'] = da
return
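# Minimal standalone sketch of the gaseous correction above: a one-way specific attenuation
# profile (dB/km) becomes dB per 30 m gate, is integrated downward twice for the two-way
# path, and is added back to the measured reflectivity. The attenuation values are made up.
import numpy as np
k_one_way = np.array([0.010, 0.012, 0.015, 0.020]) #dB/km at each gate
k_per_gate = k_one_way*0.03 #dB per 0.03 km gate
k_per_gate[np.isnan(k_per_gate)] = 0
pia = 2*np.cumsum(k_per_gate) #two-way path-integrated attenuation (dB)
print(np.array([18.0, 19.5, 21.0, 22.5]) + pia) #the correction grows with depth into the column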
def run_retrieval(self,old=True):
from pickle import load
if old:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y.pkl', 'rb'))
else:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X_V2.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y_V2.pkl', 'rb'))
#now we have to reshape things to make sure they are in the right shape for the NN model [n_samples,n_features]
Ku = self.xrds.Ku.values
shape_step1 = Ku.shape
Ku = Ku.reshape([Ku.shape[0],Ku.shape[1]*Ku.shape[2]])
shape_step2 = Ku.shape
Ku = Ku.reshape([Ku.shape[0]*Ku.shape[1]])
Ka = self.xrds.Ka.values
Ka = Ka.reshape([Ka.shape[0],Ka.shape[1]*Ka.shape[2]])
Ka = Ka.reshape([Ka.shape[0]*Ka.shape[1]])
T = self.xrds.T3d.values
T = T.reshape([T.shape[0],T.shape[1]*T.shape[2]])
T = T.reshape([T.shape[0]*T.shape[1]])
ind_masked = np.isnan(Ku)
Ku_nomask = np.zeros(Ku.shape)
Ka_nomask =
|
np.zeros(Ka.shape)
|
numpy.zeros
|
import numpy as np
import nibabel as nib
import random
def datagenerator_affine(gen,test = False):
"""
Datagenerator for the affine network (both stages)
Args:
gen: an image generator, loading the image, indicator, loss placeholders and landmarks
test: True in case we test the function
Returns:
generator to train the affine network using Keras
"""
while True:
X = next(gen)
yield ([X[0]], [X[1], X[2]])
if test == True:
return [X[0], [X[1],X[2]]]
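# Minimal usage sketch: wrap any (image, loss placeholder, landmarks) generator and pull one
# batch; the shapes of the fake arrays below are made up.
import numpy as np
fake_draw = (np.zeros((1, 64, 64, 64, 1), 'float32'), #image
             np.zeros((1, 64, 64, 64, 1), 'float32'), #image-similarity loss placeholder
             np.zeros((1, 12), 'float32')) #landmark vector
affine_gen = datagenerator_affine(iter([fake_draw]))
inputs, targets = next(affine_gen)
print(len(inputs), len(targets)) #1 input, 2 targets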
def image_generator(vol_names, stage, M, age_atlas, data_aug, atlas_files, mode = 'giveall', test = False, batch_size=1, np_var='vol_data'):
"""
Generator that loads an image along with placeholders for the loss
Args:
vol_names: list with all files for the generator
stage: string, '1' for stage 1, '2' for stage 2
age_atlas: ordered list with all GA of the atlas images
M: number of atlases used to calculate the loss
data_aug: type of data augmentation used: [0,0] no augmentation, [1,0] only flips, [0,1] only rot90, [1,1] flip or rot is applied
atlas_files: ordered list of names of atlas files
mode: used training strategy: 'giveall': means give all M atlases to loss, give1: means give at random 1 of M atlases to loss
test: default (False) set to True to test the function
Returns:
image generator to train/validate the affine network using Keras
"""
while True:
idxes = np.random.randint(len(vol_names),size=batch_size)
for idx in idxes:
X_orig = load_volfile(vol_names[idx], np_var=np_var)
gt_file = vol_names[idx].split('.nii')[0]+'_annotation.npz'
X, Y = loader_img_anno(X_orig, gt_file, stage, data_aug)
X = X[np.newaxis, ..., np.newaxis]
XX = loader_age(gt_file, M, age_atlas, X, atlas_files, mode)
return_vals = [X,XX,Y]
yield tuple(return_vals)
if test == True:
return return_vals
def loader_age(gt_file, M, age_atlas, X, atlas_files, mode = 'giveall'):
"""
Function that loads the placeholder in the loss for image similarity,
contains (by design) the indicator list that tells which atlases are eligible.
Args:
gt_file: npz file containing the meta-data per image. Here we load the GA.
M: number of atlases used to calculate the loss.
age_atlas: ordered list of GA of atlas files
X: The image for which we will load the placeholder.
atlas_files: ordered list of names atlas files
mode: used training strategy: 'giveall': means give all M atlases to loss, give1: means give at random 1 of M atlases to loss
Returns:
XX: the placeholder for the loss, same shape as X, containing zeros, with the indicator stored in
XX[0, 0:M, 0, 0, 0] (or a single atlas index in XX[0, 0, 0, 0, 0] for the 'give1' mode).
"""
age = int(load_volfile(gt_file,np_var='GA'))
indi = indicator(age, M, age_atlas, atlas_files)
XX = np.zeros(X.shape)
if mode == 'give1':
idx = select_index_atlas(indi)
XX[0,0,0,0,0] = idx
else:
idx = give_index_atlas(indi)
XX[0,0:M,0,0,0] = idx
return XX
def loader_img_anno(X_orig, gt_file, stage, data_aug, test=False,test_flips=False):
"""
Function that loads the image, applies data augmentation, and loads the annotations of the landmarks used in the datagenerator.
Args:
X_orig: Original image loaded, for this image we will load accompanying information.
gt_file: corresponding npz file with meta-data: age and landmarks
data_aug: type of data augmentation used: [0,0] no augmentation, [1,0] only flips, [0,1] only rot90, [1,1] flip or rot is applied
test: default (False) set to True for testing.
test_flips: default (False), set to True to test.
Returns:
X: image used for training
Y: (1,12) vector containing the landmarks for stage 1, containing zeros for stage 2.
"""
if stage == '1':
Y_t1 = load_volfile(gt_file, np_var='coor_t')
Y_b1 = load_volfile(gt_file, np_var='coor_b')
else:
Y_t1 = []
Y_b1 = []
if np.sum(data_aug) == 2:
if test == True:
flips = test_flips
else:
flips = random.choice([True, False])
if flips == True:
if test == True:
X, Y_t, Y_b = apply_flips(X_orig, Y_t1, Y_b1, size = 32, print_flip = False, test = True, test_lr = True)
else:
X, Y_t, Y_b = apply_flips(X_orig, Y_t1, Y_b1)
else:
if test == True:
X, Y_t, Y_b = apply_rot90(X_orig, Y_t1, Y_b1, size = 32, print_rot90 = False, test = True, r_test = 0)
else:
X, Y_t, Y_b = apply_rot90(X_orig, Y_t1, Y_b1)
elif data_aug[0] == 1:
if test == True:
X,Y_t,Y_b = apply_flips(X_orig, Y_t1, Y_b1, size = 32, print_flip = False, test = True, test_lr = True)
else:
X,Y_t,Y_b = apply_flips(X_orig, Y_t1, Y_b1)
elif data_aug[1] == 1:
if test == True:
X,Y_t,Y_b = apply_rot90(X_orig, Y_t1, Y_b1, size = 32, print_rot90 = False, test = True, r_test = 0)
else:
X,Y_t,Y_b = apply_rot90(X_orig, Y_t1, Y_b1)
else:
X = X_orig
Y_t = Y_t1
Y_b = Y_b1
Y = np.zeros([1,12])
if stage == '1':
Y[0,0:3] = Y_t
Y[0,3:6] = Y_b
return X, Y
def apply_flips(img, T, B, size = 32, print_flip = False,test = False, test_lr = False,test_ud = False,test_fb = False):
"""
Function that applies flips to img.
Args:
img: image that will be augmented
T: top landmark that will be augmented
B: bottom landmark that will be augmented
size: 0.5*shape of img
print_flips: if set to True the applied flip will be printed
test: set to True to test the code
test_lr: in case of test, control if lr flip is applied
test_ud: in case of test, control if ud flip is applied
test_fb: in case of test, control if fb flip is applied
Returns:
img_aug: image augmented
T_aug: top landmark augmented
B_aug: bottom landmark augmented
"""
if test == True:
fliplr = test_lr
else:
fliplr = random.choice([True,False])
if fliplr == True:
img = np.fliplr(img)
if T != []:
T[1] = 2*size-T[1]
B[1] = 2*size-B[1]
if test == True:
flipud = test_ud
else:
flipud = random.choice([True, False])
if flipud == True:
img = np.flipud(img)
if T != []:
T[0] = 2*size-T[0]
B[0] = 2*size-B[0]
if test == True:
flipfb = test_fb
else:
flipfb = random.choice([True,False])
if flipfb == True:
img = np.flip(img,2)
if T != []:
T[2] = 2*size-T[2]
B[2] = 2*size-B[2]
if print_flip == True:
print('fliplr: '+ str(fliplr))
print('flipud: '+ str(flipud))
print('flipfb: '+ str(flipfb))
return img, T, B
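# Minimal standalone sketch of the left-right flip convention above: mirroring a cube of
# side 2*size maps a landmark's second coordinate to 2*size - y. The landmark is made up.
import numpy as np
size_s = 32
img_s = np.zeros((2*size_s, 2*size_s, 2*size_s), dtype='float32')
T_s = np.array([10.0, 20.0, 30.0]) #top landmark (x, y, z)
img_flipped = np.fliplr(img_s) #flips axis 1, matching the T[1] update above
T_s[1] = 2*size_s - T_s[1]
print(T_s) #[10. 44. 30.]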
def apply_rot90(img, T, B, size = 32, print_rot90 = False, test = False, r_test = []):
"""
Function that applies 90 degree rotations to img.
Args:
img: image that will be augmented
T: top landmark that will be augmented
B: bottom landmark that will be augmented
size: 0.5*shape of img
print_rot90: if set to True the applied rotation will be printed
test: set to True to test the code
r_test: in case of test, control which rotation is applied
Returns:
img_aug: image augmented
T_aug: top landmark augmented
B_aug: bottom landmark augmented
"""
#list of all rotations we consider.
rots = [[90,[0,0,1],0,1,1],
[-90,[0,0,1],1,0,1],
[90,[1,0,0],1,2,1],
[-90,[1,0,0],2,1,1],
[90,[0,1,0],2,0,1],
[-90,[0,1,0],0,2,1],
[180,[0,0,1],0,1,2],
[180,[1,0,0],1,2,2],
[180,[0,1,0],0,2,2],
[0, [0,0,0],0,0,0]]
if test == True:
r = r_test
else:
r = random.randint(0,len(rots)-1)
if r != 9:
img = np.rot90(img,rots[r][4],axes=(rots[r][2],rots[r][3]))
if T != []:
theta = np.deg2rad(rots[r][0])
v = rots[r][1]
rotation_matrix = np.array([[v[0]*v[0]*(1-np.cos(theta))+np.cos(theta), v[1]*v[0]*(1-np.cos(theta))-v[2]*np.sin(theta), v[2]*v[0]*(1-np.cos(theta))+v[1]*np.sin(theta),0],
[v[0]*v[1]*(1-np.cos(theta))+v[2]*np.sin(theta), v[1]*v[1]*(1-np.cos(theta))+
|
np.cos(theta)
|
numpy.cos
|
import unittest
import numpy as np
from pptk.vfuncs import vfuncs
class TestAdd(unittest.TestCase):
def setUp(self):
self.A = np.float32(np.arange(9).reshape((3, 3)))
self.a = self.A[0]
self.b = np.float32(np.array([[1], [2], [3]]))
self.v = np.hstack((self.b, self.b))
self.w = np.float32(np.ndarray((0, 3)))
def test_single_scalar(self):
A = self.A
a = self.a
b = self.b
v = self.v
w = self.w
Y = [A, A, A]
z_gt = A + 1
# add integer
X = [1]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
# add float
X = [1.0]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
# add boolean
X = [True]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
# numpy.float32
M = np.float32(np.array([1.0]))
X = [M[0]]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
def test_input_checking(self):
A = self.A
a = self.a
b = self.b
v = self.v
w = self.w
X = [A, A]
Y = [A, A, A]
with self.assertRaises(ValueError):
vfuncs._add(X, Y)
with self.assertRaises(TypeError):
vfuncs._add(1, 2, 3)
with self.assertRaises(TypeError):
vfuncs._add(1, X)
with self.assertRaises(ValueError):
vfuncs._add([], X)
def test_broadcasting(self):
A = self.A
a = self.a
b = self.b
v = self.v
w = self.w
# broadcast rows
X = [A, A, A]
Y = [a, a, a]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == (A + a).tolist())
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == (a + A).tolist())
# broadcast columns
X = [A, A, A]
Y = [b, b, b]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == (A + b).tolist())
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == (b + A).tolist())
# outer sum
X = [a, a, a]
Y = [b, b, b]
z_gt = a + b
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == z_gt.tolist())
# shapes incompatible for broadcasting
X = [A, A, A]
Y = [v, v, v]
with self.assertRaises(ValueError):
vfuncs._add(X, Y)
with self.assertRaises(ValueError):
vfuncs._add(Y, X)
# adding zero-row matrix to one-row matrix succeeds
X = [a, a, a]
Y = [w, w, w]
Z = vfuncs._add(X, Y)
for z in Z:
self.assertTrue(z.tolist() == [] and z.shape == (0, 3))
Z = vfuncs._add(Y, X)
for z in Z:
self.assertTrue(z.tolist() == [] and z.shape == (0, 3))
# adding zero-row matrix to anything else should fail
X = [A, A, A]
Y = [w, w, w]
with self.assertRaises(ValueError):
vfuncs._add(X, Y)
with self.assertRaises(ValueError):
vfuncs._add(Y, X)
def test_array_checking(self):
A = self.A
a = self.a
b = self.b
v = self.v
w = self.w
V = np.float32(np.random.rand(3, 3, 3))
# bad array occurs as single item in list
X = [V]
Y = [a, a, a]
with self.assertRaises(ValueError):
vfuncs._add(X, Y)
with self.assertRaises(ValueError):
vfuncs._add(Y, X)
# bad array occurs at second position in list
X = [A, V, A]
Y = [a, a, a]
with self.assertRaises(ValueError):
vfuncs._add(X, Y)
with self.assertRaises(ValueError):
vfuncs._add(Y, X)
class TestIndexing(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.A = np.matrix(np.float32(np.random.rand(10, 11)))
self.J = [2, 5]
self.S = slice(None, None, None)
self.B = np.array([
True,
False,
False,
True,
False,
False,
False,
True,
False,
False])
def test_check_inputs(self):
A = self.A
J = self.J
S = self.S
X = [A, A, A]
# simultaneous indexing of rows and columns disallowed
with self.assertRaises(TypeError):
vfuncs._idx(X, ([J], [J]))
# indexing just rows should not give error
Y = vfuncs._idx(X, ([J], S))
for y in Y:
self.assertTrue(y.tolist() == A[J].tolist())
# indexing just columns should not give error
Y = vfuncs._idx(X, (S, [J]))
for y in Y:
self.assertTrue(y.tolist() == A[:, J].tolist())
# indexing X=[A, A, A] with [J, J] should give error
with self.assertRaises(ValueError):
vfuncs._idx(X, ([J, J], S))
# index [A] with [J]
Y = vfuncs._idx([A], ([J], S))
for y in Y:
self.assertTrue(y.tolist() == A[J, :].tolist())
Y = vfuncs._idx([A], (S, [J]))
for y in Y:
self.assertTrue(y.tolist() == A[:, J].tolist())
# index [A] with [J, J, J]
Y = vfuncs._idx([A], ([J, J, J], S))
for y in Y:
self.assertTrue(y.tolist() == A[J, :].tolist())
Y = vfuncs._idx([A], (S, [J, J, J]))
for y in Y:
self.assertTrue(y.tolist() == A[:, J].tolist())
# index [A, A, A] with [J]
Y = vfuncs._idx([A, A, A], ([J], S))
for y in Y:
self.assertTrue(y.tolist() == A[J, :].tolist())
Y = vfuncs._idx([A, A, A], (S, [J]))
for y in Y:
self.assertTrue(y.tolist() == A[:, J].tolist())
# index [A, A, A] with [J, J, J]
Y = vfuncs._idx([A, A, A], ([J, J, J], S))
for y in Y:
self.assertTrue(y.tolist() == A[J, :].tolist())
Y = vfuncs._idx([A, A, A], (S, [J, J, J]))
for y in Y:
self.assertTrue(y.tolist() == A[:, J].tolist())
# indexing [] with [J] should give error
with self.assertRaises(ValueError):
vfuncs._idx([], ([J], S))
with self.assertRaises(ValueError):
vfuncs._idx([], (S, [J]))
# indexing [] with [J, J] should give error
with self.assertRaises(ValueError):
vfuncs._idx([], ([J, J], S))
with self.assertRaises(ValueError):
vfuncs._idx([], (S, [J, J]))
# indexing [A] with [] should give error
with self.assertRaises(ValueError):
vfuncs._idx([A], ([], S))
with self.assertRaises(ValueError):
vfuncs._idx([A], (S, []))
# indexing [A, A] with [] should give error
with self.assertRaises(ValueError):
vfuncs._idx([A, A], ([], S))
with self.assertRaises(ValueError):
vfuncs._idx([A, A], (S, []))
def test_slicing(self):
A = self.A
J = self.J
S = self.S
X = [A, A, A]
# out of bound slicing results in empty array
S1 = slice(12, None, None)
Y = vfuncs._idx(X, (S1, S))
for y in Y:
self.assertTrue(
y.shape == A[S1].shape and
y.tolist() == A[S1].tolist())
Y = vfuncs._idx(X, (S, S1))
for y in Y:
self.assertTrue(
y.shape == A[:, S1].shape and
y.tolist() == A[:, S1].tolist())
# slices with negative indices
S2 = slice(-2, None, None)
Y = vfuncs._idx(X, (S2, S))
for y in Y:
self.assertTrue(y.tolist() == A[S2].tolist())
Y = vfuncs._idx(X, (S, S2))
for y in Y:
self.assertTrue(y.tolist() == A[:, S2].tolist())
# simultaneous row and column slicing
Y = vfuncs._idx(X, (slice(None, None, 2), slice(None, None, 3)))
for y in Y:
self.assertTrue(y.tolist() == A[::2, ::3].tolist())
def test_scalar(self):
# indexing with scalar
A = self.A
X = [A, A, A]
S = slice(None, None, None)
# index rows (single scalar index for all list items)
Y = vfuncs._idx(X, ([0], S))
for y in Y:
self.assertTrue(y.tolist() == A[0].tolist())
# index rows (separate scalar index per list items)
Y = vfuncs._idx(X, ([0, 0, 0], S))
for y in Y:
self.assertTrue(y.tolist() == A[0].tolist())
# index columns (single scalar index for all list items)
Y = vfuncs._idx(X, (S, [0]))
for y in Y:
self.assertTrue(y.tolist() == A[:, 0].tolist())
# index columns (separate scalar index per list items)
Y = vfuncs._idx(X, (S, [0, 0, 0]))
for y in Y:
self.assertTrue(y.tolist() == A[:, 0].tolist())
def test_pylist(self):
# indexing with python list of indices
A = self.A
S = self.S
X = [A, A, A]
# indexing with list containing non-int()-able item
# should raise exception
class Foo:
pass
J = [1, Foo(), 2]
with self.assertRaises(TypeError):
vfuncs._idx(X, ([J], S))
def test_boolean(self):
# indexing with array of booleans
A = self.A
S = self.S
B = self.B
B2 = np.r_[B, False]
X = [A, A, A]
Y = vfuncs._idx(X, ([B], S))
for y in Y:
self.assertTrue(y.tolist() == A[B].tolist())
Y = vfuncs._idx(X, (S, [B2]))
for y in Y:
self.assertTrue(y.tolist() == A[:, B2].tolist())
Y = vfuncs._idx(X, ([B, B, B], S))
for y in Y:
self.assertTrue(y.tolist() == A[B, :].tolist())
Y = vfuncs._idx(X, (S, [B2, B2, B2]))
for y in Y:
self.assertTrue(y.tolist() == A[:, B2].tolist())
# length of boolean array allowed to exceed that of "indexee"
# as long as exceeding entries are all False
Y = vfuncs._idx(X, ([B[:3]], S))
for y in Y:
self.assertTrue(y.tolist() == A[:3, :][B[:3], :].tolist())
B2 = np.hstack((B, np.array([False, False, False])))
Y = vfuncs._idx(X, ([B2], S))
for y in Y:
self.assertTrue(y.tolist() == A[B, :].tolist())
def test_intarray(self):
A = self.A
S = self.S
X = [A, A, A]
# indexing with array of integers
# negative indices
# out of bound indices should raise exception
J = np.array([42])
with self.assertRaises(IndexError):
vfuncs._idx(X, ([J], S))
with self.assertRaises(IndexError):
vfuncs._idx(X, (S, [J]))
J = np.array([-13])
with self.assertRaises(IndexError):
vfuncs._idx(X, ([J], S))
with self.assertRaises(IndexError):
vfuncs._idx(X, (S, [J]))
def test_mixed(self):
A = self.A
S = self.S
J = [
self.B,
[1, 4, 5],
[True, 2.0, False, 6],
np.array([2, 3, 9], dtype=np.int32),
np.array([-3, 9])
]
J_1 = [
self.B,
[1, 4, 5],
[1, 2, 0, 6],
np.array([2, 3, 9], dtype=np.int32),
np.array([-3, 9])
]
J_2 = [
np.r_[self.B, False],
[1, 4, 5],
[1, 2, 0, 6],
np.array([2, 3, 9], dtype=np.int32),
np.array([-3, 9])
]
X = [A, A, A, A, A]
Y = vfuncs._idx(X, (J, S))
for i, y in enumerate(Y):
self.assertTrue(y.tolist() == A[J_1[i]].tolist())
Y = vfuncs._idx(X, (S, J))
for i, y in enumerate(Y):
self.assertTrue(y.tolist() == A[:, J_2[i]].tolist())
class TestDot(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.A1 = np.float32(np.random.rand(2, 5))
self.A2 = np.float32(np.random.rand(1, 5))
self.B1 = np.float32(np.random.rand(5, 3))
self.B2 = np.float32(np.random.rand(5, 2))
def test_simple(self):
A1 = self.A1
A2 = self.A2
B1 = self.B1
B2 = self.B2
# [A1]*[B1]
Y = vfuncs._dot([A1], [B1])
self.assertTrue(len(Y) == 1)
self.assertTrue(np.allclose(Y[0], np.dot(A1, B1)))
# [A1]*[B1,B2]
Y = vfuncs._dot([A1], [B1, B2])
self.assertTrue(len(Y) == 2)
self.assertTrue(np.allclose(Y[0], np.dot(A1, B1)))
self.assertTrue(np.allclose(Y[1], np.dot(A1, B2)))
# [A1,A2]*[B1]
Y = vfuncs._dot([A1, A2], [B1])
self.assertTrue(len(Y) == 2)
self.assertTrue(np.allclose(Y[0], np.dot(A1, B1)))
self.assertTrue(np.allclose(Y[1], np.dot(A2, B1)))
# [A1,A2]*[B1,B2]
Y = vfuncs._dot([A1, A2], [B1, B2])
self.assertTrue(len(Y) == 2)
self.assertTrue(np.allclose(Y[0], np.dot(A1, B1)))
self.assertTrue(np.allclose(Y[1], np.dot(A2, B2)))
# incompatible shapes should raise value error
with self.assertRaises(ValueError):
vfuncs._dot([A1], [A2])
def test_multiply_by_scalar(self):
A = self.A1
# [A,A,A]*[.5]
Y = vfuncs._dot([A, A, A], [.5])
for y in Y:
self.assertTrue(y.tolist() == (A*.5).tolist())
# [.2]*[.5]
Y = vfuncs._dot([.2], [.5])
self.assertTrue(Y[0].tolist() == [[np.float32(.2) * np.float32(.5)]])
class TestTranspose(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.A = np.float32(np.random.rand(3, 3))
self.B = np.float32(
|
np.random.rand(3, 4)
|
numpy.random.rand
|
import os
import numpy as np
import torch as th
from torchvision import utils
from utils.helper_functions import *
import utils.visualization as visualization
import utils.filename_templates as TEMPLATES
import utils.helper_functions as helper
import utils.logger as logger
from utils import image_utils
from pathlib import Path
from torchvision import transforms
MAX_FLOW = 400
def sequence_loss(flow_pred, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
# flow_gt.shape: torch.Size([4, 480, 640, 2]); flow_pred is a list of 12 tensors, each torch.Size([4, 2, 480, 640])
# so first convert the shape of flow_gt
flow_gt = flow_gt.permute(0, 2, 3, 1)
# discard invalid pixels and excessively large flow vectors
mag = torch.sum(flow_gt**2, dim=1).sqrt()
# mag.shape torch.Size([4, 480, 640])
mask0 = valid >= 0.5
mask1 = mag < max_flow # mask: torch.Size([4, 480, 640])
valid = mask0 & mask1
epe = torch.sum((flow_pred - flow_gt)**2, dim=1).sqrt() # per-pixel error
epe = epe.view(-1)[valid.view(-1)]
metrics = {
'epe': epe.mean().item(),
'1px': (epe < 1).float().mean().item(),
'3px': (epe < 3).float().mean().item(),
'5px': (epe < 5).float().mean().item(),
}
return metrics
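# Minimal standalone sketch of the end-point-error metrics above for channel-last flow
# tensors of shape [B, H, W, 2]; the random tensors are placeholders.
import torch
flow_gt_s = torch.randn(2, 8, 8, 2)
flow_pred_s = flow_gt_s + 0.5*torch.randn(2, 8, 8, 2)
valid_s = torch.ones(2, 8, 8) > 0.5 #every pixel valid in this toy case
epe_s = torch.sum((flow_pred_s - flow_gt_s)**2, dim=-1).sqrt()[valid_s]
print({'epe': epe_s.mean().item(), '1px': (epe_s < 1).float().mean().item(), '3px': (epe_s < 3).float().mean().item()})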
def flow_error_dense(flow_gt, flow_pred, valid, name):
# flow_gt.shape torch.Size([4, 480, 640, 2])
# flow_pred list,里面12个元素 每个:torch.Size([4, 2, 480, 640]) => [4, 480, 640, 2]
if name == 'mvsec_45Hz':
flow_gt = flow_gt.permute(0, 2, 3, 1)
flow_pred= flow_pred.permute(0, 2, 3, 1)
mask = (valid > 0)
gt_masked = flow_gt[mask, :]
pre_masked = flow_pred[mask, :]
ee = np.linalg.norm(gt_masked.cpu() - pre_masked.cpu(), axis=-1)
n_points = ee.shape[0]
aee = np.mean(ee)
PE1 = np.mean(ee < 1)
PE3 = np.mean(ee < 3)
thresh = 3.
num = float((ee < thresh).sum())
precent_AEE = float((ee < thresh).sum()) / float(ee.shape[0] + 1e-5)
outlier = 1. - precent_AEE
metrics = {
'AEE': aee,
'outlier': outlier,
'n_points': n_points,
'PE1': PE1,
'PE3': PE3
}
return metrics
class Logger:
def __init__(self, path):
self.total_steps = 0
self.running_loss = {}
self.writer = None
self.train_epe_list = []
self.train_steps_list = []
self.txt_file =Path(path) / 'metrics_Attention_SELayer.txt'
def _print_training_status(self):
metrics_data = [np.mean(self.running_loss[k]) for k in sorted(self.running_loss.keys())]
metrics_str = ("{:^10.4f}, " * len(metrics_data)).format(*metrics_data)
# print the training status
# print('epe|\t 1px|\t 2px|\t 3px|\t 5px')
print('{:^10}{:^10}{:^10}{:^10}{:^10}'.format('AEE', '1PE', '3PE', 'n_points', 'outlier', ))
print(metrics_str)
# logging running loss to total loss
for k in self.running_loss:
self.running_loss[k] = []
with self.txt_file.open(mode='a+') as file:
file.write(metrics_str)
file.write('\n')
self.train_epe_list.append(
|
np.mean(self.running_loss['AEE'])
|
numpy.mean
|
import os
import time
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw
from scipy import interpolate
from skimage import filters
from skimage.morphology import disk, binary_dilation
import multiprocessing
import scipy.interpolate as spint
import scipy.spatial.qhull as qhull
import itertools
# ================= Three main functions ================
def pano2fisheye(pil_img, model='equal_width', inter_method='linear', log='off', multi='off'):
if log == 'on':
print('|- Start converting...')
img_df, shape = img2dataframe(pil_img)
if log == 'on':
print('|- Image DataFrame initialized')
img_conv, fisheye_shape = coordinate_converse(img_df, shape, model=model)
if log == 'on':
print('|- New pixel positions calculated')
img_cali = unique_points(img_conv)
if log == 'on':
print('|- Duplicated pixels removed')
if log == 'on':
print('|- Start fixing holes')
red, green, blue = interpolate_img(img_cali, fisheye_shape, method=inter_method, multi=multi, log=log)
fisheye_img = merge_layers(red, green, blue)
if log == 'on':
print('|- Image combined')
return fisheye_img
def fisheye_correction(img, center_radius, model='equidistant', vision_angle=50, inter_method='linear', multi='off', log='off'):
x, y, r = center_radius
if log == 'on':
print('- Start converting...')
img_df, shape = img2dataframe(img, center_radius)
if log == 'on':
print('|- Image DataFrame initialized')
img_df = calculate_radius_calibrated(img_df, r, model=model)
if log == 'on':
print('|- New radius calculated')
img_df, r_new = zenith_filter(img_df, r, vision_angle)
if log == 'on':
print('|- Vision angle filtered')
img_df = calculate_new_xy(img_df, r, r_new)
if log == 'on':
print('|- New pixel positions calculated')
df_cali = unique_points(img_df)
if log == 'on':
print('|- Duplicated pixels removed')
print('|- Start fixing holes')
length = int(np.ceil(r_new * 2))
red, green, blue = interpolate_img(df_cali, (length, length), method=inter_method, multi=multi)
corrected_img = merge_layers(red, green, blue)
if log == 'on':
print('|- Image combined')
return corrected_img
def pano2fisheye_correction(pil_img, trans_model='equal_width', cali_model='equisolid', vision_angle=50,
inter_method='linear', multi='off', log='off'):
if log == 'on':
print('- Start converting...')
img_df, shape = img2dataframe(pil_img)
del pil_img
if log == 'on':
print('|- Image DataFrame initialized')
img_conv, fisheye_shape = coordinate_converse(img_df, shape, model=trans_model)
if log == 'on':
print('|- New pixel positions calculated')
img_cali = unique_points(img_conv)
if log == 'on':
print('|- Duplicated pixels removed')
del img_conv
r = int(fisheye_shape[0] / 2)
img_df = calculate_radius_calibrated(img_cali, r, model=cali_model)
del img_cali
if log == 'on':
print('|- New radius calculated')
img_df, r_new = zenith_filter(img_df, r, vision_angle)
if log == 'on':
print('|- Vision angle filtered')
img_df = calculate_new_xy(img_df, r, r_new)
if log == 'on':
print('|- New pixel positions calculated')
df_cali = unique_points(img_df)
if log == 'on':
print('|- Start fixing holes')
length = int(np.ceil(r_new * 2))
red, green, blue = interpolate_img(df_cali, (length, length), method=inter_method, multi=multi)
del df_cali
fisheye_cali_img = merge_layers(red, green, blue)
if log == 'on':
print('|- Image combined')
return fisheye_cali_img
# =============== Helper modules used by the three main functions above ======================
def img2dataframe(img, center_radius=None):
'''
if img is a fisheye image, (x, y, r) is required to crop fisheye edge.
'''
if type(center_radius) == tuple and len(center_radius) == 3:
# fisheye image
x, y, radius = center_radius
img_crop = img.crop((x - radius - 1, y - radius - 1, x + radius - 1, y + radius - 1))
mask = Image.new('RGBA', (2 * radius, 2 * radius))
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0, 2 * radius, 2 * radius), fill='blue', outline='blue')
# overlay mask on image
img_out = Image.new('RGBA', (2 * radius, 2 * radius))
img_out.paste(img_crop, (0, 0), mask)
else:
# panorama image need crop half
width, height = img.size
img_out = img.crop((0, 0, width, int(height / 2)))
'''PIL.img.size = (w, h)'''
w, h = img_out.size
np_img = np.asarray(img_out)
'''
numpy.ndarray:
┌─→ y
│ np.asarray(img).shape = (h, w, d)
x↓
'''
x_grid, y_grid = np.mgrid[0:h, 0:w]
red = np_img[:, :, 0]
green = np_img[:, :, 1]
blue = np_img[:, :, 2]
if np_img.shape[2] == 4:
alpha = np.asarray(img_out)[:, :, 3]
img_df = pd.DataFrame({'x': x_grid.flatten(), 'y': y_grid.flatten(),
'red': red.flatten(), 'green': green.flatten(), 'blue': blue.flatten(),
'alpha': alpha.flatten()},
columns=['x', 'y', 'red', 'green', 'blue', 'alpha'])
# remove alpha layer
img_df = img_df.loc[img_df['alpha'] == 255]
img_df = img_df.drop(['alpha'], axis=1)
else:
img_df = pd.DataFrame({'x': x_grid.flatten(), 'y': y_grid.flatten(),
'red': red.flatten(), 'green': green.flatten(), 'blue': blue.flatten()},
columns=['x', 'y', 'red', 'green', 'blue'])
np_shape = (h, w)
return img_df, np_shape
def unique_points(img_df):
'''
remove duplicate points that overlap on the same pixel
x: converted x (float)
y: converted y (float)
shape: (height, width)
returns a DataFrame with unique integer x, y and averaged colour values
'''
x_int = img_df['x_cali'].astype(int)
y_int = img_df['y_cali'].astype(int)
df_cali = pd.DataFrame(
{'x': x_int, 'y': y_int, 'red': img_df['red'], 'green': img_df['green'], 'blue': img_df['blue']})
df_cali = df_cali.groupby(['x', 'y'], as_index=False).mean()
return df_cali
def interpolate_img(img_df, shape, method='none', log='off', multi='off'):
'''
shape: (height, width)
method: interpolation used to fill holes ('none' or 'linear'); griddata-style linear interpolation is very slow and time-consuming
returns a 2D ndarray for each colour layer
'''
xi, yi = np.mgrid[0:shape[0], 0:shape[1]]
if method == 'none':
red = np.zeros(shape)
green = np.zeros(shape)
blue = np.zeros(shape)
red[img_df['x'], img_df['y']] = img_df['red']
green[img_df['x'], img_df['y']] = img_df['green']
blue[img_df['x'], img_df['y']] = img_df['blue']
if log == 'on': print('| |- no interpolation method applied')
return red, green, blue
if method == 'linear':
if multi == 'on':
key_words = ['red', 'green', 'blue']
multiprocessing.freeze_support()
pool = multiprocessing.Pool(processes=3)
pool_list = {}
for channel in key_words:
pool_list[channel] = pool.apply_async(interpolate.griddata,
args=((img_df['x'], img_df['y']), img_df[channel], (xi, yi)))
result_list = {key:value.get() for key, value in pool_list.items()}
pool.close()
pool.join()
return result_list['red'], result_list['green'], result_list['blue']
else:
mask_hole = np.zeros(shape)
mask_hole[img_df['x'], img_df['y']] = 1
mask_circ = createCircularMask(*shape)
mask = (mask_hole==0) * mask_circ
if log == 'on': print('| |- mask generated')
red = np.zeros(shape).astype(np.uint16)
green = np.zeros(shape).astype(np.uint16)
blue = np.zeros(shape).astype(np.uint16)
red[img_df['x'], img_df['y']] = img_df['red']
green[img_df['x'], img_df['y']] = img_df['green']
blue[img_df['x'], img_df['y']] = img_df['blue']
red_filter = filters.rank.mean(red, disk(1),mask=mask_hole==1)
green_filter = filters.rank.mean(green, disk(1),mask=mask_hole==1)
blue_filter = filters.rank.mean(blue, disk(1),mask=mask_hole==1)
if log == 'on': print('| |- filter generated')
#mask_deli = binary_dilation(mask)
red[mask] = red_filter[mask]
green[mask] = green_filter[mask]
blue[mask] = blue_filter[mask]
'''
t = time.time()
red = interpolate.griddata((img_df['x'], img_df['y']), img_df['red'], (xi, yi))
if log == 'on':
print(f'| |- [red] channel finished, {time.time() - t}s spend')
tr = time.time()
green = interpolate.griddata((img_df['x'], img_df['y']), img_df['green'], (xi, yi))
if log == 'on':
print(f'| |- [green] channel finished, {time.time() - t}s spend')
tr = time.time()
blue = interpolate.griddata((img_df['x'], img_df['y']), img_df['blue'], (xi, yi))
if log == 'on':
print(f'| |- [blue] channel finished, {time.time() - t}s spend')
'''
return red, green, blue
def merge_layers(r, g, b):
img = np.zeros((r.shape[0], r.shape[1], 3))
img[:, :, 0] = r
img[:, :, 1] = g
img[:, :, 2] = b
img[np.isnan(img)] = 0
img = img.astype('uint8')
pil_img = Image.fromarray(img)
return pil_img
def coordinate_converse(img_df, panorama_size, model='equal_width'):
'''
panorama size = (h, w)
h: panorama image height
w: panorama image width
model
'equal_width': assume the pixel's row in the panorama equals its distance to the circle centre in the fisheye
'projection': assume the panorama height equals the height of the hemisphere of a virtual fisheye sphere,
so that the sphere radius (r), the pixel's height in the panorama (h) and its
distance to the fisheye circle centre (d) satisfy:
r^2 = h^2 + d^2
'''
h, w = panorama_size
r = h
fisheye_size = (2 * r, 2 * r)
theta = (img_df['y'] + 0.5) / w * 360 # +0.5 is to mark pixel center
sin_theta = np.sin(np.deg2rad(theta))
cos_theta = np.cos(np.deg2rad(theta))
if model == 'equal_width':
img_df['x_cali'] = r + sin_theta * img_df['x']
img_df['y_cali'] = r + cos_theta * img_df['x']
if model == 'projection':
d = np.sqrt(abs(2 * (img_df['x'] + 0.5) * r - (img_df['x'] + 0.5) ** 2))
img_df['x_cali'] = r + sin_theta * d
img_df['y_cali'] = r + cos_theta * d
return img_df, fisheye_size
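# A hedged sketch (helper name below is hypothetical, not part of the original module)
# of the panorama -> fisheye mapping described in the coordinate_converse docstring,
# for a single pixel: theta comes from the panorama column, the radius from the row
# ('equal_width') or from the chord on a virtual hemisphere ('projection').
def _pano_pixel_to_fisheye(row, col, pano_h, pano_w, model='equal_width'):
    import numpy as np
    r = pano_h                                    # fisheye radius in pixels
    theta = np.deg2rad((col + 0.5) / pano_w * 360.0)
    if model == 'equal_width':
        d = row
    else:                                         # 'projection': r^2 = h^2 + d^2
        d = np.sqrt(abs(2 * (row + 0.5) * r - (row + 0.5) ** 2))
    return r + np.sin(theta) * d, r + np.cos(theta) * d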
def calculate_radius_calibrated(img_df, img_radius, model='equidistant', log='off'):
'''
img_radius: half of fisheye width or height
models:
'equidistant'
'equisolid'
'orthographic'
'stereographic'
'''
if model == 'equidistant':
f = img_radius * 2 / np.pi # the radius of fake sphere
d = np.sqrt((img_df['x'] - img_radius) ** 2 + (img_df['y'] - img_radius) ** 2)
img_df['r_cali'] = f * np.tan(d / f)
elif model == 'equisolid':
f = img_radius / np.sqrt(2)
d = np.sqrt((img_df['x'] - img_radius) ** 2 + (img_df['y'] - img_radius) ** 2)
img_df['r_cali'] = f * np.tan(2 *
|
np.arcsin(d / (2 * f))
|
numpy.arcsin
|
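The two radius-mapping formulas used in calculate_radius_calibrated are easy to evaluate in isolation. A small sketch on a vector of pixel distances, where f is the focal length implied by the projection model and R is half the fisheye width (values below are arbitrary toy numbers):

import numpy as np

def calibrated_radius(d, R, model='equidistant'):
    # d: distance of each pixel from the image centre, R: fisheye radius in pixels
    if model == 'equidistant':
        f = 2 * R / np.pi                    # r = f * theta, undistort with tan
        return f * np.tan(d / f)
    if model == 'equisolid':
        f = R / np.sqrt(2)                   # r = 2 f sin(theta / 2)
        return f * np.tan(2 * np.arcsin(d / (2 * f)))
    raise ValueError(model)

d = np.linspace(0, 0.9, 5) * 500             # sample distances for R = 500 px
print(calibrated_radius(d, 500, 'equidistant'))
print(calibrated_radius(d, 500, 'equisolid'))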
import numpy as np
from scipy.special import erfinv
def log_normal(x, mu, sig):
"""Lognormal probability distribution function normalized for representation in finite intervals.
Args:
x (np.array): variable of the probability distribution.
mu (real): mean of the lognormal distribution.
sig (real): sigma of the lognormal distribution.
Returns:
f (np.array): normalized probability distribution for the intervals of x.
"""
dx = x[1]-x[0]
log_norm = 1 / (x * sig * np.sqrt(2 * np.pi)) * np.exp(- np.power(np.log(x) - mu, 2.) / (2 * np.power(sig, 2.)))
f = log_norm*dx/(np.sum(log_norm * dx))
return f
def classical_payoff(S0, sig, r, T, K, samples=1000000):
"""Function computing the payoff classically given some data.
Args:
S0 (real): initial asset price.
sig (real): market volatility.
r (real): market rate.
T (real): maturity time.
K (real): strike price.
samples (int): total precision of the classical calculation.
Returns:
cl_payoff (real): classically computed payoff.
"""
mu = (r - 0.5 * sig ** 2) * T + np.log(S0) # Define all the parameters to be used in the computation
mean = np.exp(mu + 0.5 * T * sig ** 2) # Set the relevant zone of study and create the mapping between qubit and option price, and
# generate the target lognormal distribution within the interval
variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
Sp = np.linspace(max(mean - 3 * np.sqrt(variance), 0), mean + 3 * np.sqrt(variance), samples)
lnp = log_normal(Sp, mu, sig * np.sqrt(T))
cl_payoff = 0
for i in range(len(Sp)):
if K < Sp[i]:
cl_payoff += lnp[i] * (Sp[i] - K)
return cl_payoff
def get_theta(m_s, ones_s, zeroes_s, alpha=0.05):
"""Function to extract measurement values of a in an iterative AE algorithm.
Args:
m_s (list): set of m to be used.
ones_s (list): Number of outcomes with 1, as many as m_s.
zeroes_s (real): Number of outcomes with 0, as many as m_s.
alpha (real): Confidence level.
Returns:
theta_s (list): results for the angle estimation
err_theta_s (list): errors on the angle estimation
"""
z = erfinv(1 - alpha / 2)
ones_s = np.array(ones_s)
zeroes_s = np.array(zeroes_s)
valid_s = ones_s + zeroes_s
a_s = ones_s / valid_s
theta_s = np.zeros(len(m_s))
err_theta_s = np.zeros(len(m_s))
if m_s[0] == 0:
theta_s[0] = np.arcsin(np.sqrt(a_s[0]))
err_theta_s[0] = z / 2 / np.sqrt(valid_s[0])
else:
raise ValueError('AE does not start with m=0')
for j, m in enumerate(m_s[1:]):
aux_theta = np.arcsin(
|
np.sqrt(a_s[j + 1])
|
numpy.sqrt
|
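classical_payoff above approximates the undiscounted expectation E[(S_T - K)+] with a Riemann sum over a truncated, renormalized lognormal density. A hedged cross-check, assuming scipy is available and using arbitrary toy parameters, against the closed-form lognormal expectation:

import numpy as np
from scipy.stats import norm

S0, sig, r, T, K = 2.0, 0.4, 0.05, 0.1, 1.9
mu = (r - 0.5 * sig**2) * T + np.log(S0)
s = sig * np.sqrt(T)

# closed form for E[(S - K)+] when log S ~ N(mu, s^2) (undiscounted)
d1 = (mu - np.log(K)) / s + s
d2 = d1 - s
closed = np.exp(mu + 0.5 * s**2) * norm.cdf(d1) - K * norm.cdf(d2)

# Riemann-sum version mirroring classical_payoff
mean = np.exp(mu + 0.5 * s**2)
var = (np.exp(s**2) - 1) * np.exp(2 * mu + s**2)
Sp = np.linspace(max(mean - 3 * np.sqrt(var), 1e-8), mean + 3 * np.sqrt(var), 100000)
dx = Sp[1] - Sp[0]
pdf = np.exp(-(np.log(Sp) - mu)**2 / (2 * s**2)) / (Sp * s * np.sqrt(2 * np.pi))
pdf = pdf * dx / np.sum(pdf * dx)                # renormalize over the truncated range
approx = np.sum(pdf * np.maximum(Sp - K, 0.0))

print(closed, approx)   # the two should agree to a few decimal places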
from spikeextractors import SortingExtractor, RecordingExtractor
from spikeextractors.extractors.bindatrecordingextractor import BinDatRecordingExtractor
from spikeextractors.extraction_tools import read_python, write_python
import numpy as np
from pathlib import Path
import csv
class PhyRecordingExtractor(BinDatRecordingExtractor):
extractor_name = 'PhyRecordingExtractor'
has_default_locations = True
installed = True # check at class level if installed or not
is_writable = False
mode = 'folder'
extractor_gui_params = [
{'name': 'folder_path', 'type': 'folder', 'title': "Path to folder"},
]
installation_mesg = "" # error message when not installed
def __init__(self, folder_path):
RecordingExtractor.__init__(self)
phy_folder = Path(folder_path)
self.params = read_python(str(phy_folder / 'params.py'))
datfile = [x for x in phy_folder.iterdir() if x.suffix == '.dat' or x.suffix == '.bin']
if (phy_folder / 'channel_map_si.npy').is_file():
channel_map = list(np.squeeze(np.load(phy_folder / 'channel_map_si.npy')))
assert len(channel_map) == self.params['n_channels_dat']
elif (phy_folder / 'channel_map.npy').is_file():
channel_map = list(np.squeeze(np.load(phy_folder / 'channel_map.npy')))
assert len(channel_map) == self.params['n_channels_dat']
else:
channel_map = list(range(self.params['n_channels_dat']))
BinDatRecordingExtractor.__init__(self, datfile[0], sampling_frequency=float(self.params['sample_rate']),
dtype=self.params['dtype'], numchan=self.params['n_channels_dat'],
recording_channels=list(channel_map))
if (phy_folder / 'channel_groups.npy').is_file():
channel_groups = np.load(phy_folder / 'channel_groups.npy')
assert len(channel_groups) == self.get_num_channels()
for (ch, cg) in zip(self.get_channel_ids(), channel_groups):
self.set_channel_property(ch, 'group', cg)
if (phy_folder / 'channel_positions.npy').is_file():
channel_locations = np.load(phy_folder / 'channel_positions.npy')
assert len(channel_locations) == self.get_num_channels()
for (ch, loc) in zip(self.get_channel_ids(), channel_locations):
self.set_channel_property(ch, 'location', loc)
class PhySortingExtractor(SortingExtractor):
extractor_name = 'PhySortingExtractor'
exporter_name = 'PhySortingExporter'
exporter_gui_params = [
{'name': 'save_path', 'type': 'folder', 'title': "Save path"},
]
installed = True # check at class level if installed or not
is_writable = True
mode = 'folder'
installation_mesg = "" # error message when not installed
def __init__(self, folder_path, exclude_cluster_groups=None, load_waveforms=False, verbose=False):
SortingExtractor.__init__(self)
phy_folder = Path(folder_path)
spike_times = np.load(phy_folder / 'spike_times.npy')
spike_templates = np.load(phy_folder / 'spike_templates.npy')
if (phy_folder /'spike_clusters.npy').is_file():
spike_clusters = np.load(phy_folder / 'spike_clusters.npy')
else:
spike_clusters = spike_templates
if (phy_folder / 'amplitudes.npy').is_file():
amplitudes = np.load(phy_folder / 'amplitudes.npy')
else:
amplitudes = np.ones(len(spike_times))
if (phy_folder /'pc_features.npy').is_file():
pc_features = np.load(phy_folder / 'pc_features.npy')
else:
pc_features = None
clust_id = np.unique(spike_clusters)
self._unit_ids = list(clust_id)
spike_times.astype(int)
self.params = read_python(str(phy_folder / 'params.py'))
self._sampling_frequency = self.params['sample_rate']
# set unit quality properties
csv_tsv_files = [x for x in phy_folder.iterdir() if x.suffix == '.csv' or x.suffix == '.tsv']
for f in csv_tsv_files:
if f.suffix == '.csv':
with f.open() as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
tokens = row[0].split("\t")
property = tokens[1]
else:
tokens = row[0].split("\t")
if int(tokens[0]) in self.get_unit_ids():
if 'cluster_group' in str(f):
self.set_unit_property(int(tokens[0]), 'quality', tokens[1])
elif property == 'chan_grp':
self.set_unit_property(int(tokens[0]), 'group', tokens[1])
else:
if isinstance(tokens[1], (int, np.int, float, np.float, str)):
self.set_unit_property(int(tokens[0]), property, tokens[1])
line_count += 1
elif f.suffix == '.tsv':
with f.open() as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
line_count = 0
for row in csv_reader:
if line_count == 0:
property = row[1]
else:
if int(row[0]) in self.get_unit_ids():
if 'cluster_group' in str(f):
self.set_unit_property(int(row[0]), 'quality', row[1])
elif property == 'chan_grp':
self.set_unit_property(int(row[0]), 'group', row[1])
else:
if isinstance(row[1], (int, np.int, float, np.float, str)):
self.set_unit_property(int(row[0]), property, row[1])
line_count += 1
for unit in self.get_unit_ids():
if 'quality' not in self.get_unit_property_names(unit):
self.set_unit_property(unit, 'quality', 'unsorted')
if exclude_cluster_groups is not None:
if len(exclude_cluster_groups) > 0:
included_units = []
for u in self.get_unit_ids():
if self.get_unit_property(u, 'quality') not in exclude_cluster_groups:
included_units.append(u)
else:
included_units = self._unit_ids
else:
included_units = self._unit_ids
original_units = self._unit_ids
self._unit_ids = included_units
# set features
self._spiketrains = []
for clust in self._unit_ids:
idx = np.where(spike_clusters == clust)[0]
self._spiketrains.append(spike_times[idx])
self.set_unit_spike_features(clust, 'amplitudes', amplitudes[idx])
if pc_features is not None:
self.set_unit_spike_features(clust, 'pc_features', pc_features[idx])
if load_waveforms:
datfile = [x for x in phy_folder.iterdir() if x.suffix == '.dat' or x.suffix == '.bin']
recording = BinDatRecordingExtractor(datfile[0], sampling_frequency=float(self.params['sample_rate']),
dtype=self.params['dtype'], numchan=self.params['n_channels_dat'])
# if channel groups are present, compute waveforms by group
if (phy_folder / 'channel_groups.npy').is_file():
channel_groups = np.load(phy_folder / 'channel_groups.npy')
assert len(channel_groups) == recording.get_num_channels()
for (ch, cg) in zip(recording.get_channel_ids(), channel_groups):
recording.set_channel_property(ch, 'group', cg)
for u_i, u in enumerate(self.get_unit_ids()):
if verbose:
print('Computing waveform by group for unit', u)
frames_before = int(0.5 / 1000. * recording.get_sampling_frequency())
frames_after = int(2 / 1000. * recording.get_sampling_frequency())
spiketrain = self.get_unit_spike_train(u)
if 'group' in self.get_unit_property_names(u):
group_idx = np.where(channel_groups == int(self.get_unit_property(u, 'group')))[0]
wf = recording.get_snippets(reference_frames=spiketrain,
snippet_len=[frames_before, frames_after],
channel_ids=group_idx)
else:
wf = recording.get_snippets(reference_frames=spiketrain,
snippet_len=[frames_before, frames_after])
max_chan = np.unravel_index(np.argmin(np.mean(wf, axis=0)), np.mean(wf, axis=0).shape)[0]
group = recording.get_channel_property(int(max_chan), 'group')
self.set_unit_property(u, 'group', group)
group_idx =
|
np.where(channel_groups == group)
|
numpy.where
|
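The core of the sorting extractor above is grouping spike sample indices by cluster id from the phy output files. A minimal numpy-only sketch of that step, assuming a hypothetical phy output folder containing spike_times.npy plus spike_clusters.npy or spike_templates.npy:

import numpy as np
from pathlib import Path

def load_spike_trains(phy_folder):
    # Group spike sample indices by cluster id, mirroring PhySortingExtractor.
    phy_folder = Path(phy_folder)
    spike_times = np.load(phy_folder / 'spike_times.npy').ravel()
    clusters_file = phy_folder / 'spike_clusters.npy'
    templates_file = phy_folder / 'spike_templates.npy'
    spike_clusters = np.load(clusters_file if clusters_file.is_file() else templates_file).ravel()
    return {int(c): spike_times[spike_clusters == c].astype(int)
            for c in np.unique(spike_clusters)}

# usage (path is hypothetical):
# trains = load_spike_trains('/data/sorting/phy_output')
# print({c: len(t) for c, t in trains.items()})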
"""
"""
import numpy as np
from ..TS_statistics.utils import build_ngram_arrays, create_ngram,\
uniform_input_samevals, uniform_input_lags
from ..TS_statistics.probabilitytools import compute_conditional_probs,\
compute_marginal_probs
from ..TS_statistics.ts_statistics import prob_ngram_x, prob_ngram_xy,\
prob_ngram_ind, prob_xy, prob_xy_ind, prob_x, compute_joint_probs
from ..TS_statistics.regime_statistics import prob_regimes_x, temporal_counts,\
temporal_average_counts, count_repeated, temporal_densities, parse_spks,\
isis_computation, isi_distribution, general_count, counts_normalization,\
temporal_si, prob_spk_xy, count_into_bursts
def test():
#####
X = np.random.randn(1000).cumsum()
X_disc = np.random.randint(0, 20, 1000)
X_mdisc = np.random.randint(0, 20, (1000, 5))
################
#Test functions
##################
## Utils
#########
uniform_input_lags([5], X)
lags = uniform_input_lags(np.array([5]), X)
uniform_input_samevals(True, X_disc)
uniform_input_samevals(False, np.atleast_2d(X_disc).T)
uniform_input_samevals(range(20), X_disc)
uniform_input_samevals(np.arange(20), X_disc)
ngram = create_ngram(X_disc, lags, samevals=True)
lags = lags = uniform_input_lags(np.array([5]), X_disc)
ngram = create_ngram(X_disc, range(5), samevals=True)
pres, post = [0], [0]
L = 1
build_ngram_arrays(np.atleast_2d(X_disc).T, post, pres, L)
L = 2
build_ngram_arrays(np.atleast_2d(X_disc).T, post, pres, L)
## TO IMPLEMENT
# X_mdisc = np.random.randint(0, 20, (1000, 5))
# pres, post = [0, 1, 2], [[1, 2, 3], [2, 3, 4], [0, 1, 4]]
# build_ngram_arrays(X_mdisc, post, pres, L)
#
## probabilitytools
###################
probs =
|
np.random.random((10, 7, 5))
|
numpy.random.random
|
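The test above feeds a random (10, 7, 5) array into the marginal/conditional probability helpers. A self-contained sketch of what those operations amount to on a normalized joint distribution (the package's helper signatures are not re-implemented here, only the arithmetic):

import numpy as np

joint = np.random.random((10, 7, 5))
joint /= joint.sum()                       # normalize to a joint pmf P(x, y, z)

p_xy = joint.sum(axis=2)                   # marginal P(x, y): sum out z
p_x = joint.sum(axis=(1, 2))               # marginal P(x)
p_z_given_xy = joint / p_xy[..., None]     # conditional P(z | x, y)

assert np.allclose(p_z_given_xy.sum(axis=2), 1.0)
print(p_x.shape, p_xy.shape, p_z_given_xy.shape)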
import numpy as np
import numba
from numba import jit
import cv2
import glob
from moviepy.editor import *
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage import color, feature, transform
data = './data/'
@numba.jit(forceobj=True)
def load_data():
"""
load all three clips to a dict variable
"""
# =================== first clip ===================
print('clip_1 images importing...')
fileName1 = sorted(glob.glob(data + 'clip_1/*.jpg'))
img1 = [cv2.imread(img) for img in fileName1]
label1 = np.zeros((len(img1), 1))
shot1 = np.array([(133, 134)])
# =================== second clip ===================
print('clip_2 images importing...')
fileName2 = sorted(glob.glob(data + 'clip_2/*.jpg'))
img2 = [cv2.imread(img) for img in fileName2]
label2 = np.zeros((len(img2), 1))
shot2 = np.array([(0, 1), (55, 57), (72, 74), (78, 79),
(86, 87), (98, 99), (110, 112), (121, 124)])
# =================== third clip ===================
print('clip_3 images importing...')
fileName3 = sorted(glob.glob(data + 'clip_3/*.jpg'))
img3 = [cv2.imread(img) for img in fileName3]
label3 = np.zeros((len(img3), 1))
shot3 = np.array([(32, 41), (59, 60), (61, 62),
(76, 89), (170, 171), (243, 254)])
print('Done !')
test = {'X': [img1, img2, img3], 'Y': [shot1, shot2, shot3]}
return test
def image_to_video(path):
images = sorted(glob.glob(path + '*.jpg'))
clips = [ImageClip(m).set_duration(2) for m in images]
concat_clip = concatenate_videoclips(clips, method="compose")
concat_clip.write_videofile(path + "test.mp4", fps=3)
def color_hist_diff(image1, image2, patch_size=16):
"""
calculate the color histogram difference of two images
"""
# get dimensions and patch size
vertical, horizontal, Z = image1.shape
v_patch = vertical // 16
h_patch = horizontal // 16
# calculate difference
diff = 0
for z in range(Z):
img1, img2 = image1[:, :, z], image2[:, :, z]
for i in range(0, img1.shape[0] - v_patch + 1, patch_size):
for j in range(0, img1.shape[1] - h_patch + 1, patch_size):
patch1 = img1[i:i + v_patch, j:j + h_patch].flatten()
patch2 = img2[i:i + v_patch, j:j + h_patch].flatten()
hist1 = np.histogram(patch1, bins=np.arange(257), density=True)
hist2 = np.histogram(patch2, bins=np.arange(257), density=True)
diff += np.linalg.norm(hist1[0] - hist2[0])
return diff
def mean_pixel_intensity_diff(img1, img2):
diff = 0
for z in range(img1.shape[-1]):
diff += np.abs(np.mean(img1[:,:,z]) - np.mean(img2[:,:,z]))
return diff
# ========================== calculate Edge Change Ratio ==========================
@numba.jit(forceobj=True)
def EdgeChangeRate(img1, img2, iteration=1, sigma=5):
# convert to gray
gray1 = color.rgb2gray(img1)
gray2 = color.rgb2gray(img2)
# get white background and edge
black1 = feature.canny(gray1, sigma=sigma).astype("uint8") # background: 0 edge: 1
black2 = feature.canny(gray2, sigma=sigma).astype("uint8")
# count number of edge pixel
E1 = max(np.sum(black1 == 1), 0.1)
E2 = max(np.sum(black2 == 1), 0.1)
# dilate both image
kernel = np.ones((3, 3)).astype("uint8")
dilate1 = cv2.dilate(black1, kernel).astype("uint8")
dilate2 = cv2.dilate(black2, kernel).astype("uint8")
# combine
imgIn = black1 * dilate2
imgOut = black2 * dilate1
# count edge change pixel
C1 = np.sum(imgIn == 1)
C2 =
|
np.sum(imgOut == 1)
|
numpy.sum
|
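EdgeChangeRate above is truncated before the final combination of the counts C1, C2, E1, E2. A hedged sketch of the usual edge change ratio on toy binary edge maps; the max(...) form below is the textbook definition, not necessarily the author's exact final step:

import numpy as np
from scipy.ndimage import binary_dilation

def edge_change_ratio(edges1, edges2, dilate_iter=1):
    # edges1, edges2: boolean edge maps of two consecutive frames
    e1 = max(edges1.sum(), 1)
    e2 = max(edges2.sum(), 1)
    d1 = binary_dilation(edges1, iterations=dilate_iter)
    d2 = binary_dilation(edges2, iterations=dilate_iter)
    rho_out = 1.0 - np.logical_and(edges1, d2).sum() / e1   # exiting edge fraction
    rho_in = 1.0 - np.logical_and(edges2, d1).sum() / e2    # entering edge fraction
    return max(rho_in, rho_out)

a = np.zeros((32, 32), bool); a[10, :] = True
b = np.zeros((32, 32), bool); b[20, :] = True
print(edge_change_ratio(a, b))   # close to 1.0 for a hard cut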
__author__ = 'Mario'
import Data
import numpy as np
from numpy.linalg import inv
# Calculate the mean of each category
omega1mean = [np.average(Data.omega1[i]) for i in range(3)]
omega2mean = [np.average(Data.omega2[i]) for i in range(3)]
omega3mean = [np.average(Data.omega3[i]) for i in range(3)]
# Calculate the covariance matrix
covOmega1 = np.cov(np.array(Data.omega1).T)
covOmega2 = np.cov(np.array(Data.omega2).T)
covOmega3 = np.cov(np.array(Data.omega3).T)
# Priors
p1 = 1.0/3
p2 = 1.0/3
p3 = 1.0/3
def MahalanobisDistance(sample):
# This will calculate our Mahalanobis Distance using our estimated Covariance
omega1Mahalanobis = np.sqrt(np.dot(np.dot(
|
np.subtract(sample,omega1mean)
|
numpy.subtract
|
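A compact numpy sketch of the squared-root Mahalanobis distance d(x) = sqrt((x - mu)^T Sigma^-1 (x - mu)) that the snippet above builds with nested np.dot calls, using synthetic data in place of the Data module:

import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(size=(200, 3))          # stand-in for one omega class
mu = samples.mean(axis=0)
cov = np.cov(samples.T)

def mahalanobis(x, mu, cov):
    diff = np.subtract(x, mu)
    return np.sqrt(diff @ np.linalg.inv(cov) @ diff)

print(mahalanobis(np.array([0.5, -0.2, 1.0]), mu, cov))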
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for cubic spline interpolation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_quant_finance.math.interpolation import cubic
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class CubicInterpolationTest(tf.test.TestCase):
def test_error_calc(self):
"""Test that the deviation between the interpolated values and the actual values.
This should be less than 0.02. This value was derived by running the
same test with scipy cubic interpolation
"""
sampling_points = 1000
spline_x = np.linspace(0.0, 10.0, num=11, dtype=np.float64)
spline_y = [1.0 / (1.0 + x * x) for x in spline_x]
x_series = np.array([spline_x])
y_series = np.array([spline_y])
spline = cubic.build_spline(x_series, y_series)
# There is an error if we go to 10.0
test_range_x = np.linspace(0.0, 9.99, num=sampling_points, dtype=np.float64)
search_args = tf.constant(np.array([test_range_x]), dtype=tf.float64)
projected_y = cubic.interpolate(search_args, spline)
expected_y = tf.constant([[1.0 / (1.0 + x * x) for x in test_range_x]],
dtype=tf.float64)
errors = expected_y - projected_y
deviation = self.evaluate(tfp.stats.stddev(errors[0], sample_axis=0))
limit = 0.02
self.assertLess(deviation, limit)
def test_compare_spline_64(self):
x_data1 = np.linspace(-5.0, 5.0, num=11, dtype=np.float64)
x_data2 = np.linspace(0.0, 10.0, num=11, dtype=np.float64)
x_series =
|
np.array([x_data1, x_data2])
|
numpy.array
|
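The docstring of test_error_calc says its 0.02 tolerance came from the equivalent scipy experiment. A sketch of that reference computation with scipy.interpolate.CubicSpline (the not-a-knot boundary condition is the scipy default, an assumption here):

import numpy as np
from scipy.interpolate import CubicSpline

spline_x = np.linspace(0.0, 10.0, num=11)
spline_y = 1.0 / (1.0 + spline_x**2)           # same knots and function as the test
cs = CubicSpline(spline_x, spline_y)

test_x = np.linspace(0.0, 9.99, num=1000)
errors = 1.0 / (1.0 + test_x**2) - cs(test_x)
print(np.std(errors))                          # should come out below ~0.02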
import fiona
import geopandas as gpd
import json
import matplotlib
import numpy as np
import os
import pandas as pd
import rasterio
import rasterio.mask
matplotlib.use("tkagg")
import matplotlib.pyplot as plt
import seaborn
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings("ignore", "GeoSeries.notna", UserWarning)
from sklearn.metrics import confusion_matrix
from shapely.geometry import box, Polygon
from pyproj import CRS
from rasterio import merge
from copy import deepcopy
from glob import glob
from subprocess import check_output
from collections import defaultdict
import runspec as rs
ACRES_PER_SQUARE_METER = 0.000247105
MONTANA_SHAPEFILE = "/home/thomas/irrigated-training-data-aug21/aux-shapefiles/mt.shp"
def merge_rasters_gdal(raster_files, out_filename):
if not len(raster_files):
return
if not os.path.isfile(out_filename):
print("processing", out_filename)
cmd = check_output(
["gdal_merge.py", "-of", "GTiff", "-ot", "Byte", "-o", out_filename]
+ raster_files
)
print(cmd)
def flu_data_irr_area_by_county(county_shp, flu, out_filename, plot=False, save=False):
if os.path.isfile(out_filename):
return
flu = gpd.read_file(flu)
flu = flu.loc[flu["LType"] == "I"]
counties = gpd.read_file(county_shp)
flu = flu.to_crs("EPSG:5070")
counties = counties.to_crs("EPSG:5070")
counties_with_irr_attr = counties.copy()
irrigated_area = {}
for row in counties.iterrows():
polygon = Polygon(row[1]["geometry"])
county_name = row[1]["NAME"]
# if county_name not in ('TETON', 'CASCADE'):
# continue
# print(county_name)
try:
flu_county = gpd.clip(flu, polygon)
except Exception as e:
print("error", county_name, e)
irrigated_area[county_name] = -1
continue
if plot:
fig, ax = plt.subplots()
flu_county.plot(ax=ax)
poly_gdf = gpd.geopandas.GeoDataFrame(
[1], geometry=[polygon], crs=counties.crs
)
poly_gdf.boundary.plot(ax=ax, color="red")
plt.title(county_name)
plt.show()
else:
irr_acres = np.sum(flu_county["geometry"].area)
irrigated_area[county_name] = irr_acres
names = list(irrigated_area.keys())
areas = list(irrigated_area.values())
for name, area in zip(names, areas):
print(name, area * ACRES_PER_SQUARE_METER)
counties_with_irr_attr["IRR_AREA"] = areas
if save:
counties_with_irr_attr.to_file(out_filename)
def merge_split_rasters_copy_band_descriptions(rasters, out_filename):
if not os.path.isfile(out_filename):
dsets = []
for raster in rasters:
dsets.append(rasterio.open(raster, "r"))
merged, out_transform = merge.merge(dsets)
with rasterio.open(rasters[0], "r") as src:
meta = src.meta.copy()
descriptions = src.descriptions
meta.update(
{
"height": merged.shape[1],
"width": merged.shape[2],
"transform": out_transform,
}
)
with rasterio.open(out_filename, "w", **meta) as dst:
dst.descriptions = descriptions
dst.write(merged)
for raster in rasters:
print("removing", raster)
os.remove(raster)
def _assign_year(raster, years):
for year in years:
if year in raster:
return year
raise ValueError("raster {} didn't have a year attr.")
def rename_rasters(rasters, county_shapefile, out_directory):
osb = os.path.basename
years = [str(r) for r in range(2000, 2020)]
gdf = gpd.read_file(county_shapefile)
gdf = gdf.to_crs("EPSG:5070")
for raster in rasters:
year = _assign_year(osb(raster), years)
with rasterio.open(raster, "r") as src:
bounds = src.bounds
geom = box(*bounds)
n = 0
for row in gdf.iterrows():
poly = Polygon(row[1]["geometry"])
if geom.contains(poly):
n += 1
name = row[1]["NAME"]
if n > 1:
raise ValueError("raster {} contains more than one county".format(raster))
else:
out_filename = os.path.join(out_directory, name + "_" + year + ".tif")
print(out_filename)
# os.rename(raster, out_filename)
def check_for_missing_rasters(rasters, county_shapefile):
osb = os.path.basename
years = [str(r) for r in range(2000, 2020)]
gdf = gpd.read_file(county_shapefile)
counties = gdf.loc[:, "NAME"]
for year in years:
yearly_rasters = [f for f in rasters if year in osb(f)]
county_rasters = [osb(f)[: osb(f).find("_")] for f in yearly_rasters]
missing = counties[~counties.isin(county_rasters)]
print(missing, len(yearly_rasters), counties.shape[0], year)
def clip_raster_to_shapefiles_gdal(raster_file, shapefiles, out_directory, year):
for f in shapefiles:
out_filename = os.path.join(
out_directory,
os.path.splitext(os.path.basename(f))[0] + "_" + year + ".tif",
)
if not os.path.isfile(out_filename):
print("clipping", raster_file, "to", f, "saving to", out_filename)
cmd = check_output(
[
"gdalwarp",
"-of",
"GTiff",
"-cutline",
f,
"-crop_to_cutline",
raster_file,
out_filename,
]
)
def get_irrigated_statistics(rasters_by_county, csv_out):
county_to_year_and_acres = defaultdict(dict)
for i, raster in enumerate(rasters_by_county):
ss = os.path.splitext((os.path.basename(raster)))[0]
year = ss[-4:]
name = ss[:-5]
county_to_year_and_acres[name][year] = calc_irr_area(raster)
print(i)
irr = pd.DataFrame.from_dict(county_to_year_and_acres)
irr = irr.sort_index() # sort by year
irr = irr.sort_index(axis=1) # and county name
irr.to_csv(csv_out)
def tabulate_flu_data(shapefiles, out_filename):
county_to_year_and_acres = defaultdict(dict)
for shp in shapefiles:
year = os.path.splitext(os.path.basename(shp))[0][-4:]
flu = gpd.read_file(shp)
for i, county in flu.iterrows():
name = county["NAME"].lower().replace(" ", "_")
area = county["IRR_AREA"]
county_to_year_and_acres[name][year] = area
df = pd.DataFrame.from_dict(county_to_year_and_acres)
df = df.sort_index()
df = df.sort_index(axis=1)
df.to_csv(out_filename)
def calc_irr_area(f, is_binary):
if not isinstance(f, np.ndarray):
with rasterio.open(f, "r") as src:
arr = src.read()
else:
arr = f
if is_binary:
irrigated = arr[arr == 1]
else:
amax = np.argmax(arr, axis=0)
mask = np.sum(arr, axis=0) == 0
amax = amax[~mask]
irrigated = amax[amax == 0]
return irrigated.shape[0] * (30 ** 2) * ACRES_PER_SQUARE_METER
def convert_to_uint16(files):
for f in files:
print("converting", f)
with rasterio.open(f, "r") as src:
image_stack = src.read()
target_meta = deepcopy(src.meta)
descriptions = src.descriptions
if image_stack.dtype == rasterio.uint16:
print("didn't need to convert", f)
continue
image_stack = image_stack.astype(rasterio.uint16)
target_meta.update({"dtype": rasterio.uint16})
with rasterio.open(f, "w", **target_meta) as dst:
dst.descriptions = descriptions
dst.write(image_stack)
def plotter():
import matplotlib.pyplot as plt
df = pd.read_csv("./montana_irrigated_acreage.csv")
nass = pd.read_csv("./nass_data.csv")
years = [2002, 2007, 2012, 2017]
preds = np.round(df.loc[df.iloc[:, 0].isin(years), :])
fig, ax = plt.subplots(nrows=6, ncols=10)
counties = list(preds.keys())
counties.remove("year")
nass_counties = nass.columns
nass_counties = [
s.replace("MT_", "").lower().replace(" ", "_") for s in nass_counties
]
nass.columns = nass_counties
for i, county in enumerate(counties):
n = nass[county]
p = preds[county]
if i == 55:
ax[i // 10, i % 10].plot(range(4), n, label="nass")
ax[i // 10, i % 10].plot(range(4), p, label="preds")
ax[i // 10, i % 10].axis("off")
ax[i // 10, i % 10].set_title(county)
ax[i // 10, i % 10].legend()
else:
ax[i // 10, i % 10].plot(range(4), n)
ax[i // 10, i % 10].plot(range(4), p)
ax[i // 10, i % 10].set_title(county)
ax[i // 10, i % 10].axis("off")
plt.show()
def plot_df(df, counties=None):
# bin.
if counties is None:
counties = list(df.columns.drop("year"))
years = df.loc[:, "year"].astype(np.int32)
plt.figure(figsize=(18, 15))
for i, county in enumerate(counties):
acreage = df.loc[:, county]
plt.plot(years, acreage, label=county)
plt.plot(years, acreage, "k.")
plt.xticks(years)
plt.ylabel("irrigated acreage")
plt.xlabel("year")
plt.legend()
plt.title("irr. area, selected counties in MT")
def precip_timeseries():
irr = pd.read_csv("/home/thomas/mt/statistics/irrigated_acreage_cnn_sept28.csv")
irr_summed = np.sum(irr, axis=1)
precip = pd.read_csv("/home/thomas/mt/statistics/precip_1999_2020.csv")
precip["date"] = [int(str(p)[:-2]) for p in precip["date"]]
summ = pd.DataFrame(irr_summed)
summ["date"] = np.arange(2000, 2020)
summ = summ.rename(columns={0: "acreage"})
sns.set()
fig, ax = plt.subplots()
precip.plot(x="date", y="precip", ax=ax, label="precip (in)", legend=False)
ax.set_ylabel("precip (in)")
ax1 = ax.twinx()
ax.set_ylim([14, 23])
summ.plot(x="date", y="acreage", ax=ax1, c="r", label="irr. area", legend=False)
ax1.set_ylabel("irr. area (acres)")
fig.legend(loc="upper right", bbox_to_anchor=(1, 1), bbox_transform=ax.transAxes)
ax1.grid(None)
plt.title("irr. area and precip by year")
plt.show()
def mask_raster_to_shapefile(shapefile, raster, year, return_binary=True):
"""
Generates a mask with 1 everywhere
shapefile data is present and a no_data value everywhere else.
no_data is -1 in this case, as it is never a valid class label.
Switching coordinate reference systems is important here, or
else the masking won't work.
"""
shp = gpd.read_file(shapefile)
shp = shp[shp.geometry.notnull()]
with rasterio.open(raster, "r") as src:
# pyproj deprecated the +init syntax.
crs = CRS(src.crs["init"])
shp = shp.to_crs(crs)
features = get_features(shp, year)
if len(features):
out_image, out_transform = rasterio.mask.mask(
src, shapes=features, filled=False
)
else:
return None, None
if return_binary:
out_image[out_image != 0] = 1
meta = src.meta
return out_image, meta
def get_features(gdf, year):
tmp = json.loads(gdf.to_json())
if year is not None:
for feature in tmp["features"]:
break
features = [
feature["geometry"]
for feature in tmp["features"]
if feature["properties"]["YEAR"] == year
]
else:
features = [feature["geometry"] for feature in tmp["features"]]
return features
def create_class_labels(shapefiles, assign_shapefile_class_code, mask_file, year):
first = True
class_labels = None
for f in shapefiles:
class_code = assign_shapefile_class_code(f)
print(f, class_code)
osb = os.path.basename(f)
if "irrigated" in osb and "unirrigated" not in osb:
out, _ = mask_raster_to_shapefile(
f, mask_file, year=year, return_binary=False
)
elif "fallow" in osb:
out, _ = mask_raster_to_shapefile(
f, mask_file, year=year, return_binary=False
)
else:
out, _ = mask_raster_to_shapefile(
f, mask_file, year=None, return_binary=False
)
if out is None:
print("no features for {}, {}".format(osb, year))
continue
if first:
class_labels = out
class_labels[~class_labels.mask] = class_code
first = False
else:
class_labels[~out.mask] = class_code
return class_labels
def clip_to_mt_and_get_area(tif, save=False):
with fiona.open(MONTANA_SHAPEFILE, "r") as s:
shapes = [feature["geometry"] for feature in s]
with rasterio.open(tif, "r") as src:
out_image, out_transform = rasterio.mask.mask(src, shapes, crop=True)
out_meta = src.meta
nbands = out_meta["count"]
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
out_f = os.path.splitext(tif)[0] + "_clipped.tif"
if save:
with rasterio.open(out_f, "w", **out_meta) as dest:
dest.write(out_image)
elif nbands == 1:
print(out_image.shape)
print(out_image.shape[1] * out_image.shape[2])
irrigated = out_image[out_image != 0]
print(irrigated.shape)
return irrigated.shape[0] * (30 ** 2) * ACRES_PER_SQUARE_METER
else:
amax = np.argmax(out_image, axis=0)
mask = np.sum(out_image, axis=0) == 0
amax = amax[~mask]
irrigated = amax[amax == 0]
return irrigated.shape[0] * (30 ** 2) * ACRES_PER_SQUARE_METER
def filter_shapefile_by_year(shapefile, year):
outf = os.path.splitext(os.path.basename(shapefile))[0] + "_{}.shp".format(year)
outf = os.path.join("/tmp/", outf)
gdf = gpd.read_file(shapefile)
gdf = gdf.loc[gdf["YEAR"] == year, :]
if gdf.shape[0] == 0:
return None
else:
return outf
def filter_shapefiles(shapefiles, year):
shapefiles_ = []
for s in shapefiles:
sbs = os.path.basename(s)
if "irrigated" in sbs and "unirrigated" not in sbs:
shapefiles_.append(filter_shapefile_by_year(s, year))
elif "fallow" in sbs:
shapefiles_.append(filter_shapefile_by_year(s, year))
else:
shapefiles_.append(s)
return [s for s in shapefiles_ if s is not None]
def assign_shapefile_class_code(f):
f = os.path.basename(f)
if "irrigated" in f and "unirrigated" not in f:
return 1
elif "fallow" in f or "unirrigated" in f:
return 2
else:
return 3
def load_raster(raster_name):
with rasterio.open(raster_name, "r") as src:
arr = src.read()
meta = src.meta.copy()
return arr, meta
def prf_from_cmat(cmat):
tn, fp, fn, tp = cmat.ravel()
oa = (tn + tp) / (tn + fp + tp + fn)
prec = (tp) / (tp + fp)
rec = (tp) / (tp + fn)
return oa, prec, rec, 2 * prec * rec / (prec + rec)
def irrmapper_by_county():
with open("../mt_counties.pkl", "rb") as f:
counties = pickle.load(f)
counties = "/home/thomas/irrigated-training-data-aug21/aux-shapefiles/MontanaCounties_shp/County.shp"
counties = gpd.read_file(counties)
counties = counties.to_crs("EPSG:5070")
features = json.loads(counties.to_json())
county_to_year_and_area = defaultdict(dict)
r = "/home/thomas/method_comparison/irrmapper/"
tifs = [r + "irrmapper{}.tif".format(i) for i in [2002, 2007, 2012, 2017]]
tifs = ["/home/thomas/method_comparison/best_percentile/argmaxed.tif"]
for feature in features["features"]:
name = feature["properties"]["NAME"]
year_to_area = {}
for tif in tifs:
osb = os.path.splitext(os.path.basename(tif))[0]
year = osb[-4:]
with rasterio.open(tif, "r") as src:
out_image, out_transform = rasterio.mask.mask(
src, [feature["geometry"]], crop=True, nodata=4
)
out_image = out_image[out_image == 0]
out_image[out_image == 0] = 1
area = calc_irr_area(out_image, is_binary=True)
year = 2017
year_to_area[year] = area
print(name, area, year)
county_to_year_and_area[name] = year_to_area
return county_to_year_and_area
def lanid_by_county():
with open("../mt_counties.pkl", "rb") as f:
counties = pickle.load(f)
counties = "/home/thomas/irrigated-training-data-aug21/aux-shapefiles/MontanaCounties_shp/County.shp"
counties = gpd.read_file(counties)
counties = counties.to_crs("EPSG:5070")
features = json.loads(counties.to_json())
county_to_area = {}
tif = "/home/thomas/method_comparison/lanid/lanid12.tif"
for feature in features["features"]:
name = feature["properties"]["NAME"]
year_to_area = {}
osb = os.path.splitext(os.path.basename(tif))[0]
with rasterio.open(tif, "r") as src:
out_image, out_transform = rasterio.mask.mask(
src, [feature["geometry"]], crop=True
)
area = calc_irr_area(out_image, is_binary=True)
county_to_area[name] = [area]
print(name, area)
return county_to_area
def predictions_by_county():
root = "/home/thomas/ssd/bootstrapped_merged_rasters/"
results = os.listdir(root)
with open("../mt_counties.pkl", "rb") as f:
counties = pickle.load(f)
counties = "/home/thomas/irrigated-training-data-aug21/aux-shapefiles/MontanaCounties_shp/County.shp"
counties = gpd.read_file(counties)
counties = counties.to_crs("EPSG:5070")
features = json.loads(counties.to_json())
model_to_county_and_area = defaultdict(dict)
# with open("predictions_by_model_masked_roads.json", 'r') as f:
# model_to_county_and_area = json.load(f)
for feature in features["features"]:
name = feature["properties"]["NAME"]
if name != "BEAVERHEAD":
continue
with rasterio.open("./masked_Roads.tif", "r") as src:
out_mask, _ = rasterio.mask.mask(src, [feature["geometry"]], crop=True)
mask = np.sum(out_mask, axis=0) == 0
for d in results:
tifs = glob(os.path.join(root, d, "*tif"))
model = d
year_to_area = {}
for tif in tifs:
osb = os.path.splitext(os.path.basename(tif))[0]
year = osb[-4:]
if year != "2016":
continue
else:
print(year, name)
with rasterio.open(tif, "r") as src:
out_image, out_transform = rasterio.mask.mask(
src, [feature["geometry"]], crop=True
)
out_image[0][mask] = 0
out_image[1][mask] = 0
out_image[2][mask] = 0
area = calc_irr_area(out_image)
year_to_area[year] = [area]
print(year, area)
model_to_county_and_area[model][name] = year_to_area
print(name)
print(model_to_county_and_area)
# with open("predictions_by_model_masked_roads_updated_carbon.json", 'w') as f:
# json.dump(model_to_county_and_area, f)
def cc(f):
f = os.path.basename(f)
if "irrigated" in f and "unirrigated" not in f:
return 1
else:
return 2
def mt_confusion_matrices(tifs):
years = range(2008, 2014)
root = "/home/thomas/irrigated-training-data-aug21/ee-dataset/data/test/"
shapefiles = [
"irrigated_test.shp",
"unirrigated_test.shp",
"uncultivated_test.shp",
"wetlands_buffered_test.shp",
"fallow_test.shp",
]
shapefiles = [root + s for s in shapefiles]
msk_file = "/home/thomas/method_comparison/labels/mask.tif"
cmats_agg = np.zeros((2, 2))
for year in years:
# labels = create_class_labels(shapefiles,
# assign_shapefile_class_code,
# msk_file, year)
with rasterio.open(
"/home/thomas/method_comparison/labels/labels_{}.tif".format(year), "r"
) as src:
labels = src.read()
tif = "/home/thomas/method_comparison/irrmapper/irrmapper{}.tif".format(year)
with rasterio.open(tif, "r") as src:
preds = src.read()
bad = labels == 0
labels = labels[~bad]
labels[labels != 1] = 2
preds = preds[~bad]
preds[preds != 0] = 1
labels = labels - 1
preds = ~preds
labels = ~labels
cmat = confusion_matrix(labels, preds)
tn, fp, fn, tp = cmat.ravel()
rec = tp / (tp + fn)
prec = tp / (tp + fp)
# print(rec, tp, f, fp)
f1 = 2 * prec * rec / (prec + rec)
print(year, prec, rec, f1)
cmats_agg += cmat
tn, fp, fn, tp = cmats_agg.ravel()
rec = tp / (tp + fn)
prec = tp / (tp + fp)
f1 = 2 * prec * rec / (prec + rec)
print(prec, rec, f1)
def confusion_matrices_per_label_class():
# tifs = ['/home/thomas/ndvi/lanid2013.tif', '/home/thomas/ndvi/irrmapper2013.tif']
tifs = ["/home/thomas/ndvi/lanid2013.tif"]
tifs = [
"/home/thomas/ssd/median_rasters/median_masked_to_roads/irrmedian3bands2013_roads.tif"
]
osb = os.path.basename
r = "/home/thomas/irrigated-training-data-aug21/ee-dataset/"
test = glob(r + "data/test/*shp")
train = glob(r + "data/train/*shp")
s = [f for f in test + train if "regions" not in f]
targets = ["irrigated", "unirrigated", "wetlands", "uncultivated"]
template = "/home/thomas/ndvi/lanid2013.tif"
template = "/home/thomas/ssd/median_rasters/median_masked_to_roads/irrmedian3bands2000_roads.tif"
for target in targets:
if target == "irrigated":
file_subset = [
f for f in s if target in osb(f) and "unirrigated" not in osb(f)
]
elif target == "unirrigated":
file_subset = [f for f in s if target in osb(f) or "fallow" in osb(f)]
else:
file_subset = [f for f in s if target in osb(f)]
labels = create_class_labels(file_subset, cc, template, 2013)
for tif in tifs:
print(osb(tif), osb(file_subset[0]))
with rasterio.open(tif, "r") as src:
arr = src.read()
if arr.shape[0] == 1:
amax = arr / 255
amax = amax.squeeze()
amax[amax == 0] = 2
y_true = labels.squeeze()[~labels.mask.squeeze()]
else:
amax =
|
np.argmax(arr, axis=0)
|
numpy.argmax
|
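A small sketch of the pixel-count-to-acreage arithmetic used throughout the script above (30 m pixels, class 0 = irrigated in the multi-band case, ACRES_PER_SQUARE_METER taken from the script), on a synthetic array instead of a rasterio dataset:

import numpy as np

ACRES_PER_SQUARE_METER = 0.000247105
PIXEL_AREA_M2 = 30 ** 2                      # 30 m x 30 m pixels

def irrigated_acres(class_probs):
    # class_probs: (n_classes, H, W); all-zero columns are treated as nodata
    amax = np.argmax(class_probs, axis=0)
    nodata = np.sum(class_probs, axis=0) == 0
    irrigated_pixels = np.sum((amax == 0) & ~nodata)
    return irrigated_pixels * PIXEL_AREA_M2 * ACRES_PER_SQUARE_METER

probs = np.random.random((3, 100, 100))
probs[:, :10, :] = 0                         # simulate a nodata strip
print(irrigated_acres(probs))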
import os
import numpy as np
import h5py
from fuel.converters.base import fill_hdf5_file
from scipy.io import loadmat
from os import listdir
from os.path import isfile, join
from PIL import Image
import shutil
from argparse import ArgumentParser
def main(path):
train_features = []
train_locations = []
train_labels = []
test_features = []
test_locations = []
test_labels = []
for f in listdir('images'):
if isfile(join('images', f)):
number, label, x, y = f.split('.')[0].split('_')
location = np.array((0.28, 0, (int(x) + 14.0 - 50.0) / 50.0, 0, 0.28, (int(y) + 14.0 - 50.0) / 50.0), ndmin=1, dtype=np.float32)
image = np.array(Image.open(join('images', f)), ndmin=3, dtype=np.uint8)
label = int(label)
if int(number) <= 60000:
train_features.append(image)
train_locations.append(location)
train_labels.append(label)
else:
test_features.append(image)
test_locations.append(location)
test_labels.append(label)
h5file = h5py.File(path, mode='w')
data = (
('train', 'features', np.array(train_features)),
('test', 'features',
|
np.array(test_features)
|
numpy.array
|
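The 6-number location tuple above reads like a row-major 2x3 affine (scale, 0, t_x, 0, scale, t_y) with coordinates normalized to roughly [-1, 1] on a 100 px canvas; that interpretation is an assumption, but the arithmetic itself is easy to check:

import numpy as np

def location_params(x, y, digit=28, canvas=100):
    # top-left corner (x, y) of a 28x28 digit on a 100x100 canvas ->
    # normalized centre offset and scale, as in the converter above
    scale = digit / canvas                       # 0.28
    tx = (x + digit / 2.0 - canvas / 2.0) / (canvas / 2.0)
    ty = (y + digit / 2.0 - canvas / 2.0) / (canvas / 2.0)
    return np.array([scale, 0.0, tx, 0.0, scale, ty], dtype=np.float32)

print(location_params(0, 0))     # digit in the corner -> offsets near -0.72
print(location_params(36, 36))   # digit centred -> offsets 0.0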
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer datasets.
"""
from __future__ import absolute_import, division, print_function
import os
import copy
import re
import numpy as np
import six
import warnings
from astropy import constants as const
import astropy.units as units
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, FK5, Angle
from .uvbase import UVBase
from . import parameter as uvp
from . import telescopes as uvtel
from . import utils as uvutils
if six.PY2:
from collections import Iterable
else:
from collections.abc import Iterable
class UVData(UVBase):
"""
A class for defining a radio interferometer dataset.
Currently supported file types: uvfits, miriad, fhd.
Provides phasing functions.
Attributes
----------
UVParameter objects :
For full list see UVData Parameters
(http://pyuvdata.readthedocs.io/en/latest/uvdata_parameters.html).
Some are always required, some are required for certain phase_types
and others are always optional.
"""
def __init__(self):
"""Create a new UVData object."""
# add the UVParameters to the class
# standard angle tolerance: 10 mas in radians.
# Should perhaps be decreased to 1 mas in the future
radian_tol = 10 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)
self._Ntimes = uvp.UVParameter('Ntimes', description='Number of times',
expected_type=int)
self._Nbls = uvp.UVParameter('Nbls', description='Number of baselines',
expected_type=int)
self._Nblts = uvp.UVParameter('Nblts', description='Number of baseline-times '
'(i.e. number of spectra). Not necessarily '
'equal to Nbls * Ntimes', expected_type=int)
self._Nfreqs = uvp.UVParameter('Nfreqs', description='Number of frequency channels',
expected_type=int)
self._Npols = uvp.UVParameter('Npols', description='Number of polarizations',
expected_type=int)
desc = ('Array of the visibility data, shape: (Nblts, Nspws, Nfreqs, '
'Npols), type = complex float, in units of self.vis_units')
self._data_array = uvp.UVParameter('data_array', description=desc,
form=('Nblts', 'Nspws',
'Nfreqs', 'Npols'),
expected_type=np.complex)
desc = 'Visibility units, options are: "uncalib", "Jy" or "K str"'
self._vis_units = uvp.UVParameter('vis_units', description=desc,
form='str', expected_type=str,
acceptable_vals=["uncalib", "Jy", "K str"])
desc = ('Number of data points averaged into each data element, '
'NOT required to be an integer, type = float, same shape as data_array.'
'The product of the integration_time and the nsample_array '
'value for a visibility reflects the total amount of time '
'that went into the visibility. Best practice is for the '
'nsample_array to be used to track flagging within an integration_time '
'(leading to a decrease of the nsample array value below 1) and '
'LST averaging (leading to an increase in the nsample array '
'value). So datasets that have not been LST averaged should '
'have nsample array values less than or equal to 1.'
'Note that many files do not follow this convention, but it is '
'safe to assume that the product of the integration_time and '
'the nsample_array is the total amount of time included in a visibility.')
self._nsample_array = uvp.UVParameter('nsample_array', description=desc,
form=('Nblts', 'Nspws',
'Nfreqs', 'Npols'),
expected_type=(np.float))
desc = 'Boolean flag, True is flagged, same shape as data_array.'
self._flag_array = uvp.UVParameter('flag_array', description=desc,
form=('Nblts', 'Nspws',
'Nfreqs', 'Npols'),
expected_type=np.bool)
self._Nspws = uvp.UVParameter('Nspws', description='Number of spectral windows '
'(ie non-contiguous spectral chunks). '
'More than one spectral window is not '
'currently supported.', expected_type=int)
self._spw_array = uvp.UVParameter('spw_array',
description='Array of spectral window '
'Numbers, shape (Nspws)', form=('Nspws',),
expected_type=int)
desc = ('Projected baseline vectors relative to phase center, '
'shape (Nblts, 3), units meters. Convention is: uvw = xyz(ant2) - xyz(ant1).'
'Note that this is the Miriad convention but it is different '
'from the AIPS/FITS convention (where uvw = xyz(ant1) - xyz(ant2)).')
self._uvw_array = uvp.UVParameter('uvw_array', description=desc,
form=('Nblts', 3),
expected_type=np.float,
acceptable_range=(0, 1e8), tols=1e-3)
desc = ('Array of times, center of integration, shape (Nblts), '
'units Julian Date')
self._time_array = uvp.UVParameter('time_array', description=desc,
form=('Nblts',),
expected_type=np.float,
tols=1e-3 / (60.0 * 60.0 * 24.0)) # 1 ms in days
desc = ('Array of lsts, center of integration, shape (Nblts), '
'units radians')
self._lst_array = uvp.UVParameter('lst_array', description=desc,
form=('Nblts',),
expected_type=np.float,
tols=radian_tol)
desc = ('Array of first antenna indices, shape (Nblts), '
'type = int, 0 indexed')
self._ant_1_array = uvp.UVParameter('ant_1_array', description=desc,
expected_type=int, form=('Nblts',))
desc = ('Array of second antenna indices, shape (Nblts), '
'type = int, 0 indexed')
self._ant_2_array = uvp.UVParameter('ant_2_array', description=desc,
expected_type=int, form=('Nblts',))
desc = ('Array of baseline indices, shape (Nblts), '
'type = int; baseline = 2048 * (ant1+1) + (ant2+1) + 2^16')
self._baseline_array = uvp.UVParameter('baseline_array',
description=desc,
expected_type=int, form=('Nblts',))
# this dimensionality of freq_array does not allow for different spws
# to have different dimensions
desc = 'Array of frequencies, center of the channel, shape (Nspws, Nfreqs), units Hz'
self._freq_array = uvp.UVParameter('freq_array', description=desc,
form=('Nspws', 'Nfreqs'),
expected_type=np.float,
tols=1e-3) # mHz
desc = ('Array of polarization integers, shape (Npols). '
'AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); '
'circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). '
'NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations '
'"Stokes", but this is inaccurate as visibilities cannot be in '
'true Stokes polarizations for physical antennas. We adopt the '
'term pseudo-Stokes to refer to linear combinations of instrumental '
'visibility polarizations (e.g. pI = xx + yy).')
self._polarization_array = uvp.UVParameter('polarization_array',
description=desc,
expected_type=int,
acceptable_vals=list(
np.arange(-8, 0)) + list(np.arange(1, 5)),
form=('Npols',))
desc = ('Length of the integration in seconds, shape (Nblts). '
'The product of the integration_time and the nsample_array '
'value for a visibility reflects the total amount of time '
'that went into the visibility. Best practice is for the '
'integration_time to reflect the length of time a visibility '
'was integrated over (so it should vary in the case of '
'baseline-dependent averaging and be a way to do selections '
'for differently integrated baselines).'
'Note that many files do not follow this convention, but it is '
'safe to assume that the product of the integration_time and '
'the nsample_array is the total amount of time included in a visibility.')
self._integration_time = uvp.UVParameter('integration_time',
description=desc,
form=('Nblts',),
expected_type=np.float, tols=1e-3) # 1 ms
self._channel_width = uvp.UVParameter('channel_width',
description='Width of frequency channels (Hz)',
expected_type=np.float,
tols=1e-3) # 1 mHz
# --- observation information ---
self._object_name = uvp.UVParameter('object_name',
description='Source or field '
'observed (string)', form='str',
expected_type=str)
self._telescope_name = uvp.UVParameter('telescope_name',
description='Name of telescope '
'(string)', form='str',
expected_type=str)
self._instrument = uvp.UVParameter('instrument', description='Receiver or backend. '
'Sometimes identical to telescope_name',
form='str', expected_type=str)
desc = ('Telescope location: xyz in ITRF (earth-centered frame). '
'Can also be accessed using telescope_location_lat_lon_alt or '
'telescope_location_lat_lon_alt_degrees properties')
self._telescope_location = uvp.LocationParameter('telescope_location',
description=desc,
acceptable_range=(
6.35e6, 6.39e6),
tols=1e-3)
self._history = uvp.UVParameter('history', description='String of history, units English',
form='str', expected_type=str)
# --- phasing information ---
desc = ('String indicating phasing type. Allowed values are "drift", '
'"phased" and "unknown"')
self._phase_type = uvp.UVParameter('phase_type', form='str', expected_type=str,
description=desc, value='unknown',
acceptable_vals=['drift', 'phased', 'unknown'])
desc = ('Required if phase_type = "phased". Epoch year of the phase '
'applied to the data (eg 2000.)')
self._phase_center_epoch = uvp.UVParameter('phase_center_epoch',
required=False,
description=desc,
expected_type=np.float)
desc = ('Required if phase_type = "phased". Right ascension of phase '
'center (see uvw_array), units radians. Can also be accessed using phase_center_ra_degrees.')
self._phase_center_ra = uvp.AngleParameter('phase_center_ra',
required=False,
description=desc,
expected_type=np.float,
tols=radian_tol)
desc = ('Required if phase_type = "phased". Declination of phase center '
'(see uvw_array), units radians. Can also be accessed using phase_center_dec_degrees.')
self._phase_center_dec = uvp.AngleParameter('phase_center_dec',
required=False,
description=desc,
expected_type=np.float,
tols=radian_tol)
desc = ('Only relevant if phase_type = "phased". Specifies the frame the'
' data and uvw_array are phased to. Options are "gcrs" and "icrs",'
' default is "icrs"')
self._phase_center_frame = uvp.UVParameter('phase_center_frame',
required=False,
description=desc,
expected_type=str,
acceptable_vals=['icrs', 'gcrs'])
# --- antenna information ----
desc = ('Number of antennas with data present (i.e. number of unique '
'entries in ant_1_array and ant_2_array). May be smaller '
'than the number of antennas in the array')
self._Nants_data = uvp.UVParameter('Nants_data', description=desc,
expected_type=int)
desc = ('Number of antennas in the array. May be larger '
'than the number of antennas with data')
self._Nants_telescope = uvp.UVParameter('Nants_telescope',
description=desc, expected_type=int)
desc = ('List of antenna names, shape (Nants_telescope), '
'with numbers given by antenna_numbers (which can be matched '
'to ant_1_array and ant_2_array). There must be one entry '
'here for each unique entry in ant_1_array and '
'ant_2_array, but there may be extras as well.')
self._antenna_names = uvp.UVParameter('antenna_names', description=desc,
form=('Nants_telescope',),
expected_type=str)
desc = ('List of integer antenna numbers corresponding to antenna_names, '
'shape (Nants_telescope). There must be one '
'entry here for each unique entry in ant_1_array and '
'ant_2_array, but there may be extras as well.')
self._antenna_numbers = uvp.UVParameter('antenna_numbers', description=desc,
form=('Nants_telescope',),
expected_type=int)
# -------- extra, non-required parameters ----------
desc = ('Orientation of the physical dipole corresponding to what is '
'labelled as the x polarization. Options are "east" '
'(indicating east/west orientation) and "north" (indicating '
'north/south orientation)')
self._x_orientation = uvp.UVParameter('x_orientation', description=desc,
required=False, expected_type=str,
acceptable_vals=['east', 'north'])
blt_order_options = ['time', 'baseline', 'ant1', 'ant2', 'bda']
desc = ('Ordering of the data array along the blt axis. A tuple with '
'the major and minor order (minor order is omitted if order is "bda"). '
'The allowed values are: '
+ ' ,'.join([str(val) for val in blt_order_options]))
self._blt_order = uvp.UVParameter('blt_order', description=desc, form=(2,),
required=False, expected_type=str,
acceptable_vals=blt_order_options)
desc = ('Any user supplied extra keywords, type=dict. Keys should be '
'8 character or less strings if writing to uvfits or miriad files. '
'Use the special key "comment" for long multi-line string comments.')
self._extra_keywords = uvp.UVParameter('extra_keywords', required=False,
description=desc, value={},
spoof_val={}, expected_type=dict)
desc = ('Array giving coordinates of antennas relative to '
'telescope_location (ITRF frame), shape (Nants_telescope, 3), '
'units meters. See the tutorial page in the documentation '
'for an example of how to convert this to topocentric frame. '
'Will be a required parameter in a future version.')
self._antenna_positions = uvp.AntPositionParameter('antenna_positions',
required=False,
description=desc,
form=(
'Nants_telescope', 3),
expected_type=np.float,
tols=1e-3) # 1 mm
desc = ('Array of antenna diameters in meters. Used by CASA to '
'construct a default beam if no beam is supplied.')
self._antenna_diameters = uvp.UVParameter('antenna_diameters',
required=False,
description=desc,
form=('Nants_telescope',),
expected_type=np.float,
tols=1e-3) # 1 mm
# --- other stuff ---
# the below are copied from AIPS memo 117, but could be revised to
# merge with other sources of data.
self._gst0 = uvp.UVParameter('gst0', required=False,
description='Greenwich sidereal time at '
'midnight on reference date',
spoof_val=0.0, expected_type=np.float)
self._rdate = uvp.UVParameter('rdate', required=False,
description='Date for which the GST0 or '
'whatever... applies',
spoof_val='', form='str')
self._earth_omega = uvp.UVParameter('earth_omega', required=False,
description='Earth\'s rotation rate '
'in degrees per day',
spoof_val=360.985, expected_type=np.float)
self._dut1 = uvp.UVParameter('dut1', required=False,
description='DUT1 (google it) AIPS 117 '
'calls it UT1UTC',
spoof_val=0.0, expected_type=np.float)
self._timesys = uvp.UVParameter('timesys', required=False,
description='We only support UTC',
spoof_val='UTC', form='str')
desc = ('FHD thing we do not understand, something about the time '
'at which the phase center is normal to the chosen UV plane '
'for phasing')
self._uvplane_reference_time = uvp.UVParameter('uvplane_reference_time',
required=False,
description=desc,
spoof_val=0)
desc = "Per-antenna and per-frequency equalization coefficients"
self._eq_coeffs = uvp.UVParameter("eq_coeffs",
required=False,
description=desc,
form=("Nants_telescope", "Nfreqs"),
expected_type=np.float,
spoof_val=1.0)
desc = "Convention for how to remove eq_coeffs from data"
self._eq_coeffs_convention = uvp.UVParameter("eq_coeffs_convention",
required=False,
description=desc,
form="str",
spoof_val="divide")
super(UVData, self).__init__()
@property
def _data_params(self):
"""List of strings giving the data-like parameters"""
return ['data_array', 'nsample_array', 'flag_array']
@property
def data_like_parameters(self):
"""An iterator of defined parameters which are data-like (not metadata-like)"""
for key in self._data_params:
if hasattr(self, key):
yield getattr(self, key)
@property
def metadata_only(self):
"""
Property that determines whether this is a metadata only object.
An object is metadata only if data_array, nsample_array and flag_array
are all None.
"""
metadata_only = all(d is None for d in self.data_like_parameters)
for param_name in self._data_params:
getattr(self, "_" + param_name).required = not metadata_only
return metadata_only
def check(self, check_extra=True, run_check_acceptability=True):
"""
Add some extra checks on top of checks on UVBase class.
Check that required parameters exist. Check that parameters have
appropriate shapes and optionally that the values are acceptable.
Parameters
----------
check_extra : bool
If true, check all parameters, otherwise only check required parameters.
run_check_acceptability : bool
Option to check if values in parameters are acceptable.
Returns
-------
bool
True if check passes
Raises
------
ValueError
if parameter shapes or types are wrong or do not have acceptable
values (if run_check_acceptability is True)
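Examples
--------
A minimal usage sketch (illustrative only; assumes ``uv`` is a fully
populated UVData object):
>>> uv.check(check_extra=True, run_check_acceptability=True)
True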
"""
# first run the basic check from UVBase
# set the phase type based on object's value
if self.phase_type == 'phased':
self.set_phased()
elif self.phase_type == 'drift':
self.set_drift()
else:
self.set_unknown_phase_type()
# check for deprecated x_orientation strings and convert to new values (if possible)
if self.x_orientation is not None:
# the acceptability check is always done with a `lower` for strings
if self.x_orientation.lower() not in self._x_orientation.acceptable_vals:
warn_string = ('x_orientation {xval} is not one of [{vals}], '
.format(xval=self.x_orientation,
vals=(', ').join(self._x_orientation.acceptable_vals)))
if self.x_orientation.lower() == 'e':
self.x_orientation = 'east'
warn_string += 'converting to "east".'
elif self.x_orientation.lower() == 'n':
self.x_orientation = 'north'
warn_string += 'converting to "north".'
else:
warn_string += 'cannot be converted.'
warnings.warn(warn_string + ' Only [{vals}] will be supported '
'starting in version 1.5'
.format(vals=(', ').join(self._x_orientation.acceptable_vals)),
DeprecationWarning)
super(UVData, self).check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
# Check internal consistency of numbers which don't explicitly correspond
# to the shape of another array.
nants_data_calc = int(len(np.unique(self.ant_1_array.tolist()
+ self.ant_2_array.tolist())))
if self.Nants_data != nants_data_calc:
raise ValueError('Nants_data must be equal to the number of unique '
'values in ant_1_array and ant_2_array')
if self.Nbls != len(np.unique(self.baseline_array)):
raise ValueError('Nbls must be equal to the number of unique '
'baselines in the data_array')
if self.Ntimes != len(np.unique(self.time_array)):
raise ValueError('Ntimes must be equal to the number of unique '
'times in the time_array')
# require that all entries in ant_1_array and ant_2_array exist in antenna_numbers
if not all(ant in self.antenna_numbers for ant in self.ant_1_array):
raise ValueError('All antennas in ant_1_array must be in antenna_numbers.')
if not all(ant in self.antenna_numbers for ant in self.ant_2_array):
raise ValueError('All antennas in ant_2_array must be in antenna_numbers.')
# issue warning if extra_keywords keys are longer than 8 characters
for key in self.extra_keywords.keys():
if len(key) > 8:
warnings.warn('key {key} in extra_keywords is longer than 8 '
'characters. It will be truncated to 8 if written '
'to uvfits or miriad file formats.'.format(key=key))
# issue warning if extra_keywords values are lists, arrays or dicts
for key, value in self.extra_keywords.items():
if isinstance(value, (list, dict, np.ndarray)):
warnings.warn('{key} in extra_keywords is a list, array or dict, '
'which will raise an error when writing uvfits or '
'miriad file types'.format(key=key))
# issue deprecation warning if antenna positions are not set
if self.antenna_positions is None:
warnings.warn('antenna_positions are not defined. '
'antenna_positions will be a required parameter in '
'version 1.5', DeprecationWarning)
# check auto and cross-corrs have sensible uvws
autos = np.isclose(self.ant_1_array - self.ant_2_array, 0.0)
if not np.all(np.isclose(self.uvw_array[autos], 0.0,
rtol=self._uvw_array.tols[0],
atol=self._uvw_array.tols[1])):
raise ValueError("Some auto-correlations have non-zero "
"uvw_array coordinates.")
if np.any(np.isclose([np.linalg.norm(uvw) for uvw in self.uvw_array[~autos]], 0.0,
rtol=self._uvw_array.tols[0],
atol=self._uvw_array.tols[1])):
raise ValueError("Some cross-correlations have near-zero "
"uvw_array magnitudes.")
return True
def copy(self, metadata_only=False):
"""Make and return a copy of the UVData object.
Parameters
----------
metadata_only : bool
If True, only copy the metadata of the object.
Returns
-------
uv : UVData
Copy of self.
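Examples
--------
A minimal sketch (assumes ``uv`` is an existing UVData object with data):
>>> uv_meta = uv.copy(metadata_only=True)
>>> uv_meta.metadata_only
True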
"""
uv = UVData()
for param in self:
# parameter names have a leading underscore we want to ignore
if metadata_only and param.lstrip("_") in self._data_params:
continue
setattr(uv, param, copy.deepcopy(getattr(self, param)))
return uv
def set_drift(self):
"""Set phase_type to 'drift' and adjust required parameters."""
self.phase_type = 'drift'
self._phase_center_epoch.required = False
self._phase_center_ra.required = False
self._phase_center_dec.required = False
def set_phased(self):
"""Set phase_type to 'phased' and adjust required parameters."""
self.phase_type = 'phased'
self._phase_center_epoch.required = True
self._phase_center_ra.required = True
self._phase_center_dec.required = True
def set_unknown_phase_type(self):
"""Set phase_type to 'unknown' and adjust required parameters."""
self.phase_type = 'unknown'
self._phase_center_epoch.required = False
self._phase_center_ra.required = False
self._phase_center_dec.required = False
def known_telescopes(self):
"""
Get a list of telescopes known to pyuvdata.
This is just a shortcut to pyuvdata.telescopes.known_telescopes()
Returns
-------
list of str
List of names of known telescopes
"""
return uvtel.known_telescopes()
def set_telescope_params(self, overwrite=False):
"""
Set telescope related parameters.
If the telescope_name is in the known_telescopes, set any missing
telescope-associated parameters (e.g. telescope location) to the value
for the known telescope.
Parameters
----------
overwrite : bool
Option to overwrite existing telescope-associated parameters with
the values from the known telescope.
Raises
------
ValueError
if the telescope_name is not in known telescopes
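Examples
--------
A minimal sketch (assumes ``uv.telescope_name`` is one of the known
telescopes, e.g. an entry returned by ``uv.known_telescopes()``):
>>> uv.set_telescope_params(overwrite=False)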
"""
telescope_obj = uvtel.get_telescope(self.telescope_name)
if telescope_obj is not False:
params_set = []
for p in telescope_obj:
telescope_param = getattr(telescope_obj, p)
self_param = getattr(self, p)
if telescope_param.value is not None and (overwrite is True
or self_param.value is None):
telescope_shape = telescope_param.expected_shape(telescope_obj)
self_shape = self_param.expected_shape(self)
if telescope_shape == self_shape:
params_set.append(self_param.name)
prop_name = self_param.name
setattr(self, prop_name, getattr(telescope_obj, prop_name))
else:
# expected shapes aren't equal. This can happen e.g. with diameters,
# which is a single value on the telescope object but is
# an array of length Nants_telescope on the UVData object
# use an assert here because we want an error if this condition
# isn't true, but it's really an internal consistency check.
# This will error if there are changes to the Telescope
# object definition, but nothing that a normal user does will cause an error
assert(telescope_shape == () and self_shape != 'str')
array_val = np.zeros(self_shape,
dtype=telescope_param.expected_type) + telescope_param.value
params_set.append(self_param.name)
prop_name = self_param.name
setattr(self, prop_name, array_val)
if len(params_set) > 0:
params_set_str = ', '.join(params_set)
warnings.warn('{params} is not set. Using known values '
'for {telescope_name}.'.format(params=params_set_str,
telescope_name=telescope_obj.telescope_name))
else:
raise ValueError('Telescope {telescope_name} is not in '
'known_telescopes.'.format(telescope_name=self.telescope_name))
def baseline_to_antnums(self, baseline):
"""
Get the antenna numbers corresponding to a given baseline number.
Parameters
----------
baseline : int or array_like of int
baseline number
Returns
-------
int or array_like of int
first antenna number(s)
int or array_like of int
second antenna number(s)
"""
return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)
def antnums_to_baseline(self, ant1, ant2, attempt256=False):
"""
Get the baseline number corresponding to two given antenna numbers.
Parameters
----------
ant1 : int or array_like of int
first antenna number
ant2 : int or array_like of int
second antenna number
attempt256 : bool
Option to try to use the older 256 standard used in many uvfits files
(will use 2048 standard if there are more than 256 antennas).
Returns
-------
int or array of int
baseline number corresponding to the two antenna numbers.
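Examples
--------
Illustrative round trip (assumes ``uv`` is a UVData object and that
antennas 1 and 2 are valid antenna numbers for it):
>>> bl = uv.antnums_to_baseline(1, 2)
>>> ants = uv.baseline_to_antnums(bl)  # recovers (1, 2)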
"""
return uvutils.antnums_to_baseline(ant1, ant2, self.Nants_telescope, attempt256=attempt256)
def set_lsts_from_time_array(self):
"""Set the lst_array based from the time_array."""
latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
unique_lst_array = uvutils.get_lst_for_time(unique_times, latitude, longitude, altitude)
self.lst_array = unique_lst_array[inverse_inds]
def unphase_to_drift(self, phase_frame=None, use_ant_pos=False):
"""
Convert from a phased dataset to a drift dataset.
See the phasing memo under docs/references for more documentation.
Parameters
----------
phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation, 'icrs' also includes aberration.
Defaults to using the 'phase_center_frame' attribute or 'icrs'
if that attribute is None.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
Raises
------
ValueError
If the phase_type is not 'phased'
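Examples
--------
A minimal sketch (assumes ``uv`` holds phased data):
>>> uv.unphase_to_drift(phase_frame='gcrs')
>>> uv.phase_type
'drift'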
"""
if self.phase_type == 'phased':
pass
elif self.phase_type == 'drift':
raise ValueError('The data is already drift scanning; can only '
'unphase phased data.')
else:
raise ValueError('The phasing type of the data is unknown. '
'Set the phase_type to drift or phased to '
'reflect the phasing status of the data')
if phase_frame is None:
if self.phase_center_frame is not None:
phase_frame = self.phase_center_frame
else:
phase_frame = 'icrs'
icrs_coord = SkyCoord(ra=self.phase_center_ra, dec=self.phase_center_dec,
unit='radian', frame='icrs')
if phase_frame == 'icrs':
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format='jd')
frame_phase_center = icrs_coord.transform_to('gcrs')
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
# In some cases, the uvws are already float64, but sometimes they're not
self.uvw_array = np.float64(self.uvw_array)
# apply -w phasor
if not self.metadata_only:
w_lambda = (self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to('m/s').value * self.freq_array.reshape(1, self.Nfreqs))
phs = np.exp(-1j * 2 * np.pi * (-1) * w_lambda[:, None, :, None])
self.data_array *= phs
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
for ind, jd in enumerate(unique_times):
inds = np.where(self.time_array == jd)[0]
obs_time = Time(jd, format='jd')
itrs_telescope_location = SkyCoord(x=self.telescope_location[0] * units.m,
y=self.telescope_location[1] * units.m,
z=self.telescope_location[2] * units.m,
frame='itrs', obstime=obs_time)
frame_telescope_location = itrs_telescope_location.transform_to(phase_frame)
itrs_lat_lon_alt = self.telescope_location_lat_lon_alt
if use_ant_pos:
ant_uvw = uvutils.phase_uvw(self.telescope_location_lat_lon_alt[1],
self.telescope_location_lat_lon_alt[0],
self.antenna_positions)
for bl_ind in inds:
ant1_index = np.where(self.antenna_numbers == self.ant_1_array[bl_ind])[0][0]
ant2_index = np.where(self.antenna_numbers == self.ant_2_array[bl_ind])[0][0]
self.uvw_array[bl_ind, :] = ant_uvw[ant2_index, :] - ant_uvw[ant1_index, :]
else:
uvws_use = self.uvw_array[inds, :]
uvw_rel_positions = uvutils.unphase_uvw(frame_phase_center.ra.rad,
frame_phase_center.dec.rad,
uvws_use)
# astropy 2 vs 3 use a different keyword name
if six.PY2:
rep_keyword = 'representation'
else:
rep_keyword = 'representation_type'
setattr(frame_telescope_location, rep_keyword, 'cartesian')
rep_dict = {}
rep_dict[rep_keyword] = 'cartesian'
frame_uvw_coord = SkyCoord(x=uvw_rel_positions[:, 0] * units.m + frame_telescope_location.x,
y=uvw_rel_positions[:, 1] * units.m + frame_telescope_location.y,
z=uvw_rel_positions[:, 2] * units.m + frame_telescope_location.z,
frame=phase_frame, obstime=obs_time,
**rep_dict)
itrs_uvw_coord = frame_uvw_coord.transform_to('itrs')
# now convert them to ENU, which is the space uvws are in
self.uvw_array[inds, :] = uvutils.ENU_from_ECEF(itrs_uvw_coord.cartesian.get_xyz().value.T,
*itrs_lat_lon_alt)
# remove phase center
self.phase_center_frame = None
self.phase_center_ra = None
self.phase_center_dec = None
self.phase_center_epoch = None
self.set_drift()
def phase(self, ra, dec, epoch='J2000', phase_frame='icrs', use_ant_pos=False):
"""
Phase a drift scan dataset to a single ra/dec at a particular epoch.
See the phasing memo under docs/references for more documentation.
Tested against MWA_Tools/CONV2UVFITS/convutils.
Will not phase already phased data.
Parameters
----------
ra : float
The ra to phase to in radians.
dec : float
The dec to phase to in radians.
epoch : astropy.time.Time object or str
The epoch to use for phasing. Either an astropy Time object or the
string "J2000" (which is the default).
Note that the epoch is only used to evaluate the ra & dec values,
if the epoch is not J2000, the ra & dec values are interpreted
as FK5 ra/dec values and translated to J2000, the data are then
phased to the J2000 ra/dec values.
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
Raises
------
ValueError
If the phase_type is not 'drift'
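Examples
--------
A minimal sketch (assumes ``uv`` holds drift-scan data; the ra/dec values
below are arbitrary):
>>> import numpy as np
>>> uv.phase(np.deg2rad(30.0), np.deg2rad(-26.7), phase_frame='icrs')
>>> uv.phase_type
'phased'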
"""
if self.phase_type == 'drift':
pass
elif self.phase_type == 'phased':
raise ValueError('The data is already phased; can only phase '
'drift scan data. Use unphase_to_drift to '
'convert to a drift scan.')
else:
raise ValueError('The phasing type of the data is unknown. '
'Set the phase_type to "drift" or "phased" to '
'reflect the phasing status of the data')
if phase_frame not in ['icrs', 'gcrs']:
raise ValueError('phase_frame can only be set to icrs or gcrs.')
if epoch == "J2000" or epoch == 2000:
icrs_coord = SkyCoord(ra=ra, dec=dec, unit='radian', frame='icrs')
else:
assert(isinstance(epoch, Time))
phase_center_coord = SkyCoord(ra=ra, dec=dec, unit='radian',
equinox=epoch, frame=FK5)
# convert to icrs (i.e. J2000) to write to object
icrs_coord = phase_center_coord.transform_to('icrs')
self.phase_center_ra = icrs_coord.ra.radian
self.phase_center_dec = icrs_coord.dec.radian
self.phase_center_epoch = 2000.0
if phase_frame == 'icrs':
frame_phase_center = icrs_coord
else:
# use center of observation for obstime for gcrs
center_time = np.mean([np.max(self.time_array), np.min(self.time_array)])
icrs_coord.obstime = Time(center_time, format='jd')
frame_phase_center = icrs_coord.transform_to('gcrs')
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
self.uvw_array = np.float64(self.uvw_array)
unique_times, unique_inds = np.unique(self.time_array, return_index=True)
for ind, jd in enumerate(unique_times):
inds = np.where(self.time_array == jd)[0]
obs_time = Time(jd, format='jd')
itrs_telescope_location = SkyCoord(x=self.telescope_location[0] * units.m,
y=self.telescope_location[1] * units.m,
z=self.telescope_location[2] * units.m,
frame='itrs', obstime=obs_time)
itrs_lat_lon_alt = self.telescope_location_lat_lon_alt
frame_telescope_location = itrs_telescope_location.transform_to(phase_frame)
# astropy 2 vs 3 use a different keyword name
if six.PY2:
rep_keyword = 'representation'
else:
rep_keyword = 'representation_type'
setattr(frame_telescope_location, rep_keyword, 'cartesian')
if use_ant_pos:
# This promotion is REQUIRED to get the right answer when we
# add in the telescope location for ICRS
ecef_ant_pos = np.float64(self.antenna_positions) + self.telescope_location
itrs_ant_coord = SkyCoord(x=ecef_ant_pos[:, 0] * units.m,
y=ecef_ant_pos[:, 1] * units.m,
z=ecef_ant_pos[:, 2] * units.m,
frame='itrs', obstime=obs_time)
frame_ant_coord = itrs_ant_coord.transform_to(phase_frame)
frame_ant_rel = (frame_ant_coord.cartesian
- frame_telescope_location.cartesian).get_xyz().T.value
frame_ant_uvw = uvutils.phase_uvw(frame_phase_center.ra.rad,
frame_phase_center.dec.rad,
frame_ant_rel)
for bl_ind in inds:
ant1_index = np.where(self.antenna_numbers == self.ant_1_array[bl_ind])[0][0]
ant2_index = np.where(self.antenna_numbers == self.ant_2_array[bl_ind])[0][0]
self.uvw_array[bl_ind, :] = frame_ant_uvw[ant2_index, :] - frame_ant_uvw[ant1_index, :]
else:
# Also, uvws should be thought of like ENU, not ECEF (or rotated ECEF)
# convert them to ECEF to transform between frames
uvws_use = self.uvw_array[inds, :]
uvw_ecef = uvutils.ECEF_from_ENU(uvws_use, *itrs_lat_lon_alt)
itrs_uvw_coord = SkyCoord(x=uvw_ecef[:, 0] * units.m,
y=uvw_ecef[:, 1] * units.m,
z=uvw_ecef[:, 2] * units.m,
frame='itrs', obstime=obs_time)
frame_uvw_coord = itrs_uvw_coord.transform_to(phase_frame)
# this takes out the telescope location in the new frame,
# so these are vectors again
frame_rel_uvw = (frame_uvw_coord.cartesian.get_xyz().value.T
- frame_telescope_location.cartesian.get_xyz().value)
self.uvw_array[inds, :] = uvutils.phase_uvw(frame_phase_center.ra.rad,
frame_phase_center.dec.rad,
frame_rel_uvw)
# calculate data and apply phasor
if not self.metadata_only:
w_lambda = (self.uvw_array[:, 2].reshape(self.Nblts, 1)
/ const.c.to('m/s').value * self.freq_array.reshape(1, self.Nfreqs))
phs = np.exp(-1j * 2 * np.pi * w_lambda[:, None, :, None])
self.data_array *= phs
self.phase_center_frame = phase_frame
self.set_phased()
def phase_to_time(self, time, phase_frame='icrs', use_ant_pos=False):
"""
Phase a drift scan dataset to the ra/dec of zenith at a particular time.
See the phasing memo under docs/references for more documentation.
Parameters
----------
time : astropy.time.Time object or float
The time to phase to, an astropy Time object or a float Julian Date
phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'.
'gcrs' accounts for precession & nutation,
'icrs' accounts for precession, nutation & aberration.
use_ant_pos : bool
If True, calculate the uvws directly from the antenna positions
rather than from the existing uvws.
Raises
------
ValueError
If the phase_type is not 'drift'
TypeError
If time is not an astropy.time.Time object or Julian Date as a float
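Examples
--------
A minimal sketch (assumes ``uv`` holds drift-scan data); phase to the
zenith at the first time in the data:
>>> from astropy.time import Time
>>> uv.phase_to_time(Time(uv.time_array[0], format='jd'))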
"""
if self.phase_type == 'drift':
pass
elif self.phase_type == 'phased':
raise ValueError('The data is already phased; can only phase '
'drift scanning data.')
else:
raise ValueError('The phasing type of the data is unknown. '
'Set the phase_type to drift or phased to '
'reflect the phasing status of the data')
if isinstance(time, (float, np.float32)):
time = Time(time, format='jd')
if not isinstance(time, Time):
raise TypeError("time must be an astropy.time.Time object or a float")
# Generate ra/dec of zenith at time in the phase_frame coordinate system
# to use for phasing
telescope_location = EarthLocation.from_geocentric(self.telescope_location[0],
self.telescope_location[1],
self.telescope_location[2],
unit='m')
zenith_coord = SkyCoord(alt=Angle(90 * units.deg), az=Angle(0 * units.deg),
obstime=time, frame='altaz', location=telescope_location)
obs_zenith_coord = zenith_coord.transform_to(phase_frame)
zenith_ra = obs_zenith_coord.ra
zenith_dec = obs_zenith_coord.dec
self.phase(zenith_ra, zenith_dec, epoch='J2000', phase_frame=phase_frame,
use_ant_pos=use_ant_pos)
def set_uvws_from_antenna_positions(self, allow_phasing=False,
orig_phase_frame=None,
output_phase_frame='icrs'):
"""
Calculate UVWs based on antenna_positions
Parameters
----------
allow_phasing : bool
Option for phased data. If data is phased and allow_phasing is set,
data will be unphased, UVWs will be calculated, and then data will
be rephased.
orig_phase_frame : str
The astropy frame to phase from. Either 'icrs' or 'gcrs'.
Defaults to using the 'phase_center_frame' attribute or 'icrs' if
that attribute is None. Only used if allow_phasing is True.
output_phase_frame : str
The astropy frame to phase to. Either 'icrs' or 'gcrs'. Only used if
allow_phasing is True.
Raises
------
ValueError
If data is phased and allow_phasing is False.
Warns
-----
UserWarning
If the phase_type is 'phased'
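Examples
--------
A minimal sketch (assumes ``uv.antenna_positions`` is set; if ``uv`` is
phased, allow_phasing must be True):
>>> uv.set_uvws_from_antenna_positions(allow_phasing=True,
...                                    output_phase_frame='icrs')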
"""
phase_type = self.phase_type
if phase_type == 'phased':
if allow_phasing:
if not self.metadata_only:
warnings.warn('Data will be unphased and rephased '
'to calculate UVWs, which might introduce small '
'inaccuracies to the data.')
if orig_phase_frame not in [None, 'icrs', 'gcrs']:
raise ValueError('Invalid parameter orig_phase_frame. '
'Options are "icrs", "gcrs", or None.')
if output_phase_frame not in ['icrs', 'gcrs']:
raise ValueError('Invalid parameter output_phase_frame. '
'Options are "icrs" or "gcrs".')
phase_center_ra = self.phase_center_ra
phase_center_dec = self.phase_center_dec
phase_center_epoch = self.phase_center_epoch
self.unphase_to_drift(phase_frame=orig_phase_frame)
else:
raise ValueError('UVW calculation requires unphased data. '
'Use unphase_to_drift or set '
'allow_phasing=True.'
)
antenna_locs_ENU = uvutils.ENU_from_ECEF(
(self.antenna_positions + self.telescope_location),
*self.telescope_location_lat_lon_alt)
uvw_array = np.zeros((self.baseline_array.size, 3))
for baseline in list(set(self.baseline_array)):
baseline_inds = np.where(self.baseline_array == baseline)[0]
ant1_index = np.where(self.antenna_numbers
== self.ant_1_array[baseline_inds[0]])[0][0]
ant2_index = np.where(self.antenna_numbers
== self.ant_2_array[baseline_inds[0]])[0][0]
uvw_array[baseline_inds, :] = (antenna_locs_ENU[ant2_index, :]
- antenna_locs_ENU[ant1_index, :])
self.uvw_array = uvw_array
if phase_type == 'phased':
self.phase(phase_center_ra, phase_center_dec, phase_center_epoch,
phase_frame=output_phase_frame)
def conjugate_bls(self, convention='ant1<ant2', use_enu=True, uvw_tol=0.0):
"""
Conjugate baselines according to one of the supported conventions.
This will fail if only one of the cross pols is present (because
conjugation requires changing the polarization number for cross pols).
Parameters
----------
convention : str or array_like of int
A convention for the directions of the baselines, options are:
'ant1<ant2', 'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0' or an
index array of blt indices to conjugate.
use_enu : bool
Use true antenna positions to determine uv location (as opposed to
uvw array). Only applies if `convention` is 'u<0', 'u>0', 'v<0', 'v>0'.
Set to False to use uvw array values.
uvw_tol : float
Defines a tolerance on uvw coordinates for setting the
u>0, u<0, v>0, or v<0 conventions. Defaults to 0m.
Raises
------
ValueError
If convention is not an allowed value or if not all conjugate pols exist.
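Examples
--------
A minimal sketch (assumes ``uv`` contains either no cross pols or both of
them, so conjugation is possible):
>>> uv.conjugate_bls(convention='ant1<ant2')
>>> bool((uv.ant_1_array <= uv.ant_2_array).all())
True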
"""
if isinstance(convention, (np.ndarray, list, tuple)):
convention = np.array(convention)
if (np.max(convention) >= self.Nblts or np.min(convention) < 0
or convention.dtype not in [int, np.int, np.int32, np.int64]):
raise ValueError('If convention is an index array, it must '
'contain integers with values greater than or '
'equal to zero and less than Nblts')
else:
if convention not in ['ant1<ant2', 'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0']:
raise ValueError("convention must be one of 'ant1<ant2', "
"'ant2<ant1', 'u<0', 'u>0', 'v<0', 'v>0' or "
"an index array with values less than NBlts")
if isinstance(convention, str):
if convention in ['u<0', 'u>0', 'v<0', 'v>0']:
if use_enu is True:
enu, anum = self.get_ENU_antpos()
anum = anum.tolist()
uvw_array_use = np.zeros_like(self.uvw_array)
for i, bl in enumerate(self.baseline_array):
a1, a2 = self.ant_1_array[i], self.ant_2_array[i]
i1, i2 = anum.index(a1), anum.index(a2)
uvw_array_use[i, :] = enu[i2] - enu[i1]
else:
uvw_array_use = copy.copy(self.uvw_array)
if convention == 'ant1<ant2':
index_array = np.asarray(self.ant_1_array > self.ant_2_array).nonzero()
elif convention == 'ant2<ant1':
index_array = np.asarray(self.ant_2_array > self.ant_1_array).nonzero()
elif convention == 'u<0':
index_array = np.asarray((uvw_array_use[:, 0] > uvw_tol)
| ((uvw_array_use[:, 1] > uvw_tol) & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol))
| ((uvw_array_use[:, 2] > uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol))).nonzero()
elif convention == 'u>0':
index_array = np.asarray((uvw_array_use[:, 0] < -uvw_tol)
| ((uvw_array_use[:, 1] < -uvw_tol) & np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol))
| ((uvw_array_use[:, 2] < -uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol))).nonzero()
elif convention == 'v<0':
index_array = np.asarray((uvw_array_use[:, 1] > uvw_tol)
| ((uvw_array_use[:, 0] > uvw_tol) & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol))
| ((uvw_array_use[:, 2] > uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol))).nonzero()
elif convention == 'v>0':
index_array = np.asarray((uvw_array_use[:, 1] < -uvw_tol)
| ((uvw_array_use[:, 0] < -uvw_tol) & np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol))
| ((uvw_array_use[:, 2] < -uvw_tol)
& np.isclose(uvw_array_use[:, 0], 0, atol=uvw_tol)
& np.isclose(uvw_array_use[:, 1], 0, atol=uvw_tol))).nonzero()
else:
index_array = convention
if index_array[0].size > 0:
new_pol_inds = uvutils.reorder_conj_pols(self.polarization_array)
self.uvw_array[index_array] *= (-1)
orig_data_array = copy.copy(self.data_array)
for pol_ind in np.arange(self.Npols):
self.data_array[index_array, :, :, new_pol_inds[pol_ind]] = \
np.conj(orig_data_array[index_array, :, :, pol_ind])
ant_1_vals = self.ant_1_array[index_array]
ant_2_vals = self.ant_2_array[index_array]
self.ant_1_array[index_array] = ant_2_vals
self.ant_2_array[index_array] = ant_1_vals
self.baseline_array[index_array] = self.antnums_to_baseline(
self.ant_1_array[index_array], self.ant_2_array[index_array])
self.Nbls = np.unique(self.baseline_array).size
def reorder_pols(self, order='AIPS', run_check=True, check_extra=True,
run_check_acceptability=True):
"""
Rearrange polarizations in the event they are not uvfits compatible.
Parameters
----------
order : str
Either a string specifying a canonical ordering ('AIPS' or 'CASA')
or an index array of length Npols that specifies how to shuffle the
data (i.e. an index array to apply, not the desired final pol order).
CASA ordering has cross-pols in between (e.g. XX,XY,YX,YY)
AIPS ordering has auto-pols followed by cross-pols (e.g. XX,YY,XY,YX)
Default ('AIPS') will sort by absolute value of pol values.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reordering.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reordering.
Raises
------
ValueError
If the order is not one of the allowed values.
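Examples
--------
A minimal sketch (assumes ``uv`` contains all four linear polarizations):
>>> uv.reorder_pols(order='CASA')  # e.g. XX, XY, YX, YY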
"""
if isinstance(order, (np.ndarray, list, tuple)):
order = np.array(order)
if (order.size != self.Npols
or order.dtype not in [int, np.int, np.int32, np.int64]
or np.min(order) < 0 or np.max(order) >= self.Npols):
raise ValueError('If order is an index array, it must '
'contain integers and be length Npols.')
index_array = order
elif order == 'AIPS':
index_array = np.argsort(np.abs(self.polarization_array))
elif order == 'CASA':
casa_order = np.array([1, 2, 3, 4, -1, -3, -4, -2, -5, -7, -8, -6])
pol_inds = []
for pol in self.polarization_array:
pol_inds.append(np.where(casa_order == pol)[0][0])
index_array = np.argsort(pol_inds)
else:
raise ValueError("order must be one of: 'AIPS', 'CASA', or an "
"index array of length Npols")
self.polarization_array = self.polarization_array[index_array]
self.data_array = self.data_array[:, :, :, index_array]
self.nsample_array = self.nsample_array[:, :, :, index_array]
self.flag_array = self.flag_array[:, :, :, index_array]
# check if object is self-consistent
if run_check:
self.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
def order_pols(self, order='AIPS'):
"""
Will be deprecated in version 1.5, now just calls reorder_pols.
Parameters
----------
order : str
either 'CASA' or 'AIPS'.
Raises
------
ValueError
If the order is not one of the allowed values.
Warns
-----
DeprecationWarning
Always, because this method will be deprecated in version 1.5
"""
warnings.warn('order_pols method will be deprecated in favor of '
'reorder_pols in version 1.5', DeprecationWarning)
self.reorder_pols(order=order)
def reorder_blts(self, order='time', minor_order=None, conj_convention=None, uvw_tol=0.0,
conj_convention_use_enu=True, run_check=True, check_extra=True,
run_check_acceptability=True):
"""
Arrange blt axis according to desired order. Optionally conjugate some baselines.
Parameters
----------
order : str or array_like of int
A string describing the desired order along the blt axis.
Options are: `time`, `baseline`, `ant1`, `ant2`, `bda` or an
index array of length Nblts that specifies the new order.
minor_order : str
Optionally specify a secondary ordering. Default depends on how
order is set: if order is 'time', this defaults to `baseline`,
if order is `ant1`, or `ant2` this defaults to the other antenna,
if order is `baseline` the only allowed value is `time`. Ignored if
order is `bda`. If this is the same as order, it is reset to the default.
conj_convention : str or array_like of int
Optionally conjugate baselines to make the baselines have the
desired orientation. See conjugate_bls for allowed values and details.
uvw_tol : float
If conjugating baselines, sets a tolerance for determining the signs
of u,v, and w, and whether or not they are zero.
See conjugate_bls for details.
conj_convention_use_enu: bool
If `conj_convention` is set, this is passed to conjugate_bls, see that
method for details.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reordering.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reordering.
Raises
------
ValueError
If parameter values are inappropriate
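Examples
--------
A minimal sketch (assumes ``uv`` is an existing UVData object with both
cross pols or none, so the conjugation step is allowed):
>>> uv.reorder_blts(order='time', minor_order='baseline',
...                 conj_convention='ant1<ant2')
>>> uv.blt_order
('time', 'baseline')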
"""
if isinstance(order, (np.ndarray, list, tuple)):
order = np.array(order)
if (order.size != self.Nblts
or order.dtype not in [int, np.int, np.int32, np.int64]):
raise ValueError('If order is an index array, it must '
'contain integers and be length Nblts.')
if minor_order is not None:
raise ValueError('Minor order cannot be set if order is an index array.')
else:
if order not in ['time', 'baseline', 'ant1', 'ant2', 'bda']:
raise ValueError("order must be one of 'time', 'baseline', "
"'ant1', 'ant2', 'bda' or an index array of "
"length Nblts")
if minor_order == order:
minor_order = None
if minor_order is not None:
if minor_order not in ['time', 'baseline', 'ant1', 'ant2']:
raise ValueError("minor_order can only be one of 'time', "
"'baseline', 'ant1', 'ant2'")
if isinstance(order, np.ndarray) or order == 'bda':
raise ValueError("minor_order cannot be specified if order is "
"'bda' or an index array.")
if order == 'baseline':
if minor_order in ['ant1', 'ant2']:
raise ValueError('minor_order conflicts with order')
else:
if order == 'time':
minor_order = 'baseline'
elif order == 'ant1':
minor_order = 'ant2'
elif order == 'ant2':
minor_order = 'ant1'
elif order == 'baseline':
minor_order = 'time'
if conj_convention is not None:
self.conjugate_bls(convention=conj_convention,
use_enu=conj_convention_use_enu, uvw_tol=uvw_tol)
if isinstance(order, str):
if minor_order is None:
self.blt_order = (order,)
self._blt_order.form = (1,)
else:
self.blt_order = (order, minor_order)
# set it back to the right shape in case it was set differently before
self._blt_order.form = (2,)
else:
self.blt_order = None
if not isinstance(order, np.ndarray):
# Use lexsort to sort along different arrays in defined order.
if order == 'time':
arr1 = self.time_array
if minor_order == 'ant1':
arr2 = self.ant_1_array
arr3 = self.ant_2_array
elif minor_order == 'ant2':
arr2 = self.ant_2_array
arr3 = self.ant_1_array
else:
# minor_order is baseline
arr2 = self.baseline_array
arr3 = self.baseline_array
elif order == 'ant1':
arr1 = self.ant_1_array
if minor_order == 'time':
arr2 = self.time_array
arr3 = self.ant_2_array
elif minor_order == 'ant2':
arr2 = self.ant_2_array
arr3 = self.time_array
else: # minor_order is baseline
arr2 = self.baseline_array
arr3 = self.time_array
elif order == 'ant2':
arr1 = self.ant_2_array
if minor_order == 'time':
arr2 = self.time_array
arr3 = self.ant_1_array
elif minor_order == 'ant1':
arr2 = self.ant_1_array
arr3 = self.time_array
else:
# minor_order is baseline
arr2 = self.baseline_array
arr3 = self.time_array
elif order == 'baseline':
arr1 = self.baseline_array
# only allowed minor order is time
arr2 = self.time_array
arr3 = self.time_array
elif order == 'bda':
arr1 = self.integration_time
# only allowed minor order is time
arr2 = self.baseline_array
arr3 = self.time_array
# lexsort uses the listed arrays from last to first (so the primary sort is on the last one)
index_array = np.lexsort((arr3, arr2, arr1))
else:
index_array = order
# actually do the reordering
self.ant_1_array = self.ant_1_array[index_array]
self.ant_2_array = self.ant_2_array[index_array]
self.baseline_array = self.baseline_array[index_array]
self.uvw_array = self.uvw_array[index_array, :]
self.time_array = self.time_array[index_array]
self.lst_array = self.lst_array[index_array]
self.integration_time = self.integration_time[index_array]
if not self.metadata_only:
self.data_array = self.data_array[index_array, :, :, :]
self.flag_array = self.flag_array[index_array, :, :, :]
self.nsample_array = self.nsample_array[index_array, :, :, :]
# check if object is self-consistent
if run_check:
self.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
def __add__(self, other, run_check=True, check_extra=True,
run_check_acceptability=True, inplace=False):
"""
Combine two UVData objects along frequency, polarization and/or baseline-time.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
inplace : bool
If True, overwrite self as we go, otherwise create a third object
as the sum of the two.
Raises
------
ValueError
If other is not a UVData object, self and other are not compatible
or if data in self and other overlap.
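Examples
--------
A minimal sketch (assumes ``uv1`` and ``uv2`` are compatible UVData
objects covering, e.g., disjoint frequency ranges):
>>> uv_combined = uv1 + uv2   # returns a new object
>>> uv1 += uv2                # in-place equivalent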
"""
if inplace:
this = self
else:
this = copy.deepcopy(self)
# Check that both objects are UVData and valid
this.check(check_extra=check_extra, run_check_acceptability=run_check_acceptability)
if not issubclass(other.__class__, this.__class__):
if not issubclass(this.__class__, other.__class__):
raise ValueError('Only UVData (or subclass) objects can be '
'added to a UVData (or subclass) object')
other.check(check_extra=check_extra, run_check_acceptability=run_check_acceptability)
# Define parameters that must be the same to add objects
# But phase_center should be the same, even if in drift (empty parameters)
compatibility_params = ['_vis_units', '_channel_width', '_object_name',
'_telescope_name', '_instrument',
'_telescope_location', '_phase_type',
'_Nants_telescope', '_antenna_names',
'_antenna_numbers', '_antenna_positions',
'_phase_center_ra', '_phase_center_dec',
'_phase_center_epoch']
# Build up history string
history_update_string = ' Combined data along '
n_axes = 0
# Create blt arrays for convenience
prec_t = - 2 * \
np.floor(np.log10(this._time_array.tols[-1])).astype(int)
prec_b = 8
this_blts = np.array(["_".join(["{1:.{0}f}".format(prec_t, blt[0]),
str(blt[1]).zfill(prec_b)]) for blt in
zip(this.time_array, this.baseline_array)])
other_blts = np.array(["_".join(["{1:.{0}f}".format(prec_t, blt[0]),
str(blt[1]).zfill(prec_b)]) for blt in
zip(other.time_array, other.baseline_array)])
# Check we don't have overlapping data
both_pol, this_pol_ind, other_pol_ind = np.intersect1d(
this.polarization_array, other.polarization_array, return_indices=True)
both_freq, this_freq_ind, other_freq_ind = np.intersect1d(
this.freq_array[0, :], other.freq_array[0, :], return_indices=True)
both_blts, this_blts_ind, other_blts_ind = np.intersect1d(
this_blts, other_blts, return_indices=True)
if not self.metadata_only and (
len(both_pol) > 0 and len(both_freq) > 0 and len(both_blts) > 0
):
# check that overlapping data is not valid
this_all_zero = np.all(this.data_array[this_blts_ind][
:, :, this_freq_ind][:, :, :, this_pol_ind] == 0)
this_all_flag = np.all(this.flag_array[this_blts_ind][
:, :, this_freq_ind][:, :, :, this_pol_ind])
other_all_zero = np.all(other.data_array[other_blts_ind][
:, :, other_freq_ind][:, :, :, other_pol_ind] == 0)
other_all_flag = np.all(other.flag_array[other_blts_ind][
:, :, other_freq_ind][:, :, :, other_pol_ind])
if this_all_zero and this_all_flag:
# we're fine to overwrite; update history accordingly
history_update_string = ' Overwrote invalid data using pyuvdata.'
this.history += history_update_string
elif other_all_zero and other_all_flag:
raise ValueError('To combine these data, please run the add operation again, '
'but with the object whose data is to be overwritten as the '
'first object in the add operation.')
else:
raise ValueError('These objects have overlapping data and'
' cannot be combined.')
# find the blt indices in "other" but not in "this"
temp = np.nonzero(~np.in1d(other_blts, this_blts))[0]
if len(temp) > 0:
bnew_inds = temp
new_blts = other_blts[temp]
history_update_string += 'baseline-time'
n_axes += 1
else:
bnew_inds, new_blts = ([], [])
# add metadata to be checked to compatibility params
extra_params = ['_integration_time', '_uvw_array', '_lst_array']
compatibility_params.extend(extra_params)
# find the freq indices in "other" but not in "this"
temp = np.nonzero(
~np.in1d(other.freq_array[0, :], this.freq_array[0, :]))[0]
if len(temp) > 0:
fnew_inds = temp
if n_axes > 0:
history_update_string += ', frequency'
else:
history_update_string += 'frequency'
n_axes += 1
else:
fnew_inds = []
# find the pol indices in "other" but not in "this"
temp = np.nonzero(~np.in1d(other.polarization_array,
this.polarization_array))[0]
if len(temp) > 0:
pnew_inds = temp
if n_axes > 0:
history_update_string += ', polarization'
else:
history_update_string += 'polarization'
n_axes += 1
else:
pnew_inds = []
# Actually check compatibility parameters
for a in compatibility_params:
if a == "_integration_time":
# only check that overlapping blt indices match
params_match = np.allclose(this.integration_time[this_blts_ind],
other.integration_time[other_blts_ind],
rtol=this._integration_time.tols[0],
atol=this._integration_time.tols[1])
elif a == "_uvw_array":
# only check that overlapping blt indices match
params_match = np.allclose(this.uvw_array[this_blts_ind, :],
other.uvw_array[other_blts_ind, :],
rtol=this._uvw_array.tols[0],
atol=this._uvw_array.tols[1])
elif a == "_lst_array":
# only check that overlapping blt indices match
params_match = np.allclose(this.lst_array[this_blts_ind],
other.lst_array[other_blts_ind],
rtol=this._lst_array.tols[0],
atol=this._lst_array.tols[1])
else:
params_match = (getattr(this, a) == getattr(other, a))
if not params_match:
msg = 'UVParameter ' + \
a[1:] + ' does not match. Cannot combine objects.'
raise ValueError(msg)
# Pad out self to accommodate new data
if len(bnew_inds) > 0:
this_blts = np.concatenate((this_blts, new_blts))
blt_order = np.argsort(this_blts)
if not self.metadata_only:
zero_pad = np.zeros(
(len(bnew_inds), this.Nspws, this.Nfreqs, this.Npols))
this.data_array = np.concatenate([this.data_array, zero_pad], axis=0)
this.nsample_array = np.concatenate([this.nsample_array, zero_pad], axis=0)
this.flag_array = np.concatenate([this.flag_array,
1 - zero_pad], axis=0).astype(np.bool)
this.uvw_array = np.concatenate([this.uvw_array,
other.uvw_array[bnew_inds, :]], axis=0)[blt_order, :]
this.time_array = np.concatenate([this.time_array,
other.time_array[bnew_inds]])[blt_order]
this.integration_time = np.concatenate([this.integration_time,
other.integration_time[bnew_inds]])[blt_order]
this.lst_array = np.concatenate(
[this.lst_array, other.lst_array[bnew_inds]])[blt_order]
this.ant_1_array = np.concatenate([this.ant_1_array,
other.ant_1_array[bnew_inds]])[blt_order]
this.ant_2_array = np.concatenate([this.ant_2_array,
other.ant_2_array[bnew_inds]])[blt_order]
this.baseline_array = np.concatenate([this.baseline_array,
other.baseline_array[bnew_inds]])[blt_order]
if len(fnew_inds) > 0:
this.freq_array = np.concatenate([this.freq_array,
other.freq_array[:, fnew_inds]], axis=1)
f_order = np.argsort(this.freq_array[0, :])
if not self.metadata_only:
zero_pad = np.zeros((this.data_array.shape[0], this.Nspws, len(fnew_inds),
this.Npols))
this.data_array = np.concatenate([this.data_array, zero_pad], axis=2)
this.nsample_array = np.concatenate([this.nsample_array, zero_pad], axis=2)
this.flag_array = np.concatenate([this.flag_array, 1 - zero_pad],
axis=2).astype(np.bool)
if len(pnew_inds) > 0:
this.polarization_array = np.concatenate([this.polarization_array,
other.polarization_array[pnew_inds]])
p_order = np.argsort(np.abs(this.polarization_array))
if not self.metadata_only:
zero_pad = np.zeros((this.data_array.shape[0], this.Nspws,
this.data_array.shape[2], len(pnew_inds)))
this.data_array = np.concatenate([this.data_array, zero_pad], axis=3)
this.nsample_array = np.concatenate([this.nsample_array, zero_pad], axis=3)
this.flag_array = np.concatenate([this.flag_array, 1 - zero_pad],
axis=3).astype(np.bool)
# Now populate the data
pol_t2o = np.nonzero(
np.in1d(this.polarization_array, other.polarization_array))[0]
freq_t2o = np.nonzero(
np.in1d(this.freq_array[0, :], other.freq_array[0, :]))[0]
blt_t2o = np.nonzero(np.in1d(this_blts, other_blts))[0]
if not self.metadata_only:
this.data_array[np.ix_(blt_t2o, [0], freq_t2o,
pol_t2o)] = other.data_array
this.nsample_array[np.ix_(
blt_t2o, [0], freq_t2o, pol_t2o)] = other.nsample_array
this.flag_array[np.ix_(blt_t2o, [0], freq_t2o,
pol_t2o)] = other.flag_array
if not self.metadata_only:
if len(bnew_inds) > 0:
for name, param in zip(this._data_params, this.data_like_parameters):
setattr(this, name, param[blt_order, :, :, :])
if len(fnew_inds) > 0:
for name, param in zip(this._data_params, this.data_like_parameters):
setattr(this, name, param[:, :, f_order, :])
if len(pnew_inds) > 0:
for name, param in zip(this._data_params, this.data_like_parameters):
setattr(this, name, param[:, :, :, p_order])
if len(fnew_inds) > 0:
this.freq_array = this.freq_array[:, f_order]
if len(pnew_inds) > 0:
this.polarization_array = this.polarization_array[p_order]
# Update N parameters (e.g. Npols)
this.Ntimes = len(np.unique(this.time_array))
this.Nbls = len(np.unique(this.baseline_array))
this.Nblts = this.uvw_array.shape[0]
this.Nfreqs = this.freq_array.shape[1]
this.Npols = this.polarization_array.shape[0]
this.Nants_data = len(
np.unique(this.ant_1_array.tolist() + this.ant_2_array.tolist()))
# Check specific requirements
if this.Nfreqs > 1:
freq_separation = np.diff(this.freq_array[0, :])
if not np.isclose(np.min(freq_separation), np.max(freq_separation),
rtol=this._freq_array.tols[0], atol=this._freq_array.tols[1]):
warnings.warn('Combined frequencies are not evenly spaced. This will '
'make it impossible to write this data out to some file types.')
elif np.max(freq_separation) > this.channel_width + this._channel_width.tols[1]:
warnings.warn('Combined frequencies are not contiguous. This will make '
'it impossible to write this data out to some file types.')
if this.Npols > 2:
pol_separation = np.diff(this.polarization_array)
if np.min(pol_separation) < np.max(pol_separation):
warnings.warn('Combined polarizations are not evenly spaced. This will '
'make it impossible to write this data out to some file types.')
if n_axes > 0:
history_update_string += ' axis using pyuvdata.'
this.history += history_update_string
this.history = uvutils._combine_histories(this.history, other.history)
# Check final object is self-consistent
if run_check:
this.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if not inplace:
return this
def __iadd__(self, other):
"""
In place add.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
Raises
------
ValueError
If other is not a UVData object, self and other are not compatible
or if data in self and other overlap.
"""
self.__add__(other, inplace=True)
return self
def fast_concat(self, other, axis, run_check=True, check_extra=True,
run_check_acceptability=True, inplace=False):
"""
Concatenate two UVData objects along specified axis with almost no checking of metadata.
Warning! This method assumes all the metadata along other axes is sorted
the same way. The __add__ method is much safer: it checks all the metadata,
but it is slower. Some quick checks are run, but this method doesn't
make any guarantees that the resulting object is correct.
Parameters
----------
other : UVData object
Another UVData object which will be added to self.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. Allowed values are: 'blt', 'freq', 'polarization'.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining objects.
inplace : bool
If True, overwrite self as we go, otherwise create a third object
as the sum of the two.
Raises
------
ValueError
If other is not a UVData object, axis is not an allowed value or if
self and other are not compatible.
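Examples
--------
A minimal sketch (assumes ``uv1`` and ``uv2`` have identical metadata on
all axes other than frequency):
>>> uv_wide = uv1.fast_concat(uv2, 'freq', inplace=False)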
"""
if inplace:
this = self
else:
this = copy.deepcopy(self)
# Check that both objects are UVData and valid
this.check(check_extra=check_extra, run_check_acceptability=run_check_acceptability)
if not issubclass(other.__class__, this.__class__):
if not issubclass(this.__class__, other.__class__):
raise ValueError('Only UVData (or subclass) objects can be '
'added to a UVData (or subclass) object')
other.check(check_extra=check_extra, run_check_acceptability=run_check_acceptability)
allowed_axes = ['blt', 'freq', 'polarization']
if axis not in allowed_axes:
raise ValueError('If axis is specified it must be one of: '
+ ', '.join(allowed_axes))
compatibility_params = ['_vis_units', '_channel_width', '_object_name',
'_telescope_name', '_instrument',
'_telescope_location', '_phase_type',
'_Nants_telescope', '_antenna_names',
'_antenna_numbers', '_antenna_positions',
'_phase_center_ra', '_phase_center_dec',
'_phase_center_epoch']
history_update_string = ' Combined data along '
if axis == 'freq':
history_update_string += 'frequency'
compatibility_params += ['_polarization_array', '_ant_1_array',
'_ant_2_array', '_integration_time',
'_uvw_array', '_lst_array']
elif axis == 'polarization':
history_update_string += 'polarization'
compatibility_params += ['_freq_array', '_ant_1_array',
'_ant_2_array', '_integration_time',
'_uvw_array', '_lst_array']
elif axis == 'blt':
history_update_string += 'baseline-time'
compatibility_params += ['_freq_array', '_polarization_array']
history_update_string += ' axis using pyuvdata.'
this.history += history_update_string
this.history = uvutils._combine_histories(this.history, other.history)
# Actually check compatibility parameters
for a in compatibility_params:
params_match = (getattr(this, a) == getattr(other, a))
if not params_match:
msg = 'UVParameter ' + \
a[1:] + ' does not match. Cannot combine objects.'
raise ValueError(msg)
if axis == 'freq':
this.freq_array = np.concatenate([this.freq_array, other.freq_array], axis=1)
this.Nfreqs = this.Nfreqs + other.Nfreqs
freq_separation = np.diff(this.freq_array[0, :])
if not np.isclose(np.min(freq_separation), np.max(freq_separation),
rtol=this._freq_array.tols[0], atol=this._freq_array.tols[1]):
warnings.warn('Combined frequencies are not evenly spaced. This will '
'make it impossible to write this data out to some file types.')
elif np.max(freq_separation) > this.channel_width + this._channel_width.tols[1]:
warnings.warn('Combined frequencies are not contiguous. This will make '
'it impossible to write this data out to some file types.')
if not self.metadata_only:
this.data_array = np.concatenate([this.data_array, other.data_array], axis=2)
this.nsample_array = np.concatenate([this.nsample_array, other.nsample_array], axis=2)
this.flag_array = np.concatenate([this.flag_array, other.flag_array], axis=2)
elif axis == 'polarization':
this.polarization_array = np.concatenate([this.polarization_array,
other.polarization_array])
this.Npols = this.Npols + other.Npols
pol_separation = np.diff(this.polarization_array)
if np.min(pol_separation) < np.max(pol_separation):
warnings.warn('Combined polarizations are not evenly spaced. This will '
'make it impossible to write this data out to some file types.')
if not self.metadata_only:
this.data_array = np.concatenate([this.data_array, other.data_array], axis=3)
this.nsample_array = np.concatenate([this.nsample_array, other.nsample_array], axis=3)
this.flag_array = np.concatenate([this.flag_array, other.flag_array], axis=3)
elif axis == 'blt':
this.Nblts = this.Nblts + other.Nblts
this.ant_1_array = np.concatenate([this.ant_1_array,
other.ant_1_array])
this.ant_2_array = np.concatenate([this.ant_2_array,
other.ant_2_array])
this.Nants_data = int(len(np.unique(this.ant_1_array.tolist()
+ this.ant_2_array.tolist())))
this.uvw_array = np.concatenate([this.uvw_array,
other.uvw_array], axis=0)
this.time_array = np.concatenate([this.time_array,
other.time_array])
this.Ntimes = len(np.unique(this.time_array))
this.lst_array = np.concatenate([this.lst_array,
other.lst_array])
this.baseline_array = np.concatenate([this.baseline_array,
other.baseline_array])
this.Nbls = len(np.unique(this.baseline_array))
this.integration_time = np.concatenate([this.integration_time,
other.integration_time])
if not self.metadata_only:
this.data_array = np.concatenate([this.data_array, other.data_array], axis=0)
this.nsample_array = np.concatenate([this.nsample_array, other.nsample_array], axis=0)
this.flag_array = np.concatenate([this.flag_array, other.flag_array], axis=0)
# Check final object is self-consistent
if run_check:
this.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if not inplace:
return this
def _select_preprocess(self, antenna_nums, antenna_names, ant_str, bls,
frequencies, freq_chans, times, polarizations, blt_inds):
"""
Internal function to build up blt_inds, freq_inds, pol_inds
and history_update_string for select.
Parameters
----------
antenna_nums : array_like of int, optional
The antennas numbers to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to keep in the object. For length-2 tuples, the ordering of the numbers
within the tuple does not matter. For length-3 tuples, the polarization
string is in the order of the two antennas. If length-3 tuples are
provided, `polarizations` must be None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
or combinations of antenna numbers and polarizations (e.g. '1',
'1_2', '1x_2y'). See tutorial for more examples of valid strings and
the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of the `antenna_nums`,
`antenna_names`, `bls` or `polarizations` parameters;
if it is, a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should
exist in the time_array.
polarizations : array_like of int, optional
The polarizations numbers to keep in the object, each value passed
here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to keep in the object. This is
not commonly used.
Returns
-------
blt_inds : list of int
list of baseline-time indices to keep. Can be None (to keep everything).
freq_inds : list of int
list of frequency indices to keep. Can be None (to keep everything).
pol_inds : list of int
list of polarization indices to keep. Can be None (to keep everything).
history_update_string : str
string to append to the end of the history.
"""
# build up history string as we go
history_update_string = ' Downselected to specific '
n_selects = 0
if ant_str is not None:
if not (antenna_nums is None and antenna_names is None
and bls is None and polarizations is None):
raise ValueError(
'Cannot provide ant_str with antenna_nums, antenna_names, '
'bls, or polarizations.')
else:
bls, polarizations = self.parse_ants(ant_str)
# Antennas, times and blt_inds all need to be combined into a set of
# blts indices to keep.
# test for blt_inds presence before adding inds from antennas & times
if blt_inds is not None:
blt_inds = uvutils._get_iterable(blt_inds)
if np.array(blt_inds).ndim > 1:
blt_inds = np.array(blt_inds).flatten()
history_update_string += 'baseline-times'
n_selects += 1
if antenna_names is not None:
if antenna_nums is not None:
raise ValueError(
'Only one of antenna_nums and antenna_names can be provided.')
if not isinstance(antenna_names, (list, tuple, np.ndarray)):
antenna_names = (antenna_names,)
if np.array(antenna_names).ndim > 1:
antenna_names = np.array(antenna_names).flatten()
antenna_nums = []
for s in antenna_names:
if s not in self.antenna_names:
raise ValueError(
'Antenna name {a} is not present in the antenna_names array'.format(a=s))
antenna_nums.append(self.antenna_numbers[np.where(
np.array(self.antenna_names) == s)][0])
if antenna_nums is not None:
antenna_nums = uvutils._get_iterable(antenna_nums)
if np.array(antenna_nums).ndim > 1:
antenna_nums = np.array(antenna_nums).flatten()
if n_selects > 0:
history_update_string += ', antennas'
else:
history_update_string += 'antennas'
n_selects += 1
inds1 = np.zeros(0, dtype=np.int)
inds2 = np.zeros(0, dtype=np.int)
for ant in antenna_nums:
if ant in self.ant_1_array or ant in self.ant_2_array:
wh1 = np.where(self.ant_1_array == ant)[0]
wh2 = np.where(self.ant_2_array == ant)[0]
if len(wh1) > 0:
inds1 = np.append(inds1, list(wh1))
if len(wh2) > 0:
inds2 = np.append(inds2, list(wh2))
else:
raise ValueError('Antenna number {a} is not present in the '
'ant_1_array or ant_2_array'.format(a=ant))
ant_blt_inds = np.array(
list(set(inds1).intersection(inds2)), dtype=np.int)
else:
ant_blt_inds = None
if bls is not None:
if isinstance(bls, tuple) and (len(bls) == 2 or len(bls) == 3):
bls = [bls]
if len(bls) == 0 or not all(isinstance(item, tuple) for item in bls):
raise ValueError(
'bls must be a list of tuples of antenna numbers (optionally with polarization).')
if not all([isinstance(item[0], six.integer_types + (np.integer,)) for item in bls]
+ [isinstance(item[1], six.integer_types + (np.integer,)) for item in bls]):
raise ValueError(
'bls must be a list of tuples of antenna numbers (optionally with polarization).')
if all([len(item) == 3 for item in bls]):
if polarizations is not None:
raise ValueError('Cannot provide length-3 tuples and also specify polarizations.')
if not all([isinstance(item[2], str) for item in bls]):
raise ValueError('The third element in each bl must be a polarization string')
if ant_str is None:
if n_selects > 0:
history_update_string += ', baselines'
else:
history_update_string += 'baselines'
else:
history_update_string += 'antenna pairs'
n_selects += 1
bls_blt_inds = np.zeros(0, dtype=np.int)
bl_pols = set()
for bl in bls:
if not (bl[0] in self.ant_1_array or bl[0] in self.ant_2_array):
raise ValueError('Antenna number {a} is not present in the '
'ant_1_array or ant_2_array'.format(a=bl[0]))
if not (bl[1] in self.ant_1_array or bl[1] in self.ant_2_array):
raise ValueError('Antenna number {a} is not present in the '
'ant_1_array or ant_2_array'.format(a=bl[1]))
wh1 = np.where(np.logical_and(
self.ant_1_array == bl[0], self.ant_2_array == bl[1]))[0]
wh2 = np.where(np.logical_and(
self.ant_1_array == bl[1], self.ant_2_array == bl[0]))[0]
if len(wh1) > 0:
bls_blt_inds = np.append(bls_blt_inds, list(wh1))
if len(bl) == 3:
bl_pols.add(bl[2])
elif len(wh2) > 0:
bls_blt_inds = np.append(bls_blt_inds, list(wh2))
if len(bl) == 3:
bl_pols.add(bl[2][::-1]) # reverse polarization string
else:
raise ValueError('Antenna pair {p} does not have any data '
'associated with it.'.format(p=bl))
if len(bl_pols) > 0:
polarizations = list(bl_pols)
if ant_blt_inds is not None:
# Use intersection (and) to join antenna_names/nums & ant_pairs_nums
ant_blt_inds = np.array(list(set(ant_blt_inds).intersection(bls_blt_inds)))
else:
ant_blt_inds = bls_blt_inds
if ant_blt_inds is not None:
if blt_inds is not None:
# Use intersection (and) to join antenna_names/nums/ant_pairs_nums with blt_inds
blt_inds = np.array(
list(set(blt_inds).intersection(ant_blt_inds)), dtype=np.int)
else:
blt_inds = ant_blt_inds
if times is not None:
times = uvutils._get_iterable(times)
if np.array(times).ndim > 1:
times = np.array(times).flatten()
if n_selects > 0:
history_update_string += ', times'
else:
history_update_string += 'times'
n_selects += 1
time_blt_inds = np.zeros(0, dtype=np.int)
for jd in times:
if jd in self.time_array:
time_blt_inds = np.append(
time_blt_inds, np.where(self.time_array == jd)[0])
else:
raise ValueError(
'Time {t} is not present in the time_array'.format(t=jd))
if blt_inds is not None:
                # Use intersection (and) to join antenna_names/nums/ant_pairs_nums/blt_inds with times
blt_inds = np.array(
list(set(blt_inds).intersection(time_blt_inds)), dtype=np.int)
else:
blt_inds = time_blt_inds
if blt_inds is not None:
if len(blt_inds) == 0:
raise ValueError(
'No baseline-times were found that match criteria')
if max(blt_inds) >= self.Nblts:
raise ValueError(
'blt_inds contains indices that are too large')
if min(blt_inds) < 0:
raise ValueError('blt_inds contains indices that are negative')
blt_inds = list(sorted(set(list(blt_inds))))
if freq_chans is not None:
freq_chans = uvutils._get_iterable(freq_chans)
if np.array(freq_chans).ndim > 1:
freq_chans = np.array(freq_chans).flatten()
if frequencies is None:
frequencies = self.freq_array[0, freq_chans]
else:
frequencies = uvutils._get_iterable(frequencies)
frequencies = np.sort(list(set(frequencies)
| set(self.freq_array[0, freq_chans])))
if frequencies is not None:
frequencies = uvutils._get_iterable(frequencies)
if np.array(frequencies).ndim > 1:
frequencies = np.array(frequencies).flatten()
if n_selects > 0:
history_update_string += ', frequencies'
else:
history_update_string += 'frequencies'
n_selects += 1
freq_inds = np.zeros(0, dtype=np.int)
# this works because we only allow one SPW. This will have to be reworked when we support more.
freq_arr_use = self.freq_array[0, :]
for f in frequencies:
if f in freq_arr_use:
freq_inds = np.append(
freq_inds, np.where(freq_arr_use == f)[0])
else:
raise ValueError(
'Frequency {f} is not present in the freq_array'.format(f=f))
if len(frequencies) > 1:
freq_ind_separation = freq_inds[1:] - freq_inds[:-1]
if np.min(freq_ind_separation) < np.max(freq_ind_separation):
warnings.warn('Selected frequencies are not evenly spaced. This '
'will make it impossible to write this data out to '
'some file types')
elif np.max(freq_ind_separation) > 1:
warnings.warn('Selected frequencies are not contiguous. This '
'will make it impossible to write this data out to '
'some file types.')
freq_inds = list(sorted(set(list(freq_inds))))
else:
freq_inds = None
if polarizations is not None:
polarizations = uvutils._get_iterable(polarizations)
if np.array(polarizations).ndim > 1:
polarizations = np.array(polarizations).flatten()
if n_selects > 0:
history_update_string += ', polarizations'
else:
history_update_string += 'polarizations'
n_selects += 1
pol_inds = np.zeros(0, dtype=np.int)
for p in polarizations:
if isinstance(p, str):
p_num = uvutils.polstr2num(p, x_orientation=self.x_orientation)
else:
p_num = p
if p_num in self.polarization_array:
pol_inds = np.append(pol_inds, np.where(
self.polarization_array == p_num)[0])
else:
raise ValueError(
'Polarization {p} is not present in the polarization_array'.format(p=p))
if len(pol_inds) > 2:
pol_ind_separation = pol_inds[1:] - pol_inds[:-1]
if np.min(pol_ind_separation) < np.max(pol_ind_separation):
warnings.warn('Selected polarization values are not evenly spaced. This '
'will make it impossible to write this data out to '
'some file types')
pol_inds = list(sorted(set(list(pol_inds))))
else:
pol_inds = None
history_update_string += ' using pyuvdata.'
return blt_inds, freq_inds, pol_inds, history_update_string
def _select_metadata(self, blt_inds, freq_inds, pol_inds, history_update_string,
keep_all_metadata=True):
"""
Internal function to perform select on everything except the data-sized arrays.
Parameters
----------
blt_inds : list of int
list of baseline-time indices to keep. Can be None (to keep everything).
freq_inds : list of int
list of frequency indices to keep. Can be None (to keep everything).
pol_inds : list of int
list of polarization indices to keep. Can be None (to keep everything).
history_update_string : str
string to append to the end of the history.
keep_all_metadata : bool
Option to keep metadata for antennas that are no longer in the dataset.
"""
if blt_inds is not None:
self.Nblts = len(blt_inds)
self.baseline_array = self.baseline_array[blt_inds]
self.Nbls = len(np.unique(self.baseline_array))
self.time_array = self.time_array[blt_inds]
self.integration_time = self.integration_time[blt_inds]
self.lst_array = self.lst_array[blt_inds]
self.uvw_array = self.uvw_array[blt_inds, :]
self.ant_1_array = self.ant_1_array[blt_inds]
self.ant_2_array = self.ant_2_array[blt_inds]
self.Nants_data = int(
len(set(self.ant_1_array.tolist() + self.ant_2_array.tolist())))
self.Ntimes = len(np.unique(self.time_array))
if not keep_all_metadata:
ants_to_keep = set(self.ant_1_array.tolist() + self.ant_2_array.tolist())
inds_to_keep = [self.antenna_numbers.tolist().index(ant) for ant in ants_to_keep]
self.antenna_names = [self.antenna_names[ind] for ind in inds_to_keep]
self.antenna_numbers = self.antenna_numbers[inds_to_keep]
self.antenna_positions = self.antenna_positions[inds_to_keep, :]
if self.antenna_diameters is not None:
self.antenna_diameters = self.antenna_diameters[inds_to_keep]
self.Nants_telescope = int(len(ants_to_keep))
if freq_inds is not None:
self.Nfreqs = len(freq_inds)
self.freq_array = self.freq_array[:, freq_inds]
if pol_inds is not None:
self.Npols = len(pol_inds)
self.polarization_array = self.polarization_array[pol_inds]
self.history = self.history + history_update_string
def select(self, antenna_nums=None, antenna_names=None, ant_str=None,
bls=None, frequencies=None, freq_chans=None,
times=None, polarizations=None, blt_inds=None, run_check=True,
check_extra=True, run_check_acceptability=True, inplace=True,
metadata_only=None, keep_all_metadata=True):
"""
Downselect data to keep on the object along various axes.
Axes that can be selected along include antenna names or numbers,
antenna pairs, frequencies, times and polarizations. Specific
baseline-time indices can also be selected, but this is not commonly used.
The history attribute on the object will be updated to identify the
operations performed.
Parameters
----------
antenna_nums : array_like of int, optional
The antennas numbers to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to keep in the object (antenna positions and
names for the removed antennas will be retained unless
`keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to keep in the object. For length-2 tuples, the ordering of the numbers
within the tuple does not matter. For length-3 tuples, the polarization
string is in the order of the two antennas. If length-3 tuples are
provided, `polarizations` must be None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
or combinations of antenna numbers and polarizations (e.g. '1',
'1_2', '1x_2y'). See tutorial for more examples of valid strings and
the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
            `antenna_names`, `bls` or `polarizations` parameters;
            if it is, a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to keep in the object, each value passed here should
exist in the freq_array.
freq_chans : array_like of int, optional
The frequency channel numbers to keep in the object.
times : array_like of float, optional
The times to keep in the object, each value passed here should
exist in the time_array.
polarizations : array_like of int, optional
The polarizations numbers to keep in the object, each value passed
here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to keep in the object. This is
not commonly used.
run_check : bool
Option to check for the existence and proper shapes of parameters
after downselecting data on this object (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
downselecting data on this object (the default is True, meaning the
acceptable range check will be done).
inplace : bool
Option to perform the select directly on self or return a new UVData
object with just the selected data (the default is True, meaning the
select will be done on self).
metadata_only : bool
Option to only do the select on the metadata. Not allowed if the
data_array, flag_array or nsample_array is not None. Note this option
has been replaced by an automatic detection of whether the data like
arrays are present. The keyword will be deprecated in version 1.6.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
            that do not have data associated with them after the select option.
Returns
-------
UVData object or None
None is returned if inplace is True, otherwise a new UVData object
with just the selected data is returned
Raises
------
ValueError
If any of the parameters are set to inappropriate values.
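        Examples
        --------
        A minimal usage sketch; the file name is hypothetical:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_uvfits('obs.uvfits')
        >>> uv.select(antenna_nums=[0, 1, 2], polarizations=['xx'])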
"""
if metadata_only is not None:
warnings.warn('The metadata_only option has been replaced by an '
'automatic detection of whether the data like arrays '
'are present. The keyword will be deprecated in version 1.6.',
DeprecationWarning)
if metadata_only != self.metadata_only:
raise ValueError('The metadata_only option can only be True if '
'data_array, flag_array or nsample_array are '
'all None and must be False otherwise.')
if inplace:
uv_object = self
else:
uv_object = copy.deepcopy(self)
blt_inds, freq_inds, pol_inds, history_update_string = \
uv_object._select_preprocess(antenna_nums, antenna_names, ant_str, bls,
frequencies, freq_chans, times, polarizations, blt_inds)
# do select operations on everything except data_array, flag_array and nsample_array
uv_object._select_metadata(blt_inds, freq_inds, pol_inds, history_update_string,
keep_all_metadata)
if self.metadata_only:
if not inplace:
return uv_object
else:
return
if blt_inds is not None:
for param_name, param in zip(self._data_params, uv_object.data_like_parameters):
setattr(uv_object, param_name, param[blt_inds, :, :, :])
if freq_inds is not None:
for param_name, param in zip(self._data_params, uv_object.data_like_parameters):
setattr(uv_object, param_name, param[:, :, freq_inds, :])
if pol_inds is not None:
for param_name, param in zip(self._data_params, uv_object.data_like_parameters):
setattr(uv_object, param_name, param[:, :, :, pol_inds])
# check if object is uv_object-consistent
if run_check:
uv_object.check(check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if not inplace:
return uv_object
def _convert_from_filetype(self, other):
"""
Internal function to convert from a file-type specific object to a UVData object.
Used in reads.
Parameters
----------
other : object that inherits from UVData
File type specific object to convert to UVData
"""
for p in other:
param = getattr(other, p)
setattr(self, p, param)
def _convert_to_filetype(self, filetype):
"""
Internal function to convert from a UVData object to a file-type specific object.
Used in writes.
Parameters
----------
filetype : str
Specifies what file type object to convert to. Options are: 'uvfits',
'fhd', 'miriad', 'uvh5'
Raises
------
ValueError
if filetype is not a known type
"""
if filetype == 'uvfits':
from . import uvfits
other_obj = uvfits.UVFITS()
elif filetype == 'fhd':
from . import fhd
other_obj = fhd.FHD()
elif filetype == 'miriad':
from . import miriad
other_obj = miriad.Miriad()
elif filetype == 'uvh5':
from . import uvh5
other_obj = uvh5.UVH5()
else:
raise ValueError('filetype must be uvfits, miriad, fhd, or uvh5')
for p in self:
param = getattr(self, p)
setattr(other_obj, p, param)
return other_obj
def read_uvfits(self, filename, axis=None, antenna_nums=None, antenna_names=None,
ant_str=None, bls=None, frequencies=None,
freq_chans=None, times=None, polarizations=None, blt_inds=None,
keep_all_metadata=True, read_data=True, read_metadata=True,
run_check=True, check_extra=True, run_check_acceptability=True):
"""
Read in header, metadata and data from uvfits file(s).
Parameters
----------
filename : str or list of str
The uvfits file or list of files to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
            `antenna_names`, `bls` or `polarizations` parameters;
            if it is, a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array. Ignored if read_data is False.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility and flag data. If set to false, only the
basic header info and metadata (if read_metadata is True) will be
read in. Setting read_data to False results in an incompletely
defined object (check will not pass).
        read_metadata : bool
Read in metadata (times, baselines, uvws) as well as basic header
info. Only used if read_data is False (metadata will be read if data
is read). If both read_data and read_metadata are false, only basic
header info is read in.
run_check : bool
Option to check for the existence and proper shapes of parameters
            after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
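        Examples
        --------
        A minimal sketch; the file names are hypothetical:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_uvfits('obs.uvfits', freq_chans=[0, 1, 2, 3])
        >>> uv_meta = UVData()
        >>> uv_meta.read_uvfits('obs2.uvfits', read_data=False)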
"""
from . import uvfits
# work out what function should be called depending on what's
# already defined on the object
if self.freq_array is not None:
hdr_loaded = True
else:
hdr_loaded = False
if self.data_array is not None:
data_loaded = True
else:
data_loaded = False
if not read_data and not read_metadata:
# not reading data or metadata, use read_uvfits to get header
func = 'read_uvfits'
elif not read_data:
# reading metadata but not data
if hdr_loaded:
# header already read, use read_uvfits_metadata
# (which will error if the data have already been read)
func = 'read_uvfits_metadata'
else:
# header not read, use read_uvfits
func = 'read_uvfits'
else:
# reading data
if hdr_loaded and not data_loaded:
# header already read, data not read, use read_uvfits_data
# (which will read metadata if it doesn't exist)
func = 'read_uvfits_data'
else:
# header not read or object already fully defined,
# use read_uvfits to get a new object
func = 'read_uvfits'
if isinstance(filename, (list, tuple)):
if not read_data and not read_metadata:
raise ValueError('A list of files cannot be used when just '
'reading the header (read_data and read_metadata are False)')
if func == 'read_uvfits_data':
raise ValueError('A list of files cannot be used when just '
'reading data (metadata already exists)')
self.read_uvfits(filename[0], antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str,
bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, read_metadata=read_metadata,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
if len(filename) > 1:
for f in filename[1:]:
uv2 = UVData()
uv2.read_uvfits(f, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str,
bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, read_metadata=read_metadata,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
if axis is not None:
self.fast_concat(uv2, axis, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True)
else:
self += uv2
del(uv2)
else:
if func == 'read_uvfits':
uvfits_obj = uvfits.UVFITS()
uvfits_obj.read_uvfits(filename, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str,
bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, read_metadata=read_metadata,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
self._convert_from_filetype(uvfits_obj)
del(uvfits_obj)
elif func == 'read_uvfits_metadata':
# can only be one file, it would have errored earlier otherwise
uvfits_obj = self._convert_to_filetype('uvfits')
uvfits_obj.read_uvfits_metadata(
filename, run_check_acceptability=run_check_acceptability)
self._convert_from_filetype(uvfits_obj)
del(uvfits_obj)
elif func == 'read_uvfits_data':
# can only be one file, it would have errored earlier otherwise
uvfits_obj = self._convert_to_filetype('uvfits')
uvfits_obj.read_uvfits_data(filename, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str,
bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
self._convert_from_filetype(uvfits_obj)
del(uvfits_obj)
def write_uvfits(self, filename, spoof_nonessential=False, write_lst=True,
force_phase=False, run_check=True, check_extra=True,
run_check_acceptability=True):
"""
Write the data to a uvfits file.
Parameters
----------
filename : str
The uvfits file to write to.
spoof_nonessential : bool
Option to spoof the values of optional UVParameters that are not set
but are required for uvfits files.
write_lst : bool
Option to write the LSTs to the metadata (random group parameters).
        force_phase : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
run_check : bool
Option to check for the existence and proper shapes of parameters
            before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
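        Examples
        --------
        A minimal sketch; the file names are hypothetical and drift data are
        phased before writing:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_miriad('obs.uv')
        >>> uv.write_uvfits('obs.uvfits', force_phase=True,
        ...                 spoof_nonessential=True)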
"""
uvfits_obj = self._convert_to_filetype('uvfits')
uvfits_obj.write_uvfits(filename, spoof_nonessential=spoof_nonessential,
write_lst=write_lst, force_phase=force_phase,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
del(uvfits_obj)
def read_ms(self, filepath, axis=None, data_column='DATA', pol_order='AIPS',
run_check=True, check_extra=True, run_check_acceptability=True):
"""
Read in data from a measurement set
Parameters
----------
filepath : str or list of str
The measurement set file directory or list of directories to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'
pol_order : str
Option to specify polarizations order convention, options are 'CASA' or 'AIPS'.
run_check : bool
Option to check for the existence and proper shapes of parameters
            after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
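        Examples
        --------
        A minimal sketch; the measurement set name is hypothetical:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_ms('obs.ms', data_column='CORRECTED_DATA', pol_order='AIPS')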
"""
from . import ms
if isinstance(filepath, (list, tuple)):
self.read_ms(filepath[0], run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_column=data_column, pol_order=pol_order)
if len(filepath) > 1:
for f in filepath[1:]:
uv2 = UVData()
uv2.read_ms(f, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_column=data_column, pol_order=pol_order)
if axis is not None:
self.fast_concat(uv2, axis, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True)
else:
self += uv2
del(uv2)
else:
ms_obj = ms.MS()
ms_obj.read_ms(filepath, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_column=data_column, pol_order=pol_order)
self._convert_from_filetype(ms_obj)
del(ms_obj)
def read_fhd(self, filelist, use_model=False, axis=None,
run_check=True, check_extra=True, run_check_acceptability=True):
"""
Read in data from a list of FHD files.
Parameters
----------
filelist : list of str
The list of FHD save files to read from. Must include at least one
polarization file, a params file and a flag file. Can also be a list
of lists to read multiple data sets.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read).
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple data sets are passed.
run_check : bool
Option to check for the existence and proper shapes of parameters
            after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
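        Examples
        --------
        A minimal sketch; the FHD save-file names are hypothetical:

        >>> from pyuvdata import UVData
        >>> fhd_files = ['vis_XX.sav', 'vis_YY.sav', 'params.sav',
        ...              'flags.sav', 'settings.txt']
        >>> uv = UVData()
        >>> uv.read_fhd(fhd_files, use_model=False)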
"""
from . import fhd
if isinstance(filelist[0], (list, tuple)):
self.read_fhd(filelist[0], use_model=use_model, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if len(filelist) > 1:
for f in filelist[1:]:
uv2 = UVData()
uv2.read_fhd(f, use_model=use_model, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if axis is not None:
self.fast_concat(uv2, axis, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True)
else:
self += uv2
del(uv2)
else:
fhd_obj = fhd.FHD()
fhd_obj.read_fhd(filelist, use_model=use_model, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
self._convert_from_filetype(fhd_obj)
del(fhd_obj)
def read_miriad(self, filepath, axis=None, antenna_nums=None, ant_str=None,
bls=None, polarizations=None, time_range=None, read_data=True,
phase_type=None, correct_lat_lon=True, run_check=True,
check_extra=True, run_check_acceptability=True):
"""
Read in data from a miriad file.
Parameters
----------
filepath : str or list of str
The miriad file directory or list of directories to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antennas numbers to read into the object.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
            `bls` or `polarizations` parameters; if it is, a ValueError will be raised.
polarizations : array_like of int or str, optional
List of polarization integers or strings to read-in. e.g. ['xx', 'yy', ...]
time_range : list of float, optional
len-2 list containing min and max range of times in Julian Date to
include when reading data into the object. e.g. [2458115.20, 2458115.40]
read_data : bool
Read in the visibility and flag data. If set to false,
only the metadata will be read in. Setting read_data to False
results in an incompletely defined object (check will not pass).
phase_type : str, optional
Option to specify the phasing status of the data. Options are 'drift',
'phased' or None. 'drift' means the data are zenith drift data,
'phased' means the data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing.
run_check : bool
Option to check for the existence and proper shapes of parameters
            after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
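        Examples
        --------
        A minimal sketch; the miriad directory name is hypothetical:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_miriad('obs.uv', ant_str='cross',
        ...                time_range=[2458115.20, 2458115.40])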
"""
from . import miriad
if isinstance(filepath, (list, tuple)):
self.read_miriad(filepath[0], correct_lat_lon=correct_lat_lon,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
phase_type=phase_type, antenna_nums=antenna_nums,
ant_str=ant_str, bls=bls,
polarizations=polarizations, time_range=time_range)
if len(filepath) > 1:
for f in filepath[1:]:
uv2 = UVData()
uv2.read_miriad(f, correct_lat_lon=correct_lat_lon,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
phase_type=phase_type, antenna_nums=antenna_nums,
ant_str=ant_str, bls=bls,
polarizations=polarizations, time_range=time_range)
if axis is not None:
self.fast_concat(uv2, axis, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True)
else:
self += uv2
del(uv2)
else:
# work out what function should be called
if read_data:
# reading data, use read_miriad
miriad_obj = miriad.Miriad()
miriad_obj.read_miriad(filepath, correct_lat_lon=correct_lat_lon,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
phase_type=phase_type, antenna_nums=antenna_nums,
ant_str=ant_str, bls=bls,
polarizations=polarizations, time_range=time_range)
self._convert_from_filetype(miriad_obj)
del(miriad_obj)
else:
# not reading data. Will error if data_array is already defined.
miriad_obj = self._convert_to_filetype('miriad')
miriad_obj.read_miriad_metadata(filepath, correct_lat_lon=correct_lat_lon)
self._convert_from_filetype(miriad_obj)
del(miriad_obj)
def write_miriad(self, filepath, run_check=True, check_extra=True,
run_check_acceptability=True, clobber=False, no_antnums=False):
"""
Write the data to a miriad file.
Parameters
----------
        filepath : str
The miriad file directory to write to.
run_check : bool
Option to check for the existence and proper shapes of parameters
            before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
clobber : bool
Option to overwrite the filename if the file already exists.
no_antnums : bool
Option to not write the antnums variable to the file.
Should only be used for testing purposes.
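        Examples
        --------
        A minimal sketch; the file names are hypothetical:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_uvfits('obs.uvfits')
        >>> uv.write_miriad('obs.uv', clobber=True)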
"""
miriad_obj = self._convert_to_filetype('miriad')
miriad_obj.write_miriad(filepath, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
clobber=clobber, no_antnums=no_antnums)
del(miriad_obj)
def read_mwa_corr_fits(self, filelist, axis=None, use_cotter_flags=False,
correct_cable_len=False, phase_data=False,
phase_center=None, run_check=True,
check_extra=True, run_check_acceptability=True):
"""
Read in MWA correlator gpu box files.
Parameters
----------
filelist : list of str
The list of MWA correlator files to read from. Must include at
least one fits file and only one metafits file per data set.
Can also be a list of lists to read multiple data sets.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
use_cotter_flags : bool
Option to use cotter output mwaf flag files. Otherwise flagging
will only be applied to missing data and bad antennas. Default is
False.
correct_cable_len : bool
Option to apply a cable delay correction. Default is False.
phase_data : bool
Option to phase data. Default is False.
phase_center : tuple, optional
A tuple containing the ra and dec coordinates in radians of a
specific location to phase data to. If not specified, the
observation pointing center will be used when phase_data is True.
run_check : bool
Option to check for the existence and proper shapes of parameters
            after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
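        Examples
        --------
        A minimal sketch; the metafits and gpubox file names are hypothetical:

        >>> from pyuvdata import UVData
        >>> files = ['obs.metafits', 'obs_gpubox01_00.fits']
        >>> uv = UVData()
        >>> uv.read_mwa_corr_fits(files, correct_cable_len=True)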
"""
from . import mwa_corr_fits
if isinstance(filelist[0], (list, tuple)):
self.read_mwa_corr_fits(filelist[0], use_cotter_flags=use_cotter_flags,
correct_cable_len=correct_cable_len,
phase_data=phase_data, phase_center=phase_center,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if len(filelist) > 1:
for f in filelist[1:]:
uv2 = UVData()
uv2.read_mwa_corr_fits(f, use_cotter_flags=use_cotter_flags,
correct_cable_len=correct_cable_len,
phase_data=phase_data, phase_center=phase_center,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
if axis is not None:
self.fast_concat(uv2, axis, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True)
else:
self += uv2
del(uv2)
else:
corr_obj = mwa_corr_fits.MWACorrFITS()
corr_obj.read_mwa_corr_fits(filelist, use_cotter_flags=use_cotter_flags,
correct_cable_len=correct_cable_len,
phase_data=phase_data, phase_center=phase_center,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability)
self._convert_from_filetype(corr_obj)
del(corr_obj)
def read_uvh5(self, filename, axis=None, antenna_nums=None, antenna_names=None,
ant_str=None, bls=None, frequencies=None, freq_chans=None,
times=None, polarizations=None, blt_inds=None,
keep_all_metadata=True, read_data=True, data_array_dtype=np.complex128,
run_check=True, check_extra=True, run_check_acceptability=True):
"""
Read a UVH5 file.
Parameters
----------
filename : str or list of str
The UVH5 file or list of files to read from.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
            `antenna_names`, `bls` or `polarizations` parameters;
            if it is, a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array. Ignored if read_data is False.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility and flag data. If set to false, only the
basic header info and metadata will be read in. Setting read_data to
False results in an incompletely defined object (check will not pass).
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'.
run_check : bool
Option to check for the existence and proper shapes of parameters
            after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
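        Examples
        --------
        A minimal sketch; the file name is hypothetical:

        >>> import numpy as np
        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_uvh5('obs.uvh5', polarizations=['xx', 'yy'],
        ...              data_array_dtype=np.complex64)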
"""
from . import uvh5
if isinstance(filename, (list, tuple)):
self.read_uvh5(filename[0], antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str, bls=bls,
frequencies=frequencies, freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata)
if len(filename) > 1:
for f in filename[1:]:
uv2 = UVData()
uv2.read_uvh5(f, axis=axis, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str, bls=bls,
frequencies=frequencies, freq_chans=freq_chans,
times=times, polarizations=polarizations,
blt_inds=blt_inds, read_data=read_data,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata)
if axis is not None:
self.fast_concat(uv2, axis, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
inplace=True)
else:
self += uv2
del(uv2)
else:
uvh5_obj = uvh5.UVH5()
uvh5_obj.read_uvh5(filename, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str, bls=bls,
frequencies=frequencies, freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata)
self._convert_from_filetype(uvh5_obj)
del(uvh5_obj)
def write_uvh5(self, filename, run_check=True, check_extra=True,
run_check_acceptability=True, clobber=False,
data_compression=None, flags_compression="lzf",
nsample_compression="lzf", data_write_dtype=None):
"""
Write a completely in-memory UVData object to a UVH5 file.
Parameters
----------
filename : str
The UVH5 file to write to.
clobber : bool
Option to overwrite the file if it already exists.
data_compression : str
HDF5 filter to apply when writing the data_array. Default is
None meaning no filter or compression.
flags_compression : str
HDF5 filter to apply when writing the flags_array. Default is "lzf"
for the LZF filter.
nsample_compression : str
HDF5 filter to apply when writing the nsample_array. Default is "lzf"
for the LZF filter.
data_write_dtype : numpy dtype
datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be specified with
an 'r' field and an 'i' field for real and imaginary parts, respectively. See
uvh5.py for an example of defining such a datatype.
run_check : bool
Option to check for the existence and proper shapes of parameters
            before writing the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
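        Examples
        --------
        A minimal sketch; the file names are hypothetical:

        >>> from pyuvdata import UVData
        >>> uv = UVData()
        >>> uv.read_uvfits('obs.uvfits')
        >>> uv.write_uvh5('obs.uvh5', clobber=True, data_compression='lzf')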
"""
uvh5_obj = self._convert_to_filetype('uvh5')
uvh5_obj.write_uvh5(filename, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
clobber=clobber, data_compression=data_compression,
flags_compression=flags_compression,
nsample_compression=nsample_compression,
data_write_dtype=data_write_dtype)
del(uvh5_obj)
def initialize_uvh5_file(self, filename, clobber=False, data_compression=None,
flags_compression="lzf", nsample_compression="lzf",
data_write_dtype=None):
"""
Initialize a UVH5 file on disk with the header metadata and empty data arrays.
Parameters
----------
filename : str
The UVH5 file to write to.
clobber : bool
Option to overwrite the file if it already exists.
data_compression : str
HDF5 filter to apply when writing the data_array. Default is
None meaning no filter or compression.
flags_compression : str
HDF5 filter to apply when writing the flags_array. Default is "lzf"
for the LZF filter.
nsample_compression : str
HDF5 filter to apply when writing the nsample_array. Default is "lzf"
for the LZF filter.
data_write_dtype : numpy dtype
datatype of output visibility data. If 'None', then the same datatype
as data_array will be used. Otherwise, a numpy dtype object must be specified with
an 'r' field and an 'i' field for real and imaginary parts, respectively. See
uvh5.py for an example of defining such a datatype.
Notes
-----
When partially writing out data, this function should be called first to initialize the
file on disk. The data is then actually written by calling the write_uvh5_part method,
with the same filename as the one specified in this function. See the tutorial for a
worked example.
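        Examples
        --------
        A minimal partial-write sketch; the file name is hypothetical and
        `uv` is assumed to already hold the full metadata:

        >>> uv.initialize_uvh5_file('partial.uvh5', clobber=True)
        >>> # data chunks are then written with write_uvh5_part (see below)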
"""
uvh5_obj = self._convert_to_filetype('uvh5')
uvh5_obj.initialize_uvh5_file(filename, clobber=clobber,
data_compression=data_compression,
flags_compression=flags_compression,
nsample_compression=nsample_compression,
data_write_dtype=data_write_dtype)
del(uvh5_obj)
def write_uvh5_part(self, filename, data_array, flags_array, nsample_array, check_header=True,
antenna_nums=None, antenna_names=None, ant_str=None, bls=None,
frequencies=None, freq_chans=None, times=None, polarizations=None,
blt_inds=None, run_check_acceptability=True, add_to_history=None):
"""
Write data to a UVH5 file that has already been initialized.
Parameters
----------
filename : str
The UVH5 file to write to. It must already exist, and is assumed to
have been initialized with initialize_uvh5_file.
data_array : ndarray
The data to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
flags_array : ndarray
The flags array to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
nsample_array : ndarray
The nsample array to write to disk. A check is done to ensure that the
dimensions of the data passed in conform to the ones specified by
the "selection" arguments.
check_header : bool
Option to check that the metadata present in the header on disk
matches that in the object.
antenna_nums : array_like of int, optional
The antennas numbers to include when writing data into the file
(antenna positions and names for the removed antennas will be retained).
This cannot be provided if `antenna_names` is also provided.
antenna_names : array_like of str, optional
The antennas names to include when writing data into the file
(antenna positions and names for the removed antennas will be retained).
This cannot be provided if `antenna_nums` is also provided.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to include when writing data into the file. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None.
ant_str : str, optional
A string containing information about what antenna numbers
            and polarizations to include when writing data into the file.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
            `antenna_names`, `bls` or `polarizations` parameters;
            if it is, a ValueError will be raised.
frequencies : array_like of float, optional
The frequencies to include when writing data into the file, each
value passed here should exist in the freq_array.
freq_chans : array_like of int, optional
            The frequency channel numbers to include when writing data into the file.
times : array_like of float, optional
The times to include when writing data into the file, each value
passed here should exist in the time_array.
polarizations : array_like of int, optional
The polarizations numbers to include when writing data into the file,
each value passed here should exist in the polarization_array.
blt_inds : array_like of int, optional
The baseline-time indices to include when writing data into the file.
This is not commonly used.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file (the default is True, meaning the acceptable
range check will be done).
add_to_history : str
String to append to history before write out. Default is no appending.
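        Examples
        --------
        A minimal sketch; the file name and the data_chunk, flag_chunk and
        nsample_chunk arrays are hypothetical, and the file is assumed to have
        been created with initialize_uvh5_file:

        >>> uv.write_uvh5_part('partial.uvh5', data_chunk, flag_chunk,
        ...                    nsample_chunk, freq_chans=[0, 1, 2, 3])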
"""
uvh5_obj = self._convert_to_filetype('uvh5')
uvh5_obj.write_uvh5_part(filename, data_array, flags_array, nsample_array,
check_header=check_header, antenna_nums=antenna_nums,
antenna_names=antenna_names, bls=bls, ant_str=ant_str,
frequencies=frequencies, freq_chans=freq_chans,
times=times, polarizations=polarizations,
blt_inds=blt_inds,
run_check_acceptability=run_check_acceptability,
add_to_history=add_to_history)
del(uvh5_obj)
def read(self, filename, axis=None, file_type=None,
antenna_nums=None, antenna_names=None, ant_str=None, bls=None,
frequencies=None, freq_chans=None, times=None, polarizations=None,
blt_inds=None, time_range=None, keep_all_metadata=True,
read_metadata=True, read_data=True,
phase_type=None, correct_lat_lon=True, use_model=False,
data_column='DATA', pol_order='AIPS', data_array_dtype=np.complex128,
run_check=True, check_extra=True, run_check_acceptability=True):
"""
Read a generic file into a UVData object.
Parameters
----------
filename : str or list of str
The file(s) or list(s) of files to read from.
file_type : str
One of ['uvfits', 'miriad', 'fhd', 'ms', 'uvh5'] or None.
If None, the code attempts to guess what the file type is.
For miriad and ms types, this is based on the standard directory
structure. For FHD, uvfits and uvh5 files it's based on file
extensions (FHD: .sav, .txt; uvfits: .uvfits; uvh5: .uvh5).
Note that if a list of datasets is passed, the file type is
determined from the first dataset.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
antenna_nums : array_like of int, optional
The antennas numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antennas names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1,2) and (2,3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is, a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array. Ignored if read_data is False.
time_range : list of float, optional
len-2 list containing min and max range of times in Julian Date to
include when reading data into the object. e.g. [2458115.20, 2458115.40]
Cannot be set with times.
polarizations : array_like of int, optional
The polarizations numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_metadata : bool
Option to read in metadata (times, baselines, uvws) as well as
basic header info. Only used if file_type is 'uvfits' and read_data
is False (metadata will be read if data is read). If file_type is
'uvfits' and both read_data and read_metadata are false, only basic
header info is read in.
read_data : bool
Read in the data. Only used if file_type is 'uvfits',
'miriad' or 'uvh5'. If set to False, only the metadata will be
read in (for uvfits, this can be further restricted to just the
header if read_metadata is False). Setting read_data to False
results in an incompletely defined object (check will not pass).
phase_type : str, optional
Option to specify the phasing status of the data. Only used if
file_type is 'miriad'. Options are 'drift', 'phased' or None.
'drift' means the data are zenith drift data, 'phased' means the
data are phased to a single RA/Dec. Default is None
meaning it will be guessed at based on the file contents.
correct_lat_lon : bool
Option to update the latitude and longitude from the known_telescopes
list if the altitude is missing. Only used if file_type is 'miriad'.
use_model : bool
Option to read in the model visibilities rather than the dirty
visibilities (the default is False, meaning the dirty visibilities
will be read). Only used if file_type is 'fhd'.
data_column : str
name of CASA data column to read into data_array. Options are:
'DATA', 'MODEL', or 'CORRECTED_DATA'. Only used if file_type is 'ms'.
pol_order : str
Option to specify polarizations order convention, options are
'CASA' or 'AIPS'. Only used if file_type is 'ms'.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128 (double-
precision real and imaginary). Only used if the datatype of the visibility
data on-disk is not 'c8' or 'c16'. Only used if file_type is 'uvh5'.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
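Examples
--------
A minimal usage sketch; the file names are hypothetical and any of the
supported file types listed above can be read the same way:
    uv = UVData()
    uv.read('example.uvfits', file_type='uvfits')
    # file_type may also be omitted and guessed from the extension
    uv.read('example.uvh5', antenna_nums=[0, 1, 2])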
"""
if isinstance(filename, (list, tuple)):
# this is either a list of separate files to read or a list of FHD files
if isinstance(filename[0], (list, tuple)):
# this must be a list of lists for FHD
file_type = 'fhd'
multi = True
else:
basename, extension = os.path.splitext(filename[0])
if extension == '.sav' or extension == '.txt':
file_type = 'fhd'
multi = False
else:
multi = True
else:
multi = False
if file_type is None:
if multi:
file_test = filename[0]
else:
file_test = filename
if os.path.isdir(file_test):
# it's a directory, so it's either miriad or ms file type
if os.path.exists(os.path.join(file_test, 'vartable')):
# It's miriad.
file_type = 'miriad'
elif os.path.exists(os.path.join(file_test, 'OBSERVATION')):
# It's a measurement set.
file_type = 'ms'
else:
basename, extension = os.path.splitext(file_test)
if extension == '.uvfits':
file_type = 'uvfits'
elif extension == '.uvh5':
file_type = 'uvh5'
if file_type is None:
raise ValueError('File type could not be determined.')
if time_range is not None:
if times is not None:
raise ValueError(
'Only one of times and time_range can be provided.')
if antenna_names is not None and antenna_nums is not None:
raise ValueError('Only one of antenna_nums and antenna_names can be provided.')
if file_type == 'uvfits':
if time_range is not None:
select = True
warnings.warn('Warning: "time_range" keyword is set which is not '
'supported by read_uvfits. This select will be '
'done after reading the file.')
else:
select = False
self.read_uvfits(filename, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str,
bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, read_metadata=read_metadata,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata, axis=axis)
if select:
unique_times = np.unique(self.time_array)
times_to_keep = unique_times[np.where((unique_times >= np.min(time_range))
& (unique_times <= np.max(time_range)))]
self.select(times=times_to_keep, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
elif file_type == 'miriad':
if (antenna_names is not None or frequencies is not None or freq_chans is not None
or times is not None or blt_inds is not None):
if blt_inds is not None:
if (antenna_nums is not None or ant_str is not None
or bls is not None or time_range is not None):
warnings.warn('Warning: blt_inds is set along with select '
'on read keywords that are supported by '
'read_miriad and may downselect blts. '
'This may result in incorrect results '
'because the select on read will happen '
'before the blt_inds selection so the '
'indices may not match the expected locations.')
else:
warnings.warn('Warning: a select on read keyword is set that is not '
'supported by read_miriad. This select will be '
'done after reading the file.')
select = True
else:
select = False
self.read_miriad(filename, antenna_nums=antenna_nums, ant_str=ant_str,
bls=bls, polarizations=polarizations,
time_range=time_range, read_data=read_data,
phase_type=phase_type, correct_lat_lon=correct_lat_lon,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
axis=axis)
if select:
self.select(antenna_names=antenna_names, frequencies=frequencies,
freq_chans=freq_chans, times=times,
blt_inds=blt_inds, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
elif file_type == 'fhd':
if (antenna_nums is not None or antenna_names is not None
or ant_str is not None or bls is not None
or frequencies is not None or freq_chans is not None
or times is not None or polarizations is not None
or blt_inds is not None):
select = True
warnings.warn('Warning: select on read keyword set, but '
'file_type is "fhd" which does not support select '
'on read. Entire file will be read and then select '
'will be performed')
else:
select = False
self.read_fhd(filename, use_model=use_model, run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
axis=axis)
if select:
self.select(antenna_nums=antenna_nums, antenna_names=antenna_names,
ant_str=ant_str, bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
elif file_type == 'ms':
if (antenna_nums is not None or antenna_names is not None
or ant_str is not None or bls is not None
or frequencies is not None or freq_chans is not None
or times is not None or polarizations is not None
or blt_inds is not None):
select = True
warnings.warn('Warning: select on read keyword set, but '
'file_type is "fhd" which does not support select '
'on read. Entire file will be read and then select '
'will be performed')
else:
select = False
self.read_ms(filename, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_column=data_column, pol_order=pol_order, axis=axis)
if select:
self.select(antenna_nums=antenna_nums, antenna_names=antenna_names,
ant_str=ant_str, bls=bls, frequencies=frequencies,
freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
elif file_type == 'uvh5':
if (time_range is not None):
select = True
warnings.warn('Warning: "time_range" keyword is set which is not '
'supported by read_uvh5. This select will be '
'done after reading the file.')
else:
select = False
self.read_uvh5(filename, antenna_nums=antenna_nums,
antenna_names=antenna_names, ant_str=ant_str, bls=bls,
frequencies=frequencies, freq_chans=freq_chans, times=times,
polarizations=polarizations, blt_inds=blt_inds,
read_data=read_data, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
data_array_dtype=data_array_dtype,
keep_all_metadata=keep_all_metadata, axis=axis)
if select:
unique_times = np.unique(self.time_array)
times_to_keep = unique_times[np.where((unique_times >= np.min(time_range))
& (unique_times <= np.max(time_range)))]
self.select(times=times_to_keep, run_check=run_check, check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
keep_all_metadata=keep_all_metadata)
def get_ants(self):
"""
Get the unique antennas that have data associated with them.
Returns
-------
ndarray of int
Array of unique antennas with data associated with them.
"""
return np.unique(np.append(self.ant_1_array, self.ant_2_array))
def get_ENU_antpos(self, center=None, pick_data_ants=False):
"""
Returns antenna positions in ENU (topocentric) coordinates in units of meters.
Parameters
----------
center : bool
If True, subtract median of array position from antpos
pick_data_ants : bool
If True, return only antennas found in data
Returns
-------
antpos : ndarray
Antenna positions in ENU (topocentric) coordinates in units of meters, shape=(Nants, 3)
ants : ndarray
Antenna numbers matching ordering of antpos, shape=(Nants,)
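Examples
--------
A typical call pattern (sketch), keeping only antennas present in the data:
    antpos, ants = uv.get_ENU_antpos(center=False, pick_data_ants=True)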
"""
if center is None:
center = False
warnings.warn('The default for the `center` keyword has changed. '
'Previously it defaulted to True, using the median '
'antenna location; now it defaults to False, '
'using the telescope_location. This warning will be '
'removed in version 1.5', DeprecationWarning)
antpos = uvutils.ENU_from_ECEF((self.antenna_positions + self.telescope_location),
*self.telescope_location_lat_lon_alt)
ants = self.antenna_numbers
if pick_data_ants:
data_ants = np.unique(np.concatenate([self.ant_1_array, self.ant_2_array]))
telescope_ants = self.antenna_numbers
select = [x in data_ants for x in telescope_ants]
antpos = antpos[select, :]
ants = telescope_ants[select]
if center is True:
antpos -= np.median(antpos, axis=0)
return antpos, ants
def get_baseline_nums(self):
"""
Get the unique baselines that have data associated with them.
Returns
-------
ndarray of int
Array of unique baselines with data associated with them.
"""
return np.unique(self.baseline_array)
def get_antpairs(self):
"""
Get the unique antpair tuples that have data associated with them.
Returns
-------
list of tuples of int
list of unique antpair tuples (ant1, ant2) with data associated with them.
"""
return [self.baseline_to_antnums(bl) for bl in self.get_baseline_nums()]
def get_pols(self):
"""
Get the polarizations in the data.
Returns
-------
list of str
list of polarizations (as strings) in the data.
"""
return uvutils.polnum2str(self.polarization_array, x_orientation=self.x_orientation)
def get_antpairpols(self):
"""
Get the unique antpair + pol tuples that have data associated with them.
Returns
-------
list of tuples of int
list of unique antpair + pol tuples (ant1, ant2, pol) with data associated with them.
"""
pols = self.get_pols()
bls = self.get_antpairs()
return [(bl) + (pol,) for bl in bls for pol in pols]
def get_feedpols(self):
"""
Get the unique antenna feed polarizations in the data.
Returns
-------
list of str
list of antenna feed polarizations (e.g. ['X', 'Y']) in the data.
Raises
------
ValueError
If any pseudo-Stokes visibilities are present
"""
if np.any(self.polarization_array > 0):
raise ValueError('Pseudo-Stokes visibilities cannot be interpreted as feed polarizations')
else:
return list(set(''.join(self.get_pols())))
def antpair2ind(self, ant1, ant2=None, ordered=True):
"""
Get indices along the baseline-time axis for a given antenna pair.
This will search for either the key as specified, or the key and its
conjugate.
Parameters
----------
ant1, ant2 : int
Either an antenna-pair key, or key expanded as arguments,
e.g. antpair2ind( (10, 20) ) or antpair2ind(10, 20)
ordered : bool
If True, search for antpair as provided, else search for it and it's conjugate.
Returns
-------
inds : ndarray of int-64
indices of the antpair along the baseline-time axis.
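Examples
--------
For a UVData instance ``uv`` (antenna numbers are illustrative), the two
call styles are interchangeable:
    inds = uv.antpair2ind((10, 20))
    inds = uv.antpair2ind(10, 20)
    inds = uv.antpair2ind(10, 20, ordered=False)  # also match (20, 10)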
"""
# check for expanded antpair or key
if ant2 is None:
if not isinstance(ant1, tuple):
raise ValueError("antpair2ind must be fed an antpair tuple "
"or expand it as arguments")
ant2 = ant1[1]
ant1 = ant1[0]
else:
if not isinstance(ant1, (int, np.integer)):
raise ValueError("antpair2ind must be fed an antpair tuple or "
"expand it as arguments")
if not isinstance(ordered, (bool, np.bool_)):
raise ValueError("ordered must be a boolean")
# if getting auto-corr, ordered must be True
if ant1 == ant2:
ordered = True
# get indices
inds = np.where((self.ant_1_array == ant1) & (self.ant_2_array == ant2))[0]
if ordered:
return inds
else:
ind2 = np.where((self.ant_1_array == ant2) & (self.ant_2_array == ant1))[0]
inds = np.asarray(np.append(inds, ind2), dtype=np.int64)
return inds
def _key2inds(self, key):
"""
Interpret user specified key as a combination of antenna pair and/or polarization.
Parameters
----------
key : tuple of int
Identifier of data. Key can be length 1, 2, or 3:
if len(key) == 1:
if (key < 5) or (type(key) is str): interpreted as a
polarization number/name, return all blts for that pol.
else: interpreted as a baseline number. Return all times and
polarizations for that baseline.
if len(key) == 2: interpreted as an antenna pair. Return all
times and pols for that baseline.
if len(key) == 3: interpreted as antenna pair and pol (ant1, ant2, pol).
Return all times for that baseline, pol. pol may be a string.
Returns
-------
blt_ind1 : ndarray of int
blt indices for antenna pair.
blt_ind2 : ndarray of int
blt indices for conjugate antenna pair.
Note if a cross-pol baseline is requested, the polarization will
also be reversed so the appropriate correlations are returned.
e.g. asking for (1, 2, 'xy') may return conj(2, 1, 'yx'), which
is equivalent to the requesting baseline. See utils.conj_pol() for
complete conjugation mapping.
pol_ind : tuple of ndarray of int
polarization indices for blt_ind1 and blt_ind2
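Examples
--------
Illustrative keys for a UVData instance ``uv`` (antenna numbers and
polarizations are arbitrary):
    uv._key2inds((1, 2))        # all pols for baseline (1, 2)
    uv._key2inds((1, 2, 'xx'))  # one baseline, one polarization
    uv._key2inds('xx')          # all baselines for one polarization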
"""
key = uvutils._get_iterable(key)
if type(key) is str:
# Single string given, assume it is polarization
pol_ind1 = np.where(self.polarization_array
== uvutils.polstr2num(key, x_orientation=self.x_orientation))[0]
if len(pol_ind1) > 0:
blt_ind1 = np.arange(self.Nblts, dtype=np.int64)
blt_ind2 = np.array([], dtype=np.int64)
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
else:
raise KeyError('Polarization {pol} not found in data.'.format(pol=key))
elif len(key) == 1:
key = key[0] # For simplicity
if isinstance(key, Iterable):
# Nested tuple. Call function again.
blt_ind1, blt_ind2, pol_ind = self._key2inds(key)
elif key < 5:
# Small number, assume it is a polarization number a la AIPS memo
pol_ind1 = np.where(self.polarization_array == key)[0]
if len(pol_ind1) > 0:
blt_ind1 = np.arange(self.Nblts)
blt_ind2 = np.array([], dtype=np.int64)
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
else:
raise KeyError('Polarization {pol} not found in data.'.format(pol=key))
else:
# Larger number, assume it is a baseline number
inv_bl = self.antnums_to_baseline(self.baseline_to_antnums(key)[1],
self.baseline_to_antnums(key)[0])
blt_ind1 = np.where(self.baseline_array == key)[0]
blt_ind2 = np.where(self.baseline_array == inv_bl)[0]
if len(blt_ind1) + len(blt_ind2) == 0:
raise KeyError('Baseline {bl} not found in data.'.format(bl=key))
if len(blt_ind1) > 0:
pol_ind1 = np.arange(self.Npols)
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
try:
pol_ind2 = uvutils.reorder_conj_pols(self.polarization_array)
except ValueError:
if len(blt_ind1) == 0:
raise KeyError('Baseline {bl} not found for polarization '
'array in data.'.format(bl=key))
else:
pol_ind2 = np.array([], dtype=np.int64)
blt_ind2 = np.array([], dtype=np.int64)
else:
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
elif len(key) == 2:
# Key is an antenna pair
blt_ind1 = self.antpair2ind(key[0], key[1])
blt_ind2 = self.antpair2ind(key[1], key[0])
if len(blt_ind1) + len(blt_ind2) == 0:
raise KeyError('Antenna pair {pair} not found in data'.format(pair=key))
if len(blt_ind1) > 0:
pol_ind1 = np.arange(self.Npols)
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
try:
pol_ind2 = uvutils.reorder_conj_pols(self.polarization_array)
except ValueError:
if len(blt_ind1) == 0:
raise KeyError('Baseline {bl} not found for polarization '
'array in data.'.format(bl=key))
else:
pol_ind2 = np.array([], dtype=np.int64)
blt_ind2 = np.array([], dtype=np.int64)
else:
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
elif len(key) == 3:
# Key is an antenna pair + pol
blt_ind1 = self.antpair2ind(key[0], key[1])
blt_ind2 = self.antpair2ind(key[1], key[0])
if len(blt_ind1) + len(blt_ind2) == 0:
raise KeyError('Antenna pair {pair} not found in '
'data'.format(pair=(key[0], key[1])))
if type(key[2]) is str:
# pol is str
if len(blt_ind1) > 0:
pol_ind1 = np.where(
self.polarization_array
== uvutils.polstr2num(key[2],
x_orientation=self.x_orientation))[0]
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
pol_ind2 = np.where(
self.polarization_array
== uvutils.polstr2num(uvutils.conj_pol(key[2]),
x_orientation=self.x_orientation))[0]
else:
pol_ind2 = np.array([], dtype=np.int64)
else:
# polarization number a la AIPS memo
if len(blt_ind1) > 0:
pol_ind1 = np.where(self.polarization_array == key[2])[0]
else:
pol_ind1 = np.array([], dtype=np.int64)
if len(blt_ind2) > 0:
pol_ind2 = np.where(self.polarization_array == uvutils.conj_pol(key[2]))[0]
else:
pol_ind2 = np.array([], dtype=np.int64)
pol_ind = (pol_ind1, pol_ind2)
if len(blt_ind1) * len(pol_ind[0]) + len(blt_ind2) * len(pol_ind[1]) == 0:
raise KeyError('Polarization {pol} not found in data.'.format(pol=key[2]))
# Catch autos
if np.array_equal(blt_ind1, blt_ind2):
blt_ind2 = np.array([], dtype=np.int64)
return (blt_ind1, blt_ind2, pol_ind)
def _smart_slicing(self, data, ind1, ind2, indp, squeeze='default',
force_copy=False):
"""
Method to quickly get the relevant section of a data-like array.
Used in get_data, get_flags and get_nsamples.
Parameters
----------
data : ndarray
4-dimensional array shaped like self.data_array
ind1 : array_like of int
blt indices for antenna pair (e.g. from self._key2inds)
ind2 : array_like of int
blt indices for conjugate antenna pair. (e.g. from self._key2inds)
indp : tuple array_like of int
polarization indices for ind1 and ind2 (e.g. from self._key2inds)
squeeze : str
string specifying how to squeeze the returned array. Options are:
'default': squeeze pol and spw dimensions if possible;
'none': no squeezing of resulting numpy array;
'full': squeeze all length 1 dimensions.
force_copy : bool
Option to explicitly make a copy of the data.
Returns
-------
ndarray
copy (or if possible, a read-only view) of relevant section of data
"""
p_reg_spaced = [False, False]
p_start = [0, 0]
p_stop = [0, 0]
dp = [1, 1]
for i, pi in enumerate(indp):
if len(pi) == 0:
continue
if len(set(np.ediff1d(pi))) <= 1:
p_reg_spaced[i] = True
p_start[i] = pi[0]
p_stop[i] = pi[-1] + 1
if len(pi) != 1:
dp[i] = pi[1] - pi[0]
if len(ind2) == 0:
# only unconjugated baselines
if len(set(np.ediff1d(ind1))) <= 1:
blt_start = ind1[0]
blt_stop = ind1[-1] + 1
if len(ind1) == 1:
dblt = 1
else:
dblt = ind1[1] - ind1[0]
if p_reg_spaced[0]:
out = data[blt_start:blt_stop:dblt, :, :, p_start[0]:p_stop[0]:dp[0]]
else:
out = data[blt_start:blt_stop:dblt, :, :, indp[0]]
else:
out = data[ind1, :, :, :]
if p_reg_spaced[0]:
out = out[:, :, :, p_start[0]:p_stop[0]:dp[0]]
else:
out = out[:, :, :, indp[0]]
elif len(ind1) == 0:
# only conjugated baselines
if len(set(np.ediff1d(ind2))) <= 1:
blt_start = ind2[0]
blt_stop = ind2[-1] + 1
if len(ind2) == 1:
dblt = 1
else:
dblt = ind2[1] - ind2[0]
if p_reg_spaced[1]:
out = np.conj(data[blt_start:blt_stop:dblt, :, :, p_start[1]:p_stop[1]:dp[1]])
else:
out = np.conj(data[blt_start:blt_stop:dblt, :, :, indp[1]])
else:
out = data[ind2, :, :, :]
if p_reg_spaced[1]:
out = np.conj(out[:, :, :, p_start[1]:p_stop[1]:dp[1]])
else:
out = np.conj(out[:, :, :, indp[1]])
else:
# both conjugated and unconjugated baselines
out = (data[ind1, :, :, :], np.conj(data[ind2, :, :, :]))
if p_reg_spaced[0] and p_reg_spaced[1]:
out = np.append(out[0][:, :, :, p_start[0]:p_stop[0]:dp[0]],
out[1][:, :, :, p_start[1]:p_stop[1]:dp[1]], axis=0)
else:
out = np.append(out[0][:, :, :, indp[0]],
out[1][:, :, :, indp[1]], axis=0)
if squeeze == 'full':
out = np.squeeze(out)
elif squeeze == 'default':
if out.shape[3] == 1:
# one polarization dimension
out = np.squeeze(out, axis=3)
if out.shape[1] == 1:
# one spw dimension
out = np.squeeze(out, axis=1)
elif squeeze != 'none':
raise ValueError('"' + str(squeeze) + '" is not a valid option for squeeze.'
'Only "default", "none", or "full" are allowed.')
if force_copy:
out = np.array(out)  # api: numpy.array
# Aiyagari model
# Endogenous Grid Points with IID Income
# <NAME> 2017
# Translated by <NAME> 2021
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from scipy.interpolate import interp1d
from discrete_normal import discrete_normal
from lininterp1 import lininterp1
# PARAMETERS
## preferences
risk_aver = 2
beta = 0.95
## production
deprec = 0.10
capshare = 0.4
## income risk: discretized N(mu,sigma^2)
mu_y = 1
sd_y = 0.2
ny = 5
## asset grids
na = 40
amax = 50
borrow_lim = 0
agrid_par = 0.5 # 1 for linear, 0 for L-shaped
## computation
max_iter = 1000
tol_iter = 1.0e-6
Nsim = 50000
Tsim = 500
maxiter_KL = 70
tol_KL = 1.0e-5
step_KL = 0.005
rguess = 1/beta-1-0.001 # a bit lower than inverse of discount rate
KLratioguess = ((rguess + deprec)/capshare)**(1/(capshare-1))
# OPTIONS
Display = 1
MakePlots = 1
## which function to use for interpolation
InterpCon = 0
InterpEMUC = 1
## tolerance for non-linear solver
TolX=1.0e-6
# UTILITY FUNCTION
if risk_aver==1:
u = lambda c: np.log(c)
else:
u = lambda c: (c**(1-risk_aver)-1)/(1-risk_aver)
u1 = lambda c: c**(-risk_aver)
u1inv = lambda u: u**(-1/risk_aver)
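## quick sanity check (illustrative values): marginal utility and its inverse
## round-trip, e.g. with risk_aver = 2, u1(2.0) = 0.25 and u1inv(0.25) = 2.0
assert np.isclose(u1inv(u1(2.0)), 2.0)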
# DRAW RANDOM NUMBERS
np.random.seed(2021)
yrand = np.random.rand(Nsim,Tsim)
# SET UP GRIDS
## assets
agrid = np.linspace(0,1,na).reshape(na,1)
agrid = agrid**(1/agrid_par)
agrid = borrow_lim + (amax-borrow_lim)*agrid
## income: discretize normal distribution
width = fsolve(lambda x: discrete_normal(ny,mu_y,sd_y,x)[0],2)
temp, ygrid, ydist = discrete_normal(ny,mu_y,sd_y,width)
ycumdist = np.cumsum(ydist)
# SIMULATE LABOR EFFICIENCY REALIZATIONS
if Display>=1:
print("Simulating labor efficiency realizations in advance")
yindsim = np.zeros((Nsim,Tsim), dtype=int)
for it in range(0,Tsim):
## income realization: we vectorize across all individuals at once
## (a MATLAB-style idiom); in other languages we would loop over individuals
yindsim[yrand[:,it]<=ycumdist[0],it] = 0
for iy in range(1,ny):
yindsim[np.logical_and(yrand[:,it]>ycumdist[iy-1], yrand[:,it]<=ycumdist[iy]),it] = iy
ysim = ygrid[yindsim]
# ITERATE OVER KL RATIO
KLratio = KLratioguess
iterKL = 0
KLdiff = 1
while iterKL<=maxiter_KL and abs(KLdiff)>tol_KL:
iterKL = iterKL + 1
r = capshare*(KLratio**(capshare-1)) - deprec
R = 1+r
wage = (1-capshare)* (KLratio**capshare)
## rescale efficiency units of labor so that output = 1
yscale = (KLratio**(-capshare))/(ygrid.T @ ydist)
## initialize consumption function in first iteration only
if iterKL==1:
conguess = np.zeros((na,ny))
for iy in range(0,ny):
conguess[:,iy] = r*agrid[:,0] + wage*yscale*ygrid[iy]
con = conguess.copy()
## solve for policy functions with EGP
Iter = 0
cdiff = 1000
while Iter<=max_iter and cdiff>tol_iter:
Iter = Iter + 1
sav = np.zeros((na,ny))
conlast = con.copy()
emuc = u1(conlast) @ ydist
muc1 = beta*R*emuc
con1 = u1inv(muc1)
## loop over income
ass1 = np.zeros((na,ny))
for iy in range(0,ny):
ass1[:,iy] = ((con1 + agrid -wage*yscale*ygrid[iy])/R)[:,0]
## loop over current period assets
for ia in range(0,na):
if agrid[ia]<ass1[0,iy]: # borrowing constraint binds
sav[ia,iy] = borrow_lim
else: # borrowing constraint does not bind
sav[ia,iy] = lininterp1(ass1[:,iy],agrid[:,0],agrid[ia])
con[:,iy] = (R*agrid + wage*yscale*ygrid[iy])[:,0] - sav[:,iy]
cdiff = np.max(abs(con-conlast))
if Display>=2:
print('Iteration no. ' + str(Iter), ' max con fn diff is ' + str(cdiff))
## simulate: start at assets from last iteration
if iterKL==1:
asim = np.zeros((Nsim,Tsim))
elif iterKL>1:
# asim[:,0] = Ea.*ones(Nsim,1)
asim[:,0] = asim[:,Tsim-1]
## create interpolating function
savinterp = list()
for iy in range(0,ny):
savinterp.append(interp1d(agrid[:,0],sav[:,iy],'linear'))
## loop over time periods
for it in range(0,Tsim):
if Display>=2 and (it+1)%100==0:
print("Simulating, time period " + str(it))
## asset choice
if it < Tsim-1:
for iy in range(0,ny):
asim[yindsim[:,it]==iy,it+1] = savinterp[iy](asim[yindsim[:,it]==iy,it])
## assign actual labor income values
labincsim = wage*yscale*ysim
## mean assets and efficiency units
Ea = np.mean(asim[:,Tsim-1])  # api: numpy.mean
"""
Copyright 2013 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy as cvx
from cvxpy.expressions.variable import Variable
from cvxpy.expressions.constants import Constant
from cvxpy.expressions.constants import Parameter
from cvxpy import Problem, Minimize
from cvxpy.tests.base_test import BaseTest
import numpy as np
import scipy.sparse as sp
import sys
PY35 = sys.version_info >= (3, 5)
class TestComplex(BaseTest):
""" Unit tests for the expression/expression module. """
def test_variable(self):
"""Test the Variable class.
"""
x = Variable(2, complex=False)
y = Variable(2, complex=True)
z = Variable(2, imag=True)
assert not x.is_complex()
assert not x.is_imag()
assert y.is_complex()
assert not y.is_imag()
assert z.is_complex()
assert z.is_imag()
with self.assertRaises(Exception) as cm:
x.value = np.array([1j, 0.])
self.assertEqual(str(cm.exception), "Variable value must be real.")
y.value = np.array([1., 0.])
y.value = np.array([1j, 0.])
with self.assertRaises(Exception) as cm:
z.value = np.array([1., 0.])
self.assertEqual(str(cm.exception), "Variable value must be imaginary.")
def test_parameter(self):
"""Test the parameter class.
"""
x = Parameter(2, complex=False)
y = Parameter(2, complex=True)
z = Parameter(2, imag=True)
assert not x.is_complex()
assert not x.is_imag()
assert y.is_complex()
assert not y.is_imag()
assert z.is_complex()
assert z.is_imag()
with self.assertRaises(Exception) as cm:
x.value = np.array([1j, 0.])
self.assertEqual(str(cm.exception), "Parameter value must be real.")
y.value = np.array([1., 0.])
y.value = np.array([1j, 0.])
with self.assertRaises(Exception) as cm:
z.value = np.array([1., 0.])
self.assertEqual(str(cm.exception), "Parameter value must be imaginary.")
def test_constant(self):
"""Test the parameter class.
"""
x = Constant(2)
y = Constant(2j+1)
z = Constant(2j)
assert not x.is_complex()
assert not x.is_imag()
assert y.is_complex()
assert not y.is_imag()
assert z.is_complex()
assert z.is_imag()
def test_objective(self):
"""Test objectives.
"""
x = Variable(complex=True)
with self.assertRaises(Exception) as cm:
Minimize(x)
self.assertEqual(str(cm.exception), "The 'minimize' objective must be real valued.")
with self.assertRaises(Exception) as cm:
cvx.Maximize(x)
self.assertEqual(str(cm.exception), "The 'maximize' objective must be real valued.")
def test_arithmetic(self):
"""Test basic arithmetic expressions.
"""
x = Variable(complex=True)
y = Variable(imag=True)
z = Variable()
expr = x + z
assert expr.is_complex()
assert not expr.is_imag()
expr = y + z
assert expr.is_complex()
assert not expr.is_imag()
expr = y*z
assert expr.is_complex()
assert expr.is_imag()
expr = y*y
assert not expr.is_complex()
assert not expr.is_imag()
expr = y/2
assert expr.is_complex()
assert expr.is_imag()
expr = y/1j
assert not expr.is_complex()
assert not expr.is_imag()
A = np.ones((2, 2))
expr = A*y*A
assert expr.is_complex()
assert expr.is_imag()
def test_real(self):
"""Test real.
"""
A = np.ones((2, 2))
expr = Constant(A) + 1j*Constant(A)
expr = cvx.real(expr)
assert expr.is_real()
assert not expr.is_complex()
assert not expr.is_imag()
self.assertItemsAlmostEqual(expr.value, A)
x = Variable(complex=True)
expr = cvx.imag(x) + cvx.real(x)
assert expr.is_real()
def test_imag(self):
"""Test imag.
"""
A = np.ones((2, 2))
expr = Constant(A) + 2j*Constant(A)
expr = cvx.imag(expr)
assert expr.is_real()
assert not expr.is_complex()
assert not expr.is_imag()
self.assertItemsAlmostEqual(expr.value, 2*A)
def test_conj(self):
"""Test imag.
"""
A = np.ones((2, 2))
expr = Constant(A) + 1j*Constant(A)
expr = cvx.conj(expr)
assert not expr.is_real()
assert expr.is_complex()
assert not expr.is_imag()
self.assertItemsAlmostEqual(expr.value, A - 1j*A)
def test_affine_atoms_canon(self):
"""Test canonicalization for affine atoms.
"""
# Scalar.
x = Variable()
expr = cvx.imag(x + 1j*x)
prob = Problem(Minimize(expr), [x >= 0])
result = prob.solve()
self.assertAlmostEqual(result, 0)
self.assertAlmostEqual(x.value, 0)
x = Variable(imag=True)
expr = 1j*x
prob = Problem(Minimize(expr), [cvx.imag(x) <= 1])
result = prob.solve()
self.assertAlmostEqual(result, -1)
self.assertAlmostEqual(x.value, 1j)
x = Variable(2)
expr = x/1j
prob = Problem(Minimize(expr[0]*1j + expr[1]*1j), [cvx.real(x + 1j) >= 1])
result = prob.solve()
self.assertAlmostEqual(result, -np.inf)
prob = Problem(Minimize(expr[0]*1j + expr[1]*1j), [cvx.real(x + 1j) <= 1])
result = prob.solve()
self.assertAlmostEqual(result, -2)
self.assertItemsAlmostEqual(x.value, [1, 1])
prob = Problem(Minimize(expr[0]*1j + expr[1]*1j), [cvx.real(x + 1j) >= 1, cvx.conj(x) <= 0])
result = prob.solve()
self.assertAlmostEqual(result, np.inf)
x = Variable((2, 2))
y = Variable((3, 2), complex=True)
expr = cvx.vstack([x, y])
prob = Problem(Minimize(cvx.sum(cvx.imag(cvx.conj(expr)))),
[x == 0, cvx.real(y) == 0, cvx.imag(y) <= 1])
result = prob.solve()
self.assertAlmostEqual(result, -6)
self.assertItemsAlmostEqual(y.value, 1j*np.ones((3, 2)))
self.assertItemsAlmostEqual(x.value, np.zeros((2, 2)))
x = Variable((2, 2))
y = Variable((3, 2), complex=True)
expr = cvx.vstack([x, y])
prob = Problem(Minimize(cvx.sum(cvx.imag(expr.H))),
[x == 0, cvx.real(y) == 0, cvx.imag(y) <= 1])
result = prob.solve()
self.assertAlmostEqual(result, -6)
self.assertItemsAlmostEqual(y.value, 1j*np.ones((3, 2)))
self.assertItemsAlmostEqual(x.value, np.zeros((2, 2)))
def test_params(self):
"""Test with parameters.
"""
p = cvx.Parameter(imag=True, value=1j)
x = Variable(2, complex=True)
prob = Problem(cvx.Maximize(cvx.sum(cvx.imag(x) + cvx.real(x))), [cvx.abs(p*x) <= 2])
result = prob.solve()
self.assertAlmostEqual(result, 4*np.sqrt(2))
val = np.ones(2)*np.sqrt(2)
self.assertItemsAlmostEqual(x.value, val + 1j*val)
def test_abs(self):
"""Test with absolute value.
"""
x = Variable(2, complex=True)
prob = Problem(cvx.Maximize(cvx.sum(cvx.imag(x) + cvx.real(x))), [cvx.abs(x) <= 2])
result = prob.solve()
self.assertAlmostEqual(result, 4*np.sqrt(2))
val = np.ones(2)*np.sqrt(2)
self.assertItemsAlmostEqual(x.value, val + 1j*val)
def test_pnorm(self):
"""Test complex with pnorm.
"""
x = Variable((1, 2), complex=True)
prob = Problem(cvx.Maximize(cvx.sum(cvx.imag(x) + cvx.real(x))), [cvx.norm1(x) <= 2])
result = prob.solve()
self.assertAlmostEqual(result, 2*np.sqrt(2))
val = np.ones(2)*np.sqrt(2)/2
# self.assertItemsAlmostEqual(x.value, val + 1j*val)
x = Variable((2, 2), complex=True)
prob = Problem(cvx.Maximize(cvx.sum(cvx.imag(x) + cvx.real(x))),
[cvx.pnorm(x, p=2) <= np.sqrt(8)])
result = prob.solve()
self.assertAlmostEqual(result, 8)
val = np.ones((2, 2))
self.assertItemsAlmostEqual(x.value, val + 1j*val)
def test_matrix_norms(self):
"""Test matrix norms.
"""
P = np.arange(8) - 2j*np.arange(8)
P = np.reshape(P, (2, 4))
sigma_max = np.linalg.norm(P, 2)
X = Variable((2, 4), complex=True)
prob = Problem(Minimize(cvx.norm(X, 2)), [X == P])
result = prob.solve()
self.assertAlmostEqual(result, sigma_max, places=1)
norm_nuc = np.linalg.norm(P, 'nuc')
X = Variable((2, 4), complex=True)
prob = Problem(Minimize(cvx.norm(X, 'nuc')), [X == P])
result = prob.solve(solver=cvx.SCS, eps=1e-4)
self.assertAlmostEqual(result, norm_nuc, places=1)
def test_log_det(self):
"""Test log det.
"""
P = np.arange(9) - 2j*np.arange(9)
P = np.reshape(P, (3, 3))
P = np.conj(P.T).dot(P)/100 + np.eye(3)*.1
value = cvx.log_det(P).value
X = Variable((3, 3), complex=True)
prob = Problem(cvx.Maximize(cvx.log_det(X)), [X == P])
result = prob.solve(solver=cvx.SCS, eps=1e-6)
self.assertAlmostEqual(result, value, places=2)
def test_eigval_atoms(self):
"""Test eigenvalue atoms.
"""
P = np.arange(9) - 2j*np.arange(9)
P = np.reshape(P, (3, 3))
P1 = np.conj(P.T).dot(P)/10 + np.eye(3)*.1
P2 = np.array([[10, 1j, 0], [-1j, 10, 0], [0, 0, 1]])
for P in [P1, P2]:
value = cvx.lambda_max(P).value
X = Variable(P.shape, complex=True)
prob = Problem(cvx.Minimize(cvx.lambda_max(X)), [X == P])
result = prob.solve(solver=cvx.SCS, eps=1e-5)
self.assertAlmostEqual(result, value, places=2)
eigs = np.linalg.eigvals(P).real
value = cvx.sum_largest(eigs, 2).value
X = Variable(P.shape, complex=True)
prob = Problem(cvx.Minimize(cvx.lambda_sum_largest(X, 2)), [X == P])
result = prob.solve(solver=cvx.SCS, eps=1e-8, verbose=True)
self.assertAlmostEqual(result, value, places=3)
self.assertItemsAlmostEqual(X.value, P, places=3)
value = cvx.sum_smallest(eigs, 2).value
X = Variable(P.shape, complex=True)
prob = Problem(cvx.Maximize(cvx.lambda_sum_smallest(X, 2)), [X == P])
result = prob.solve(solver=cvx.SCS, eps=1e-6)
self.assertAlmostEqual(result, value, places=3)
def test_quad_form(self):
"""Test quad_form atom.
"""
# Create a random positive definite Hermitian matrix for all tests.
np.random.seed(42)
P = np.random.randn(3, 3) - 1j*np.random.randn(3, 3)
P = np.conj(P.T).dot(P)
# Solve a problem with real variable
b = np.arange(3)
x = Variable(3, complex=False)
value = cvx.quad_form(b, P).value
prob = Problem(cvx.Minimize(cvx.quad_form(x, P)), [x == b])
result = prob.solve()
self.assertAlmostEqual(result, value)
# Solve a problem with complex variable
b = np.arange(3)  # api: numpy.arange
"""classic Linear Quadratic Gaussian Regulator task"""
from numbers import Number
import gym
import gym.spaces
from gym import spaces
from gym.utils import seeding
import numpy as np
"""
Linear quadratic gaussian regulator task.
References
----------
- <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>,
Policy gradient approaches for multi-objective sequential decision making
2014 International Joint Conference on Neural Networks (IJCNN)
- <NAME> and <NAME>,
Reinforcement learning of motor skills with policy gradients,
Neural Networks, vol. 21, no. 4, pp. 682-697, 2008.
"""
#classic_control
from gym.envs.registration import register, spec
try:
spec('LQG1D-v0')
except:
register(
id='LQG1D-v0',
entry_point='baselines.envs.lqg1d:LQG1D'
)
class LQG1D(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self, discrete_reward=False):
self.horizon = 20
self.gamma = 0.99
self.discrete_reward = discrete_reward
self.max_pos = 4.0
self.max_action = 4.0
self.start_state = np.random.uniform(-self.max_pos, self.max_pos)
self.sigma_noise = 0.1
self.A = np.array([1]).reshape((1, 1))
self.B = np.array([1]).reshape((1, 1))
self.Q = np.array([0.9]).reshape((1, 1))
self.R = np.array([0.9]).reshape((1, 1))
self.max_cost = np.dot(self.max_pos,
np.dot(self.Q, self.max_pos)) + \
np.dot(self.max_action, np.dot(self.R, self.max_action))
# gym attributes
self.viewer = None
high = np.array([self.max_pos])
self.action_space = spaces.Box(low=-self.max_action,
high=self.max_action,
shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(low=-high, high=high,
dtype=np.float32)
# initialize state
# self.seed()
self.reset()
def step(self, action, render=False):
u = np.clip(action, -self.max_action, self.max_action)
noise = np.random.randn() * self.sigma_noise
xn = np.dot(self.A, self.state) + np.dot(self.B, u) + noise
xn = np.clip(xn, -self.max_pos, self.max_pos)
cost = np.dot(self.state,
np.dot(self.Q, self.state)) + \
np.dot(u, np.dot(self.R, u))
assert cost >= 0
normalized_cost = cost / self.max_cost
#normalized_cost = cost
self.state = np.array(xn.ravel())
if self.discrete_reward:
if abs(self.state[0]) <= 2 and abs(u) <= 2:
return self.get_state(), 0, False, {}
return self.get_state(), -1, False, {}
return self.get_state(), 1 - float(normalized_cost), False, {}
def reset(self, random_start=None):
if random_start is None:
self.state = np.array(self.start_state)
else:
self.state = np.array([np.random.uniform(low=-self.max_pos,
high=self.max_pos)])
return self.get_state()
def get_state(self):
return np.array(self.state)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = (self.max_pos * 2) * 2
scale = screen_width / world_width
bally = 100
ballradius = 3
if self.viewer is None:
clearance = 0 # y-offset
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
mass = rendering.make_circle(ballradius * 2)
mass.set_color(.8, .3, .3)
mass.add_attr(rendering.Transform(translation=(0, clearance)))
self.masstrans = rendering.Transform()
mass.add_attr(self.masstrans)
self.viewer.add_geom(mass)
self.track = rendering.Line((0, bally), (screen_width, bally))
self.track.set_color(0.5, 0.5, 0.5)
self.viewer.add_geom(self.track)
zero_line = rendering.Line((screen_width / 2, 0),
(screen_width / 2, screen_height))
zero_line.set_color(0.5, 0.5, 0.5)
self.viewer.add_geom(zero_line)
x = self.state[0]
ballx = x * scale + screen_width / 2.0
self.masstrans.set_translation(ballx, bally)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def _computeP2(self, K):
"""
This function computes the Riccati equation associated to the LQG
problem.
Args:
K (matrix): the matrix associated to the linear controller K * x
Returns:
P (matrix): the Riccati Matrix
"""
I = np.eye(self.Q.shape[0], self.Q.shape[1])
if np.array_equal(self.A, I) and np.array_equal(self.B, I):
P = (self.Q + np.dot(K.T, np.dot(self.R, K)  # api: numpy.dot
import warnings
import fire
import numpy as np
from scipy.special import logsumexp
from swissknife import utils
def sinkhorn_knopp_unbalanced(M, reg, reg_a, reg_b, numItermax=1000,
stopThr=1e-6, verbose=False, log=False,
a=np.array([]), b=np.array([]),
eps_div=1e-7, **unused_kwargs):
"""Allows different regularization weights on source and target domains."""
utils.handle_unused_kwargs(unused_kwargs)
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
dim_a, dim_b = M.shape
if len(a) == 0:
a = np.ones(dim_a, dtype=np.float64) / dim_a
if len(b) == 0:
b = np.ones(dim_b, dtype=np.float64) / dim_b
if len(b.shape) > 1:
n_hists = b.shape[1]
else:
n_hists = 0
if log:
log = {'err': []}
# we assume that no distances are null except those of the diagonal of
# distances
if n_hists:
u = np.ones((dim_a, 1)) / dim_a
v = np.ones((dim_b, n_hists)) / dim_b
a = a.reshape(dim_a, 1)
else:
u = np.ones(dim_a) / dim_a
v = np.ones(dim_b) / dim_b
# Next 3 lines equivalent to K= np.exp(-M/reg), but faster to compute
K = np.empty(M.shape, dtype=M.dtype)
np.divide(M, -reg, out=K)
np.exp(K, out=K)
f_a = reg_a / (reg_a + reg)
f_b = reg_b / (reg_b + reg)
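# Scaling updates (sketch): with K = exp(-M / reg), the unbalanced iteration is
#   u <- (a / (K v)) ** (reg_a / (reg_a + reg))
#   v <- (b / (K^T u)) ** (reg_b / (reg_b + reg))
# i.e. the usual Sinkhorn scalings damped by the exponents f_a and f_b; as a
# relaxation weight grows, f -> 1 and that marginal constraint becomes hard.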
for i in range(numItermax):
uprev = u
vprev = v
Kv = K.dot(v)
# Kv = np.where(Kv == 0, eps_div, Kv)
u = (a / Kv) ** f_a
Ktu = K.T.dot(u)
# Ktu = np.where(Ktu == 0, eps_div, Ktu)
v = (b / Ktu) ** f_b
if (np.any(Ktu == 0.)
or np.any(np.isnan(u)) or np.any(np.isnan(v))
or np.any(np.isinf(u)) or np.any(np.isinf(v))):
# we have reached the machine precision
# come back to previous solution and quit loop
warnings.warn('Numerical errors at iteration %s' % i)
u = uprev
v = vprev
break
err_u = abs(u - uprev).max() / max(abs(u).max(), abs(uprev).max(), 1.)
err_v = abs(v - vprev).max() / max(abs(v).max(), abs(vprev).max(), 1.)
err = 0.5 * (err_u + err_v)
if log:
log['err'].append(err)
if verbose:
if i % 50 == 0:
print(
'{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(i, err))
if err < stopThr:
break
if log:
log['logu'] = np.log(u + 1e-300)
log['logv'] = np.log(v + 1e-300)
if n_hists: # return only loss
res = np.einsum('ik,ij,jk,ij->k', u, K, v, M)
if log:
return res, log
else:
return res
else: # return OT matrix
if log:
return u[:, None] * K * v[None, :], log
else:
return u[:, None] * K * v[None, :]
def sinkhorn_stabilized_unbalanced(M, reg, reg_a, reg_b, tau=1e5, numItermax=1000,
stopThr=1e-6, verbose=False, log=False,
a=np.array([]), b=np.array([]), **kwargs):
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64)
dim_a, dim_b = M.shape
if len(a) == 0:
a = np.ones(dim_a, dtype=np.float64) / dim_a
if len(b) == 0:
b = np.ones(dim_b, dtype=np.float64) / dim_b
if len(b.shape) > 1:
n_hists = b.shape[1]
else:
n_hists = 0
if log:
log = {'err': []}
# we assume that no distances are null except those of the diagonal of
# distances
if n_hists:
u = np.ones((dim_a, n_hists)) / dim_a
v = np.ones((dim_b, n_hists)) / dim_b
a = a.reshape(dim_a, 1)
else:
u = np.ones(dim_a) / dim_a
v = np.ones(dim_b) / dim_b
# print(reg)
# Next 3 lines equivalent to K= np.exp(-M/reg), but faster to compute
K = np.empty(M.shape, dtype=M.dtype)
np.divide(M, -reg, out=K)
np.exp(K, out=K)
f_a = reg_a / (reg_a + reg)
f_b = reg_b / (reg_b + reg)
cpt = 0
err = 1.
alpha = np.zeros(dim_a)
beta = np.zeros(dim_b)
while (err > stopThr and cpt < numItermax):
uprev = u
vprev = v
Kv = K.dot(v)
f_alpha = np.exp(- alpha / (reg + reg_a))
f_beta = np.exp(- beta / (reg + reg_b))
if n_hists:
f_alpha = f_alpha[:, None]
f_beta = f_beta[:, None]
u = ((a / (Kv + 1e-16)) ** f_a) * f_alpha
Ktu = K.T.dot(u)
v = ((b / (Ktu + 1e-16)) ** f_b) * f_beta
absorbing = False
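# Absorption (sketch): whenever u or v exceeds tau, the current scalings are
# folded into the dual potentials alpha and beta and the kernel is rebuilt as
# K = exp((alpha + beta - M) / reg), keeping the iterates in a numerically
# safe range; this is what distinguishes the stabilized variant.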
if (u > tau).any() or (v > tau).any():
absorbing = True
if n_hists:
alpha = alpha + reg * np.log(np.max(u, 1))
beta = beta + reg * np.log(np.max(v, 1))
else:
alpha = alpha + reg * np.log(np.max(u))
beta = beta + reg * np.log(np.max(v))
K = np.exp((alpha[:, None] + beta[None, :] -
M) / reg)
v = np.ones_like(v)
if (np.any(Ktu == 0.)
or np.any(np.isnan(u)) or np.any(np.isnan(v))
or np.any(np.isinf(u)) or np.any(np.isinf(v))):
# we have reached the machine precision
# come back to previous solution and quit loop
warnings.warn('Numerical errors at iteration %s' % cpt)
u = uprev
v = vprev
break
if (cpt % 10 == 0 and not absorbing) or cpt == 0:
# we can speed up the process by checking for the error only all
# the 10th iterations
err = abs(u - uprev).max() / max(abs(u).max(), abs(uprev).max(),
1.)
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print(
'{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt = cpt + 1
if err > stopThr:
warnings.warn("Stabilized Unbalanced Sinkhorn did not converge." +
"Try a larger entropy `reg` or a lower mass `reg_m`." +
"Or a larger absorption threshold `tau`.")
if n_hists:
logu = alpha[:, None] / reg + np.log(u)
logv = beta[:, None] / reg + np.log(v)
else:
logu = alpha / reg + np.log(u)
logv = beta / reg + np.log(v)
if log:
log['logu'] = logu
log['logv'] = logv
if n_hists: # return only loss
res = logsumexp(np.log(M + 1e-100)[:, :, None] + logu[:, None, :] +
logv[None, :, :] - M[:, :, None] / reg, axis=(0, 1))
res = np.exp(res)
if log:
return res, log
else:
return res
else: # return OT matrix
ot_matrix = np.exp(logu[:, None] + logv[None, :] - M / reg)
if log:
return ot_matrix, log
else:
return ot_matrix
def test_unbalanced_solvers(
reg_a=10.,
reg_b=0.1,
reg=0.1,
stable_version=False,
seed=42,
img_path=None,
):
np.random.seed(seed)
n = 10
mu1, mu2, mu3 = -3, 0, 3
std = 0.3
x1 = -np.ones(n) * 0.3
x2 = -x1 * 0.3
y1 = np.concatenate(
[
np.random.randn(n // 2)  # api: numpy.random.randn
# Ignoring some linting rules in tests
# pylint: disable=missing-docstring
import pytest
import numpy as np
from bingo.variation.add_random_individuals import AddRandomIndividuals
@pytest.mark.parametrize("indvs_added", range(1, 5))
def test_random_individuals_added_to_pop(mocker, indvs_added):
dummy_population = [0]*10
mocked_variation = mocker.Mock(return_value=dummy_population)
mocked_variation.crossover_offspring = np.ones(10, dtype=bool)
mocked_variation.mutation_offspring = np.ones(10, dtype=bool)  # api: numpy.ones
# Fourier transforms of kernels test
#
# <EMAIL>, 2020
import numpy as np
import matplotlib.pyplot as plt
import sys
import pickle
import os
from matplotlib import cm
from tqdm import tqdm
sys.path.append('./covidgen')
import tools
import functions
import aux
# Font style
import matplotlib; matplotlib.rcParams.update(aux.tex_fonts)
figsize = (10, 3.8) # Two plots side-by-side
t = np.linspace(0,100,101)  # api: numpy.linspace
import click
import glob
import matplotlib.pyplot as plt
import pandas as pd
import geopandas as gpd
import numpy as np
import os
@click.command()
@click.option('-id', '--polygon-id', multiple=True, type=str)
@click.option('-s', '--statistics', help='which statistical method to use (mean, max, min, std); note: only has an effect with "-id all"', default='mean', type=str)
@click.option('-c', '--column', help='column name', default='chance_of_conflict', type=str)
@click.option('-t', '--title', help='title for plot and output file name', type=str)
@click.option('--verbose/--no-verbose', help='verbose on/off', default=False)
@click.argument('input-dir', type=click.Path())
@click.argument('output-dir', type=click.Path())
def main(input_dir=None, statistics=None, polygon_id=None, column=None, title=None, output_dir=None, verbose=None):
"""Quick and dirty function to plot the develoment of a column in the outputted geojson-files over time.
The script uses all geoJSON-files located in input-dir and retrieves values from them.
It is possible to obtain the development for multiple polygons (indicated via their ID) or for the entire study area.
If the latter, then different statistics can be chosen (mean, max, min, std, median, 'q05', 'q10', 'q90', 'q95').
Args:
input-dir (str): path to input directory with geoJSON-files located per projection year.
output-dir (str): path to directory where output will be stored.
Output:
a csv-file containing values per time step.
a png-file showing development over time.
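Example (script and directory names below are placeholders):
    python plot_development.py -id 1 -id 2 -c chance_of_conflict ./geojsons ./out
    python plot_development.py -id all -s mean -c chance_of_conflict ./geojsons ./out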
"""
click.echo('\nPLOTTING VARIABLE DEVELOPMENT OVER TIME')
# converting polygon IDs to list
polygon_id = list(polygon_id)
# check that there is at least one ID or 'all' specified
assert len(polygon_id) > 0, 'ERROR: please specify at least one polygon ID to be sampled or select "all" for sampling the entire study area'
# if 'all' is specified, no need to have list but get value directly
if polygon_id[0] == 'all':
click.echo('INFO: selected entire study area')
polygon_id = 'all'
click.echo('INFO: selected statistical method is {}'.format(statistics))
# create a suffix to be used for output files
suffix = 'all_{}'.format(statistics)
# check if supported statistical function is selected
if statistics not in ['mean', 'max', 'min', 'std', 'median', 'q05', 'q10', 'q90', 'q95']:
raise ValueError('ERROR: {} is not a supported statistical method'.format(statistics))
else:
click.echo('INFO: sampling from IDs {}'.format(polygon_id))
# for IDs, no statistical function can be applied as it's only one value...
if statistics is not None:
click.echo('WARNING: one or more IDs are provided, so the statistical function is ignored.')
# absolute path to input_dir
input_dir = os.path.abspath(input_dir)
click.echo('INFO: getting geojson-files from {}'.format(input_dir))
# collect all files in input_dir
all_files = glob.glob(os.path.join(input_dir, '*.geojson'))
# create dictionary with list for areas (either IDs or entire study area) to be sampled from
out_dict = dict()
if polygon_id != 'all':
for idx in polygon_id:
out_dict[int(idx)] = list()
else:
out_dict[polygon_id] = list()
# create a list to keep track of year-values in files
years = list()
# go through all files
click.echo('INFO: retrieving values from column {}'.format(column))
for geojson in all_files:
if verbose: click.echo('DEBUG: reading file {}'.format(geojson))
# read file and convert to geo-dataframe
gdf = gpd.read_file(geojson, driver='GeoJSON')
# convert geo-dataframe to dataframe
df = pd.DataFrame(gdf.drop(columns='geometry'))
# get year-value
year = int(str(str(os.path.basename(geojson)).rsplit('.')[0]).rsplit('_')[-1])
years.append(year)
# go through all IDs
if polygon_id != 'all':
for idx in polygon_id:
if verbose:
click.echo('DEBUG: sampling ID {}'.format(idx))
# if ID not in file, assign NaN
if int(idx) not in df.ID.values:
click.echo('WARNING: ID {} is not in {} - NaN set'.format(int(idx), geojson))
vals = np.nan
# otherwise, get value of column at this ID
else:
vals = df[column].loc[df.ID==int(idx)].values[0]
# append this value to list in dict
idx_list = out_dict[int(idx)]
idx_list.append(vals)
else:
# compute mean value over column
if statistics == 'mean': vals = df[column].mean()
if statistics == 'median': vals = df[column].median()
if statistics == 'max': vals = df[column].max()
if statistics == 'min': vals = df[column].min()
if statistics == 'std': vals = df[column].std()
if statistics == 'q05': vals = df[column].quantile(.05)
if statistics == 'q10': vals = df[column].quantile(.1)
if statistics == 'q90': vals = df[column].quantile(.9)
if statistics == 'q95': vals = df[column].quantile(.95)
# append this value to list in dict
idx_list = out_dict[polygon_id]
idx_list.append(vals)
# create a dataframe from dict and assign year-values as index
df = pd.DataFrame().from_dict(out_dict)
years = pd.to_datetime(years, format='%Y')
df.index = years
# create an output folder, if not yet there
if not os.path.isdir(os.path.abspath(output_dir)):
click.echo('INFO: creating output folder {}'.format(os.path.abspath(output_dir)))
os.makedirs(os.path.abspath(output_dir))
# save dataframe as csv-file
if polygon_id != 'all':
click.echo('INFO: saving to file {}'.format(os.path.abspath(os.path.join(output_dir, '{}_dev_IDs.csv'.format(column)))))
df.to_csv(os.path.abspath(os.path.join(output_dir, '{}_dev_IDs.csv'.format(column))))
else:
click.echo('INFO: saving to file {}'.format(os.path.abspath(os.path.join(output_dir, '{}_dev_{}.csv'.format(column, suffix)))))
df.to_csv(os.path.abspath(os.path.join(output_dir, '{}_dev_{}.csv'.format(column, suffix))))
# create a simple plot and save to file
# if IDs are specified, with one subplot per ID
if polygon_id != 'all':
fig, axes = plt.subplots(nrows=len(polygon_id), ncols=1, sharex=True)
df.plot(subplots=True, ax=axes)
for ax in axes:
ax.set_ylim(0, 1)
ax.set_yticks(np.arange(0, 1.1, 1))
if title != None:
ax.set_title(str(title))
plt.savefig(os.path.abspath(os.path.join(output_dir, '{}_dev_IDs.png'.format(column))), dpi=300, bbox_inches='tight')
# otherwise, only one plot needed
else:
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
df.plot(ax=ax)
ax.set_ylim(0, 1)
ax.set_yticks(np.arange(0, 1.1, 1))  # api: numpy.arange
import numpy as np
from datetime import datetime
from motionstruct.classes import PhiWorld, PhiWorldDataHose,\
PhiObservationGeneratorLocPredict,PhiObservationGeneratorLocPredictFromFiles,\
PhiKalmanFilterLocPredict
from motionstruct.functions import dist_mod2pi
# # # # # # # # # # # # # #
# # # CORE PARAMETERS # # #
# # # # # # # # # # # # # #
GROUNDTRUTH = "GLO" # in ("GLO", "CLU", "CDH")
datadsl = "2019-03-26-10-47-59-579319_uid_00107_glo"
glo = 4/5
human_readable_dsl = "pred_datarun_for_" + datadsl
N = 7 # number of objects
volatility_factor = 8. * np.array( [1., 1., 1.] + [1]*7 ) # Volatility
speed_factor = 1.5
tau_vphi = 8. # OU time constant of angular velocity
sigma_obs_phi = 0.001 # observation noise of phi
whitespace = True
# # # # # # # # # # # # # #
# # # AUTO PARAMETERS # # #
# # # # # # # # # # # # # #
# Create dataset label
dsl = datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") + "_" + human_readable_dsl
M = 3+N # number of motion sources
# # # BUILD MOTION STRUCTURE MATRICES # # #
Tau = {}
Lam = {}
Bs = {}
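# Each observer model below is stored as a pair (B, lam): B maps the M latent
# motion sources onto the N objects' angular velocities, lam sets the strength
# of each source, and Tau holds the matching OU time constants (rescaled by the
# volatility factor).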
# # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # O B S E R V E R M O D E L S # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # OBSERVER : independent # # #
OBSERVER = "IND"
B = np.zeros((N, M)) # Motion structure component matrix
lam = np.zeros(M) # Motion component strengths
B[:] = np.array([
[ 1, 1, 0, 1,0,0,0,0,0,0],
[ 1, 1, 0, 0,1,0,0,0,0,0],
[ 1, 1, 0, 0,0,1,0,0,0,0],
[ 1,-1, 0, 0,0,0,1,0,0,0],
[ 1,-1, 0, 0,0,0,0,1,0,0],
[ 1,-1, 0, 0,0,0,0,0,1,0],
[ 1, 0, 0, 0,0,0,0,0,0,1],
], dtype=np.float64)
lam_tot = 1/2
lam_I = lam_tot
lam_G = 0.
lam_C = 0.
lam[:] = np.sqrt(volatility_factor) * np.array([lam_G, lam_C, 0.] + [lam_I]*7)
lam *= speed_factor
Tau[OBSERVER] = tau_vphi / volatility_factor # adapt by volatility
Lam[OBSERVER] = lam
Bs[OBSERVER] = B
# # # OBSERVER : global # # #
OBSERVER = "GLO"
B = np.zeros((N, M)) # Motion structure component matrix
lam = np.zeros(M) # Motion component strengths
B[:] = np.array([
[ 1, 1, 0, 1,0,0,0,0,0,0],
[ 1, 1, 0, 0,1,0,0,0,0,0],
[ 1, 1, 0, 0,0,1,0,0,0,0],
[ 1,-1, 0, 0,0,0,1,0,0,0],
[ 1,-1, 0, 0,0,0,0,1,0,0],
[ 1,-1, 0, 0,0,0,0,0,1,0],
[ 1, 0, 0, 0,0,0,0,0,0,1],
], dtype=np.float64)
lam_tot = 1/2
lam_I = 1/12
lam_G = np.sqrt(lam_tot**2 - lam_I**2)
lam_C = 0.
lam[:] = np.sqrt(volatility_factor) * np.array([lam_G, lam_C, 0.] + [lam_I]*7)
lam *= speed_factor
Tau[OBSERVER] = tau_vphi / volatility_factor # adapt by volatility
Lam[OBSERVER] = lam
Bs[OBSERVER] = B
# # # OBSERVER : weak global (to match global component of CDH) # # #
OBSERVER = "GLW"
B = np.zeros((N, M)) # Motion structure component matrix
lam = np.zeros(M) # Motion component strengths
B[:] = np.array([
[ 1, 1, 0, 1,0,0,0,0,0,0],
[ 1, 1, 0, 0,1,0,0,0,0,0],
[ 1, 1, 0, 0,0,1,0,0,0,0],
[ 1,-1, 0, 0,0,0,1,0,0,0],
[ 1,-1, 0, 0,0,0,0,1,0,0],
[ 1,-1, 0, 0,0,0,0,0,1,0],
[ 1, 0, 0, 0,0,0,0,0,0,1],
], dtype=np.float64)
lam_tot = 1/2
lam_I = 1/12
lam_G = np.sqrt(glo) * np.sqrt(lam_tot**2 - lam_I**2)
lam_C = 0.
lam_M = np.sqrt(lam_tot**2 - lam_G**2)
lam[:] = np.sqrt(volatility_factor) * np.array([lam_G, lam_C, 0.] + [lam_M]*7)
lam *= speed_factor
Tau[OBSERVER] = tau_vphi / volatility_factor # adapt by volatility
Lam[OBSERVER] = lam
Bs[OBSERVER] = B
# # # OBSERVER : counter-rotating # # #
OBSERVER = "CNT"
B = np.zeros((N, M)) # Motion structure component matrix
lam = np.zeros(M) # Motion component strengths
B[:] = np.array([
[ 1, 1, 0, 1,0,0,0,0,0,0],
[ 1, 1, 0, 0,1,0,0,0,0,0],
[ 1, 1, 0, 0,0,1,0,0,0,0],
[ 1,-1, 0, 0,0,0,1,0,0,0],
[ 1,-1, 0, 0,0,0,0,1,0,0],
[ 1,-1, 0, 0,0,0,0,0,1,0],
[ 1,-1, 0, 0,0,0,0,0,0,1],
], dtype=np.float64)
lam_tot = 1/2
lam_I = 1/12
lam_G = 0.
lam_C = np.sqrt(lam_tot**2 - lam_I**2)
lam[:] = np.sqrt(volatility_factor) * np.array([lam_G, lam_C, 0.] + [lam_I]*7)
lam *= speed_factor
Tau[OBSERVER] = tau_vphi / volatility_factor # adapt by volatility
Lam[OBSERVER] = lam
Bs[OBSERVER] = B
# # # OBSERVER : clusters # # #
OBSERVER = "CLU"
B = np.zeros((N, M)) # Motion structure component matrix
lam = np.zeros(M) # Motion component strengths
B[:] = np.array([
[ 1, 0, 0, 1,0,0,0,0,0,0],
[ 1, 0, 0, 0,1,0,0,0,0,0],
[ 1, 0, 0, 0,0,1,0,0,0,0],
[ 0, 1, 0, 0,0,0,1,0,0,0],
[ 0, 1, 0, 0,0,0,0,1,0,0],
[ 0, 1, 0, 0,0,0,0,0,1,0],
[ 0, 1, 0, 0,0,0,0,0,0,1],
], dtype=np.float64)
lam_tot = 1/2
lam_I = 1/12
lam_G = 0.
lam_C = np.sqrt(lam_tot**2 - lam_I**2)
lam_M = lam_tot
lam[:] = np.sqrt(volatility_factor) * np.array([lam_C, lam_C, 0.] + [lam_I]*7 +[lam_M]*0 )
lam *= speed_factor
Tau[OBSERVER] = tau_vphi / volatility_factor # adapt by volatility
Lam[OBSERVER] = lam
Bs[OBSERVER] = B
# # # OBSERVER : weak clusters (To match the Green Cluster <-> Maverick correlation in CDH) # # #
OBSERVER = "CLW"
B =
|
np.zeros((N, M))
|
numpy.zeros
|
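# Illustrative aside: each observer block above allocates B with np.zeros((N, M))
# and then overwrites it with a hand-written component matrix. A minimal sketch
# of that allocate-then-fill pattern with toy sizes (not the N=7, M=10 above):
import numpy as np
N_toy, M_toy = 2, 3
B_toy = np.zeros((N_toy, M_toy))  # objects x motion sources
B_toy[:] = np.array([[1, 1, 0],
                     [1, 0, 1]], dtype=np.float64)
lam_toy = np.zeros(M_toy)  # one strength per motion source
assert B_toy.shape == (N_toy, M_toy)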
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import numpy as np
import torch
import argparse
from itertools import chain
from env_wrappers import VectorEnv
from simple_model import SimpleModel
from parl.algorithms import MAPPO
from simple_agent import SimpleAgent
from mappo_buffer import SeparatedReplayBuffer
from parl.utils import logger, summary
LR = 7e-4 # learning rate
VALUE_LOSS_COEF = 1 # Value loss coefficient (ie. c_1 in the paper)
ENTROPY_COEF = 0.01 # Entropy coefficient (ie. c_2 in the paper)
HUBER_DELTA = 10.0 # coefficient of the huber loss
EPS = 1e-5 # Adam optimizer epsilon (default: 1e-5)
MAX_GRAD_NORM = 10.0 # Max gradient norm for gradient clipping
EPISODE_LENGTH = 25 # Max length for any episode
GAMMA = 0.99 # discount factor for rewards (default: 0.99)
GAE_LAMBDA = 0.95 # gae lambda parameter (default: 0.95)
LOG_INTERVAL_EPISODES = 5 # interval (in episodes) between two consecutive log printings
CLIP_PARAM = 0.2 # ppo clip parameter, suggestion 4 in the paper (default: 0.2)
PPO_EPOCH = 15 # number of epochs for updating using each T data, suggestion 3 in the paper (default: 15)
NUM_MINI_BATCH = 1 # number of batches for ppo, suggestion 3 in the paper (default: 1)
def get_act_dim_from_act_space(action_space):
if action_space.__class__.__name__ == "Discrete":
act_dim = action_space.n
else:
act_dim = action_space.high - action_space.low + 1
return act_dim
def main():
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
|
np.random.seed(args.seed)
|
numpy.random.seed
|
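# Illustrative aside: the script above seeds torch and numpy from the same
# command-line seed so runs are reproducible. A minimal sketch of that seeding
# pattern (torch omitted to keep it dependency-free; the seed value is arbitrary):
import random
import numpy as np
def seed_everything(seed):
    """Seed the Python and NumPy global RNGs."""
    random.seed(seed)
    np.random.seed(seed)
seed_everything(0)
# the global stream now matches a fresh RandomState built from the same seed
assert np.random.rand() == np.random.RandomState(0).rand()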
#%%
###############################################################################
# Import libraries
###############################################################################
from brian2 import *
import numpy as np
from pars import *
import matplotlib
import matplotlib.pyplot as plt
#%%
###############################################################################
# Simulation
###############################################################################
### Time parameters
seeds_num = 20 # number of seeds for most experiments
seeds_num_1 = 30 # number of seeds for persistence and spont. act. experiments
growth_time = 400 * second # growth time
test_time = 100 * second # test time
test_time_afper = 300 * second # test time after learning for persistence experiment
learn_time = 200 * second # training/learning time
relax_time = 10 * second # relaxation time
### Cluster parameters
clu_start = np.array([375, 500]) * umeter # position of first cluster
clu_end = np.array([2125, 500]) * umeter # position of last cluster
clu_shift = np.array([0, 150]) * umeter # shift of clusters/sequence from default position
clu_num = 8 # number of clusters/elements
clu_r = 100 * umeter # radius of clusters
### Light spot
seq_break = 2000 * ms # break between trials
offset_time = 199.9 * ms # time after full second at which input starts
spot_start = clu_start - np.array([clu_r, 0]) * meter # start position of spot
spot_end = clu_end + np.array([clu_r, 0]) * meter # end position of spot
spot_mid = 1 / 2 * (spot_end + spot_start) # mid position of spot
spot_dist = np.linalg.norm(spot_end - spot_start) * meter # distance of spot travel
w_ff = 0.04 # weight of input connections
N_aff = 100 # number of input spiketrains
spot_flash = 100 * ms # duration of flash during testing
bar_flash = 400 * ms # duration of bar flash during learning
spot_peak = 50 * Hz # max rate of input spike trains
spot_width = 150 * umeter # scale of light spot
spot_v = 4 * umeter/ms # default speed of light spot
spot_vs =
|
np.linspace(4, 20, 5)
|
numpy.linspace
|
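# Illustrative aside: np.linspace(4, 20, 5), as used for spot_vs above, returns
# five evenly spaced values including both endpoints (the experiment then
# attaches brian2 umeter/ms units). Minimal check, unitless here:
import numpy as np
speeds = np.linspace(4, 20, 5)
assert np.allclose(speeds, [4., 8., 12., 16., 20.])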
#data parse part
#reading force constant data from phonopy format
#scaled by mass, thus retun is dynamical matrix
import numpy as np
def read_fc_phonopy(FC_file,natom,masses):
"""
Reading force constant data from phonopy format.
In this module, force constant is scaled by mass.
Thus, the return is dynamical matrix
"""
with open(FC_file,'r') as fc:
lines=fc.readlines()
nlineblock=4
fc_all=np.zeros((natom,natom,3,3))
start=1
for i in range(natom):
for j in range(natom):
fc_block=lines[start+1:start+nlineblock]
fc=np.loadtxt(fc_block)
fc_all[i,j]=fc/np.sqrt(masses[i]*masses[j])
#fc_all[i,j]=fc
start=start+nlineblock
return fc_all
def read_fc_phonopy_noscale(FC_file,natom):
"""
Reading force constant data from phonopy format.
In this module, force constant is not scaled by mass.
Thus, the return is phonopy format force constant ndarray
"""
with open(FC_file,'r') as fc:
lines=fc.readlines()
nlineblock=4
fc_all=np.zeros((natom,natom,3,3))
start=1
for i in range(natom):
for j in range(natom):
fc_block=lines[start+1:start+nlineblock]
fc=np.loadtxt(fc_block)
fc_all[i,j]=fc
start=start+nlineblock
return fc_all
#convert phonopy style to flat(LAMMPS Dyn regular) format
def phonopy_to_flat(force_constants, natom):
return np.reshape(force_constants.transpose(0,2,1,3),(natom*3,natom*3))
#convert flat to phonopy style
def flat_to_phonopy(force_constants,natom):
return np.reshape(force_constants,(natom,3,natom,3)).transpose(0,2,1,3)
def dynmat_to_fcphonopy(dynmat,natom,masses):
'''
convert from dynamical matrix in ndarray(natom,natom,3,3) to force constant
return: phonopy format force constant
'''
fcphonopy=np.zeros((natom,natom,3,3))
for i in range(natom):
for j in range(natom):
fcphonopy[i,j]=dynmat[i,j]*np.sqrt(masses[i]*masses[j])
return fcphonopy
def fcphonopy_to_dynmat(force_constants,natom,masses):
dynmat=np.zeros((natom,natom,3,3))
for i in range(natom):
for j in range(natom):
dynmat[i,j]=force_constants[i,j]/
|
np.sqrt(masses[i]*masses[j])
|
numpy.sqrt
|
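# Illustrative aside: the phonopy helpers above convert a force-constant block
# Phi_ij into a dynamical-matrix block D_ij = Phi_ij / sqrt(m_i * m_j) and back.
# A minimal round-trip sketch on random toy data, mirroring the (natom, natom, 3, 3) layout:
import numpy as np
natom = 3
masses = np.array([12.0, 1.0, 16.0])
fc = np.random.rand(natom, natom, 3, 3)  # toy force constants
dyn = np.empty_like(fc)
for i in range(natom):
    for j in range(natom):
        dyn[i, j] = fc[i, j] / np.sqrt(masses[i] * masses[j])
back = np.array([[dyn[i, j] * np.sqrt(masses[i] * masses[j])
                  for j in range(natom)] for i in range(natom)])
assert np.allclose(back, fc)  # undoing the scaling recovers the force constants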
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.stats as ss
import os
import sys
import time
from .nutsjump import NUTSJump, HMCJump, MALAJump
try:
from mpi4py import MPI
except ImportError:
print('Do not have mpi4py package.')
from . import nompi4py as MPI
# try:
# import acor
# except ImportError:
# print('Do not have acor package')
# pass
# try:
# from emcee import autocorr
# except ImportError:
# print('Do not have emcee package')
# pass
class PTSampler(object):
"""
Parallel Tempering Markov Chain Monte-Carlo (PTMCMC) sampler.
This implementation uses an adaptive jump proposal scheme
by default using both standard and single component Adaptive
Metropolis (AM) and Differential Evolution (DE) jumps.
This implementation also makes use of MPI (mpi4py) to run
the parallel chains.
Along with the AM and DE jumps, the user can add custom
jump proposals with the ``addProposalToCycle`` function.
@param ndim: number of dimensions in problem
@param logl: log-likelihood function
@param logp: log prior function (must be normalized for evidence evaluation)
@param cov: Initial covariance matrix of model parameters for jump proposals
@param covinds: Indices of parameters for which to perform adaptive jumps
@param loglargs: any additional arguments (apart from the parameter vector) for
log likelihood
@param loglkwargs: any additional keyword arguments (apart from the parameter vector)
for log likelihood
@param logpargs: any additional arguments (apart from the parameter vector) for
log like prior
@param logl_grad: log-likelihood function, including gradients
@param logp_grad: prior function, including gradients
@param logpkwargs: any additional keyword arguments (apart from the parameter vector)
for log prior
@param outDir: Full path to output directory for chain files (default = ./chains)
@param verbose: Update current run-status to the screen (default=True)
@param resume: Resume from a previous chain (still in testing so beware) (default=False)
"""
def __init__(self, ndim, logl, logp, cov, groups=None, loglargs=[], loglkwargs={},
logpargs=[], logpkwargs={}, logl_grad=None, logp_grad=None,
comm=MPI.COMM_WORLD, outDir='./chains', verbose=True, resume=False):
# MPI initialization
self.comm = comm
self.MPIrank = self.comm.Get_rank()
self.nchain = self.comm.Get_size()
self.ndim = ndim
self.logl = _function_wrapper(logl, loglargs, loglkwargs)
self.logp = _function_wrapper(logp, logpargs, logpkwargs)
if logl_grad is not None and logp_grad is not None:
self.logl_grad = _function_wrapper(logl_grad, loglargs, loglkwargs)
self.logp_grad = _function_wrapper(logp_grad, logpargs, logpkwargs)
else:
self.logl_grad = None
self.logp_grad = None
self.outDir = outDir
self.verbose = verbose
self.resume = resume
# setup output file
if not os.path.exists(self.outDir):
try:
os.makedirs(self.outDir)
except OSError:
pass
# find indices for which to perform adaptive jumps
self.groups = groups
if groups is None:
self.groups = [np.arange(0, self.ndim)]
# set up covariance matrix
self.cov = cov
self.U = [[]] * len(self.groups)
self.S = [[]] * len(self.groups)
# do svd on parameter groups
for ct, group in enumerate(self.groups):
covgroup = np.zeros((len(group), len(group)))
for ii in range(len(group)):
for jj in range(len(group)):
covgroup[ii, jj] = self.cov[group[ii], group[jj]]
self.U[ct], self.S[ct], v = np.linalg.svd(covgroup)
self.M2 = np.zeros((ndim, ndim))
self.mu = np.zeros(ndim)
# initialize proposal cycle
self.propCycle = []
self.jumpDict = {}
# indicator for auxiliary jumps
self.aux = []
def initialize(self, Niter, ladder=None, Tmin=1, Tmax=None, Tskip=100,
isave=1000, covUpdate=1000, SCAMweight=30,
AMweight=20, DEweight=50,
NUTSweight=20, HMCweight=20, MALAweight=0,
burn=10000, HMCstepsize=0.1, HMCsteps=300,
maxIter=None, thin=10, i0=0, neff=100000,
writeHotChains=False, hotChain=False):
"""
Initialize MCMC quantities
@param maxIter: maximum number of iterations
@param Tmin: minimum temperature to use in temperature ladder
"""
# get maximum number of iteration
if maxIter is None and self.MPIrank > 0:
maxIter = 2 * Niter
elif maxIter is None and self.MPIrank == 0:
maxIter = Niter
self.ladder = ladder
self.covUpdate = covUpdate
self.SCAMweight = SCAMweight
self.AMweight = AMweight
self.DEweight = DEweight
self.burn = burn
self.Tskip = Tskip
self.thin = thin
self.isave = isave
self.Niter = Niter
self.neff = neff
self.tstart = 0
N = int(maxIter / thin)
self._lnprob = np.zeros(N)
self._lnlike = np.zeros(N)
self._chain = np.zeros((N, self.ndim))
self.naccepted = 0
self.swapProposed = 0
self.nswap_accepted = 0
# set up covariance matrix and DE buffers
# TODO: better way of allocating this to save memory
if self.MPIrank == 0:
self._AMbuffer = np.zeros((self.Niter, self.ndim))
self._DEbuffer = np.zeros((self.burn, self.ndim))
# ##### setup default jump proposal distributions ##### #
# Gradient-based jumps
if self.logl_grad is not None and self.logp_grad is not None:
# DOES MALA do anything with the burnin? (Not adaptive enabled yet)
malajump = MALAJump(self.logl_grad, self.logp_grad, self.cov,
self.burn)
self.addProposalToCycle(malajump, MALAweight)
if MALAweight > 0:
print("WARNING: MALA jumps are not working properly yet")
# Perhaps have an option to adaptively tune the mass matrix?
# Now that is done by default
hmcjump = HMCJump(self.logl_grad, self.logp_grad, self.cov,
self.burn, stepsize=HMCstepsize, nminsteps=2,
nmaxsteps=HMCsteps)
self.addProposalToCycle(hmcjump, HMCweight)
# Target acceptance rate (delta) should be optimal for 0.6
nutsjump = NUTSJump(self.logl_grad, self.logp_grad, self.cov,
self.burn, trajectoryDir=None, write_burnin=False,
force_trajlen=None, force_epsilon=None, delta=0.6)
self.addProposalToCycle(nutsjump, NUTSweight)
# add SCAM
self.addProposalToCycle(self.covarianceJumpProposalSCAM,
self.SCAMweight)
# add AM
self.addProposalToCycle(self.covarianceJumpProposalAM, self.AMweight)
# check length of jump cycle
if len(self.propCycle) == 0:
raise ValueError('No jump proposals specified!')
# randomize cycle
self.randomizeProposalCycle()
# setup default temperature ladder
if self.ladder is None:
self.ladder = self.temperatureLadder(Tmin, Tmax=Tmax)
# temperature for current chain
self.temp = self.ladder[self.MPIrank]
# hot chain sampling from prior
if hotChain and self.MPIrank == self.nchain-1:
self.temp = 1e80
self.fname = self.outDir + '/chain_hot.txt'
else:
self.fname = self.outDir + '/chain_{0}.txt'.format(self.temp)
# write hot chains
self.writeHotChains = writeHotChains
self.resumeLength = 0
if self.resume and os.path.isfile(self.fname):
if self.verbose:
print('Resuming run from chain file {0}'.format(self.fname))
try:
self.resumechain = np.loadtxt(self.fname)
self.resumeLength = self.resumechain.shape[0]
except ValueError:
print('WARNING: Cant read in file. Removing last line.')
os.system('sed -ie \'$d\' {0}'.format(self.fname))
self.resumechain = np.loadtxt(self.fname)
self.resumeLength = self.resumechain.shape[0]
self._chainfile = open(self.fname, 'a')
else:
self._chainfile = open(self.fname, 'w')
self._chainfile.close()
def updateChains(self, p0, lnlike0, lnprob0, iter):
"""
Update chains after jump proposals
"""
# update buffer
if self.MPIrank == 0:
self._AMbuffer[iter, :] = p0
# put results into arrays
if iter % self.thin == 0:
ind = int(iter / self.thin)
self._chain[ind, :] = p0
self._lnlike[ind] = lnlike0
self._lnprob[ind] = lnprob0
# write to file
if iter % self.isave == 0 and iter > 1 and iter > self.resumeLength:
if self.writeHotChains or self.MPIrank == 0:
self._writeToFile(iter)
# write output covariance matrix
np.save(self.outDir + '/cov.npy', self.cov)
if self.MPIrank == 0 and self.verbose and iter > 1:
sys.stdout.write('\r')
sys.stdout.write('Finished %2.2f percent in %f s Acceptance rate = %g'
% (iter / self.Niter * 100, time.time() - self.tstart,
self.naccepted / iter))
sys.stdout.flush()
def sample(self, p0, Niter, ladder=None, Tmin=1, Tmax=None, Tskip=100,
isave=1000, covUpdate=1000, SCAMweight=20,
AMweight=20, DEweight=20, NUTSweight=20, MALAweight=20,
HMCweight=20, burn=10000, HMCstepsize=0.1, HMCsteps=300,
maxIter=None, thin=10, i0=0, neff=100000,
writeHotChains=False, hotChain=False):
"""
Function to carry out PTMCMC sampling.
@param p0: Initial parameter vector
@param self.Niter: Number of iterations to use for T = 1 chain
@param ladder: User defined temperature ladder
@param Tmin: Minimum temperature in ladder (default=1)
@param Tmax: Maximum temperature in ladder (default=None)
@param Tskip: Number of steps between proposed temperature swaps (default=100)
@param isave: Number of iterations before writing to file (default=1000)
@param covUpdate: Number of iterations between AM covariance updates (default=1000)
@param SCAMweight: Weight of SCAM jumps in overall jump cycle (default=20)
@param AMweight: Weight of AM jumps in overall jump cycle (default=20)
@param DEweight: Weight of DE jumps in overall jump cycle (default=20)
@param NUTSweight: Weight of the NUTS jumps in jump cycle (default=20)
@param MALAweight: Weight of the MALA jumps in jump cycle (default=20)
@param HMCweight: Weight of the HMC jumps in jump cycle (default=20)
@param HMCstepsize: Step-size of the HMC jumps (default=0.1)
@param HMCsteps: Maximum number of steps in an HMC trajectory (default=300)
@param burn: Burn in time (DE jumps added after this iteration) (default=10000)
@param maxIter: Maximum number of iterations for high temperature chains
(default=2*self.Niter)
@param self.thin: Save every self.thin MCMC samples
@param i0: Iteration to start MCMC (if i0 !=0, do not re-initialize)
@param neff: Number of effective samples to collect before terminating
"""
# get maximum number of iteration
if maxIter is None and self.MPIrank > 0:
maxIter = 2 * Niter
elif maxIter is None and self.MPIrank == 0:
maxIter = Niter
# set up arrays to store lnprob, lnlike and chain
N = int(maxIter / thin)
# if picking up from previous run, don't re-initialize
if i0 == 0:
self.initialize(Niter, ladder=ladder, Tmin=Tmin, Tmax=Tmax,
Tskip=Tskip, isave=isave, covUpdate=covUpdate,
SCAMweight=SCAMweight,
AMweight=AMweight, DEweight=DEweight,
NUTSweight=NUTSweight, MALAweight=MALAweight,
HMCweight=HMCweight, burn=burn,
HMCstepsize=HMCstepsize, HMCsteps=HMCsteps,
maxIter=maxIter, thin=thin, i0=i0,
neff=neff, writeHotChains=writeHotChains,
hotChain=hotChain)
### compute lnprob for initial point in chain ###
# if resuming, just start with first point in chain
if self.resume and self.resumeLength > 0:
p0, lnlike0, lnprob0 = self.resumechain[0, :-4], \
self.resumechain[0, -3], self.resumechain[0, -4]
else:
# compute prior
lp = self.logp(p0)
if lp == float(-np.inf):
lnprob0 = -np.inf
lnlike0 = -np.inf
else:
lnlike0 = self.logl(p0)
lnprob0 = 1 / self.temp * lnlike0 + lp
# record first values
self.updateChains(p0, lnlike0, lnprob0, i0)
self.comm.barrier()
# start iterations
iter = i0
self.tstart = time.time()
runComplete = False
Neff = 0
while runComplete is False:
iter += 1
accepted = 0
# call PTMCMCOneStep
p0, lnlike0, lnprob0 = self.PTMCMCOneStep(
p0, lnlike0, lnprob0, iter)
# compute effective number of samples
# if iter % 1000 == 0 and iter > 2 * self.burn and self.MPIrank == 0:
# try:
# Neff = iter / \
# max(1, np.nanmax([acor.acor(self._AMbuffer[self.burn:(iter - 1), ii])[0]
# for ii in range(self.ndim)]))
# print('\n {0} effective samples'.format(Neff))
# Neff = iter / \
# max(1, np.nanmax([autocorr(self._AMbuffer[self.burn:(iter - 1), ii])[0]
# for ii in range(self.ndim)]))
# print('\n {0} effective samples'.format(Neff))
# except NameError:
# Neff = 0
# pass
# stop if reached maximum number of iterations
if self.MPIrank == 0 and iter >= self.Niter - 1:
if self.verbose:
print('\nRun Complete')
runComplete = True
# stop if reached effective number of samples
if self.MPIrank == 0 and int(Neff) > self.neff:
if self.verbose:
print('\nRun Complete with {0} effective samples'.format(int(Neff)))
runComplete = True
if self.MPIrank == 0 and runComplete:
for jj in range(1, self.nchain):
self.comm.send(runComplete, dest=jj, tag=55)
# check for other chains
if self.MPIrank > 0:
runComplete = self.comm.Iprobe(source=0, tag=55)
time.sleep(0.000001) # trick to get around processor using 100% cpu while waiting
def PTMCMCOneStep(self, p0, lnlike0, lnprob0, iter):
"""
Function to carry out PTMCMC sampling.
@param p0: Initial parameter vector
@param lnlike0: Initial log-likelihood value
@param lnprob0: Initial log probability value
@param iter: iteration number
@return p0: next value of parameter vector after one MCMC step
@return lnlike0: next value of likelihood after one MCMC step
@return lnprob0: next value of posterior after one MCMC step
"""
# update covariance matrix
if (iter - 1) % self.covUpdate == 0 and (iter - 1) != 0 and self.MPIrank == 0:
self._updateRecursive(iter - 1, self.covUpdate)
# broadcast to other chains
[self.comm.send(self.cov, dest=rank + 1, tag=111) for rank
in range(self.nchain - 1)]
# check for sent covariance matrix from T = 0 chain
getCovariance = self.comm.Iprobe(source=0, tag=111)
time.sleep(0.000001)
if getCovariance and self.MPIrank > 0:
self.cov[:,:] = self.comm.recv(source=0, tag=111)
for ct, group in enumerate(self.groups):
covgroup = np.zeros((len(group), len(group)))
for ii in range(len(group)):
for jj in range(len(group)):
covgroup[ii, jj] = self.cov[group[ii], group[jj]]
self.U[ct], self.S[ct], v = np.linalg.svd(covgroup)
getCovariance = 0
# update DE buffer
if (iter - 1) % self.burn == 0 and (iter - 1) != 0 and self.MPIrank == 0:
self._updateDEbuffer(iter - 1, self.burn)
# broadcast to other chains
[self.comm.send(self._DEbuffer, dest=rank + 1, tag=222) for rank
in range(self.nchain - 1)]
# check for sent DE buffer from T = 0 chain
getDEbuf = self.comm.Iprobe(source=0, tag=222)
time.sleep(0.000001)
if getDEbuf and self.MPIrank > 0:
self._DEbuffer = self.comm.recv(source=0, tag=222)
# randomize cycle
if self.DEJump not in self.propCycle:
self.addProposalToCycle(self.DEJump, self.DEweight)
self.randomizeProposalCycle()
# reset
getDEbuf = 0
# after burn in, add DE jumps
if (iter - 1) == self.burn and self.MPIrank == 0:
if self.verbose:
print('Adding DE jump with weight {0}'.format(self.DEweight))
self.addProposalToCycle(self.DEJump, self.DEweight)
# randomize cycle
self.randomizeProposalCycle()
### jump proposal ###
# if resuming, just use previous chain points
if self.resume and self.resumeLength > 0 and iter < self.resumeLength:
p0, lnlike0, lnprob0 = self.resumechain[iter, :-4], \
self.resumechain[iter, -3], self.resumechain[iter, -4]
# update acceptance counter
self.naccepted = iter * self.resumechain[iter, -2]
accepted = 1
else:
y, qxy, jump_name = self._jump(p0, iter)
self.jumpDict[jump_name][0] += 1
# compute prior and likelihood
lp = self.logp(y)
if lp == -np.inf:
newlnprob = -np.inf
else:
newlnlike = self.logl(y)
newlnprob = 1 / self.temp * newlnlike + lp
# hastings step
diff = newlnprob - lnprob0 + qxy
if diff > np.log(np.random.rand()):
# accept jump
p0, lnlike0, lnprob0 = y, newlnlike, newlnprob
# update acceptance counter
self.naccepted += 1
accepted = 1
self.jumpDict[jump_name][1] += 1
# temperature swap
swapReturn, p0, lnlike0, lnprob0 = self.PTswap(
p0, lnlike0, lnprob0, iter)
# check return value
if swapReturn != 0:
self.swapProposed += 1
if swapReturn == 2:
self.nswap_accepted += 1
self.updateChains(p0, lnlike0, lnprob0, iter)
return p0, lnlike0, lnprob0
def PTswap(self, p0, lnlike0, lnprob0, iter):
"""
Do parallel tempering swap.
@param p0: current parameter vector
@param lnlike0: current log-likelihood
@param lnprob0: current log posterior value
@param iter: current iteration number
@return swapReturn: 0 = no swap proposed,
1 = swap proposed and rejected,
2 = swap proposed and accepted
@return p0: new parameter vector
@return lnlike0: new log-likelihood
@return lnprob0: new log posterior value
"""
# initialize variables
readyToSwap = 0
swapAccepted = 0
swapProposed = 0
# if Tskip is reached, block until next chain in ladder is ready for
# swap proposal
if iter % self.Tskip == 0 and self.MPIrank < self.nchain - 1:
swapProposed = 1
# send current likelihood for swap proposal
self.comm.send(lnlike0, dest=self.MPIrank + 1, tag=18)
# determine if swap was accepted
swapAccepted = self.comm.recv(source=self.MPIrank + 1, tag=888)
# perform swap
if swapAccepted:
# exchange likelihood
lnlike0 = self.comm.recv(source=self.MPIrank + 1, tag=18)
# exchange parameters
pnew = np.empty(self.ndim)
self.comm.Sendrecv(p0, dest=self.MPIrank+1, sendtag=19,
recvbuf=pnew, source=self.MPIrank+1,
recvtag=19)
p0 = pnew
# calculate new posterior values
lnprob0 = 1 / self.temp * lnlike0 + self.logp(p0)
# check if next lowest temperature is ready to swap
elif self.MPIrank > 0:
readyToSwap = self.comm.Iprobe(source=self.MPIrank - 1, tag=18)
# trick to get around processor using 100% cpu while waiting
time.sleep(0.000001)
# hotter chain decides acceptance
if readyToSwap:
newlnlike = self.comm.recv(source=self.MPIrank - 1, tag=18)
# determine if swap is accepted and tell other chain
logChainSwap = (1 / self.ladder[self.MPIrank - 1] -
1 / self.ladder[self.MPIrank]) \
* (lnlike0 - newlnlike)
if logChainSwap > np.log(np.random.rand()):
swapAccepted = 1
else:
swapAccepted = 0
# send out result
self.comm.send(swapAccepted, dest=self.MPIrank - 1, tag=888)
# perform swap
if swapAccepted:
# exchange likelihood
self.comm.send(lnlike0, dest=self.MPIrank - 1, tag=18)
lnlike0 = newlnlike
# exchange parameters
pnew = np.empty(self.ndim)
self.comm.Sendrecv(p0, dest=self.MPIrank-1, sendtag=19,
recvbuf=pnew, source=self.MPIrank-1,
recvtag=19)
p0 = pnew
# calculate new posterior values
lnprob0 = 1 / self.temp * lnlike0 + self.logp(p0)
# Return values for colder chain: 0=nothing happened; 1=swap proposed,
# not accepted; 2=swap proposed & accepted
if swapProposed:
if swapAccepted:
swapReturn = 2
else:
swapReturn = 1
else:
swapReturn = 0
return swapReturn, p0, lnlike0, lnprob0
def temperatureLadder(self, Tmin, Tmax=None, tstep=None):
"""
Method to compute temperature ladder. At the moment this uses
a geometrically spaced temperature ladder with a temperature
spacing designed to give 25 % temperature swap acceptance rate.
"""
# TODO: make options to do other temperature ladders
if self.nchain > 1:
if tstep is None and Tmax is None:
tstep = 1 + np.sqrt(2 / self.ndim)
elif tstep is None and Tmax is not None:
tstep = np.exp(np.log(Tmax / Tmin) / (self.nchain - 1))
ladder = np.zeros(self.nchain)
for ii in range(self.nchain):
ladder[ii] = Tmin * tstep ** ii
else:
ladder = np.array([1])
return ladder
def _writeToFile(self, iter):
"""
Function to write chain file. File has ndim+4 columns:
the first ndim are parameter values, followed by the log-posterior (unweighted),
log-likelihood, acceptance rate, and parallel-tempering swap acceptance rate.
@param iter: Iteration of sampler
"""
self._chainfile = open(self.fname, 'a+')
for jj in range((iter - self.isave), iter, self.thin):
ind = int(jj / self.thin)
pt_acc = 1
if self.MPIrank < self.nchain - 1 and self.swapProposed != 0:
pt_acc = self.nswap_accepted / self.swapProposed
self._chainfile.write('\t'.join(['%22.22f' % (self._chain[ind, kk])
for kk in range(self.ndim)]))
self._chainfile.write('\t%f\t %f\t %f\t %f\t' % (self._lnprob[ind],
self._lnlike[ind],
self.naccepted /
iter, pt_acc))
self._chainfile.write('\n')
self._chainfile.close()
#### write jump statistics files ####
# only for T=1 chain
if self.MPIrank == 0:
# first write file containing jump names and jump rates
fout = open(self.outDir + '/jumps.txt', 'w')
njumps = len(self.propCycle)
ujumps = np.array(list(set(self.propCycle)))
for jump in ujumps:
fout.write('%s %4.2g\n' % (
jump.__name__,
np.sum(np.array(self.propCycle)==jump)/njumps))
fout.close()
# now write jump statistics for each jump proposal
for jump in self.jumpDict:
fout = open(self.outDir + '/' + jump + '_jump.txt', 'a+')
fout.write('%g\n' % (self.jumpDict[jump][1] / max(1, self.jumpDict[jump][0])))
fout.close()
# function to update covariance matrix for jump proposals
def _updateRecursive(self, iter, mem):
"""
Function to recursively update sample covariance matrix.
@param iter: Iteration of sampler
@param mem: Number of steps between updates
"""
it = iter - mem
ndim = self.ndim
if it == 0:
self.M2 = np.zeros((ndim, ndim))
self.mu = np.zeros(ndim)
for ii in range(mem):
diff =
|
np.zeros(ndim)
|
numpy.zeros
|
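# Illustrative aside: _updateRecursive above keeps a running mean (mu) and
# scatter matrix (M2) so the proposal covariance can be refreshed in blocks
# without storing every sample. A standalone Welford-style sketch of that idea
# (not the class method itself, whose bookkeeping differs slightly):
import numpy as np
def update_cov(samples, mu, M2, n0):
    """Fold a block of samples into running mean mu and scatter matrix M2."""
    n = n0
    for x in samples:
        n += 1
        delta = x - mu
        mu = mu + delta / n
        M2 = M2 + np.outer(delta, x - mu)
    return mu, M2, n
ndim_toy = 2
rng = np.random.default_rng(1)
data = rng.normal(size=(500, ndim_toy))
mu, M2, n = update_cov(data, np.zeros(ndim_toy), np.zeros((ndim_toy, ndim_toy)), 0)
assert np.allclose(M2 / (n - 1), np.cov(data, rowvar=False))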
#!/usr/bin/python
'''
<NAME>, <EMAIL>
2015/11/30
Unit tests for eval_segm.py.
'''
import numpy as np
import eval_segm as es
import unittest
class pixel_accuracy_UnitTests(unittest.TestCase):
'''
Wrong inputs
'''
def test1dInput(self):
mat = np.array([0])
self.assertRaises(IndexError, es.pixel_accuracy, mat, mat)
def testDiffDim(self):
mat0 = np.array([[0,0], [0,0]])
mat1 = np.array([[0,0,0], [0,0,0]])
self.assertRaisesRegexp(es.EvalSegErr, "DiffDim", es.pixel_accuracy, mat0, mat1)
'''
Correct inputs
'''
def testOneClass(self):
segm = np.array([[0,0], [0,0]])
gt = np.array([[0,0], [0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, 1.0)
def testTwoClasses0(self):
segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])
gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, 0)
def testTwoClasses1(self):
segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (9.0)/(10.0))
def testTwoClasses2(self):
segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (9.0+0.0)/(9.0+1.0))
def testThreeClasses0(self):
segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,2,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (8.0+0.0+0.0)/(8.0+1.0+1.0))
def testThreeClasses1(self):
segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (8.0+0.0)/(9.0+1.0))
def testFourClasses0(self):
segm = np.array([[0,2,3,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (7.0+0.0)/(9.0+1.0))
def testFourClasses1(self):
segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (7.0+1.0)/(9.0+1.0))
def testFiveClasses0(self):
segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])
gt = np.array([[1,0,3,0,0], [0,0,0,0,0]])
res = es.pixel_accuracy(segm, gt)
self.assertEqual(res, (5.0+1.0+1.0)/(8.0+1.0+1.0))
class mean_accuracy_UnitTests(unittest.TestCase):
'''
Wrong inputs
'''
def test1dInput(self):
mat = np.array([0])
self.assertRaises(IndexError, es.mean_accuracy, mat, mat)
def testDiffDim(self):
mat0 = np.array([[0,0], [0,0]])
mat1 = np.array([[0,0,0], [0,0,0]])
self.assertRaisesRegexp(es.EvalSegErr, "DiffDim", es.mean_accuracy, mat0, mat1)
'''
Correct inputs
'''
def testOneClass(self):
segm = np.array([[0,0], [0,0]])
gt = np.array([[0,0], [0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, 1.0)
def testTwoClasses0(self):
segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])
gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, 0)
def testTwoClasses1(self):
segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, 9.0/10.0)
def testTwoClasses2(self):
segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, np.mean([9.0/9.0, 0.0/1.0]))
def testThreeClasses0(self):
segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,2,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, np.mean([8.0/8.0, 0.0/1.0, 0.0/1.0]))
def testThreeClasses1(self):
segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, np.mean([8.0/9.0, 0.0/1.0]))
def testFourClasses0(self):
segm = np.array([[0,2,3,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, np.mean([7.0/9.0, 0.0/1.0]))
def testFourClasses1(self):
segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, np.mean([7.0/9.0, 1.0/1.0]))
def testFiveClasses0(self):
segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])
gt = np.array([[1,0,3,0,0], [0,0,0,0,0]])
res = es.mean_accuracy(segm, gt)
self.assertEqual(res, np.mean([5.0/8.0, 1.0, 1.0]))
class mean_IU_UnitTests(unittest.TestCase):
'''
Wrong inputs
'''
def test1dInput(self):
mat = np.array([0])
self.assertRaises(IndexError, es.mean_IU, mat, mat)
def testDiffDim(self):
mat0 = np.array([[0,0], [0,0]])
mat1 = np.array([[0,0,0], [0,0,0]])
self.assertRaisesRegexp(es.EvalSegErr, "DiffDim", es.mean_IU, mat0, mat1)
'''
Correct inputs
'''
def testOneClass(self):
segm = np.array([[0,0], [0,0]])
gt = np.array([[0,0], [0,0]])
res = es.mean_IU(segm, gt)
self.assertEqual(res, 1.0)
def testTwoClasses0(self):
segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])
gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])
res = es.mean_IU(segm, gt)
self.assertEqual(res, 0)
def testTwoClasses1(self):
segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[0,0,0,0,0], [0,0,0,0,0]])
res = es.mean_IU(segm, gt)
self.assertEqual(res, np.mean([0.9]))
def testTwoClasses2(self):
segm = np.array([[0,0,0,0,0], [0,0,0,0,0]])
gt = np.array([[1,0,0,0,0], [0,0,0,0,0]])
res = es.mean_IU(segm, gt)
self.assertEqual(res, np.mean([0.9, 0]))
def testThreeClasses0(self):
segm =
|
np.array([[0,0,0,0,0], [0,0,0,0,0]])
|
numpy.array
|
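# Illustrative aside: the expected values in the tests above are hand-computed
# from the usual segmentation metrics. A minimal reference sketch of pixel
# accuracy (correct pixels of classes present in the ground truth over all
# ground-truth pixels), assuming 2-D integer label arrays:
import numpy as np
def pixel_accuracy_ref(segm, gt):
    classes = np.unique(gt)
    correct = sum(np.sum((segm == c) & (gt == c)) for c in classes)
    total = sum(np.sum(gt == c) for c in classes)
    return correct / total
segm_toy = np.array([[1, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
gt_toy = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
assert pixel_accuracy_ref(segm_toy, gt_toy) == 9.0 / 10.0  # matches testTwoClasses1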
# Copyright 2019, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology
# Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export
# licenses, or other export authority as may be required before exporting
# such information to foreign countries or providing access to foreign
# persons.
"""
==============
test_subset.py
==============
Test the subsetter functionality.
"""
import json
import operator
import os
import shutil
import tempfile
import unittest
from os import listdir
from os.path import dirname, join, realpath, isfile, basename
import geopandas as gpd
import importlib_metadata
import netCDF4 as nc
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from jsonschema import validate
from shapely.geometry import Point
from podaac.subsetter import subset
from podaac.subsetter.subset import SERVICE_NAME
from podaac.subsetter import xarray_enhancements as xre
class TestSubsetter(unittest.TestCase):
"""
Unit tests for the L2 subsetter. These tests are all related to the
subsetting functionality itself, and should provide coverage on the
following files:
- podaac.subsetter.subset.py
- podaac.subsetter.xarray_enhancements.py
"""
@classmethod
def setUpClass(cls):
cls.test_dir = dirname(realpath(__file__))
cls.test_data_dir = join(cls.test_dir, 'data')
cls.subset_output_dir = tempfile.mkdtemp(dir=cls.test_data_dir)
cls.test_files = [f for f in listdir(cls.test_data_dir)
if isfile(join(cls.test_data_dir, f)) and f.endswith(".nc")]
cls.history_json_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://harmony.earthdata.nasa.gov/history.schema.json",
"title": "Data Processing History",
"description": "A history record of processing that produced a given data file. For more information, see: https://wiki.earthdata.nasa.gov/display/TRT/In-File+Provenance+Metadata+-+TRT-42",
"type": ["array", "object"],
"items": {"$ref": "#/definitions/history_record"},
"definitions": {
"history_record": {
"type": "object",
"properties": {
"date_time": {
"description": "A Date/Time stamp in ISO-8601 format, including time-zone, GMT (or Z) preferred",
"type": "string",
"format": "date-time"
},
"derived_from": {
"description": "List of source data files used in the creation of this data file",
"type": ["array", "string"],
"items": {"type": "string"}
},
"program": {
"description": "The name of the program which generated this data file",
"type": "string"
},
"version": {
"description": "The version identification of the program which generated this data file",
"type": "string"
},
"parameters": {
"description": "The list of parameters to the program when generating this data file",
"type": ["array", "string"],
"items": {"type": "string"}
},
"program_ref": {
"description": "A URL reference that defines the program, e.g., a UMM-S reference URL",
"type": "string"
},
"$schema": {
"description": "The URL to this schema",
"type": "string"
}
},
"required": ["date_time", "program"],
"additionalProperties": False
}
}
}
@classmethod
def tearDownClass(cls):
# Remove the temporary directories used to house subset data
shutil.rmtree(cls.subset_output_dir)
def test_subset_variables(self):
"""
Test that all variables present in the original NetCDF file
are present after the subset takes place, and with the same
attributes.
"""
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
in_ds = xr.open_dataset(join(self.test_data_dir, file),
decode_times=False,
decode_coords=False)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False)
for in_var, out_var in zip(in_ds.data_vars.items(), out_ds.data_vars.items()):
# compare names
assert in_var[0] == out_var[0]
# compare attributes
np.testing.assert_equal(in_var[1].attrs, out_var[1].attrs)
# compare type and dimension names
assert in_var[1].dtype == out_var[1].dtype
assert in_var[1].dims == out_var[1].dims
in_ds.close()
out_ds.close()
def test_subset_bbox(self):
"""
Test that all data present is within the bounding box given,
and that the correct bounding box is used. This test assumes
that the scanline *is* being cut.
"""
# pylint: disable=too-many-locals
bbox = np.array(((-180, 90), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
out_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
lat_var_name, lon_var_name = subset.get_coord_variable_names(out_ds)
lat_var_name = lat_var_name[0]
lon_var_name = lon_var_name[0]
lon_bounds, lat_bounds = subset.convert_bbox(bbox, out_ds, lat_var_name, lon_var_name)
lats = out_ds[lat_var_name].values
lons = out_ds[lon_var_name].values
np.warnings.filterwarnings('ignore')
# Step 1: Get mask of values which aren't in the bounds.
# For lon spatial condition, need to consider the
# lon_min > lon_max case. If that's the case, should do
# an 'or' instead.
oper = operator.and_ if lon_bounds[0] < lon_bounds[1] else operator.or_
# In these two masks, True == valid and False == invalid
lat_truth = np.ma.masked_where((lats >= lat_bounds[0])
& (lats <= lat_bounds[1]), lats).mask
lon_truth = np.ma.masked_where(oper((lons >= lon_bounds[0]),
(lons <= lon_bounds[1])), lons).mask
# combine masks
spatial_mask = np.bitwise_and(lat_truth, lon_truth)
# Create a mask which represents the valid matrix bounds of
# the spatial mask. This is used in the case where a var
# has no _FillValue.
if lon_truth.ndim == 1:
bound_mask = spatial_mask
else:
rows = np.any(spatial_mask, axis=1)
cols = np.any(spatial_mask, axis=0)
bound_mask = np.array([[r & c for c in cols] for r in rows])
# If all the lat/lon values are valid, the file is valid and
# there is no need to check individual variables.
if np.all(spatial_mask):
continue
# Step 2: Get mask of values which are NaN or "_FillValue in
# each variable.
for _, var in out_ds.data_vars.items():
# remove dimension of '1' if necessary
vals = np.squeeze(var.values)
# Get the Fill Value
fill_value = var.attrs.get('_FillValue')
# If _FillValue isn't provided, check that all values
# are in the valid matrix bounds go to the next variable
if fill_value is None:
combined_mask = np.ma.mask_or(spatial_mask, bound_mask)
np.testing.assert_equal(bound_mask, combined_mask)
continue
# If the shapes of this var doesn't match the mask,
# reshape the var so the comparison can be made. Take
# the first index of the unknown dims. This makes
# assumptions about the ordering of the dimensions.
if vals.shape != out_ds[lat_var_name].shape and vals.shape:
slice_list = []
for dim in var.dims:
if dim in out_ds[lat_var_name].dims:
slice_list.append(slice(None))
else:
slice_list.append(slice(0, 1))
vals = np.squeeze(vals[tuple(slice_list)])
# In this mask, False == NaN and True = valid
var_mask = np.invert(np.ma.masked_invalid(vals).mask)
fill_mask = np.invert(np.ma.masked_values(vals, fill_value).mask)
var_mask = np.bitwise_and(var_mask, fill_mask)
# Step 3: Combine the spatial and var mask with 'or'
combined_mask = np.ma.mask_or(var_mask, spatial_mask)
# Step 4: compare the newly combined mask and the
# spatial mask created from the lat/lon masks. They
# should be equal, because the 'or' of the two masks
# where out-of-bounds values are 'False' will leave
# those values assuming there are only NaN values
# in the data at those locations.
np.testing.assert_equal(spatial_mask, combined_mask)
out_ds.close()
@pytest.mark.skip(reason="This is being tested currently. Temporarily skipped.")
def test_subset_no_bbox(self):
"""
Test that the subsetted file is identical to the given file
when a 'full' bounding box is given.
"""
bbox = np.array(((-180, 180), (-90, 90)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
# pylint: disable=no-member
in_nc = nc.Dataset(join(self.test_data_dir, file), 'r')
out_nc = nc.Dataset(join(self.subset_output_dir, output_file), 'r')
# Make sure the output dimensions match the input
# dimensions, which means the full file was returned.
for name, dimension in in_nc.dimensions.items():
assert dimension.size == out_nc.dimensions[name].size
in_nc.close()
out_nc.close()
def test_subset_empty_bbox(self):
"""
Test that an empty file is returned when the bounding box
contains no data.
"""
bbox = np.array(((120, 125), (-90, -85)))
for file in self.test_files:
output_file = "{}_{}".format(self._testMethodName, file)
subset.subset(
file_to_subset=join(self.test_data_dir, file),
bbox=bbox,
output_file=join(self.subset_output_dir, output_file)
)
empty_dataset = xr.open_dataset(
join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False
)
# Ensure all variables are present but empty.
for variable_name, variable in empty_dataset.data_vars.items():
assert not variable.data
def test_bbox_conversion(self):
"""
Test that the bounding box conversion returns expected
results. Expected results are hand-calculated.
"""
ds_180 = xr.open_dataset(join(self.test_data_dir,
"MODIS_A-JPL-L2P-v2014.0.nc"),
decode_times=False,
decode_coords=False)
ds_360 = xr.open_dataset(join(
self.test_data_dir,
"ascat_20150702_084200_metopa_45145_eps_o_250_2300_ovw.l2.nc"),
decode_times=False,
decode_coords=False)
# Elements in each tuple are:
# ds type, lon_range, expected_result
test_bboxes = [
(ds_180, (-180, 180), (-180, 180)),
(ds_360, (-180, 180), (0, 360)),
(ds_180, (-180, 0), (-180, 0)),
(ds_360, (-180, 0), (180, 360)),
(ds_180, (-80, 80), (-80, 80)),
(ds_360, (-80, 80), (280, 80)),
(ds_180, (0, 180), (0, 180)),
(ds_360, (0, 180), (0, 180)),
(ds_180, (80, -80), (80, -80)),
(ds_360, (80, -80), (80, 280)),
(ds_180, (-80, -80), (-180, 180)),
(ds_360, (-80, -80), (0, 360))
]
lat_var = 'lat'
lon_var = 'lon'
for test_bbox in test_bboxes:
dataset = test_bbox[0]
lon_range = test_bbox[1]
expected_result = test_bbox[2]
actual_result, _ = subset.convert_bbox(np.array([lon_range, [0, 0]]),
dataset, lat_var, lon_var)
np.testing.assert_equal(actual_result, expected_result)
def compare_java(self, java_files, cut):
"""
Run the L2 subsetter and compare the result to the equivalent
legacy (Java) subsetter result.
Parameters
----------
java_files : list of strings
List of paths to each subsetted Java file.
cut : boolean
True if the subsetter should return compact.
"""
bbox_map = [("ascat_20150702_084200", ((-180, 0), (-90, 0))),
("ascat_20150702_102400", ((-180, 0), (-90, 0))),
("MODIS_A-JPL", ((65.8, 86.35), (40.1, 50.15))),
("MODIS_T-JPL", ((-78.7, -60.7), (-54.8, -44))),
("VIIRS", ((-172.3, -126.95), (62.3, 70.65))),
("AMSR2-L2B_v08_r38622", ((-180, 0), (-90, 0)))]
for file_str, bbox in bbox_map:
java_file = [file for file in java_files if file_str in file][0]
test_file = [file for file in self.test_files if file_str in file][0]
output_file = "{}_{}".format(self._testMethodName, test_file)
subset.subset(
file_to_subset=join(self.test_data_dir, test_file),
bbox=np.array(bbox),
output_file=join(self.subset_output_dir, output_file),
cut=cut
)
j_ds = xr.open_dataset(join(self.test_data_dir, java_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
py_ds = xr.open_dataset(join(self.subset_output_dir, output_file),
decode_times=False,
decode_coords=False,
mask_and_scale=False)
for var_name, var in j_ds.data_vars.items():
# Compare shape
np.testing.assert_equal(var.shape, py_ds[var_name].shape)
# Compare meta
|
np.testing.assert_equal(var.attrs, py_ds[var_name].attrs)
|
numpy.testing.assert_equal
|
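# Illustrative aside: np.testing.assert_equal, as used above, recurses into
# dicts/tuples and treats NaNs in matching positions as equal, which makes it
# convenient for comparing variable shapes and attribute dicts. Minimal sketch:
import numpy as np
attrs_a = {'units': 'K', '_FillValue': np.nan, 'valid_range': [0, 400]}
attrs_b = {'units': 'K', '_FillValue': np.nan, 'valid_range': [0, 400]}
np.testing.assert_equal(attrs_a, attrs_b)  # passes; NaNs compare equal here
np.testing.assert_equal((10, 20), np.zeros((10, 20)).shape)  # shape comparison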
"""
Copyright (c) 2016, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
The code in this file (lime_base.py) is modified from https://github.com/marcotcr/lime.
"""
import numpy as np
import scipy as sp
import tqdm
import copy
from functools import partial
import paddlex.utils.logging as logging
class LimeBase(object):
"""Class for learning a locally linear sparse model from perturbed data"""
def __init__(self,
kernel_fn,
verbose=False,
random_state=None):
"""Init function
Args:
kernel_fn: function that transforms an array of distances into an
array of proximity values (floats).
verbose: if true, print local prediction values from linear model.
random_state: an integer or numpy.RandomState that will be used to
generate random numbers. If None, the random state will be
initialized using the internal numpy seed.
"""
from sklearn.utils import check_random_state
self.kernel_fn = kernel_fn
self.verbose = verbose
self.random_state = check_random_state(random_state)
@staticmethod
def generate_lars_path(weighted_data, weighted_labels):
"""Generates the lars path for weighted data.
Args:
weighted_data: data that has been weighted by kernel
weighted_labels: labels, weighted by kernel
Returns:
(alphas, coefs), both are arrays corresponding to the
regularization parameter and coefficients, respectively
"""
from sklearn.linear_model import lars_path
x_vector = weighted_data
alphas, _, coefs = lars_path(x_vector,
weighted_labels,
method='lasso',
verbose=False)
return alphas, coefs
def forward_selection(self, data, labels, weights, num_features):
"""Iteratively adds features to the model"""
from sklearn.linear_model import Ridge
clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)
used_features = []
for _ in range(min(num_features, data.shape[1])):
max_ = -100000000
best = 0
for feature in range(data.shape[1]):
if feature in used_features:
continue
clf.fit(data[:, used_features + [feature]], labels,
sample_weight=weights)
score = clf.score(data[:, used_features + [feature]],
labels,
sample_weight=weights)
if score > max_:
best = feature
max_ = score
used_features.append(best)
return np.array(used_features)
def feature_selection(self, data, labels, weights, num_features, method):
"""Selects features for the model. see interpret_instance_with_data to
understand the parameters."""
from sklearn.linear_model import Ridge
if method == 'none':
return np.array(range(data.shape[1]))
elif method == 'forward_selection':
return self.forward_selection(data, labels, weights, num_features)
elif method == 'highest_weights':
clf = Ridge(alpha=0.01, fit_intercept=True,
random_state=self.random_state)
clf.fit(data, labels, sample_weight=weights)
coef = clf.coef_
if sp.sparse.issparse(data):
coef = sp.sparse.csr_matrix(clf.coef_)
weighted_data = coef.multiply(data[0])
# Note: most efficient to slice the data before reversing
sdata = len(weighted_data.data)
argsort_data = np.abs(weighted_data.data).argsort()
# Edge case where data is more sparse than requested number of feature importances
# In that case, we just pad with zero-valued features
if sdata < num_features:
nnz_indexes = argsort_data[::-1]
indices = weighted_data.indices[nnz_indexes]
num_to_pad = num_features - sdata
indices = np.concatenate((indices, np.zeros(num_to_pad, dtype=indices.dtype)))
indices_set = set(indices)
pad_counter = 0
for i in range(data.shape[1]):
if i not in indices_set:
indices[pad_counter + sdata] = i
pad_counter += 1
if pad_counter >= num_to_pad:
break
else:
nnz_indexes = argsort_data[sdata - num_features:sdata][::-1]
indices = weighted_data.indices[nnz_indexes]
return indices
else:
weighted_data = coef * data[0]
feature_weights = sorted(
zip(range(data.shape[1]), weighted_data),
key=lambda x: np.abs(x[1]),
reverse=True)
return np.array([x[0] for x in feature_weights[:num_features]])
elif method == 'lasso_path':
weighted_data = ((data - np.average(data, axis=0, weights=weights))
* np.sqrt(weights[:, np.newaxis]))
weighted_labels = ((labels - np.average(labels, weights=weights))
* np.sqrt(weights))
nonzero = range(weighted_data.shape[1])
_, coefs = self.generate_lars_path(weighted_data,
weighted_labels)
for i in range(len(coefs.T) - 1, 0, -1):
nonzero = coefs.T[i].nonzero()[0]
if len(nonzero) <= num_features:
break
used_features = nonzero
return used_features
elif method == 'auto':
if num_features <= 6:
n_method = 'forward_selection'
else:
n_method = 'highest_weights'
return self.feature_selection(data, labels, weights,
num_features, n_method)
def interpret_instance_with_data(self,
neighborhood_data,
neighborhood_labels,
distances,
label,
num_features,
feature_selection='auto',
model_regressor=None):
"""Takes perturbed data, labels and distances, returns interpretation.
Args:
neighborhood_data: perturbed data, 2d array. first element is
assumed to be the original data point.
neighborhood_labels: corresponding perturbed labels. should have as
many columns as the number of possible labels.
distances: distances to original data point.
label: label for which we want an interpretation
num_features: maximum number of features in interpretation
feature_selection: how to select num_features. options are:
'forward_selection': iteratively add features to the model.
This is costly when num_features is high
'highest_weights': selects the features that have the highest
product of absolute weight * original data point when
learning with all the features
'lasso_path': chooses features based on the lasso
regularization path
'none': uses all features, ignores num_features
'auto': uses forward_selection if num_features <= 6, and
'highest_weights' otherwise.
model_regressor: sklearn regressor to use in interpretation.
Defaults to Ridge regression if None. Must have
model_regressor.coef_ and 'sample_weight' as a parameter
to model_regressor.fit()
Returns:
(intercept, exp, score, local_pred):
intercept is a float.
exp is a sorted list of tuples, where each tuple (x,y) corresponds
to the feature id (x) and the local weight (y). The list is sorted
by decreasing absolute value of y.
score is the R^2 value of the returned interpretation
local_pred is the prediction of the interpretation model on the original instance
"""
from sklearn.linear_model import Ridge
weights = self.kernel_fn(distances)
labels_column = neighborhood_labels[:, label]
used_features = self.feature_selection(neighborhood_data,
labels_column,
weights,
num_features,
feature_selection)
if model_regressor is None:
model_regressor = Ridge(alpha=1, fit_intercept=True,
random_state=self.random_state)
easy_model = model_regressor
easy_model.fit(neighborhood_data[:, used_features],
labels_column, sample_weight=weights)
prediction_score = easy_model.score(
neighborhood_data[:, used_features],
labels_column, sample_weight=weights)
local_pred = easy_model.predict(neighborhood_data[0, used_features].reshape(1, -1))
if self.verbose:
logging.info('Intercept' + str(easy_model.intercept_))
logging.info('Prediction_local' + str(local_pred))
logging.info('Right:' + str(neighborhood_labels[0, label]))
return (easy_model.intercept_,
sorted(zip(used_features, easy_model.coef_),
key=lambda x: np.abs(x[1]), reverse=True),
prediction_score, local_pred)
class ImageInterpretation(object):
def __init__(self, image, segments):
"""Init function.
Args:
image: 3d numpy array
segments: 2d numpy array, with the output from skimage.segmentation
"""
self.image = image
self.segments = segments
self.intercept = {}
self.local_weights = {}
self.local_pred = None
def get_image_and_mask(self, label, positive_only=True, negative_only=False, hide_rest=False,
num_features=5, min_weight=0.):
"""Init function.
Args:
label: label to interpret
positive_only: if True, only take superpixels that positively contribute to
the prediction of the label.
negative_only: if True, only take superpixels that negatively contribute to
the prediction of the label. If false, and so is positive_only, then both
negativey and positively contributions will be taken.
Both can't be True at the same time
hide_rest: if True, make the non-interpretation part of the return
image gray
num_features: number of superpixels to include in interpretation
min_weight: minimum weight of the superpixels to include in interpretation
Returns:
(image, mask), where image is a 3d numpy array and mask is a 2d
numpy array that can be used with
skimage.segmentation.mark_boundaries
"""
if label not in self.local_weights:
raise KeyError('Label not in interpretation')
if positive_only & negative_only:
raise ValueError("Positive_only and negative_only cannot be true at the same time.")
segments = self.segments
image = self.image
local_weights_label = self.local_weights[label]
mask = np.zeros(segments.shape, segments.dtype)
if hide_rest:
temp = np.zeros(self.image.shape)
else:
temp = self.image.copy()
if positive_only:
fs = [x[0] for x in local_weights_label
if x[1] > 0 and x[1] > min_weight][:num_features]
if negative_only:
fs = [x[0] for x in local_weights_label
if x[1] < 0 and abs(x[1]) > min_weight][:num_features]
if positive_only or negative_only:
for f in fs:
temp[segments == f] = image[segments == f].copy()
mask[segments == f] = 1
return temp, mask
else:
for f, w in local_weights_label[:num_features]:
if np.abs(w) < min_weight:
continue
c = 0 if w < 0 else 1
mask[segments == f] = -1 if w < 0 else 1
temp[segments == f] = image[segments == f].copy()
temp[segments == f, c] = np.max(image)
return temp, mask
def get_rendered_image(self, label, min_weight=0.005):
"""
Args:
label: label to interpret
min_weight:
Returns:
image, is a 3d numpy array
"""
if label not in self.local_weights:
raise KeyError('Label not in interpretation')
from matplotlib import cm
segments = self.segments
image = self.image
local_weights_label = self.local_weights[label]
temp = np.zeros_like(image)
weight_max = abs(local_weights_label[0][1])
local_weights_label = [(f, w/weight_max) for f, w in local_weights_label]
local_weights_label = sorted(local_weights_label, key=lambda x: x[1], reverse=True)  # negative weights come last
cmaps = cm.get_cmap('Spectral')
colors = cmaps(np.linspace(0, 1, len(local_weights_label)))
colors = colors[:, :3]
for i, (f, w) in enumerate(local_weights_label):
if np.abs(w) < min_weight:
continue
temp[segments == f] = image[segments == f].copy()
temp[segments == f] = colors[i] * 255
return temp
class LimeImageInterpreter(object):
"""Interpres predictions on Image (i.e. matrix) data.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to the
means and stds in the training data. For categorical features, perturb by
sampling according to the training distribution, and making a binary
feature that is 1 when the value is the same as the instance being
interpreted."""
def __init__(self, kernel_width=.25, kernel=None, verbose=False,
feature_selection='auto', random_state=None):
"""Init function.
Args:
kernel_width: kernel width for the exponential kernel.
If None, defaults to sqrt(number of columns) * 0.75.
kernel: similarity kernel that takes euclidean distances and kernel
width as input and outputs weights in (0,1). If None, defaults to
an exponential kernel.
verbose: if true, print local prediction values from linear model
feature_selection: feature selection method. Can be
'forward_selection', 'lasso_path', 'none' or 'auto'.
See function 'interpret_instance_with_data' in lime_base.py for
details on what each of the options does.
random_state: an integer or numpy.RandomState that will be used to
generate random numbers. If None, the random state will be
initialized using the internal numpy seed.
"""
from sklearn.utils import check_random_state
kernel_width = float(kernel_width)
if kernel is None:
def kernel(d, kernel_width):
return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
kernel_fn = partial(kernel, kernel_width=kernel_width)
self.random_state = check_random_state(random_state)
self.feature_selection = feature_selection
self.base = LimeBase(kernel_fn, verbose, random_state=self.random_state)
def interpret_instance(self, image, classifier_fn, labels=(1,),
hide_color=None,
num_features=100000, num_samples=1000,
batch_size=10,
distance_metric='cosine',
model_regressor=None
):
"""Generates interpretations for a prediction.
First, we generate neighborhood data by randomly perturbing features
from the instance (see __data_inverse). We then learn locally weighted
linear models on this neighborhood data to interpret each of the classes
in an interpretable way (see lime_base.py).
Args:
image: 3 dimension RGB image. If this is only two dimensional,
we will assume it's a grayscale image and call gray2rgb.
classifier_fn: classifier prediction probability function, which
takes a numpy array and outputs prediction probabilities. For
ScikitClassifiers, this is classifier.predict_proba.
labels: iterable with labels to be interpreted.
hide_color: TODO
num_features: maximum number of features present in interpretation
num_samples: size of the neighborhood to learn the linear model
batch_size: TODO
distance_metric: the distance metric to use for weights.
model_regressor: sklearn regressor to use in interpretation. Defaults
to Ridge regression in LimeBase. Must have model_regressor.coef_
and 'sample_weight' as a parameter to model_regressor.fit()
Returns:
An ImageInterpretation object (see lime_image.py) with the corresponding
interpretations.
"""
import sklearn
from skimage.measure import regionprops
from skimage.segmentation import quickshift
from skimage.color import gray2rgb
if len(image.shape) == 2:
image = gray2rgb(image)
try:
segments = quickshift(image, sigma=1)
except ValueError as e:
raise e
self.segments = segments
fudged_image = image.copy()
if hide_color is None:
# if no hide_color, use the mean
for x in np.unique(segments):
mx = np.mean(image[segments == x], axis=0)
fudged_image[segments == x] = mx
elif hide_color == 'avg_from_neighbor':
from scipy.spatial.distance import cdist
n_features = np.unique(segments).shape[0]
regions = regionprops(segments + 1)
centroids = np.zeros((n_features, 2))
for i, x in enumerate(regions):
centroids[i] = np.array(x.centroid)
d = cdist(centroids, centroids, 'sqeuclidean')
for x in np.unique(segments):
a = [image[segments == i] for i in np.argsort(d[x])[1:6]]
mx = np.mean(np.concatenate(a), axis=0)
fudged_image[segments == x] = mx
else:
fudged_image[:] = 0
top = labels
data, labels = self.data_labels(image, fudged_image, segments,
classifier_fn, num_samples,
batch_size=batch_size)
distances = sklearn.metrics.pairwise_distances(
data,
data[0].reshape(1, -1),
metric=distance_metric
).ravel()
interpretation_image = ImageInterpretation(image, segments)
for label in top:
(interpretation_image.intercept[label],
interpretation_image.local_weights[label],
interpretation_image.score, interpretation_image.local_pred) = self.base.interpret_instance_with_data(
data, labels, distances, label, num_features,
model_regressor=model_regressor,
feature_selection=self.feature_selection)
return interpretation_image
def data_labels(self,
image,
fudged_image,
segments,
classifier_fn,
num_samples,
batch_size=10):
"""Generates images and predictions in the neighborhood of this image.
Args:
image: 3d numpy array, the image
fudged_image: 3d numpy array, image to replace original image when
superpixel is turned off
segments: segmentation of the image
classifier_fn: function that takes a list of images and returns a
matrix of prediction probabilities
num_samples: size of the neighborhood to learn the linear model
batch_size: classifier_fn will be called on batches of this size.
Returns:
A tuple (data, labels), where:
data: dense num_samples * num_superpixels
labels: prediction probabilities matrix
"""
n_features = np.unique(segments).shape[0]
data = self.random_state.randint(0, 2, num_samples * n_features) \
.reshape((num_samples, n_features))
labels = []
data[0, :] = 1
imgs = []
for row in tqdm.tqdm(data):
temp = copy.deepcopy(image)
zeros = np.where(row == 0)[0]
mask = np.zeros(segments.shape).astype(bool)
for z in zeros:
mask[segments == z] = True
temp[mask] = fudged_image[mask]
imgs.append(temp)
if len(imgs) == batch_size:
preds = classifier_fn(np.array(imgs))
labels.extend(preds)
imgs = []
if len(imgs) > 0:
preds = classifier_fn(np.array(imgs))
labels.extend(preds)
return data,
|
np.array(labels)
|
numpy.array
|
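# --- Editor's illustrative sketch (not part of the original record) ---
# The completion above relies on numpy.array to turn the list of per-batch
# prediction rows accumulated in data_labels into a dense
# (num_samples, num_classes) matrix. A minimal, self-contained sketch of that
# batching pattern; fake_classifier_fn is a made-up stand-in for classifier_fn.
import numpy as np

def fake_classifier_fn(batch):
    # uniform probabilities over 3 classes, one row per input sample
    return np.full((len(batch), 3), 1.0 / 3.0)

labels = []
for start in range(0, 10, 4):                   # batches of (at most) 4 samples
    batch = np.zeros((min(4, 10 - start), 5))   # placeholder perturbed inputs
    labels.extend(fake_classifier_fn(batch))
labels = np.array(labels)                       # shape: (10, 3)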
""" This module implements the RL simulation environment
"""
import gym
from gym.utils import seeding
import numpy as np
import scipy.integrate as integrate
import sys
import os
from ..utils.wind import Wind
from parameters import params_environment, params_triangle_soaring
from hierarchical_policy.decision_maker import params_decision_maker
from hierarchical_policy.updraft_exploiter import params_updraft_exploiter
sys.path.append(os.path.join("..", "..", ".."))
class GliderEnv3D(gym.Env):
""" Class which implements an OpenAI gym environment for simulating the glider
Attributes
----------
_params_glider : GliderParameters
Mass and aerodynamic parameters
_params_physics : PhysicsParameters
Gravity constant and air density
_params_sim : SimulationParameters
Simulation time and ODE-solver
_params_wind : WindParameters
Updraft model parameters
_params_task : TaskParameters
Triangle soaring task parameters
_params_agent : AgentParameters
Parameters for updraft exploiter or decision maker
agent : str
Chooses environment for updraft exploiter or decision maker
_wind_fun : Wind
Wind function
_integrator : scipy.integrate
Integrator from scipy package or self written Euler integrator
lb: ndarray
Lower bound for control command
ub: ndarray
Upper bound for control command
state: ndarray
Vehicle state [NED-position,NED-velocity]
time: float
Simulation time
control: ndarray
Control command
active_vertex: int
Current target vertex
vertex_counter: int
Number of hit vertices
lap_counter: int
Number of completed laps
viewer: TODO: Is this variable ever set or used anywhere?
...
np_random: object
Numpy random number generator
"""
def __init__(self, agent='vertex_tracker'):  # what should the default be once the vertex tracker is removed?
# instantiate parameters
self._params_glider = params_environment.GliderParameters()
self._params_physics = params_environment.PhysicsParameters()
self._params_sim = params_environment.SimulationParameters()
self._params_wind = params_environment.WindParameters()
self._params_task = params_triangle_soaring.TaskParameters()
self._wind_fun = Wind()
if agent == 'updraft_exploiter':
self.agent = agent
self._params_agent = params_updraft_exploiter.AgentParameters()
self.current_task = 'exploitation'
elif agent == 'decision_maker':
self.agent = agent
self._params_agent = params_decision_maker.AgentParameters()
self.current_task = self._params_task.TASK
else:
sys.exit("not a valid agent passed for env setup")
# set integrator
if self._params_sim.USE_RK45:
self._integrator = integrate.ode(self.build_dynamics_3d).set_integrator('dopri5', rtol=1e-2, atol=1e-4)
else:
self._integrator = 'euler'
# TODO: resolve the mix of integrator object and plain string
# set random seed
self.np_random, _ = seeding.np_random()
# initialize further member variables
self.lb = np.min(self._params_agent.ACTION_SPACE, 1) * (np.pi / 180)
self.ub = np.max(self._params_agent.ACTION_SPACE, 1) * (np.pi / 180)
self.state = None
self.time = None
self.control = None
self.active_vertex = None
self.vertex_counter = None
self.lap_counter = None
self.viewer = None
def seed(self, seed=None):
""" Sets seed for environment
Parameters
----------
seed : Seed value
"""
self.np_random, _ = seeding.np_random(seed)
def reset(self):
""" Reset environment and glider state. Initial state depends on agent type.
Returns
-------
state : ndarray
Reset vehicle state
"""
if self.agent == 'updraft_exploiter':
initState = self.np_random.uniform(self._params_agent.INITIAL_SPACE[:, 0],
self._params_agent.INITIAL_SPACE[:, 1])
self.active_vertex = self.np_random.randint(1, 4) # should not matter
self.time = 0
elif self.agent == 'decision_maker':
initState = self.np_random.multivariate_normal(self._params_agent.INITIAL_STATE,
np.diag(np.square(self._params_agent.INITIAL_STD)))
self.time = 0
self.active_vertex = 1
else:
sys.exit("not a valid agent passed for env setup")
if self._params_wind.ALWAYS_RESET:
self._wind_fun.reset_wind()
self.vertex_counter = 0
self.lap_counter = 0
self.state = np.copy(initState)
return self.state
def step(self, action, timestep=None):
""" Performs one simulation step. Action from agent is converted to control command and integration
over timestep is performed. Returns reward and observation and checks if episode is done.
Parameters
----------
action : ndarray
Output from ANN
timestep : float
Simulation timestep
Returns
-------
observation: ndarray
Observation depending on agent
reward: ndarray
Reward for updraft exploiter or decision maker
done: bool
Flag if episode has terminated
info:
Get simulation time, position and velocity as string for printing
"""
timestep = self._params_agent.TIMESTEP_CTRL if not timestep else timestep
self.control = self.action2control(action)
self.integrate(timestep)
observation = self.get_observation()
reward, done = self.get_reward_and_done()
info = self.get_info()
return observation, reward, done, info
def action2control(self, action):
""" Transforms output from policy to control interval
Parameters
----------
action: ndarray
Output from policy
Returns
-------
control: ndarray
Controller setpoint
"""
control = self.lb + (action + 1.) * 0.5 * (self.ub - self.lb)
control = np.clip(control, self.lb, self.ub)
return control
def integrate(self, timestep):
""" Integrates system state
Parameters
----------
timestep :
Integration timestep
"""
if self._integrator == 'euler':
t0 = self.time
while self.time < (t0 + timestep):
x_dot = self.build_dynamics_3d(self.state)
dt = np.minimum((t0 + timestep) - self.time, self._params_sim.TIMESTEP_SIM)
self.state += (dt * x_dot)
self.time += dt
else:
r = self._integrator
r.set_initial_value(self.state)
r.integrate(timestep)
self.time += r.t
self.state = r.y
def build_dynamics_3d(self, x):
""" Calculates state derivative x_dot
Parameters
----------
x : ndarray
Glider state
Returns
-------
xp: ndarray
State derivative
"""
# control variables assignment
mu_a = self.control.item(0)
alpha = self.control.item(1)
# get wind vector at current aircraft position
g_v_W = self._wind_fun.get_current_wind(x[0:3])
# track speed in local NED coordinates
g_v_K = x[3:6].reshape(3, 1)
# airspeed in local NED coordinates: airspeed = groundspeed - windspeed
g_v_A = g_v_K - g_v_W
# air-path angles
v_A_norm = np.maximum(np.linalg.norm(g_v_A), .1)
gamma_a = -np.arcsin(np.clip((g_v_A[2] / v_A_norm), -1, 1))
chi_a = np.arctan2(g_v_A[1], g_v_A[0])
# aerodynamic force in aerodynamic coordinates
cl = 2 * np.pi * (self._params_glider.ST / (self._params_glider.ST + 2)) * alpha
cd = self._params_glider.CD0 + (1 / (np.pi * self._params_glider.ST * self._params_glider.OE)) * np.power(cl, 2)
a_f_A = (self._params_physics.RHO / 2) * self._params_glider.S * np.power(v_A_norm, 2) * np.array(
[[-cd], [0], [-cl]])
# aerodynamic force in local NED coordinates
g_T_a = self.get_rotation_matrix(-chi_a.item(), 3) \
@ self.get_rotation_matrix(-gamma_a.item(), 2) \
@ self.get_rotation_matrix(-mu_a, 1)
g_f_A = g_T_a @ a_f_A
# track acceleration in local NED coordinates
g_a_K = (g_f_A / self._params_glider.M) + np.array([[0], [0], [self._params_physics.G]])
# state derivative
xp = np.append(g_v_K, g_a_K)
if np.isnan(xp).any():
print("xp is not a number: {}".format(xp))
return xp
@staticmethod
def get_rotation_matrix(angle, axis):
"""
Parameters
----------
angle : float
Rotation angle around axis
axis : int
Rotation axis(x = 1, y = 2, z = 3)
Returns
-------
rotationMatrix: ndarray
"""
if axis == 1:
rotationMatrix = np.array([[1, 0, 0],
[0, np.cos(angle), np.sin(angle)],
[0, -np.sin(angle), np.cos(angle)]])
elif axis == 2:
rotationMatrix = np.array([[np.cos(angle), 0, -np.sin(angle)],
[0, 1, 0],
[np.sin(angle), 0, np.cos(angle)]])
elif axis == 3:
rotationMatrix = np.array([[np.cos(angle), np.sin(angle), 0],
[-np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
else:
sys.exit("not a valid rotation axis")
return rotationMatrix
def get_observation(self):
""" Calls observation function, depending on chosen agent
Returns
-------
observation: ndarray
"""
if self.agent == 'updraft_exploiter':
observation = self.get_updraft_positions()
elif self.agent == 'decision_maker':
observation = self.get_sparse_observation()
else:
sys.exit("not a valid agent passed for env setup")
return observation
def get_sparse_observation(self):
""" Observation for decision maker, which consists of the normalized values for time, altitude
and distance to finish
Returns
-------
observation: ndarray
"""
# vector from active vertex to aircraft in g-coordinates
g_active_to_aircraft = self.state[0:2] - self._params_task.TRIANGLE[:, (self.active_vertex - 1)]
# aircraft distance from active vertex
dist_to_active_vertex = np.linalg.norm(g_active_to_aircraft)
# triangle dimensions
len_base = np.linalg.norm(self._params_task.TRIANGLE[:, 0] - self._params_task.TRIANGLE[:, 2])
len_legs = len_base / np.sqrt(2)
# distance to finish line
if self.vertex_counter != 3:
dist_to_finish = dist_to_active_vertex + (3 - self.active_vertex) * len_legs + 0.5 * len_base
else:
T_pos_ac = np.transpose(self._params_task.G_T_T) @ self.state[0:2].reshape(2, 1)
dist_to_finish = T_pos_ac[1].item() - self._params_task.FINISH_LINE[1].item()
observation = (np.array([self.time, -self.state[2], dist_to_finish])
- self._params_agent.OBS_MEAN) / self._params_agent.OBS_STD
return observation
def get_updraft_positions(self):
""" Calculates positions of updrafts. Positions position is given, relatively to glider, so closes
updraft can be find.
"""
# assign updraft data
updraft_count = int(self._wind_fun.wind_data['updraft_count'])
updraft_position = self._wind_fun.wind_data['updraft_position']
# horizontal track speed in local NE coordinates
g_v_K = self.state[3:5].reshape(2, 1)
# aircraft heading wrt g-frame
g_chi = np.arctan2(g_v_K[1], g_v_K[0]).item()
# rotation matrix from local NED-coordinates to k-coordinates
k_T_g = self.get_rotation_matrix(g_chi, 3)
# set relative updraft positions (dist, dir)
rel_updraft_pos = np.empty([updraft_count, 2])
for k in range(0, updraft_count):
# vector from aircraft to updraft in g-coordinates (i.e., line-of-sight)
g_aircraft2updraft = updraft_position[:, k].reshape(2, 1) - self.state[0:2].reshape(2, 1)
# updraft position in cartesian k-coordinates
k_p = k_T_g[0:2, 0:2] @ g_aircraft2updraft
# (negative) aircraft heading wrt. line-of-sight to updraft
k_phi = np.arctan2(k_p[1], k_p[0]).item()
# assign return values
rel_updraft_pos[k, :] = np.array([np.linalg.norm(g_aircraft2updraft), k_phi])
# sort the array in descending order wrt updraft distance (nearest updraft in last column)
rel_updraft_pos_sorted = rel_updraft_pos[np.argsort(-rel_updraft_pos[:, 0]), :]
# standardization
rel_updraft_pos_normalized = (rel_updraft_pos_sorted[:, 0] - self._params_wind.UPD_MEAN) / \
self._params_wind.UPD_STD
rel_updraft_pos_normalized = np.stack((rel_updraft_pos_normalized, rel_updraft_pos_sorted[:, 1] / np.pi), 1)
return rel_updraft_pos_normalized
def get_azimuth_wrt_r_frame(self):
""" Calculates azimuth in r-frame
Returns
-------
r_chi: float
Azimuth with reference to r-frame
"""
previous_vertex = np.mod((self.active_vertex - 1), 3)
g_previous2active = self._params_task.TRIANGLE[:, (self.active_vertex - 1)] \
- self._params_task.TRIANGLE[:, (previous_vertex - 1)]
# polar angle of r-frame wrt g-frame
g_phi_r = np.arctan2(g_previous2active[1], g_previous2active[0]).item()
# rotation matrix from local NED-coordinates to reference coordinates
r_T_g = self.get_rotation_matrix(g_phi_r, 3)
# track speed in r-coordinates
g_v_K = self.state[3:6].reshape(3, 1)
r_v_K = r_T_g @ g_v_K
# azimuth of the track speed vector wrt the x_r-axis
r_chi = np.arctan2(r_v_K[1], r_v_K[0]).item()
return r_chi
def get_reward_and_done(self):
"""
Returns
-------
reward: float
Reward, depending on agent
done: bool
Flag for termination of episode
"""
# set active vertex
old_vertex = self.active_vertex
self.set_active_vertex()
# set lap counter
old_lap_counter = self.lap_counter
self.set_lap_counter()
# set flags relevant for done flag
ground = (-self.state[2] <= 0)
out_of_sight = (-self.state[2] > self._params_agent.HEIGHT_MAX)
if self.agent == 'updraft_exploiter':
reward = self.get_energy_reward()
out_of_sight = False
timeout = (self.time >= self._params_task.WORKING_TIME/6) # 1800 s / 6 = 5 minutes
elif self.agent == 'decision_maker':
# reward = 200 / 3 if (self.active_vertex != old_vertex) else 0
reward = 200 if (self.lap_counter > old_lap_counter) else 0
# reward = reward - (self._params_task.WORKING_TIME - self.time) if (ground or out_of_sight) else reward
timeout = (self.time >= self._params_task.WORKING_TIME)
# reward = reward - (-self.state[2]) if timeout else reward
else:
sys.exit("not a valid agent passed for env setup")
# set done flag
done = (ground or timeout or out_of_sight or (not np.isfinite(self.state).all()))
return reward, done
def set_active_vertex(self):
""" Sets active vertex, depending on vehicle position. If sector of vertex is hit, the next vertex is
chosen as active
"""
# get horizontal aircraft position in active-sector-coordinates
sec_T_g = self.get_trafo_to_sector_coords()
sec_pos_ac = sec_T_g @ (self.state[0:2].reshape(2, 1)
- self._params_task.TRIANGLE[:, (self.active_vertex - 1)].reshape(2, 1))
# update active vertex if both active-sector-coordinates are positive
if (sec_pos_ac >= 0).all():
self.vertex_counter += 1
if (self.active_vertex + 1) > 3:
self.active_vertex = 1
else:
self.active_vertex += 1
def get_trafo_to_sector_coords(self):
""" Calculates transformation matrix from geodetic coordinates to sector coordinates,
depending on current vertex
Returns
-------
sec_T_g : ndarray
Rotation matrix from geodetic to sector coordinates
"""
if self.active_vertex == 1:
# rotation matrix from geodetic to sector-one-coordinates
sec_T_g = (self._params_task.ONE_T_T @ np.transpose(self._params_task.G_T_T))
elif self.active_vertex == 2:
# rotation matrix from geodetic to sector-two-coordinates
sec_T_g = (self._params_task.TWO_T_T @ np.transpose(self._params_task.G_T_T))
elif self.active_vertex == 3:
# rotation matrix from geodetic to sector-three-coordinates
sec_T_g = (self._params_task.THREE_T_T @ np.transpose(self._params_task.G_T_T))
else:
sec_T_g = None
print("active vertex no. {} is not a valid triangle vertex".format(self.active_vertex))
return sec_T_g
def set_lap_counter(self):
""" Increments lap counter, if three vertices (one lap) are hit """
T_pos_ac =
|
np.transpose(self._params_task.G_T_T)
|
numpy.transpose
|
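# --- Editor's illustrative sketch (not part of the original record) ---
# The completion applies numpy.transpose to the 2x2 matrix G_T_T before
# projecting the aircraft position into triangle coordinates; for a pure
# rotation matrix the transpose equals the inverse. The 30-degree angle and
# the reading of G_T_T as a T-to-g rotation are assumptions made only for
# this sketch.
import numpy as np

phi = np.deg2rad(30.0)
G_T_T = np.array([[np.cos(phi), -np.sin(phi)],
                  [np.sin(phi),  np.cos(phi)]])   # assumed T -> g rotation
g_pos = np.array([[100.0], [50.0]])               # aircraft position, g-frame
T_pos = np.transpose(G_T_T) @ g_pos               # position in triangle frame
assert np.allclose(G_T_T @ T_pos, g_pos)          # transpose acts as inverse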
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 09:59:57 2017
@author: similarities
"""
import numpy as np
from tkinter import filedialog as fd
import os
import ntpath
import matplotlib.pyplot as plt
class Load_text_into_2D_array:
def __init__(self):
self.file_name = str
self.file_path = str
self.loaded_array = np.empty([], dtype=np.float32)
def ask_file_dialog(self):
self.file_path = fd.askopenfilename()
return self.file_path
def path_leaf(self):
ntpath.basename("a/b/c")
head, self.file_name = ntpath.split(self.file_path)
print(head, " header")
print("filename:", self.file_name)
return self.file_name or ntpath.basename(head)
def loadarray(self):
# TODO: test first with integer data
# read column 1 from the text file, skipping the first 4 rows
liste1 = np.loadtxt(self.file_path, skiprows=(4), usecols=(0,))
# read column 2 from the text file, skipping the first 4 rows
liste = np.loadtxt(self.file_path, skiprows=(4), usecols=(1,))
# convert loaded column 1 to a numpy array:
matrix_x = np.array((liste1))
# convert loaded column 2 to a numpy array:
matrix_y = np.array((liste))
# join the arrays into an Nx2 array
self.loaded_array=
|
np.column_stack((matrix_x, matrix_y))
|
numpy.column_stack
|
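# --- Editor's illustrative sketch (not part of the original record) ---
# numpy.column_stack joins the two 1-D columns read by np.loadtxt into a
# single N x 2 array, one row per (x, y) sample. Stand-alone example with
# made-up values:
import numpy as np

matrix_x = np.array([1.0, 2.0, 3.0])
matrix_y = np.array([10.0, 20.0, 30.0])
loaded_array = np.column_stack((matrix_x, matrix_y))
print(loaded_array.shape)   # (3, 2)
print(loaded_array[1])      # [ 2. 20.]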
# -*- coding: utf-8 -*-
"""
Demand model of thermal loads
"""
import numpy as np
import pandas as pd
from cea.constants import HOURS_IN_YEAR, HOURS_PRE_CONDITIONING
from cea.demand import demand_writers
from cea.demand import hourly_procedure_heating_cooling_system_load, ventilation_air_flows_simple
from cea.demand import latent_loads
from cea.demand import sensible_loads, electrical_loads, hotwater_loads, refrigeration_loads, datacenter_loads
from cea.demand import ventilation_air_flows_detailed, control_heating_cooling_systems
from cea.demand.building_properties import get_thermal_resistance_surface
from cea.demand.latent_loads import convert_rh_to_moisture_content
from cea.utilities import reporting
def calc_thermal_loads(building_name, bpr, weather_data, date_range, locator,
use_dynamic_infiltration_calculation, resolution_outputs, loads_output, massflows_output,
temperatures_output, config, debug):
"""
Calculate thermal loads of a single building with mechanical or natural ventilation.
Calculation procedure follows the methodology of ISO 13790
The structure of ``usage_schedules`` is:
.. code-block:: python
:emphasize-lines: 2,4
{
'list_uses': ['ADMIN', 'GYM', ...],
'schedules': [ ([...], [...], [...], [...]), (), (), () ]
}
* each element of the 'list_uses' entry represents a building occupancy type.
* each element of the 'schedules' entry represents the schedules for a building occupancy type.
* the schedules for a building occupancy type are a 4-tuple (occupancy, electricity, domestic hot water,
probability of use), with each element of the 4-tuple being a list of hourly values (HOURS_IN_YEAR values).
Side effect include a number of files in two folders:
* ``scenario/outputs/data/demand``
* ``${Name}.csv`` for each building
* temporary folder (as returned by ``tempfile.gettempdir()``)
* ``${Name}T.csv`` for each building
daren-thomas: as far as I can tell, these are the only side-effects.
:param building_name: name of building
:type building_name: str
:param bpr: a collection of building properties for the building used for thermal loads calculation
:type bpr: BuildingPropertiesRow
:param weather_data: data from the .epw weather file. Each row represents an hour of the year. The columns are:
``drybulb_C``, ``relhum_percent``, and ``windspd_ms``
:type weather_data: pandas.DataFrame
:param locator:
:param use_dynamic_infiltration_calculation:
:returns: This function does not return anything
:rtype: NoneType
"""
schedules, tsd = initialize_inputs(bpr, weather_data, locator)
# CALCULATE ELECTRICITY LOADS
tsd = electrical_loads.calc_Eal_Epro(tsd, schedules)
# CALCULATE REFRIGERATION LOADS
if refrigeration_loads.has_refrigeration_load(bpr):
tsd = refrigeration_loads.calc_Qcre_sys(bpr, tsd, schedules)
tsd = refrigeration_loads.calc_Qref(locator, bpr, tsd)
else:
tsd['DC_cre'] = tsd['Qcre_sys'] = tsd['Qcre'] = np.zeros(HOURS_IN_YEAR)
tsd['mcpcre_sys'] = tsd['Tcre_sys_re'] = tsd['Tcre_sys_sup'] = np.zeros(HOURS_IN_YEAR)
tsd['E_cre'] = np.zeros(HOURS_IN_YEAR)
# CALCULATE PROCESS HEATING
tsd['Qhpro_sys'] = schedules['Qhpro_W'] # in Wh
# CALCULATE PROCESS COOLING
tsd['Qcpro_sys'] = schedules['Qcpro_W'] # in Wh
# CALCULATE DATA CENTER LOADS
if datacenter_loads.has_data_load(bpr):
tsd = datacenter_loads.calc_Edata(tsd, schedules) # end-use electricity
tsd = datacenter_loads.calc_Qcdata_sys(bpr, tsd) # system need for cooling
tsd = datacenter_loads.calc_Qcdataf(locator, bpr, tsd) # final need for cooling
else:
tsd['DC_cdata'] = tsd['Qcdata_sys'] = tsd['Qcdata'] = np.zeros(HOURS_IN_YEAR)
tsd['mcpcdata_sys'] = tsd['Tcdata_sys_re'] = tsd['Tcdata_sys_sup'] = np.zeros(HOURS_IN_YEAR)
tsd['Edata'] = tsd['E_cdata'] = np.zeros(HOURS_IN_YEAR)
# CALCULATE SPACE CONDITIONING DEMANDS
if np.isclose(bpr.rc_model['Af'], 0.0): # if building does not have conditioned area
tsd['T_int'] = tsd['T_ext']
tsd['x_int'] = np.vectorize(convert_rh_to_moisture_content)(tsd['rh_ext'], tsd['T_int'])
tsd['E_cs'] = tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['Eaux_cs'] = tsd['Eaux_hs'] = tsd['Ehs_lat_aux'] = np.zeros(HOURS_IN_YEAR)
print(f"building {bpr.name} does not have an air-conditioned area")
else:
# get hourly thermal resistances of external surfaces
tsd['RSE_wall'], \
tsd['RSE_roof'], \
tsd['RSE_win'] = get_thermal_resistance_surface(bpr.architecture, weather_data)
# calculate heat gains
tsd = latent_loads.calc_Qgain_lat(tsd, schedules)
tsd = calc_set_points(bpr, date_range, tsd, building_name, config, locator,
schedules) # calculate the setpoints for every hour
tsd = calc_Qhs_Qcs(bpr, tsd,
use_dynamic_infiltration_calculation) # end-use demand latent and sensible + ventilation
tsd = sensible_loads.calc_Qhs_Qcs_loss(bpr, tsd) # losses
tsd = sensible_loads.calc_Qhs_sys_Qcs_sys(tsd) # system (incl. losses)
tsd = sensible_loads.calc_temperatures_emission_systems(bpr, tsd) # calculate temperatures
tsd = electrical_loads.calc_Eve(tsd) # calc auxiliary loads ventilation
tsd = electrical_loads.calc_Eaux_Qhs_Qcs(tsd, bpr) # calc auxiliary loads heating and cooling
tsd = calc_Qcs_sys(bpr, tsd) # final : including fuels and renewables
tsd = calc_Qhs_sys(bpr, tsd) # final : including fuels and renewables
# Positive loads
tsd['Qcs_lat_sys'] = abs(tsd['Qcs_lat_sys'])
tsd['DC_cs'] = abs(tsd['DC_cs'])
tsd['Qcs_sys'] = abs(tsd['Qcs_sys'])
tsd['Qcre_sys'] = abs(tsd['Qcre_sys']) # inverting sign of cooling loads for reporting and graphs
tsd['Qcdata_sys'] = abs(tsd['Qcdata_sys']) # inverting sign of cooling loads for reporting and graphs
# CALCULATE HOT WATER LOADS
if hotwater_loads.has_hot_water_technical_system(bpr):
tsd = electrical_loads.calc_Eaux_fw(tsd, bpr, schedules)
tsd = hotwater_loads.calc_Qww(bpr, tsd, schedules) # end-use
tsd = hotwater_loads.calc_Qww_sys(bpr, tsd) # system (incl. losses)
tsd = electrical_loads.calc_Eaux_ww(tsd, bpr) # calc auxiliary loads
tsd = hotwater_loads.calc_Qwwf(bpr, tsd) # final
else:
tsd = electrical_loads.calc_Eaux_fw(tsd, bpr, schedules)
tsd['Qww'] = tsd['DH_ww'] = tsd['Qww_sys'] = np.zeros(HOURS_IN_YEAR)
tsd['mcpww_sys'] = tsd['Tww_sys_re'] = tsd['Tww_sys_sup'] = np.zeros(HOURS_IN_YEAR)
tsd['Eaux_ww'] = np.zeros(HOURS_IN_YEAR)
tsd['NG_ww'] = tsd['COAL_ww'] = tsd['OIL_ww'] = tsd['WOOD_ww'] = np.zeros(HOURS_IN_YEAR)
tsd['E_ww'] = np.zeros(HOURS_IN_YEAR)
# CALCULATE SUM OF HEATING AND COOLING LOADS
tsd = calc_QH_sys_QC_sys(tsd) # aggregated cooling and heating loads
# CALCULATE ELECTRICITY LOADS PART 2/2 AUXILIARY LOADS + ENERGY GENERATION
tsd = electrical_loads.calc_Eaux(tsd) # auxiliary totals
tsd = electrical_loads.calc_E_sys(tsd) # system (incl. losses)
tsd = electrical_loads.calc_Ef(bpr, tsd) # final (incl. self. generated)
# WRITE RESULTS
write_results(bpr, building_name, date_range, loads_output, locator, massflows_output,
resolution_outputs, temperatures_output, tsd, debug)
return
def calc_QH_sys_QC_sys(tsd):
tsd['QH_sys'] = tsd['Qww_sys'] + tsd['Qhs_sys'] + tsd['Qhpro_sys']
tsd['QC_sys'] = tsd['Qcs_sys'] + tsd['Qcdata_sys'] + tsd['Qcre_sys'] + tsd['Qcpro_sys']
return tsd
def write_results(bpr, building_name, date, loads_output, locator, massflows_output,
resolution_outputs, temperatures_output, tsd, debug):
if resolution_outputs == 'hourly':
writer = demand_writers.HourlyDemandWriter(loads_output, massflows_output, temperatures_output)
elif resolution_outputs == 'monthly':
writer = demand_writers.MonthlyDemandWriter(loads_output, massflows_output, temperatures_output)
else:
raise Exception('error')
if debug:
print('Creating instant plotly visualizations of demand variable time series.')
print('Behavior can be changed in cea.utilities.reporting code.')
print('Writing detailed demand results of {} to .xls file.'.format(building_name))
reporting.quick_visualization_tsd(tsd, locator.get_demand_results_folder(), building_name)
reporting.full_report_to_xls(tsd, locator.get_demand_results_folder(), building_name)
else:
writer.results_to_csv(tsd, bpr, locator, date, building_name)
def calc_Qcs_sys(bpr, tsd):
# GET SYSTEMS EFFICIENCIES
energy_source = bpr.supply['source_cs']
scale_technology = bpr.supply['scale_cs']
efficiency_average_year = bpr.supply['eff_cs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
# sum
tsd['E_cs'] = abs(tsd['Qcs_sys']) / efficiency_average_year
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / COOLING')
elif scale_technology == "DISTRICT":
if energy_source == "GRID":
tsd['DC_cs'] = tsd['Qcs_sys'] / efficiency_average_year
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of ALL IN ONE SYSTEMS / COOLING')
elif scale_technology == "NONE":
tsd['DC_cs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_cs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / COOLING')
return tsd
def calc_Qhs_sys(bpr, tsd):
"""
It calculates the final heating loads, including fuels and renewables.
"""
# GET SYSTEMS EFFICIENCIES
energy_source = bpr.supply['source_hs']
scale_technology = bpr.supply['scale_hs']
efficiency_average_year = bpr.supply['eff_hs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
tsd['E_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NATURALGAS":
tsd['NG_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "OIL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "COAL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['SOLAR_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "WOOD":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] =
|
np.zeros(HOURS_IN_YEAR)
|
numpy.zeros
|
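# --- Editor's illustrative sketch (not part of the original record) ---
# The demand model initialises every unused energy carrier with an hourly
# series of zeros via numpy.zeros(HOURS_IN_YEAR). Minimal sketch assuming
# HOURS_IN_YEAR = 8760 (one non-leap year); note that the chained assignment
# makes all keys share one and the same array object.
import numpy as np

HOURS_IN_YEAR = 8760
tsd = {}
tsd['NG_hs'] = tsd['COAL_hs'] = tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
print(tsd['NG_hs'].shape)              # (8760,)
print(tsd['NG_hs'] is tsd['COAL_hs'])  # True: all keys point to the same array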
# -*- coding: utf-8 -*-
from c3s_sm.interface import C3SImg
import os
import numpy.testing as nptest
from smecv_grid.grid import SMECV_Grid_v052
import numpy as np
import pytest
def test_C3STs_tcdr_combined_daily():
file = os.path.join(os.path.join(os.path.dirname(__file__),
'c3s_sm-test-data', 'img', 'TCDR', '060_dailyImages', 'combined', '2014',
'C3S-SOILMOISTURE-L3S-SSMV-COMBINED-DAILY-20140101000000-TCDR-v201801.0.0.nc'))
ds = C3SImg(file, mode='r', parameters=None, flatten=False, fillval={'sm': np.nan})
image= ds.read()
test_loc_lonlat = (16.375, 48.125)
row, col = np.where((image.lon==test_loc_lonlat[0]) & (image.lat==test_loc_lonlat[1]))
nptest.assert_almost_equal(image.data['sm'][row, col], 0.34659, 4)
assert(image.metadata['sm']['long_name'] == 'Volumetric Soil Moisture')
def test_C3STs_tcdr_active_monthly():
file = os.path.join(os.path.join(os.path.dirname(__file__),
'c3s_sm-test-data', 'img', 'TCDR', '061_monthlyImages', 'active',
'C3S-SOILMOISTURE-L3S-SSMS-ACTIVE-MONTHLY-20140101000000-TCDR-v201801.0.0.nc'))
ds = C3SImg(file, mode='r', parameters='sm', flatten=False, fillval=None,
subgrid=SMECV_Grid_v052(None).subgrid_from_bbox(-181,-91, 181,91))
image = ds.read()
test_loc_lonlat = (16.375, 48.125)
row, col = np.where((image.lon==test_loc_lonlat[0]) & (image.lat==test_loc_lonlat[1]))
assert image.data['sm'].shape == (720,1440)
nptest.assert_almost_equal(image.data['sm'][row, col], 47.69982, 4)
assert(image.metadata['sm']['_FillValue'] == -9999.)
assert image.data['sm'].min() == image.metadata['sm']['_FillValue']
assert(image.metadata['sm']['long_name'] == 'Percent of Saturation Soil Moisture')
def test_C3STs_tcdr_passive_decadal():
file = os.path.join(os.path.join(os.path.dirname(__file__),
'c3s_sm-test-data', 'img', 'TCDR', '062_dekadalImages', 'passive',
'C3S-SOILMOISTURE-L3S-SSMV-PASSIVE-DEKADAL-20140101000000-TCDR-v201801.0.0.nc'))
ds = C3SImg(file, mode='r', flatten=False, fillval={'nobs': -1, 'sm': np.nan},
subgrid=SMECV_Grid_v052('landcover_class', subset_value=[10,11,60,70]).subgrid_from_bbox(-14, 30, 44, 73))
image = ds.read()
test_loc_lonlat = (16.125, 48.125)
row, col = np.where((image.lon==test_loc_lonlat[0]) & (image.lat==test_loc_lonlat[1]))
assert image['nobs'].min() == -1
assert np.any(
|
np.isnan(image['sm'])
|
numpy.isnan
|
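# --- Editor's illustrative sketch (not part of the original record) ---
# The assertion checks that at least one soil-moisture cell carries the NaN
# fill value, using numpy.isnan inside numpy.any. Minimal example with a
# made-up 2x2 field:
import numpy as np

sm = np.array([[0.31, np.nan],
               [0.28, 0.34]])
assert np.any(np.isnan(sm))   # at least one masked / filled cell
print(np.isnan(sm).sum())     # 1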
"""Climate data and mass-balance computations"""
# Built ins
import logging
import os
import datetime
import warnings
# External libs
import numpy as np
import netCDF4
import pandas as pd
import xarray as xr
from scipy import stats
from scipy import optimize as optimization
# Optional libs
try:
import salem
except ImportError:
pass
# Locals
from oggm import cfg
from oggm import utils
from oggm.core import centerlines
from oggm import entity_task, global_task
from oggm.exceptions import MassBalanceCalibrationError, InvalidParamsError
# Module logger
log = logging.getLogger(__name__)
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_custom_climate_data(gdir):
"""Processes and writes the climate data from a user-defined climate file.
The input file must have a specific format (see
https://github.com/OGGM/oggm-sample-data test-files/histalp_merged_hef.nc
for an example).
This is the way OGGM used to do it for HISTALP before it got automatised.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if not (('climate_file' in cfg.PATHS) and
os.path.exists(cfg.PATHS['climate_file'])):
raise InvalidParamsError('Custom climate file not found')
if cfg.PARAMS['baseline_climate'] not in ['', 'CUSTOM']:
raise InvalidParamsError("When using custom climate data please set "
"PARAMS['baseline_climate'] to an empty "
"string or `CUSTOM`. Note also that you can "
"now use the `process_histalp_data` task for "
"automated HISTALP data processing.")
# read the file
fpath = cfg.PATHS['climate_file']
nc_ts = salem.GeoNetcdf(fpath)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts.time.year
y0, y1 = yrs[0], yrs[-1]
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
nc_ts.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts.time
ny, r = divmod(len(time), 12)
if r != 0:
raise InvalidParamsError('Climate data should be full years')
# Units
assert nc_ts._nc.variables['hgt'].units.lower() in ['m', 'meters', 'meter',
'metres', 'metre']
assert nc_ts._nc.variables['temp'].units.lower() in ['degc', 'degrees',
'degree', 'c']
assert nc_ts._nc.variables['prcp'].units.lower() in ['kg m-2', 'l m-2',
'mm', 'millimeters',
'millimeter']
# geoloc
lon = nc_ts._nc.variables['lon'][:]
lat = nc_ts._nc.variables['lat'][:]
ilon = np.argmin(np.abs(lon - gdir.cenlon))
ilat = np.argmin(np.abs(lat - gdir.cenlat))
ref_pix_lon = lon[ilon]
ref_pix_lat = lat[ilat]
# read the data
temp = nc_ts.get_vardata('temp')
prcp = nc_ts.get_vardata('prcp')
hgt = nc_ts.get_vardata('hgt')
ttemp = temp[:, ilat-1:ilat+2, ilon-1:ilon+2]
itemp = ttemp[:, 1, 1]
thgt = hgt[ilat-1:ilat+2, ilon-1:ilon+2]
ihgt = thgt[1, 1]
thgt = thgt.flatten()
iprcp = prcp[:, ilat, ilon]
nc_ts.close()
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
igrad = None
if use_grad:
igrad = np.zeros(len(time)) * np.NaN
for t, loct in enumerate(ttemp):
slope, _, _, p_val, _ = stats.linregress(thgt,
loct.flatten())
igrad[t] = slope if (p_val < 0.01) else np.NaN
gdir.write_monthly_climate_file(time, iprcp, itemp, ihgt,
ref_pix_lon, ref_pix_lat,
gradient=igrad)
# metadata
out = {'baseline_climate_source': fpath,
'baseline_hydro_yr_0': y0+1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_cru_data(gdir):
"""Processes and writes the CRU baseline climate data for this glacier.
Interpolates the CRU TS data to the high-resolution CL2 climatologies
(provided with OGGM) and writes everything to a NetCDF file.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if cfg.PATHS.get('climate_file', None):
warnings.warn("You seem to have set a custom climate file for this "
"run, but are using the default CRU climate "
"file instead.")
if cfg.PARAMS['baseline_climate'] != 'CRU':
raise InvalidParamsError("cfg.PARAMS['baseline_climate'] should be "
"set to CRU")
# read the climatology
clfile = utils.get_cru_cl_file()
ncclim = salem.GeoNetcdf(clfile)
# and the TS data
nc_ts_tmp = salem.GeoNetcdf(utils.get_cru_file('tmp'), monthbegin=True)
nc_ts_pre = salem.GeoNetcdf(utils.get_cru_file('pre'), monthbegin=True)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts_pre.time.year
y0, y1 = yrs[0], yrs[-1]
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
nc_ts_tmp.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
nc_ts_pre.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts_pre.time
ny, r = divmod(len(time), 12)
assert r == 0
lon = gdir.cenlon
lat = gdir.cenlat
# This is guaranteed to work because I prepared the file (I hope)
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# get climatology data
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
# see if the center is ok
if not np.isfinite(loc_hgt[1, 1]):
# take another candidate where finite
isok = np.isfinite(loc_hgt)
# wait: some areas are entirely NaNs, make the subset larger
_margin = 1
while not np.any(isok):
_margin += 1
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=_margin)
loc_hgt = ncclim.get_vardata('elev')
isok = np.isfinite(loc_hgt)
if _margin > 1:
log.debug('(%s) I had to look up for far climate pixels: %s',
gdir.rgi_id, _margin)
# Take the first candidate (doesn't matter which)
lon, lat = ncclim.grid.ll_coordinates
lon = lon[isok][0]
lat = lat[isok][0]
# Resubset
ncclim.set_subset()
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
assert np.isfinite(loc_hgt[1, 1])
isok = np.isfinite(loc_hgt)
hgt_f = loc_hgt[isok].flatten()
assert len(hgt_f) > 0.
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
ts_grad = None
if use_grad and len(hgt_f) >= 5:
ts_grad = np.zeros(12) * np.NaN
for i in range(12):
loc_tmp_mth = loc_tmp[i, ...][isok].flatten()
slope, _, _, p_val, _ = stats.linregress(hgt_f, loc_tmp_mth)
ts_grad[i] = slope if (p_val < 0.01) else np.NaN
# convert to a timeseries and hydrological years
ts_grad = ts_grad.tolist()
ts_grad = ts_grad[em:] + ts_grad[0:em]
ts_grad = np.asarray(ts_grad * ny)
# maybe this will throw out of bounds warnings
nc_ts_tmp.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
nc_ts_pre.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# compute monthly anomalies
# of temp
ts_tmp = nc_ts_tmp.get_vardata('tmp', as_xarray=True)
ts_tmp_avg = ts_tmp.sel(time=slice('1961-01-01', '1990-12-01'))
ts_tmp_avg = ts_tmp_avg.groupby('time.month').mean(dim='time')
ts_tmp = ts_tmp.groupby('time.month') - ts_tmp_avg
# of precip
ts_pre = nc_ts_pre.get_vardata('pre', as_xarray=True)
ts_pre_avg = ts_pre.sel(time=slice('1961-01-01', '1990-12-01'))
ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
ts_pre_ano = ts_pre.groupby('time.month') - ts_pre_avg
# scaled anomalies is the default. Standard anomalies above
# are used later for where ts_pre_avg == 0
ts_pre = ts_pre.groupby('time.month') / ts_pre_avg
# interpolate to HR grid
if np.any(~np.isfinite(ts_tmp[:, 1, 1])):
# Extreme case, middle pix is not valid
# take any valid pix from the 3*3 (and hope there's one)
found_it = False
for idi in range(2):
for idj in range(2):
if np.all(np.isfinite(ts_tmp[:, idj, idi])):
ts_tmp[:, 1, 1] = ts_tmp[:, idj, idi]
ts_pre[:, 1, 1] = ts_pre[:, idj, idi]
ts_pre_ano[:, 1, 1] = ts_pre_ano[:, idj, idi]
found_it = True
if not found_it:
msg = '({}) there is no climate data'.format(gdir.rgi_id)
raise MassBalanceCalibrationError(msg)
elif np.any(~np.isfinite(ts_tmp)):
# maybe the side is nan, but we can do nearest
ts_tmp = ncclim.grid.map_gridded_data(ts_tmp.values, nc_ts_tmp.grid,
interp='nearest')
ts_pre = ncclim.grid.map_gridded_data(ts_pre.values, nc_ts_pre.grid,
interp='nearest')
ts_pre_ano = ncclim.grid.map_gridded_data(ts_pre_ano.values,
nc_ts_pre.grid,
interp='nearest')
else:
# We can do bilinear
ts_tmp = ncclim.grid.map_gridded_data(ts_tmp.values, nc_ts_tmp.grid,
interp='linear')
ts_pre = ncclim.grid.map_gridded_data(ts_pre.values, nc_ts_pre.grid,
interp='linear')
ts_pre_ano = ncclim.grid.map_gridded_data(ts_pre_ano.values,
nc_ts_pre.grid,
interp='linear')
# take the center pixel and add it to the CRU CL clim
# for temp
loc_tmp = xr.DataArray(loc_tmp[:, 1, 1], dims=['month'],
coords={'month': ts_tmp_avg.month})
ts_tmp = xr.DataArray(ts_tmp[:, 1, 1], dims=['time'],
coords={'time': time})
ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
# for prcp
loc_pre = xr.DataArray(loc_pre[:, 1, 1], dims=['month'],
coords={'month': ts_pre_avg.month})
ts_pre = xr.DataArray(ts_pre[:, 1, 1], dims=['time'],
coords={'time': time})
ts_pre_ano = xr.DataArray(ts_pre_ano[:, 1, 1], dims=['time'],
coords={'time': time})
# scaled anomalies
ts_pre = ts_pre.groupby('time.month') * loc_pre
# standard anomalies
ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
# Correct infinite values with standard anomalies
ts_pre.values = np.where(np.isfinite(ts_pre.values),
ts_pre.values,
ts_pre_ano.values)
# The last step might create negative values (unlikely). Clip them
ts_pre.values = utils.clip_min(ts_pre.values, 0)
# done
loc_hgt = loc_hgt[1, 1]
loc_lon = loc_lon[1]
loc_lat = loc_lat[1]
assert np.isfinite(loc_hgt)
assert np.all(np.isfinite(ts_pre.values))
assert np.all(np.isfinite(ts_tmp.values))
gdir.write_monthly_climate_file(time, ts_pre.values, ts_tmp.values,
loc_hgt, loc_lon, loc_lat,
gradient=ts_grad)
source = nc_ts_tmp._nc.title[:10]
ncclim._nc.close()
nc_ts_tmp._nc.close()
nc_ts_pre._nc.close()
# metadata
out = {'baseline_climate_source': source,
'baseline_hydro_yr_0': y0+1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_dummy_cru_file(gdir, sigma_temp=2, sigma_prcp=0.5, seed=None):
"""Create a simple baseline climate file for this glacier - for testing!
This simply reproduces the climatology with a little randomness in it.
TODO: extend the functionality by allowing a monthly varying sigma
Parameters
----------
gdir : GlacierDirectory
the glacier directory
sigma_temp : float
the standard deviation of the random timeseries (set to 0 for constant
ts)
sigma_prcp : float
the standard deviation of the random timeseries (set to 0 for constant
ts)
seed : int
the RandomState seed
"""
# read the climatology
clfile = utils.get_cru_cl_file()
ncclim = salem.GeoNetcdf(clfile)
# set temporal subset for the ts data (hydro years)
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
y0, y1 = 1901, 2018
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
time = pd.date_range(start='{}-{:02d}-01'.format(y0, sm),
end='{}-{:02d}-01'.format(y1, em),
freq='MS')
ny, r = divmod(len(time), 12)
assert r == 0
lon = gdir.cenlon
lat = gdir.cenlat
# This is guaranteed to work because I prepared the file (I hope)
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# get climatology data
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
# see if the center is ok
if not np.isfinite(loc_hgt[1, 1]):
# take another candidate where finite
isok = np.isfinite(loc_hgt)
# wait: some areas are entirely NaNs, make the subset larger
_margin = 1
while not np.any(isok):
_margin += 1
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=_margin)
loc_hgt = ncclim.get_vardata('elev')
isok = np.isfinite(loc_hgt)
if _margin > 1:
log.debug('(%s) I had to look up for far climate pixels: %s',
gdir.rgi_id, _margin)
# Take the first candidate (doesn't matter which)
lon, lat = ncclim.grid.ll_coordinates
lon = lon[isok][0]
lat = lat[isok][0]
# Resubset
ncclim.set_subset()
ncclim.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
loc_hgt = ncclim.get_vardata('elev')
loc_tmp = ncclim.get_vardata('temp')
loc_pre = ncclim.get_vardata('prcp')
loc_lon = ncclim.get_vardata('lon')
loc_lat = ncclim.get_vardata('lat')
assert np.isfinite(loc_hgt[1, 1])
isok = np.isfinite(loc_hgt)
hgt_f = loc_hgt[isok].flatten()
assert len(hgt_f) > 0.
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
ts_grad = None
if use_grad and len(hgt_f) >= 5:
ts_grad = np.zeros(12) * np.NaN
for i in range(12):
loc_tmp_mth = loc_tmp[i, ...][isok].flatten()
slope, _, _, p_val, _ = stats.linregress(hgt_f, loc_tmp_mth)
ts_grad[i] = slope if (p_val < 0.01) else np.NaN
# convert to a timeseries and hydrological years
ts_grad = ts_grad.tolist()
ts_grad = ts_grad[em:] + ts_grad[0:em]
ts_grad = np.asarray(ts_grad * ny)
# Make DataArrays
rng = np.random.RandomState(seed)
loc_tmp = xr.DataArray(loc_tmp[:, 1, 1], dims=['month'],
coords={'month': np.arange(1, 13)})
ts_tmp = rng.randn(len(time)) * sigma_temp
ts_tmp = xr.DataArray(ts_tmp, dims=['time'],
coords={'time': time})
loc_pre = xr.DataArray(loc_pre[:, 1, 1], dims=['month'],
coords={'month': np.arange(1, 13)})
ts_pre = utils.clip_min(rng.randn(len(time)) * sigma_prcp + 1, 0)
ts_pre = xr.DataArray(ts_pre, dims=['time'],
coords={'time': time})
# Create the time series
ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
ts_pre = ts_pre.groupby('time.month') * loc_pre
# done
loc_hgt = loc_hgt[1, 1]
loc_lon = loc_lon[1]
loc_lat = loc_lat[1]
assert np.isfinite(loc_hgt)
gdir.write_monthly_climate_file(time, ts_pre.values, ts_tmp.values,
loc_hgt, loc_lon, loc_lat,
gradient=ts_grad)
source = 'CRU CL2 and some randomness'
ncclim._nc.close()
# metadata
out = {'baseline_climate_source': source,
'baseline_hydro_yr_0': y0+1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
@entity_task(log, writes=['climate_monthly', 'climate_info'])
def process_histalp_data(gdir):
"""Processes and writes the HISTALP baseline climate data for this glacier.
Extracts the nearest timeseries and writes everything to a NetCDF file.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
if cfg.PATHS.get('climate_file', None):
warnings.warn("You seem to have set a custom climate file for this "
"run, but are using the default HISTALP climate file "
"instead.")
if cfg.PARAMS['baseline_climate'] != 'HISTALP':
raise InvalidParamsError("cfg.PARAMS['baseline_climate'] should be "
"set to HISTALP.")
# read the time out of the pure netcdf file
ft = utils.get_histalp_file('tmp')
fp = utils.get_histalp_file('pre')
with utils.ncDataset(ft) as nc:
vt = nc.variables['time']
assert vt[0] == 0
assert vt[-1] == vt.shape[0] - 1
t0 = vt.units.split(' since ')[1][:7]
time_t = pd.date_range(start=t0, periods=vt.shape[0], freq='MS')
with utils.ncDataset(fp) as nc:
vt = nc.variables['time']
assert vt[0] == 0.5
assert vt[-1] == vt.shape[0] - .5
t0 = vt.units.split(' since ')[1][:7]
time_p = pd.date_range(start=t0, periods=vt.shape[0], freq='MS')
# Now open with salem
nc_ts_tmp = salem.GeoNetcdf(ft, time=time_t)
nc_ts_pre = salem.GeoNetcdf(fp, time=time_p)
# set temporal subset for the ts data (hydro years)
# the reference time is given by precip, which is shorter
sm = cfg.PARAMS['hydro_month_nh']
em = sm - 1 if (sm > 1) else 12
yrs = nc_ts_pre.time.year
y0, y1 = yrs[0], yrs[-1]
if cfg.PARAMS['baseline_y0'] != 0:
y0 = cfg.PARAMS['baseline_y0']
if cfg.PARAMS['baseline_y1'] != 0:
y1 = cfg.PARAMS['baseline_y1']
nc_ts_tmp.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
nc_ts_pre.set_period(t0='{}-{:02d}-01'.format(y0, sm),
t1='{}-{:02d}-01'.format(y1, em))
time = nc_ts_pre.time
ny, r = divmod(len(time), 12)
assert r == 0
# Units
assert nc_ts_tmp._nc.variables['HSURF'].units.lower() in ['m', 'meters',
'meter',
'metres',
'metre']
assert nc_ts_tmp._nc.variables['T_2M'].units.lower() in ['degc', 'degrees',
'degrees celcius',
'degree', 'c']
assert nc_ts_pre._nc.variables['TOT_PREC'].units.lower() in ['kg m-2',
'l m-2', 'mm',
'millimeters',
'millimeter']
# geoloc
lon = gdir.cenlon
lat = gdir.cenlat
nc_ts_tmp.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
nc_ts_pre.set_subset(corners=((lon, lat), (lon, lat)), margin=1)
# read the data
temp = nc_ts_tmp.get_vardata('T_2M')
prcp = nc_ts_pre.get_vardata('TOT_PREC')
hgt = nc_ts_tmp.get_vardata('HSURF')
ref_lon = nc_ts_tmp.get_vardata('lon')
ref_lat = nc_ts_tmp.get_vardata('lat')
source = nc_ts_tmp._nc.title[:7]
nc_ts_tmp._nc.close()
nc_ts_pre._nc.close()
# Should we compute the gradient?
use_grad = cfg.PARAMS['temp_use_local_gradient']
igrad = None
if use_grad:
igrad = np.zeros(len(time)) * np.NaN
for t, loct in enumerate(temp):
slope, _, _, p_val, _ = stats.linregress(hgt.flatten(),
loct.flatten())
igrad[t] = slope if (p_val < 0.01) else np.NaN
gdir.write_monthly_climate_file(time, prcp[:, 1, 1], temp[:, 1, 1],
hgt[1, 1], ref_lon[1], ref_lat[1],
gradient=igrad)
# metadata
out = {'baseline_climate_source': source,
'baseline_hydro_yr_0': y0 + 1,
'baseline_hydro_yr_1': y1}
gdir.write_json(out, 'climate_info')
def mb_climate_on_height(gdir, heights, *, time_range=None, year_range=None):
"""Mass-balance climate of the glacier at a specific height
Reads the glacier's monthly climate data file and computes the
temperature "energies" (temp above 0) and solid precipitation at the
required height.
All MB parameters are considered here! (i.e. melt temp, precip scaling
factor, etc.)
Parameters
----------
gdir : GlacierDirectory
the glacier directory
heights: ndarray
a 1D array of the heights (in meter) where you want the data
time_range : [datetime, datetime], optional
default is to read all data but with this you
can provide a [t0, t1] bounds (inclusive).
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only. Easier to use than the time bounds above.
Returns
-------
(time, tempformelt, prcpsol)::
- time: array of shape (nt,)
- tempformelt: array of shape (len(heights), nt)
- prcpsol: array of shape (len(heights), nt)
"""
if year_range is not None:
sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
em = sm - 1 if (sm > 1) else 12
t0 = datetime.datetime(year_range[0]-1, sm, 1)
t1 = datetime.datetime(year_range[1], em, 1)
return mb_climate_on_height(gdir, heights, time_range=[t0, t1])
# Parameters
temp_all_solid = cfg.PARAMS['temp_all_solid']
temp_all_liq = cfg.PARAMS['temp_all_liq']
temp_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
default_grad = cfg.PARAMS['temp_default_gradient']
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
# Read file
igrad = None
with utils.ncDataset(gdir.get_filepath('climate_monthly'), mode='r') as nc:
# time
time = nc.variables['time']
time = netCDF4.num2date(time[:], time.units)
if time_range is not None:
p0 = np.where(time == time_range[0])[0]
try:
p0 = p0[0]
except IndexError:
raise MassBalanceCalibrationError('time_range[0] not found in '
'file')
p1 = np.where(time == time_range[1])[0]
try:
p1 = p1[0]
except IndexError:
raise MassBalanceCalibrationError('time_range[1] not found in '
'file')
else:
p0 = 0
p1 = len(time)-1
time = time[p0:p1+1]
# Read timeseries
itemp = nc.variables['temp'][p0:p1+1]
iprcp = nc.variables['prcp'][p0:p1+1]
if 'gradient' in nc.variables:
igrad = nc.variables['gradient'][p0:p1+1]
# Security for stuff that can happen with local gradients
igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])
ref_hgt = nc.ref_hgt
# Default gradient?
if igrad is None:
igrad = itemp * 0 + default_grad
# Correct precipitation
iprcp *= prcp_fac
# For each height pixel:
# Compute temp and tempformelt (temperature above melting threshold)
npix = len(heights)
grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
grad_temp *= (heights.repeat(len(time)).reshape(grad_temp.shape) - ref_hgt)
temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
temp2dformelt = temp2d - temp_melt
temp2dformelt = utils.clip_min(temp2dformelt, 0)
# Compute solid precipitation from total precipitation
prcpsol = np.atleast_2d(iprcp).repeat(npix, 0)
fac = 1 - (temp2d - temp_all_solid) / (temp_all_liq - temp_all_solid)
fac = utils.clip_array(fac, 0, 1)
prcpsol = prcpsol * fac
return time, temp2dformelt, prcpsol
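# A minimal, self-contained sketch of the lapse-rate and solid-fraction logic
# above, using made-up numbers (the thresholds and gradient here are
# illustrative values, not the configured cfg.PARAMS defaults):
import numpy as np
_sketch_heights = np.array([2500., 3000., 3500.])           # m
_ref_hgt, _ref_temp, _grad = 2800., 1.5, -0.0065            # m, degC, degC/m
_temp_h = _ref_temp + _grad * (_sketch_heights - _ref_hgt)  # temperature at each height
_fac = 1 - (_temp_h - 0.0) / (2.0 - 0.0)                    # all solid at 0 degC, all liquid at 2 degC
_fac = np.clip(_fac, 0, 1)                                  # solid fraction of total precipitation
assert np.allclose(_fac, [0.0, 0.9, 1.0])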
def mb_yearly_climate_on_height(gdir, heights, *,
year_range=None, flatten=False):
"""Yearly mass-balance climate of the glacier at a specific height
See also: mb_climate_on_height
Parameters
----------
gdir : GlacierDirectory
the glacier directory
heights: ndarray
a 1D array of the heights (in meter) where you want the data
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only.
flatten : bool
for some applications (glacier average MB) it's ok to flatten the
data (average over height) prior to annual summing.
Returns
-------
(years, tempformelt, prcpsol)::
- years: array of shape (ny,)
- tempformelt: array of shape (len(heights), ny) (or ny if flatten
is set)
- prcpsol: array of shape (len(heights), ny) (or ny if flatten
is set)
"""
time, temp, prcp = mb_climate_on_height(gdir, heights,
year_range=year_range)
ny, r = divmod(len(time), 12)
if r != 0:
raise InvalidParamsError('Climate data should be N full years '
'exclusively')
# Last year gives the tone of the hydro year
years = np.arange(time[-1].year-ny+1, time[-1].year+1, 1)
if flatten:
# Spatial average
temp_yr = np.zeros(len(years))
prcp_yr = np.zeros(len(years))
temp = np.mean(temp, axis=0)
prcp = np.mean(prcp, axis=0)
for i, y in enumerate(years):
temp_yr[i] = np.sum(temp[i*12:(i+1)*12])
prcp_yr[i] = np.sum(prcp[i*12:(i+1)*12])
else:
# Annual prcp and temp for each point (no spatial average)
temp_yr = np.zeros((len(heights), len(years)))
prcp_yr = np.zeros((len(heights), len(years)))
for i, y in enumerate(years):
temp_yr[:, i] = np.sum(temp[:, i*12:(i+1)*12], axis=1)
prcp_yr[:, i] = np.sum(prcp[:, i*12:(i+1)*12], axis=1)
return years, temp_yr, prcp_yr
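# The per-year summation loop above is equivalent to summing over a trailing
# month axis after a reshape; a quick self-contained check with random data
# (4 hypothetical height pixels, 3 years of monthly values):
import numpy as np
_monthly = np.random.default_rng(0).random((4, 3 * 12))
_by_loop = np.stack([_monthly[:, i * 12:(i + 1) * 12].sum(axis=1) for i in range(3)], axis=1)
_by_reshape = _monthly.reshape(4, 3, 12).sum(axis=2)
assert np.allclose(_by_loop, _by_reshape)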
def mb_yearly_climate_on_glacier(gdir, *, year_range=None):
"""Yearly mass-balance climate at all glacier heights,
multiplied with the flowlines widths. (all in pix coords.)
See also: mb_climate_on_height
Parameters
----------
gdir : GlacierDirectory
the glacier directory
year_range : [int, int], optional
Provide a [y0, y1] year range to get the data for specific
(hydrological) years only.
Returns
-------
(years, tempformelt, prcpsol)::
- years: array of shape (ny)
- tempformelt: array of shape (ny)
- prcpsol: array of shape (ny)
"""
flowlines = gdir.read_pickle('inversion_flowlines')
heights = np.array([])
widths = np.array([])
for fl in flowlines:
heights = np.append(heights, fl.surface_h)
widths = np.append(widths, fl.widths)
years, temp, prcp = mb_yearly_climate_on_height(gdir, heights,
year_range=year_range,
flatten=False)
temp =
|
np.average(temp, axis=0, weights=widths)
|
numpy.average
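# The completion above is a width-weighted glacier mean: np.average with
# `weights` collapses the height axis using the flowline widths. A toy check
# with three made-up elevation bands and two years:
import numpy as np
_temp = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])   # (n_heights, n_years)
_widths = np.array([1.0, 2.0, 1.0])
assert np.allclose(np.average(_temp, axis=0, weights=_widths), [3.0, 4.0])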
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.object_detection_evaluation."""
import numpy as np
import tensorflow as tf
from research.object_detection.core import standard_fields
from research.object_detection.utils import object_detection_evaluation
class OpenImagesV2EvaluationTest(tf.test.TestCase):
def test_returns_correct_metric_values(self):
categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
oiv2_evaluator = object_detection_evaluation.OpenImagesDetectionEvaluator(
categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array(
[[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
oiv2_evaluator.add_single_ground_truth_image_info(image_key1, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_group_of:
np.array([], dtype=bool)
})
image_key2 = 'img2'
groundtruth_boxes2 = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels2 = np.array([1, 1, 3], dtype=int)
groundtruth_is_group_of_list2 = np.array([False, True, False], dtype=bool)
oiv2_evaluator.add_single_ground_truth_image_info(image_key2, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes2,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels2,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list2
})
image_key3 = 'img3'
groundtruth_boxes3 = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels3 = np.array([2], dtype=int)
oiv2_evaluator.add_single_ground_truth_image_info(image_key3, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes3,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels3
})
# Add detections
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220]],
dtype=float)
detected_class_labels = np.array([1, 1, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.9], dtype=float)
oiv2_evaluator.add_single_detected_image_info(image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = oiv2_evaluator.evaluate()
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/[email protected]/dog'], 0.0)
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/[email protected]/elephant'], 0.0)
self.assertAlmostEqual(
metrics['OpenImagesV2_PerformanceByCategory/[email protected]/cat'], 0.16666666)
self.assertAlmostEqual(metrics['OpenImagesV2_Precision/[email protected]'],
0.05555555)
oiv2_evaluator.clear()
self.assertFalse(oiv2_evaluator._image_ids)
class OpenImagesDetectionChallengeEvaluatorTest(tf.test.TestCase):
def test_returns_correct_metric_values(self):
categories = [{
'id': 1,
'name': 'cat'
}, {
'id': 2,
'name': 'dog'
}, {
'id': 3,
'name': 'elephant'
}]
oivchallenge_evaluator = (
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator(
categories, group_of_weight=0.5))
image_key = 'img1'
groundtruth_boxes = np.array(
[[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]], dtype=float)
groundtruth_class_labels = np.array([1, 3, 1], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
groundtruth_verified_labels = np.array([1, 2, 3], dtype=int)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list,
standard_fields.InputDataFields.groundtruth_image_classes:
groundtruth_verified_labels,
})
image_key = 'img2'
groundtruth_boxes = np.array(
[[10, 10, 11, 11], [500, 500, 510, 510], [10, 10, 12, 12]], dtype=float)
groundtruth_class_labels = np.array([1, 1, 3], dtype=int)
groundtruth_is_group_of_list = np.array([False, False, True], dtype=bool)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels,
standard_fields.InputDataFields.groundtruth_group_of:
groundtruth_is_group_of_list
})
image_key = 'img3'
groundtruth_boxes = np.array([[0, 0, 1, 1]], dtype=float)
groundtruth_class_labels = np.array([2], dtype=int)
oivchallenge_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels
})
image_key = 'img1'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120]], dtype=float)
detected_class_labels = np.array([2, 2], dtype=int)
detected_scores = np.array([0.7, 0.8], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
image_key = 'img2'
detected_boxes = np.array(
[[10, 10, 11, 11], [100, 100, 120, 120], [100, 100, 220, 220],
[10, 10, 11, 11]],
dtype=float)
detected_class_labels = np.array([1, 1, 2, 3], dtype=int)
detected_scores = np.array([0.7, 0.8, 0.5, 0.9], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
image_key = 'img3'
detected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
detected_class_labels = np.array([2], dtype=int)
detected_scores = np.array([0.5], dtype=float)
oivchallenge_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
detected_boxes,
standard_fields.DetectionResultFields.detection_scores:
detected_scores,
standard_fields.DetectionResultFields.detection_classes:
detected_class_labels
})
metrics = oivchallenge_evaluator.evaluate()
self.assertAlmostEqual(
metrics['OpenImagesChallenge2018_PerformanceByCategory/[email protected]/dog'],
0.3333333333)
self.assertAlmostEqual(
metrics[
'OpenImagesChallenge2018_PerformanceByCategory/[email protected]/elephant'],
0.333333333333)
self.assertAlmostEqual(
metrics['OpenImagesChallenge2018_PerformanceByCategory/[email protected]/cat'],
0.142857142857)
self.assertAlmostEqual(
metrics['OpenImagesChallenge2018_Precision/[email protected]'], 0.269841269)
oivchallenge_evaluator.clear()
self.assertFalse(oivchallenge_evaluator._image_ids)
class PascalEvaluationTest(tf.test.TestCase):
def test_returns_correct_metric_values_on_boxes(self):
categories = [{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'},
{'id': 3, 'name': 'elephant'}]
# Add groundtruth
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories)
image_key1 = 'img1'
groundtruth_boxes1 = np.array([[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
dtype=float)
groundtruth_class_labels1 = np.array([1, 3, 1], dtype=int)
pascal_evaluator.add_single_ground_truth_image_info(
image_key1,
{standard_fields.InputDataFields.groundtruth_boxes: groundtruth_boxes1,
standard_fields.InputDataFields.groundtruth_classes:
groundtruth_class_labels1,
standard_fields.InputDataFields.groundtruth_difficult:
|
np.array([], dtype=bool)
|
numpy.array
|
import pytest
import numpy as np
from damuta import load_sigs, sim_from_sigs, sim_parametric
def test_from_sigs_seeding(sig_defs):
# same seed
d0, p0 = sim_from_sigs(sig_defs, 0.1, 10, 1000, 5, np.random.default_rng(100))
d1, p1 = sim_from_sigs(sig_defs, 0.1, 10, 1000, 5, np.random.default_rng(100))
assert np.all(d0==d1) and np.all([np.all(p0[x] == p1[x]) for x in p0.keys()]), 'Seeding not reproducible'
# diff seed
d0, p0 = sim_from_sigs(sig_defs, 0.1, 10, 1000, 5)
d1, p1 = sim_from_sigs(sig_defs, 0.1, 10, 1000, 5)
assert not (np.all(d0==d1) and np.all([np.all(p0[x] == p1[x]) for x in p0.keys()])), 'Different seeds produced same result'
def test_parametric_seeding():
d0, p0 = sim_parametric(5,6,10,1000,.1,.1,.1,.1,np.random.default_rng(100))
d1, p1 = sim_parametric(5,6,10,1000,.1,.1,.1,.1,np.random.default_rng(100))
assert
|
np.all(d0==d1)
|
numpy.all
|
import numpy as np
def frame_accuracy(test,gt,prev_pseudo_gt):
t = [np.sum(test[i] == gt[i]) for i in range(len(test))]
T = [len(test[i]) for i in range(len(test))]
frame_accuracy=np.sum(t)/np.sum(T)
t2 = [np.sum(prev_pseudo_gt[i] == gt[i]) for i in range(len(test))]
frame_accuracy2=np.sum(t2)/np.sum(T)
per_vid_frame_accuracy1 = np.asarray([np.sum(test[i] == gt[i]) / len(test[i]) for i in range(len(test))])
per_vid_frame_accuracy2 = np.asarray([np.sum(prev_pseudo_gt[i] == gt[i]) / len(prev_pseudo_gt[i]) for i in range(len(prev_pseudo_gt))])
print('previously:')
print(np.average(per_vid_frame_accuracy2))
print("#####")
print('currently:')
print(np.average(per_vid_frame_accuracy1))
return frame_accuracy
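# frame_accuracy pools frames over all videos: (correct frames) / (total frames).
# A toy check with two short, made-up videos:
import numpy as np
_test = [np.array([0, 1, 1]), np.array([2, 2])]
_gt = [np.array([0, 1, 0]), np.array([2, 1])]
_correct = sum(np.sum(t == g) for t, g in zip(_test, _gt))   # 2 + 1
_total = sum(len(t) for t in _test)                          # 3 + 2
assert _correct / _total == 0.6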
def fine_proximity_accuracy(err_distance):
proximity2 = []
for idx in range(np.shape(err_distance)[0]):
if err_distance[idx] <= 1:
proximity2.append(1)
else:
proximity2.append(0)
return proximity2
def proximity_accuracy(proximity2,length,err_distance,true_label,hard):
proximity1=[]
err_distance=err_distance*5
for idx in range(np.shape(err_distance)[0]):
a = (3.0 / (length*5 - 1))
b = a - 1.0
if hard:
ind = true_label[idx]
true=3+ind*5
else:
ind=np.argmax(true_label[idx][:])
true = 3 + ind * 5
sigma=5
if err_distance[idx] <= sigma * 2:
proximity2.append(1)
else:
proximity2.append(0)
if err_distance[idx] <= sigma * 1:
proximity1.append(1)
else:
proximity1.append(0)
return proximity1,proximity2
def per_class_acc(true,dist,nclass,hard): # TP/#Positives #Recall
acc=[]
if hard==False:
argmax = np.argmax(true, axis=1)
else:
argmax=true
for i in range(nclass):
inds=argmax==i
correct=np.sum(dist[inds]==0)
acc.append(correct/np.sum(inds))
return acc
def per_class_prec(true,pred,gt): #TP/#Classified as T
prec=[]
TOTAL={}
tp={}
for i,true_video in enumerate(true):
if i==340:
print(i)
for action in gt[i]:
TOTAL_temp=np.sum(pred[i]==action)
tp_temp=np.sum(pred[i][true_video==action]==action)
if action in TOTAL:
TOTAL[action] = TOTAL_temp + TOTAL[action]
tp[action]=tp_temp + tp[action]
else:
TOTAL[action]=TOTAL_temp
tp[action]=tp_temp
for act in range(len(TOTAL)):
T=np.sum(TOTAL[act])
t=
|
np.sum(tp[act])
|
numpy.sum
|
import numpy as np
import os as os
FILE_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(FILE_DIR, "data")
from scipy.interpolate import interp2d
class JumpDistFromAngle:
def __init__(self, data_path: str = ""):
        if data_path == "":
data_path = os.path.join(DATA_DIR, "jump_distances_angleweighted.csv")
use_range = list(range(38))
self.data = np.loadtxt(
data_path, skiprows=1, usecols=use_range[1:], delimiter=","
)
self.angle_data = self.data[:, 1:]
self.jump_values = self.data[:, 0]
y = np.arange(2.5, 180, 5)
self.interp = interp2d(
x=self.jump_values, y=y, z=self.angle_data.T, kind="linear"
)
def distribution_from_angle(self, input_angle: float):
"""
Bins are constructed from angles [0,5], (5,10], (10,15], ... (175,180].
which implies bin centers at 2.5, 7.5, 12.5 ...
So, interpolate / extrapolate from bin centers to obtain the correct
distribution.
"""
vals = self.interp(x=self.jump_values, y=input_angle)
return vals / np.sum(vals)
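# The bin-center convention described in the docstring above (bins [0,5],
# (5,10], ..., (175,180] with centers 2.5, 7.5, ..., 177.5) is exactly what
# np.arange(2.5, 180, 5) produces:
import numpy as np
_centers = np.arange(2.5, 180, 5)
assert len(_centers) == 36 and _centers[0] == 2.5 and _centers[-1] == 177.5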
class AngleFromAngle:
def __init__(self, data_path: str = ""):
        if data_path == "":
data_path = os.path.join(DATA_DIR, "successive_angle_probability.csv")
use_range = list(range(38))
self.data = np.loadtxt(
data_path, skiprows=1, usecols=use_range[1:], delimiter=","
)
self.angle_data = self.data[:, 1:]
self.next_angles = self.data[:, 0]
y =
|
np.arange(2.5, 180, 5)
|
numpy.arange
|
# _*_ coding: utf-8 _*_
"""
python_lda.py by xianhu
"""
import os
import numpy
import logging
from collections import defaultdict
# 全局变量
MAX_ITER_NUM = 10000 # 最大迭代次数
VAR_NUM = 20 # 自动计算迭代次数时,计算方差的区间大小
class BiDictionary(object):
"""
定义双向字典,通过key可以得到value,通过value也可以得到key
"""
def __init__(self):
"""
:key: 双向字典初始化
"""
self.dict = {} # 正向的数据字典,其key为self的key
self.dict_reversed = {} # 反向的数据字典,其key为self的value
return
def __len__(self):
"""
:key: 获取双向字典的长度
"""
return len(self.dict)
def __str__(self):
"""
:key: 将双向字典转化为字符串对象
"""
str_list = ["%s\t%s" % (key, self.dict[key]) for key in self.dict]
return "\n".join(str_list)
def clear(self):
"""
:key: 清空双向字典对象
"""
self.dict.clear()
self.dict_reversed.clear()
return
def add_key_value(self, key, value):
"""
:key: 更新双向字典,增加一项
"""
self.dict[key] = value
self.dict_reversed[value] = key
return
def remove_key_value(self, key, value):
"""
:key: 更新双向字典,删除一项
"""
if key in self.dict:
del self.dict[key]
del self.dict_reversed[value]
return
def get_value(self, key, default=None):
"""
:key: 通过key获取value,不存在返回default
"""
return self.dict.get(key, default)
def get_key(self, value, default=None):
"""
:key: 通过value获取key,不存在返回default
"""
return self.dict_reversed.get(value, default)
def contains_key(self, key):
"""
:key: 判断是否存在key值
"""
return key in self.dict
def contains_value(self, value):
"""
:key: 判断是否存在value值
"""
return value in self.dict_reversed
def keys(self):
"""
:key: 得到双向字典全部的keys
"""
return self.dict.keys()
def values(self):
"""
:key: 得到双向字典全部的values
"""
return self.dict_reversed.keys()
def items(self):
"""
:key: 得到双向字典全部的items
"""
return self.dict.items()
class CorpusSet(object):
"""
定义语料集类,作为LdaBase的基类
"""
def __init__(self):
"""
:key: 初始化函数
"""
# 定义关于word的变量
self.local_bi = BiDictionary() # id和word之间的本地双向字典,key为id,value为word
self.words_count = 0 # 数据集中word的数量(排重之前的)
self.V = 0 # 数据集中word的数量(排重之后的)
# 定义关于article的变量
self.artids_list = [] # 全部article的id的列表,按照数据读取的顺序存储
self.arts_Z = [] # 全部article中所有词的id信息,维数为 M * art.length()
self.M = 0 # 数据集中article的数量
# 定义推断中用到的变量(可能为空)
self.global_bi = None # id和word之间的全局双向字典,key为id,value为word
self.local_2_global = {} # 一个字典,local字典和global字典之间的对应关系
return
def init_corpus_with_file(self, file_name):
"""
:key: 利用数据文件初始化语料集数据。文件每一行的数据格式: id[tab]word1 word2 word3......
"""
with open(file_name, "r", encoding="utf-8") as file_iter:
self.init_corpus_with_articles(file_iter)
return
def init_corpus_with_articles(self, article_list):
"""
:key: 利用article的列表初始化语料集。每一篇article的格式为: id[tab]word1 word2 word3......
"""
# 清理数据--word数据
self.local_bi.clear()
self.words_count = 0
self.V = 0
# 清理数据--article数据
self.artids_list.clear()
self.arts_Z.clear()
self.M = 0
# 清理数据--清理local到global的映射关系
self.local_2_global.clear()
# 读取article数据
for line in article_list:
frags = line.strip().split()
if len(frags) < 2:
continue
# 获取article的id
art_id = frags[0].strip()
# 获取word的id
art_wordid_list = []
for word in [w.strip() for w in frags[1:] if w.strip()]:
local_id = self.local_bi.get_key(word) if self.local_bi.contains_value(word) else len(self.local_bi)
# 这里的self.global_bi为None和为空是有区别的
if self.global_bi is None:
# 更新id信息
self.local_bi.add_key_value(local_id, word)
art_wordid_list.append(local_id)
else:
if self.global_bi.contains_value(word):
# 更新id信息
self.local_bi.add_key_value(local_id, word)
art_wordid_list.append(local_id)
# 更新local_2_global
self.local_2_global[local_id] = self.global_bi.get_key(word)
# 更新类变量: 必须article中word的数量大于0
if len(art_wordid_list) > 0:
self.words_count += len(art_wordid_list)
self.artids_list.append(art_id)
self.arts_Z.append(art_wordid_list)
# 做相关初始计算--word相关
self.V = len(self.local_bi)
logging.debug("words number: " + str(self.V) + ", " + str(self.words_count))
# 做相关初始计算--article相关
self.M = len(self.artids_list)
logging.debug("articles number: " + str(self.M))
return
def save_wordmap(self, file_name):
"""
:key: 保存word字典,即self.local_bi的数据
"""
with open(file_name, "w", encoding="utf-8") as f_save:
f_save.write(str(self.local_bi))
return
def load_wordmap(self, file_name):
"""
:key: 加载word字典,即加载self.local_bi的数据
"""
self.local_bi.clear()
with open(file_name, "r", encoding="utf-8") as f_load:
for _id, _word in [line.strip().split() for line in f_load if line.strip()]:
self.local_bi.add_key_value(int(_id), _word.strip())
self.V = len(self.local_bi)
return
class LdaBase(CorpusSet):
"""
LDA模型的基类,相关说明:
》article的下标范围为[0, self.M), 下标为 m
》wordid的下标范围为[0, self.V), 下标为 w
》topic的下标范围为[0, self.K), 下标为 k 或 topic
》article中word的下标范围为[0, article.size()), 下标为 n
"""
def __init__(self):
"""
:key: 初始化函数
"""
CorpusSet.__init__(self)
# 基础变量--1
self.dir_path = "" # 文件夹路径,用于存放LDA运行的数据、中间结果等
self.model_name = "" # LDA训练或推断的模型名称,也用于读取训练的结果
self.current_iter = 0 # LDA训练或推断的模型已经迭代的次数,用于继续模型训练过程
self.iters_num = 0 # LDA训练或推断过程中Gibbs抽样迭代的总次数,整数值或者"auto"
self.topics_num = 0 # LDA训练或推断过程中的topic的数量,即self.K值
self.K = 0 # LDA训练或推断过程中的topic的数量,即self.topics_num值
self.twords_num = 0 # LDA训练或推断结束后输出与每个topic相关的word的个数
# 基础变量--2
self.alpha = numpy.zeros(self.K) # 超参数alpha,K维的float值,默认为50/K
self.beta = numpy.zeros(self.V) # 超参数beta,V维的float值,默认为0.01
# 基础变量--3
self.Z = [] # 所有word的topic信息,即Z(m, n),维数为 M * article.size()
# 统计计数(可由self.Z计算得到)
self.nd = numpy.zeros((self.M, self.K)) # nd[m, k]用于保存第m篇article中第k个topic产生的词的个数,其维数为 M * K
self.ndsum = numpy.zeros((self.M, 1)) # ndsum[m, 0]用于保存第m篇article的总词数,维数为 M * 1
self.nw = numpy.zeros((self.K, self.V)) # nw[k, w]用于保存第k个topic产生的词中第w个词的数量,其维数为 K * V
self.nwsum = numpy.zeros((self.K, 1)) # nwsum[k, 0]用于保存第k个topic产生的词的总数,维数为 K * 1
# 多项式分布参数变量
self.theta = numpy.zeros((self.M, self.K)) # Doc-Topic多项式分布的参数,维数为 M * K,由alpha值影响
self.phi = numpy.zeros((self.K, self.V)) # Topic-Word多项式分布的参数,维数为 K * V,由beta值影响
# 辅助变量,目的是提高算法执行效率
self.sum_alpha = 0.0 # 超参数alpha的和
self.sum_beta = 0.0 # 超参数beta的和
# 先验知识,格式为{word_id: [k1, k2, ...], ...}
self.prior_word = defaultdict(list)
# 推断时需要的训练模型
self.train_model = None
return
# --------------------------------------------------辅助函数---------------------------------------------------------
def init_statistics_document(self):
"""
:key: 初始化关于article的统计计数。先决条件: self.M, self.K, self.Z
"""
assert self.M > 0 and self.K > 0 and self.Z
# 统计计数初始化
        self.nd = numpy.zeros((self.M, self.K), dtype=int)
        self.ndsum = numpy.zeros((self.M, 1), dtype=int)
# 根据self.Z进行更新,更新self.nd[m, k]和self.ndsum[m, 0]
for m in range(self.M):
for k in self.Z[m]:
self.nd[m, k] += 1
self.ndsum[m, 0] = len(self.Z[m])
return
def init_statistics_word(self):
"""
:key: 初始化关于word的统计计数。先决条件: self.V, self.K, self.Z, self.arts_Z
"""
assert self.V > 0 and self.K > 0 and self.Z and self.arts_Z
# 统计计数初始化
        self.nw = numpy.zeros((self.K, self.V), dtype=int)
        self.nwsum = numpy.zeros((self.K, 1), dtype=int)
# 根据self.Z进行更新,更新self.nw[k, w]和self.nwsum[k, 0]
for m in range(self.M):
for k, w in zip(self.Z[m], self.arts_Z[m]):
self.nw[k, w] += 1
self.nwsum[k, 0] += 1
return
def init_statistics(self):
"""
:key: 初始化全部的统计计数。上两个函数的综合函数。
"""
self.init_statistics_document()
self.init_statistics_word()
return
def sum_alpha_beta(self):
"""
:key: 计算alpha、beta的和
"""
self.sum_alpha = self.alpha.sum()
self.sum_beta = self.beta.sum()
return
def calculate_theta(self):
"""
:key: 初始化并计算模型的theta值(M*K),用到alpha值
"""
assert self.sum_alpha > 0
self.theta = (self.nd + self.alpha) / (self.ndsum + self.sum_alpha)
return
def calculate_phi(self):
"""
:key: 初始化并计算模型的phi值(K*V),用到beta值
"""
assert self.sum_beta > 0
self.phi = (self.nw + self.beta) / (self.nwsum + self.sum_beta)
return
# ---------------------------------------------计算Perplexity值------------------------------------------------------
def calculate_perplexity(self):
"""
:key: 计算Perplexity值,并返回
"""
# 计算theta和phi值
self.calculate_theta()
self.calculate_phi()
# 开始计算
        perplexity = 0.0
for m in range(self.M):
for w in self.arts_Z[m]:
                perplexity += numpy.log(numpy.sum(self.theta[m] * self.phi[:, w]))
return
|
numpy.exp(-(perplexity / self.words_count))
|
numpy.exp
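# A tiny numeric illustration of the perplexity computed above, with made-up
# theta/phi for one document, two topics and a three-word vocabulary:
import numpy as np
_theta = np.array([[0.6, 0.4]])                 # doc-topic, shape (M=1, K=2)
_phi = np.array([[0.5, 0.3, 0.2],
                 [0.1, 0.1, 0.8]])              # topic-word, shape (K=2, V=3)
_words = [0, 2, 2]                              # word ids of the single document
_loglik = sum(np.log(np.sum(_theta[0] * _phi[:, w])) for w in _words)
_ppl = np.exp(-_loglik / len(_words))
assert _ppl > 1.0                               # lower is better; 1.0 is the floor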
|
from __future__ import annotations
from typing import Iterable, Optional
import contextlib
import math
import numpy as np
import scipy.linalg
import scipy.spatial
from .utils import temporary_seed
__all__ = [
"divisors",
"lcm",
"linearly_independent",
"nearest_points",
"nontrivial_vector",
"normalize",
"orthogonal_decomp",
"periodicity",
"perpendicular_vector",
"reflection_matrix",
"rotation_matrix",
"sample_spherical_lune",
"sample_spherical_triangle",
"spherical_excess",
"tetrahedron_volume",
]
def divisors(n: int) -> list[int]:
"""Find all positive integer divisors of a given positive integer.
Parameters
----------
n : int
Number whose divisors are to be found.
Returns
-------
list[int]
List of divisors of :math:`n`.
"""
if n == 1:
return [1]
d = [1, n]
sqrt = math.ceil(math.sqrt(n))
for k in range(2, sqrt):
if n % k == 0:
d.extend([k, n // k])
if n == sqrt ** 2 and sqrt not in d:
d.append(sqrt)
return sorted(d)
def lcm(numbers: Iterable[int]) -> int:
"""Find least common multiple of a list of integers.
Parameters
----------
numbers : Iterable[int]
Integers whose least common multiple is to be found
Returns
-------
int
Least common multiple
"""
a, *b = numbers
if len(b) > 1:
return lcm(numbers=(a, lcm(numbers=b)))
else:
[b] = b
return a * b // math.gcd(a, b)
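# A couple of quick sanity checks for the two helpers above, with
# hand-computed expected values:
assert divisors(12) == [1, 2, 3, 4, 6, 12]
assert lcm(numbers=[4, 6, 10]) == 60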
def linearly_independent(
vectors: np.ndarray, indep: Optional[np.ndarray] = None
) -> np.ndarray:
"""Select a linearly independent subset of vectors from the given set.
Parameters
----------
vectors : np.ndarray
Array of :math:`N` :math:`k`-dimensional candidate vectors.
Shape: :math:`(k,N)`
indep : Optional[np.ndarray], optional
Array of :math:`M` known linearly independent :math:`k`-dimensional vectors.
By default None.
Shape: :math:`(k,M)`
Returns
-------
np.ndarray
Linearly independent subset of vectors
"""
# vectors is kxNv array where Nv is number of vectors
# indep is kxNi array where Ni is number of linearly independent vectors
if indep is None:
indep = np.array([[]])
k, _ = vectors.shape # dimension of vectors
_, n = indep.shape # number of independent vectors
arrays = (np.vstack([*indep.T, v]) for v in vectors.T)
try:
indep = next(
a for a in arrays if scipy.linalg.null_space(a).shape[-1] == k - n - 1
).T
return linearly_independent(vectors=vectors, indep=indep)
except StopIteration:
return indep
def nearest_points(points: np.ndarray, m: int) -> np.ndarray:
"""
Find the nearest points from a given list of points.
Parameters
----------
points : numpy.ndarray
Array of :math:`n` points in :math:`d`-dimensional space.
Shape: :math:`(d,n)`
m : int
number of nearest points to find
Returns
-------
numpy.ndarray:
Array of the :math:`m` nearest points from the given list of points.
Shape: :math:`(d,m)`
"""
tree = scipy.spatial.KDTree(points.T)
# this ensures that the first loop finds the two nearest points of all the points
centroid = points
for i in range(m - 1):
dists, inds = tree.query(centroid.T, k=2 + i)
if i == 0:
nn_dists = dists[:, -1]
inds = inds[np.argmin(nn_dists), :]
cluster = points.T[inds].T
centroid = cluster.mean(axis=1)
return cluster
def nontrivial_vector(R: np.ndarray, seed: Optional[int] = None) -> np.ndarray:
"""
Generates a random vector acted upon nontrivially
(i.e. is sent to a linearly independent vector)
by the given rotation matrix.
Parameters
----------
R: numpy.ndarray
Array representing a rotation matrix.
Shape: :math:`(3,3)`
Returns
-------
numpy.ndarray:
Array representing a vector which is acted upon nontrivially by R.
Shape: :math:`(3,1)`
"""
identity = np.eye(3)
if (
np.allclose(R, identity)
or np.allclose(R, -identity)
or np.allclose(R, np.zeros_like(identity))
):
return None
# get the eigenvectors with real (i.e. 1 or -1) eigenvalues,
# since these are mapped to colinear vectors by R
# each column of evecs is an eigenvector of R
evals, evecs = scipy.linalg.eig(R)
real_eigenbasis = np.real(evecs.T[np.isclose(np.imag(evals), 0)].T)
# form the linear combination of the "trivial" eigenvectors
# get random coefficients between 1 and 2 so that 0 is never chosen
# the result is guaranteed to be mapped to a linearly independent vector
# by R because the "trivial" eigenvectors do not all have the same eigenvalues
# this is true because R is not proportional to the identity matrix
with temporary_seed(seed=seed) if seed is not None else contextlib.nullcontext():
coeffs = np.random.uniform(low=1, high=2, size=(real_eigenbasis.shape[1], 1))
return normalize(v=real_eigenbasis @ coeffs)
def normalize(v: np.ndarray) -> np.ndarray:
"""Normalize a vector.
Parameters
----------
v : numpy.ndarray
Vector to be normalized.
Shape: :math:`(3,1)`
Returns
-------
numpy.ndarray
Normalized vector.
Shape: :math:`(3,1)`
"""
norm = np.linalg.norm(v)
return v / norm if norm > 0 else v
def orthogonal_decomp(v: np.ndarray, u: np.ndarray) -> np.ndarray:
"""Decompose a vector into a projection and the orthogonal complement.
Parameters
----------
v : numpy.ndarray
Array to be decomposed.
Shape: :math:`(3,1)`
u : numpy.ndarray
Array defining the subspace decomposition.
Shape: :math:`(3,1)`
Returns
-------
numpy.ndarray
Projection of :math:`v` along :math:`u`.
Shape: :math:`(3,1)`
numpy.ndarray
Orthogonal complement of :math:`v` relative to :math:`u`.
Shape: :math:`(3,1)`
"""
a = normalize(u)
projection = np.dot(v.T, a) * a
complement = v - projection
return projection, complement
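# Sanity check for orthogonal_decomp(): the two parts sum back to v and the
# complement is orthogonal to u (made-up column vectors):
import numpy as np
_v = np.array([[1.0, 2.0, 3.0]]).T
_u = np.array([[0.0, 0.0, 2.0]]).T
_proj, _comp = orthogonal_decomp(_v, _u)
assert np.allclose(_proj + _comp, _v)
assert np.isclose(np.dot(_comp.T, _u).item(), 0.0)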
def periodicity(matrix: np.ndarray) -> int:
# if A is periodic, then its eigenvalues are roots of unity,
# and its periodicity is the lcm of the periodicities of these roots of unity
# kth roots of unity form the vertices of
# a regular k-gon with internal angles 2*pi/k
# the angle between two such vertices z1=a+jb and
# z2=c+jd is given by cos(theta) = a*c + b*d = Re(z1*conj(z2))
# choose z2 = z1**2 (clearly z2 is still a root of unity);
# then z1*conj(z2) = exp(2*pi*j/k)*exp(-4*pi*j/k) = exp(-2*pi*j/k)
# then Re(z1*conj(z2)) = Re(exp(-2*pi*j/k)) = cos(2*pi*j/k) = Re(z1)
# so 2*pi*j/k = arccos(Re(z1)) -> j/k = arccos(Re(z1))/(2*pi),
# and k = lcm(k/j1, k/j2,...)
evals = scipy.linalg.eigvals(matrix)
angles = (max(min(z.real, 1), -1) for z in evals if not np.isclose(z, 1))
# if z is close to 1, then it contributes a period of 1,
# which doesn't impact the lcm and therefore the final period
periods = [int((2 * np.pi / np.arccos(angle)).round()) for angle in angles]
if len(periods) == 0:
# all evals must have been close to 1
return 1
else:
return lcm(numbers=periods)
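# A worked example of the reasoning above: a 90-degree rotation about z has
# eigenvalues {1, i, -i}; the non-unit eigenvalues are 4th roots of unity, so
# the expected period is 4.
import numpy as np
_Rz90 = np.array([[0.0, -1.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 0.0, 1.0]])
assert periodicity(_Rz90) == 4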
def perpendicular_vector(a: np.ndarray, b: Optional[np.ndarray] = None) -> np.ndarray:
"""
Generates a unit vector which is perpendicular to
one or two given nonzero vector(s).
Parameters
----------
a: numpy.ndarray
Array representing a nonzero vector.
Shape: :math:`(3,1)`
b: numpy.ndarray
Array representing a nonzero vector.
Shape: :math:`(3,1)`
Returns
-------
numpy.ndarray:
Array representing a unit vector which
is perpendicular to a (and b, if applicable).
Shape: :math:`(3,1)`
"""
if b is None:
m = np.zeros(a.shape)
# storing in variable for reuse
ravel_a = np.ravel(a)
# index of the first nonzero element of a
i = (ravel_a != 0).argmax()
# first index of a which is not i
j = next(ind for ind in range(len(ravel_a)) if ind != i)
# unravel indices for 3x1 arrays m and a
i, j = (
np.unravel_index(i, a.shape),
np.unravel_index(j, a.shape),
)
# make m = np.array([[-ay,ax,0]]).T so np.dot(m.T,a) = -ax*ay + ax*ay = 0
m[j] = a[i]
m[i] = -a[j]
else:
m = np.cross(a.T, b.T).T
return normalize(v=m)
def reflection_matrix(normal: np.ndarray) -> np.ndarray:
"""Generates a reflection matrix given a normal vector.
Parameters
----------
normal : numpy.ndarray
Array normal to the reflection plane.
Shape: :math:`(3,1)`
Returns
-------
numpy.ndarray
Reflection matrix.
Shape: :math:`(3,3)`
"""
n = normalize(normal)
return np.eye(3) - 2 * np.outer(n, n)
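# Sanity check for reflection_matrix(): reflecting through the plane with
# normal n maps n to -n and leaves an in-plane vector unchanged:
import numpy as np
_n = np.array([[0.0, 0.0, 1.0]]).T
_R = reflection_matrix(_n)
assert np.allclose(_R @ _n, -_n)
assert np.allclose(_R @ np.array([[1.0, 0.0, 0.0]]).T, np.array([[1.0, 0.0, 0.0]]).T)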
def rotation_matrix(
axis: Optional[np.ndarray] = None,
theta: Optional[float] = None,
m: Optional[np.ndarray] = None,
n: Optional[np.ndarray] = None,
improper: Optional[bool] = False,
) -> np.ndarray:
"""Generates a rotation matrix given an axis and angle.
Parameters
----------
axis: numpy.ndarray
Axis of rotation.
Shape: :math:`(3,1)`
theta: float
Angle of rotation (in radians) about the axis of rotation.
m: numpy.ndarray
The vector to be rotated.
Shape: :math:`(3,1)`
n: numpy.ndarray
The vector after rotation, i.e. the target vector.
Shape: :math:`(3,1)`
improper: bool
If true, return an improper rotation.
By default False.
Returns
-------
numpy.ndarray:
Rotation matrix.
Shape: :math:`(3,3)`
"""
if m is not None and n is not None:
a = normalize(np.cross(m.T, n.T).T)
c = np.dot(normalize(m).T, normalize(n))
elif axis is not None and theta is not None:
a = normalize(axis)
c = np.cos(theta)
else:
raise ValueError("Provide either an axis and an angle or a pair of vectors.")
u1, u2, u3 = np.ravel(a)
K = np.array([[0, -u3, u2], [u3, 0, -u1], [-u2, u1, 0]])
s = np.sqrt(1 - c ** 2)
R = (np.eye(3) + s * K + (1 - c) * (K @ K)).astype("float64")
if improper:
R = R @ reflection_matrix(a)
R, _ = scipy.linalg.polar(R)
return R
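# The closed form above is the Rodrigues rotation formula; rotating x by 90
# degrees about z should give y (all vectors are (3, 1) columns):
import numpy as np
_z = np.array([[0.0, 0.0, 1.0]]).T
_x = np.array([[1.0, 0.0, 0.0]]).T
_y = np.array([[0.0, 1.0, 0.0]]).T
assert np.allclose(rotation_matrix(axis=_z, theta=np.pi / 2) @ _x, _y)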
def sample_spherical_lune(n1: np.ndarray, n2: np.ndarray) -> np.ndarray:
"""Sample point on unit sphere from within lune defined by two plane normals.
Parameters
----------
n1 : np.ndarray
First plane normal.
Shape: :math:`(3,1)`
n2 : np.ndarray
Second plane normal.
Shape: :math:`(3,1)`
Returns
-------
np.ndarray
Sampled point.
Shape: :math:`(3,1)`
"""
# angular extent of lune
dphi = np.arccos(np.dot(n1.T, n2)).item()
# sample point from standard polar lune of correct angular extent
eps = np.finfo(float).eps
phi = np.random.uniform(low=eps, high=dphi)
cos_theta = np.random.uniform(low=-1, high=1)
sin_theta = np.sqrt(1 - cos_theta ** 2)
q = np.array([[sin_theta * np.cos(phi), sin_theta * np.sin(phi), cos_theta]]).T
# rotate sampled point to lie within correct lune
y = np.array([[0, 1, 0]]).T
v = np.array([[-np.sin(dphi), np.cos(dphi), 0]]).T
Ru = rotation_matrix(m=y, n=n1)
_, vprime = orthogonal_decomp(Ru @ v, n1)
_, n2prime = orthogonal_decomp(n2, n1)
axis = np.cross(n2.T, vprime.T)
theta = np.arccos(np.dot(normalize(vprime).T, normalize(n2prime)))
Rv = rotation_matrix(axis=axis, theta=theta)
return (Rv @ Ru) @ q
def sample_spherical_triangle(
A: np.ndarray,
B: np.ndarray,
C: np.ndarray,
sin_alpha: float,
sin_beta: float,
sin_gamma: float,
seed: Optional[int] = None,
) -> np.ndarray:
"""Sample point from spherical triangle defined by vertices.
Parameters
----------
A : np.ndarray
First vertex coordinates.
Shape: :math:`(3,1)`
B : np.ndarray
Second vertex coordinates.
Shape: :math:`(3,1)`
C : np.ndarray
Third vertex coordinates.
Shape: :math:`(3,1)`
sin_alpha : float
Sine of angle at vertex :math:`A`.
sin_beta : float
Sine of angle at vertex :math:`B`.
sin_gamma : float
Sine of angle at vertex :math:`C`.
seed : Optional[int], optional
Random seed.
By default None.
Returns
-------
np.ndarray
Sampled point.
Shape: :math:`(3,1)`
"""
# see https://www.graphics.cornell.edu/pubs/1995/Arv95c.pdf
# a, b, and c are cross products of normal vectors, so their magnitudes
# are the sine of the angles between these normal vectors; these angles
# are also the angles between planes and therefore the great arcs which
# define the legs of the triangle; therefore, these angles are also
# the internal angles of the triangle
eps = np.finfo(float).eps # machine precision
with temporary_seed(seed=seed):
fraction, cos_theta = np.random.uniform(low=eps, high=1, size=2)
cos_alpha, cos_beta, cos_gamma = np.sqrt(
(1 - sin_alpha ** 2, 1 - sin_beta ** 2, 1 - sin_gamma ** 2)
)
area = fraction * spherical_excess(
cos_alpha=cos_alpha, cos_beta=cos_beta, cos_gamma=cos_gamma
)
cos_area, sin_area = np.cos(area), np.sin(area)
# s = sin(area - alpha)
s = sin_area * cos_alpha - cos_area * sin_alpha
# t = cos(area - alpha)
t = cos_area * cos_alpha + sin_area * sin_alpha
u = t - cos_alpha
# spherical law of cosines
v = s + (cos_gamma + cos_beta * cos_alpha) / sin_beta
q = ((v * t - u * s) * cos_alpha - v) / ((v * s + u * t) * sin_alpha)
_, x = orthogonal_decomp(C, A)
C_prime = q * A + np.sqrt(1 - q ** 2) * normalize(x)
z = 1 - cos_theta * (1 -
|
np.dot(C_prime.T, B)
|
numpy.dot
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Test for volumeutils module '''
from __future__ import with_statement
from StringIO import StringIO
import tempfile
import numpy as np
from ..tmpdirs import InTemporaryDirectory
from ..volumeutils import (array_from_file,
array_to_file,
calculate_scale,
scale_min_max,
can_cast, allopen,
make_dt_codes,
native_code,
shape_zoom_affine,
rec2dict)
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_equal, assert_raises
from ..testing import parametric
@parametric
def test_array_from_file():
shape = (2,3,4)
dtype = np.dtype(np.float32)
in_arr = np.arange(24, dtype=dtype).reshape(shape)
# Check on string buffers
offset = 0
yield assert_true(buf_chk(in_arr, StringIO(), None, offset))
offset = 10
yield assert_true(buf_chk(in_arr, StringIO(), None, offset))
# check on real file
fname = 'test.bin'
with InTemporaryDirectory() as tmpdir:
# fortran ordered
out_buf = file(fname, 'wb')
in_buf = file(fname, 'rb')
yield assert_true(buf_chk(in_arr, out_buf, in_buf, offset))
# Drop offset to check that shape's not coming from file length
out_buf.seek(0)
in_buf.seek(0)
offset = 5
yield assert_true(buf_chk(in_arr, out_buf, in_buf, offset))
del out_buf, in_buf
# Make sure empty shape, and zero length, give empty arrays
arr = array_from_file((), np.dtype('f8'), StringIO())
yield assert_equal(len(arr), 0)
arr = array_from_file((0,), np.dtype('f8'), StringIO())
yield assert_equal(len(arr), 0)
# Check error from small file
yield assert_raises(IOError, array_from_file,
shape, dtype, StringIO())
# check on real file
fd, fname = tempfile.mkstemp()
with InTemporaryDirectory():
open(fname, 'wb').write('1')
in_buf = open(fname, 'rb')
# For windows this will raise a WindowsError from mmap, Unices
# appear to raise an IOError
yield assert_raises(Exception, array_from_file,
shape, dtype, in_buf)
del in_buf
def buf_chk(in_arr, out_buf, in_buf, offset):
''' Write contents of in_arr into fileobj, read back, check same '''
instr = ' ' * offset + in_arr.tostring(order='F')
out_buf.write(instr)
out_buf.flush()
if in_buf is None: # we're using in_buf from out_buf
out_buf.seek(0)
in_buf = out_buf
arr = array_from_file(
in_arr.shape,
in_arr.dtype,
in_buf,
offset)
return np.allclose(in_arr, arr)
@parametric
def test_array_to_file():
arr = np.arange(10).reshape(5,2)
str_io = StringIO()
for tp in (np.uint64, np.float, np.complex):
dt =
|
np.dtype(tp)
|
numpy.dtype
|
# Copyright (c) 2020, <NAME>.
# Distributed under the MIT License. See LICENSE for more info.
"""A module for generating scatter plots of variables."""
from itertools import combinations
import warnings
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
from scipy.special import comb
from .colors import generate_class_colors, generate_colors
from .common import (
add_xy_line,
add_trendline,
create_fig_and_axes,
iqr_outlier,
get_figure_kwargs,
)
_WARNING_MAX_PLOTS = (
'This will generate {0} plots. If you want to generate '
'all these plots, rerun the function with the '
'argument "max_plots={0}".'
)
def create_scatter_legend(axi, color_labels, class_names, show=False,
**kwargs):
"""Generate a legend for a scatter plot with class labels.
Parameters
----------
axi : object like :class:`matplotlib.axes.Axes`
The axes we will add the legend for.
color_labels : dict of objects like :class:`numpy.ndarray`
Colors for the different classes.
    class_names : dict of strings
Names for the classes.
show : boolean, optional
If True, we will add the legend here.
kwargs : dict, optional
Additional arguments passed to the scatter method. Used
here to get a consistent styling.
Returns
-------
patches : list of objects like :class:`matplotlib.artist.Artist`
The items we will create a legend for.
labels : list of strings
The labels for the legend.
"""
patches, labels = [], []
for key, val in color_labels.items():
patches.append(
axi.scatter([], [], color=val, **kwargs)
)
if class_names is not None:
label = class_names.get(key, key)
else:
label = key
labels.append(label)
if show:
axi.legend(patches, labels, ncol=1)
return patches, labels
def plot_scatter(data, xvar, yvar, axi=None, xlabel=None, ylabel=None,
class_data=None, class_names=None, highlight=None,
cmap_class=None, **kwargs):
"""Make a 2D scatter plot of the given data.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we are plotting.
xvar : string
The column to use as the x-variable.
yvar : string
The column to use as the y-variable.
xlabel : string, optional
The label to use for the x-axis. If None, we will use xvar.
ylabel : string, optional
The label to use for the y-axis. If None, we will use yvar.
axi : object like :class:`matplotlib.axes.Axes`, optional
An axis to add the plot to. If this is not provided,
a new axis (and figure) will be created here.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings
A mapping from the class data to labels/names.
highlight : list of integers, optional
This can be used to highlight certain points in the plot.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
kwargs : dict, optional
Additional settings for the plotting.
Returns
-------
fig : object like :class:`matplotlib.figure.Figure`
The figure containing the plot.
axi : object like :class:`matplotlib.axes.Axes`
The axis containing the plot.
patches : list of objects like :class:`matplotlib.artist.Artist`
The items we will create a legend for.
labels : list of strings
The labels for the legend.
"""
patches, labels = [], []
color_class, color_labels, idx_class = generate_class_colors(
class_data, cmap=cmap_class
)
fig = None
if axi is None:
fig_kw = get_figure_kwargs(kwargs)
fig, axi = plt.subplots(**fig_kw)
if xvar is None:
axi.set(xlabel='Data point no.', ylabel=yvar)
xdata = np.arange(len(data[yvar]))
else:
xlabel = xvar if xlabel is None else xlabel
ylabel = yvar if ylabel is None else ylabel
axi.set(xlabel=xlabel, ylabel=ylabel)
xdata = data[xvar]
ydata = data[yvar]
if class_data is None:
axi.scatter(xdata, ydata, **kwargs.get('scatter', {}))
else:
for class_id, idx in idx_class.items():
axi.scatter(
xdata[idx],
ydata[idx],
color=color_class[class_id],
**kwargs.get('scatter', {}),
)
patches, labels = create_scatter_legend(
axi, color_labels, class_names, **kwargs.get('scatter', {}),
)
if highlight is not None:
scat = axi.scatter(
xdata[highlight],
ydata[highlight],
**kwargs.get('scatter-outlier', {}),
)
patches.append(scat)
labels.append(scat.get_label())
return fig, axi, patches, labels
def generate_1d_scatter(data, variables, class_data=None,
class_names=None, nrows=None, ncols=None,
sharex=False, sharey=False, show_legend=True,
outliers=False, cmap_class=None, **kwargs):
"""Generate 1D scatter plots from the given data and variables.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we will plot here.
variables : list of strings
The variables we will generate scatter plots for.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
nrows : integer, optional
The number of rows to use in a figure.
ncols : integer, optional
The number of columns to use in a figure.
sharex : boolean, optional
If True, the scatter plots will share the x-axis.
sharey : boolean, optional
If True, the scatter plots will share the y-axis.
show_legend : boolean, optional
If True, we will create a legend here and show it.
outliers : boolean, optional
If True, we will try to mark outliers in the plot.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
figures : list of objects like :class:`matplotlib.figure.Figure`
The figures containing the plots.
axes : list of objects like :class:`matplotlib.axes.Axes`
The axes containing the plots.
"""
nplots = len(variables)
figures, axes = create_fig_and_axes(
nplots, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
**kwargs,
)
outlier_points = {}
bounds = [{}, {}]
if outliers:
_, outlier_points, bounds = iqr_outlier(data, variables)
fig = None
for i, yvar in enumerate(variables):
        show_legend_ax = False
if axes[i].figure != fig:
fig = axes[i].figure
            show_legend_ax = True
highlight = None
if outliers:
highlight = outlier_points.get(yvar, None)
_, _, patches, labels = plot_scatter(
data,
None,
yvar,
axi=axes[i],
class_data=class_data,
class_names=class_names,
highlight=highlight,
cmap_class=cmap_class,
**kwargs,
)
if outliers:
lower = bounds[0].get(yvar, None)
upper = bounds[1].get(yvar, None)
if lower is not None:
axes[i].axhline(y=lower, ls=':', color='#262626')
if upper is not None:
axes[i].axhline(y=upper, ls=':', color='#262626')
        if show_legend and show_legend_ax and patches and labels:
axes[i].legend(patches, labels)
return figures, axes, outlier_points
def generate_2d_scatter(data, variables, class_data=None, class_names=None,
nrows=None, ncols=None, sharex=False, sharey=False,
show_legend=True, xy_line=False, trendline=False,
cmap_class=None, shorten_variables=False,
**kwargs):
"""Generate 2D scatter plots from the given data and variables.
This method will generate 2D scatter plots for all combinations
of the given variables.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we will plot here.
variables : list of strings
The variables we will generate scatter plots for.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
nrows : integer, optional
The number of rows to use in a figure.
ncols : integer, optional
The number of columns to use in a figure.
sharex : boolean, optional
If True, the scatter plots will share the x-axis.
sharey : boolean, optional
If True, the scatter plots will share the y-axis.
show_legend : boolean, optional
If True, we will create a legend here and show it.
xy_line : boolean, optional
If True, we will add a x=y line to the plot.
trendline : boolean, optional
If True, we will add a trend line to the plot.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
figures : list of objects like :class:`matplotlib.figure.Figure`
The figures containing the plots.
axes : list of objects like :class:`matplotlib.axes.Axes`
The axes containing the plots.
"""
nplots = comb(len(variables), 2, exact=True)
figures, axes = create_fig_and_axes(
nplots, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
**kwargs,
)
fig = None
for i, (xvar, yvar) in enumerate(combinations(variables, 2)):
# We do not want to repeat the legend in all subplots:
show_legend_ax = False
if axes[i].figure != fig:
fig = axes[i].figure
show_legend_ax = True
xlabel = None
ylabel = None
if shorten_variables:
if len(xvar) > 5:
xlabel = xvar[:3] + '...'
if len(yvar) > 5:
ylabel = yvar[:3] + '...'
_, _, patches, labels = plot_scatter(
data,
xvar,
yvar,
axi=axes[i],
xlabel=xlabel,
ylabel=ylabel,
class_data=class_data,
class_names=class_names,
cmap_class=cmap_class,
**kwargs,
)
if xy_line:
line_xy = add_xy_line(axes[i], alpha=0.7, color='black')
patches.append(line_xy)
labels.append('x = y')
if trendline:
line_trend = add_trendline(axes[i], data[xvar], data[yvar],
alpha=0.7, ls='--', color='black')
patches.append(line_trend)
labels.append('y = a + bx')
if show_legend and show_legend_ax and patches and labels:
axes[i].legend(patches, labels)
return figures, axes
def plot_3d_scatter(data, xvar, yvar, zvar, class_data=None,
class_names=None, cmap_class=None, **kwargs):
"""Make a 3D scatter plot of the given data.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we are plotting.
xvar : string
The column to use as the x-variable.
yvar : string
The column to use as the y-variable.
zvar : string
The column to use as the z-variable
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
fig : object like :class:`matplotlib.figure.Figure`
The figure containing the plot.
axi : object like :class:`matplotlib.axes.Axes`, optional
The axis containing the plot.
"""
color_class, color_labels, idx_class = generate_class_colors(
class_data, cmap=cmap_class
)
fig = plt.figure()
axi = fig.add_subplot(111, projection='3d')
axi.set_xlabel(xvar, labelpad=15)
axi.set_ylabel(yvar, labelpad=15)
axi.set_zlabel(zvar, labelpad=15)
if class_data is None:
axi.scatter(data[xvar], data[yvar], data[zvar], **kwargs)
else:
for class_id, idx in idx_class.items():
axi.scatter(
data[xvar][idx],
data[yvar][idx],
data[zvar][idx],
color=color_class[class_id],
**kwargs
)
create_scatter_legend(
axi, color_labels, class_names, show=True, **kwargs
)
fig.tight_layout()
return fig, axi
def generate_3d_scatter(data, variables, class_data=None, class_names=None,
max_plots=5, **kwargs):
"""Generate 3D scatter plots from the given data and variables.
This method will generate 3D scatter plots for all combinations
of the given variables. Note that if the number of plots is large,
then no plots will be generated and a warning will be issued. The
maximum number of plots to create can be set with the parameter
`max_plots`
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we will plot here.
variables : list of strings
The variables we will generate scatter plots for.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
max_plots : integer, optional
The maximum number of plots to create.
kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
figures : list of objects like :class:`matplotlib.figure.Figure`
The figures created here.
axes : list of objects like :class:`matplotlib.axes.Axes`
The axes created here.
"""
figures = []
axes = []
if len(variables) < 3:
raise ValueError(
'For generating 3D plots, at least 3 variables must be provided.'
)
nplots = comb(len(variables), 3, exact=True)
if nplots > max_plots:
msg = _WARNING_MAX_PLOTS.format(nplots)
warnings.warn(msg)
return figures, axes
for (xvar, yvar, zvar) in combinations(variables, 3):
figi, axi = plot_3d_scatter(
data,
xvar,
yvar,
zvar,
class_data=class_data,
class_names=class_names,
**kwargs
)
figures.append(figi)
axes.append(axi)
return figures, axes
def scatter_1d_flat(data, class_data=None, class_names=None, scaler=None,
add_average=False, add_lines=False,
cmap_lines=None, cmap_class=None, split_class=False,
scatter_settings=None, line_settings=None):
"""Make a flat plot of several variables.
Here, the points on the x-axis are the variables, while
the y-values are points for each data series.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we are plotting.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings
A mapping from the class data to labels/names.
scaler : callable, optional
A function that can be used to scale the variables.
add_average : boolean, optional
If True, we will show the averages for each variable.
add_lines : boolean, optional
If True, we will show lines for each "measurement".
cmap_lines : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for lines.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
split_class : boolean, optional
If True, the plot with class information will be split into
one plot for each class.
scatter_settings : dict, optional
Additional settings for the scatter plot.
line_settings : dict, optional
Additional settings for plotting lines.
Returns
-------
figures : objects like :class:`matplotlib.figure.Figure`
The figure created here.
axes : object(s) like :class:`matplotlib.axes.Axes`
The axes created here.
"""
if class_data is None:
return _scatter_1d_flat_no_class(data, scaler=scaler,
add_average=add_average,
add_lines=add_lines,
cmap_lines=cmap_lines,
line_settings=line_settings,
scatter_settings=scatter_settings)
return _scatter_1d_flat_class(data, class_data,
split_class=split_class,
class_names=class_names,
scaler=scaler,
cmap_class=cmap_class,
add_lines=add_lines,
add_average=add_average,
line_settings=line_settings,
scatter_settings=scatter_settings)
def _get_settings_if_empty(settings):
"""Get settings if None are given."""
if settings is None:
return {}
return settings
def _scatter_1d_flat_no_class(data, scaler=None, add_average=False,
add_lines=False, cmap_lines=None,
scatter_settings=None,
line_settings=None):
"""Make a flat plot of several variables.
Here, the points on the x-axis are the variables, while
the y-values are points for each data series.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we are plotting.
scaler : callable, optional
A function that can be used to scale the variables.
add_average : boolean, optional
If True, we will show the averages for each variable.
add_lines : boolean, optional
If True, we will show lines for each "measurement".
cmap_lines : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for lines.
scatter_settings : dict, optional
Additional settings for the scatter plot.
line_settings : dict, optional
Additional settings for plotting lines.
Returns
-------
fig : object like :class:`matplotlib.figure.Figure`
The figure containing the plot.
axi : object like :class:`matplotlib.axes.Axes`
The axis containing the plot.
"""
fig, axi = plt.subplots(constrained_layout=True)
variables = data.columns
axi.set_xticks(range(len(variables)))
axi.set_xticklabels(variables, rotation='vertical')
yvalues = []
xvalues = []
if scaler is not None:
axi.set_ylabel('Scaled values')
else:
axi.set_ylabel('Values')
for i, variable in enumerate(variables):
yval = data[variable]
if scaler is not None:
yval = scaler(yval)
yvalues.append(yval)
xvalues.append(np.full_like(yval, i))
yvalues = np.array(yvalues)
xvalues =
|
np.array(xvalues)
|
numpy.array
|
import numpy as np
import pandas as pd
import scipy.stats
def calcNormFactors(counts, lib_size=None, method="none", refColumn=None, logratioTrim=0.3, sumTrim=0.05, doWeighting=True, Acutoff=-1e10, p=0.75):
"""
Scale normalization of RNA-Seq data, for count matrices.
Original version in R by <NAME>, <NAME> and edgeR team (2010).
Porting from R to python by <NAME>.
"""
# check counts
if len(counts.shape) != 2:
raise ValueError("counts must have two dimensions")
if np.any(
|
np.isnan(counts)
|
numpy.isnan
|
"""
File containing analyses for readout.
This includes
- readout discrimination analysis
- single shot readout analysis
- multiplexed readout analysis (to be updated!)
Originally written by Adriaan, updated/rewritten by <NAME> 2018
"""
import itertools
from copy import deepcopy
import matplotlib.pyplot as plt
import lmfit
from collections import OrderedDict
import numpy as np
import pycqed.analysis.fitting_models as fit_mods
from pycqed.analysis.fitting_models import ro_gauss, ro_CDF, ro_CDF_discr, gaussian_2D, gauss_2D_guess, gaussianCDF, ro_double_gauss_guess
import pycqed.analysis.analysis_toolbox as a_tools
import pycqed.analysis_v2.base_analysis as ba
import pycqed.analysis_v2.simple_analysis as sa
from scipy.optimize import minimize
from pycqed.analysis.tools.plotting import SI_val_to_msg_str, \
set_xlabel, set_ylabel, set_cbarlabel, flex_colormesh_plot_vs_xy
from pycqed.analysis_v2.tools.plotting import scatter_pnts_overlay
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pycqed.analysis.tools.data_manipulation as dm_tools
from pycqed.utilities.general import int2base
from pycqed.utilities.general import format_value_string
class Singleshot_Readout_Analysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', do_fitting: bool = True,
data_file_path: str=None,
options_dict: dict=None, auto=True,
**kw):
'''
options dict options:
'fixed_p10' fixes p(e|g) res_exc (do not vary in fit)
'fixed_p01' : fixes p(g|pi) mmt_rel (do not vary in fit)
'auto_rotation_angle' : (bool) automatically find the I/Q mixing angle
'rotation_angle' : manually define the I/Q mixing angle (ignored if auto_rotation_angle is set to True)
'nr_bins' : number of bins to use for the histograms
'post_select' : (bool) sets on or off the post_selection based on an initialization measurement (needs to be in agreement with nr_samples)
'post_select_threshold' : (float) threshold used for post-selection (only activated by above parameter)
'nr_samples' : amount of different samples (e.g. ground and excited = 2 and with post-selection = 4)
'sample_0' : index of first sample (ground-state)
'sample_1' : index of second sample (first excited-state)
'max_datapoints' : maximum number of datapoints for the cumulative fit
'log_hist' : use log scale for the y-axis of the 1D histograms
'verbose' : see BaseDataAnalysis
'presentation_mode' : see BaseDataAnalysis
see BaseDataAnalysis for more.
'''
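        # Illustrative options_dict (values are placeholders, not defaults):
        # options_dict = {'nr_bins': 120, 'auto_rotation_angle': True,
        #                 'post_select': False, 'nr_samples': 2,
        #                 'sample_0': 0, 'sample_1': 1}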
super().__init__(t_start=t_start, t_stop=t_stop,
label=label, do_fitting=do_fitting,
data_file_path=data_file_path,
options_dict=options_dict,
**kw)
self.single_timestamp = True
self.params_dict = {
'measurementstring': 'measurementstring',
'measured_values': 'measured_values',
'value_names': 'value_names',
'value_units': 'value_units'}
self.numeric_params = []
# Determine the default for auto_rotation_angle
man_angle = self.options_dict.get('rotation_angle', False) is False
self.options_dict['auto_rotation_angle'] = self.options_dict.get(
'auto_rotation_angle', man_angle)
self.predict_qubit_temp = 'predict_qubit_temp' in self.options_dict
if self.predict_qubit_temp:
self.qubit_freq = self.options_dict['qubit_freq']
if auto:
self.run_analysis()
def process_data(self):
"""
Responsible for creating the histograms based on the raw data
"""
post_select = self.options_dict.get('post_select', False)
post_select_threshold = self.options_dict.get(
'post_select_threshold', 0)
nr_samples = self.options_dict.get('nr_samples', 2)
sample_0 = self.options_dict.get('sample_0', 0)
sample_1 = self.options_dict.get('sample_1', 1)
nr_bins = int(self.options_dict.get('nr_bins', 100))
######################################################
# Separating data into shots for 0 and shots for 1 #
######################################################
meas_val = self.raw_data_dict['measured_values']
unit = self.raw_data_dict['value_units'][0]
# loop through channels
shots = np.zeros((2, len(meas_val),), dtype=np.ndarray)
for j, dat in enumerate(meas_val):
assert unit == self.raw_data_dict['value_units'][
j], 'The channels have been measured using different units. This is not supported yet.'
sh_0, sh_1 = get_shots_zero_one(
dat, post_select=post_select, nr_samples=nr_samples,
post_select_threshold=post_select_threshold,
sample_0=sample_0, sample_1=sample_1)
shots[0, j] = sh_0
shots[1, j] = sh_1
#shots = np.array(shots, dtype=float)
# Do we have two quadratures?
if len(meas_val) == 2:
########################################################
# Bin the data in 2D, to calculate the opt. angle
########################################################
data_range_x = (np.min([np.min(b) for b in shots[:, 0]]),
np.max([np.max(b) for b in shots[:, 0]]))
data_range_y = (np.min([np.min(b) for b in shots[:, 1]]),
np.max([np.max(b) for b in shots[:, 1]]))
data_range_xy = (data_range_x, data_range_y)
nr_bins_2D = int(self.options_dict.get(
'nr_bins_2D', 6*np.sqrt(nr_bins)))
H0, xedges, yedges = np.histogram2d(x=shots[0, 0],
y=shots[0, 1],
bins=nr_bins_2D,
range=data_range_xy)
H1, xedges, yedges = np.histogram2d(x=shots[1, 0],
y=shots[1, 1],
bins=nr_bins_2D,
range=data_range_xy)
binsize_x = xedges[1] - xedges[0]
binsize_y = yedges[1] - yedges[0]
bin_centers_x = xedges[:-1] + binsize_x
bin_centers_y = yedges[:-1] + binsize_y
self.proc_data_dict['2D_histogram_x'] = bin_centers_x
self.proc_data_dict['2D_histogram_y'] = bin_centers_y
self.proc_data_dict['2D_histogram_z'] = [H0, H1]
# Find and apply the effective/rotated integrated voltage
angle = self.options_dict.get('rotation_angle', 0)
auto_angle = self.options_dict.get('auto_rotation_angle', True)
if auto_angle:
##########################################
# Determining the rotation of the data #
##########################################
gauss2D_model_0 = lmfit.Model(gaussian_2D,
independent_vars=['x', 'y'])
gauss2D_model_1 = lmfit.Model(gaussian_2D,
independent_vars=['x', 'y'])
guess0 = gauss_2D_guess(model=gauss2D_model_0, data=H0.transpose(),
x=bin_centers_x, y=bin_centers_y)
guess1 = gauss_2D_guess(model=gauss2D_model_1, data=H1.transpose(),
x=bin_centers_x, y=bin_centers_y)
x2d = np.array([bin_centers_x]*len(bin_centers_y))
y2d = np.array([bin_centers_y]*len(bin_centers_x)).transpose()
fitres0 = gauss2D_model_0.fit(data=H0.transpose(), x=x2d, y=y2d,
**guess0)
fitres1 = gauss2D_model_1.fit(data=H1.transpose(), x=x2d, y=y2d,
**guess1)
fr0 = fitres0.best_values
fr1 = fitres1.best_values
x0 = fr0['center_x']
x1 = fr1['center_x']
y0 = fr0['center_y']
y1 = fr1['center_y']
self.proc_data_dict['IQ_pos'] = [[x0, x1], [y0, y1]]
dx = x1 - x0
dy = y1 - y0
mid = [x0 + dx/2, y0 + dy/2]
angle = np.arctan2(dy, dx)
else:
mid = [0, 0]
if self.verbose:
ang_deg = (angle*180/np.pi)
print('Mixing I/Q channels with %.3f degrees ' % ang_deg +
#'around point (%.2f, %.2f)%s'%(mid[0], mid[1], unit) +
' to obtain effective voltage.')
self.proc_data_dict['raw_offset'] = [*mid, angle]
# create matrix
rot_mat = [[+np.cos(-angle), -np.sin(-angle)],
[+np.sin(-angle), +np.cos(-angle)]]
# rotate data accordingly
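# Note: only the first row of the rotation matrix is applied, so both
# prepared-state shot records are projected onto the rotated I axis and
# the orthogonal quadrature is discarded.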
eff_sh = np.zeros(len(shots[0]), dtype=np.ndarray)
eff_sh[0] = np.dot(rot_mat[0], shots[0]) # - mid
eff_sh[1] = np.dot(rot_mat[0], shots[1]) # - mid
else:
# If we have only one quadrature, use that (doh!)
eff_sh = shots[:, 0]
self.proc_data_dict['all_channel_int_voltages'] = shots
# self.raw_data_dict['value_names'][0]
self.proc_data_dict['shots_xlabel'] = 'Effective integrated Voltage'
self.proc_data_dict['shots_xunit'] = unit
self.proc_data_dict['eff_int_voltages'] = eff_sh
self.proc_data_dict['nr_shots'] = [len(eff_sh[0]), len(eff_sh[1])]
sh_min = min(np.min(eff_sh[0]), np.min(eff_sh[1]))
sh_max = max(np.max(eff_sh[0]), np.max(eff_sh[1]))
data_range = (sh_min, sh_max)
eff_sh_sort = [np.sort(eff_sh[0]), np.sort(eff_sh[1])]
x0, n0 = np.unique(eff_sh_sort[0], return_counts=True)
cumsum0 = np.cumsum(n0)
x1, n1 = np.unique(eff_sh_sort[1], return_counts=True)
cumsum1 = np.cumsum(n1)
self.proc_data_dict['cumsum_x'] = [x0, x1]
self.proc_data_dict['cumsum_y'] = [cumsum0, cumsum1]
all_x = np.unique(np.sort(np.concatenate((x0, x1))))
md = self.options_dict.get('max_datapoints', 1000)
if len(all_x) > md:
all_x = np.linspace(*data_range, md)
ecumsum0 = np.interp(x=all_x, xp=x0, fp=cumsum0, left=0)
necumsum0 = ecumsum0/np.max(ecumsum0)
ecumsum1 = np.interp(x=all_x, xp=x1, fp=cumsum1, left=0)
necumsum1 = ecumsum1/np.max(ecumsum1)
self.proc_data_dict['cumsum_x_ds'] = all_x
self.proc_data_dict['cumsum_y_ds'] = [ecumsum0, ecumsum1]
self.proc_data_dict['cumsum_y_ds_n'] = [necumsum0, necumsum1]
##################################
# Binning data into histograms #
##################################
h0, bin_edges = np.histogram(eff_sh[0], bins=nr_bins,
range=data_range)
h1, bin_edges = np.histogram(eff_sh[1], bins=nr_bins,
range=data_range)
self.proc_data_dict['hist'] = [h0, h1]
binsize = (bin_edges[1] - bin_edges[0])
self.proc_data_dict['bin_edges'] = bin_edges
self.proc_data_dict['bin_centers'] = bin_edges[:-1]+binsize
self.proc_data_dict['binsize'] = binsize
#######################################################
# Threshold and fidelity based on cumulative counts #
#######################################################
# Average assignment fidelity: F_ass = (P00 + P11)/2
# where Pxy equals the probability to measure x when starting in y
F_vs_th = (1-(1-abs(necumsum0 - necumsum1))/2)
opt_idxs = np.argwhere(F_vs_th == np.amax(F_vs_th))
opt_idx = int(round(np.average(opt_idxs)))
self.proc_data_dict['F_assignment_raw'] = F_vs_th[opt_idx]
self.proc_data_dict['threshold_raw'] = all_x[opt_idx]
def prepare_fitting(self):
###################################
# First fit the histograms (PDF) #
###################################
self.fit_dicts = OrderedDict()
bin_x = self.proc_data_dict['bin_centers']
bin_xs = [bin_x, bin_x]
bin_ys = self.proc_data_dict['hist']
m = lmfit.model.Model(ro_gauss)
m.guess = ro_double_gauss_guess.__get__(m, m.__class__)
params = m.guess(x=bin_xs, data=bin_ys,
fixed_p01=self.options_dict.get('fixed_p01', False),
fixed_p10=self.options_dict.get('fixed_p10', False))
res = m.fit(x=bin_xs, data=bin_ys, params=params)
self.fit_dicts['shots_all_hist'] = {
'model': m,
'fit_xvals': {'x': bin_xs},
'fit_yvals': {'data': bin_ys},
'guessfn_pars': {'fixed_p01': self.options_dict.get('fixed_p01', False),
'fixed_p10': self.options_dict.get('fixed_p10', False)},
}
###################################
# Fit the CDF #
###################################
m_cul = lmfit.model.Model(ro_CDF)
cdf_xs = self.proc_data_dict['cumsum_x_ds']
cdf_xs = [np.array(cdf_xs), np.array(cdf_xs)]
cdf_ys = self.proc_data_dict['cumsum_y_ds']
cdf_ys = [np.array(cdf_ys[0]), np.array(cdf_ys[1])]
#cul_res = m_cul.fit(x=cdf_xs, data=cdf_ys, params=res.params)
cum_params = res.params
cum_params['A_amplitude'].value = np.max(cdf_ys[0])
cum_params['A_amplitude'].vary = False
cum_params['B_amplitude'].value = np.max(cdf_ys[1])
cum_params['B_amplitude'].vary = False
self.fit_dicts['shots_all'] = {
'model': m_cul,
'fit_xvals': {'x': cdf_xs},
'fit_yvals': {'data': cdf_ys},
'guess_pars': cum_params,
}
def analyze_fit_results(self):
# Create a CDF based on the fit functions of both fits.
fr = self.fit_res['shots_all']
bv = fr.best_values
# best values new
bvn = deepcopy(bv)
bvn['A_amplitude'] = 1
bvn['B_amplitude'] = 1
def CDF(x):
return ro_CDF(x=x, **bvn)
def CDF_0(x):
return CDF(x=[x, x])[0]
def CDF_1(x):
return CDF(x=[x, x])[1]
def infid_vs_th(x):
cdf = ro_CDF(x=[x, x], **bvn)
return (1-np.abs(cdf[0] - cdf[1]))/2
self._CDF_0 = CDF_0
self._CDF_1 = CDF_1
self._infid_vs_th = infid_vs_th
thr_guess = (3*bv['B_center'] - bv['A_center'])/2
opt_fid = minimize(infid_vs_th, thr_guess)
# for some reason the fit sometimes returns a list of values
if isinstance(opt_fid['fun'], float):
self.proc_data_dict['F_assignment_fit'] = (1-opt_fid['fun'])
else:
self.proc_data_dict['F_assignment_fit'] = (1-opt_fid['fun'])[0]
self.proc_data_dict['threshold_fit'] = opt_fid['x'][0]
# Calculate the fidelity of both
###########################################
# Extracting the discrimination fidelity #
###########################################
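# Unlike the assignment fidelity above, the discrimination fidelity below is
# computed from the two main Gaussians only (A_center/A_sigma, B_center/B_sigma),
# so residual excitation and relaxation events do not lower it.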
def CDF_0_discr(x):
return gaussianCDF(x, amplitude=1,
mu=bv['A_center'], sigma=bv['A_sigma'])
def CDF_1_discr(x):
return gaussianCDF(x, amplitude=1,
mu=bv['B_center'], sigma=bv['B_sigma'])
def disc_infid_vs_th(x):
cdf0 = gaussianCDF(x, amplitude=1, mu=bv['A_center'],
sigma=bv['A_sigma'])
cdf1 = gaussianCDF(x, amplitude=1, mu=bv['B_center'],
sigma=bv['B_sigma'])
return (1-np.abs(cdf0 - cdf1))/2
self._CDF_0_discr = CDF_0_discr
self._CDF_1_discr = CDF_1_discr
self._disc_infid_vs_th = disc_infid_vs_th
opt_fid_discr = minimize(disc_infid_vs_th, thr_guess)
# for some reason the fit sometimes returns a list of values
if isinstance(opt_fid_discr['fun'], float):
self.proc_data_dict['F_discr'] = (1-opt_fid_discr['fun'])
else:
self.proc_data_dict['F_discr'] = (1-opt_fid_discr['fun'])[0]
self.proc_data_dict['threshold_discr'] = opt_fid_discr['x'][0]
fr = self.fit_res['shots_all']
bv = fr.params
self.proc_data_dict['residual_excitation'] = bv['A_spurious'].value
self.proc_data_dict['relaxation_events'] = bv['B_spurious'].value
###################################
# Save quantities of interest. #
###################################
self.proc_data_dict['quantities_of_interest'] = {
'SNR': self.fit_res['shots_all'].params['SNR'].value,
'F_d': self.proc_data_dict['F_discr'],
'F_a': self.proc_data_dict['F_assignment_raw'],
'residual_excitation': self.proc_data_dict['residual_excitation'],
'relaxation_events':
self.proc_data_dict['relaxation_events']
}
self.qoi = self.proc_data_dict['quantities_of_interest']
def prepare_plots(self):
# Did we load two voltage components (shall we do 2D plots?)
two_dim_data = len(
self.proc_data_dict['all_channel_int_voltages'][0]) == 2
eff_voltage_label = self.proc_data_dict['shots_xlabel']
eff_voltage_unit = self.proc_data_dict['shots_xunit']
x_volt_label = self.raw_data_dict['value_names'][0]
x_volt_unit = self.raw_data_dict['value_units'][0]
if two_dim_data:
y_volt_label = self.raw_data_dict['value_names'][1]
y_volt_unit = self.raw_data_dict['value_units'][1]
z_hist_label = 'Counts'
labels = self.options_dict.get(
'preparation_labels', ['|g> prep.', '|e> prep.'])
label_0 = labels[0]
label_1 = labels[1]
title = ('\n' + self.timestamps[0] + ' - "' +
self.raw_data_dict['measurementstring'] + '"')
# 1D histograms (PDF)
log_hist = self.options_dict.get('log_hist', False)
bin_x = self.proc_data_dict['bin_edges']
bin_y = self.proc_data_dict['hist']
self.plot_dicts['hist_0'] = {
'title': 'Binned Shot Counts' + title,
'ax_id': '1D_histogram',
'plotfn': self.plot_bar,
'xvals': bin_x,
'yvals': bin_y[0],
'xwidth': self.proc_data_dict['binsize'],
'bar_kws': {'log': log_hist, 'alpha': .4, 'facecolor': 'C0',
'edgecolor': 'C0'},
'setlabel': label_0,
'xlabel': eff_voltage_label,
'xunit': eff_voltage_unit,
'ylabel': z_hist_label,
}
self.plot_dicts['hist_1'] = {
'ax_id': '1D_histogram',
'plotfn': self.plot_bar,
'xvals': bin_x,
'yvals': bin_y[1],
'xwidth': self.proc_data_dict['binsize'],
'bar_kws': {'log': log_hist, 'alpha': .3, 'facecolor': 'C3',
'edgecolor': 'C3'},
'setlabel': label_1,
'do_legend': True,
'xlabel': eff_voltage_label,
'xunit': eff_voltage_unit,
'ylabel': z_hist_label,
}
if log_hist:
self.plot_dicts['hist_0']['yrange'] = (0.5, 1.5*np.max(bin_y[0]))
self.plot_dicts['hist_1']['yrange'] = (0.5, 1.5*np.max(bin_y[1]))
# CDF
cdf_xs = self.proc_data_dict['cumsum_x']
cdf_ys = self.proc_data_dict['cumsum_y']
cdf_ys[0] = cdf_ys[0]/np.max(cdf_ys[0])
cdf_ys[1] = cdf_ys[1]/np.max(cdf_ys[1])
xra = (bin_x[0], bin_x[-1])
self.plot_dicts['cdf_shots_0'] = {
'title': 'Cumulative Shot Counts (no binning)' + title,
'ax_id': 'cdf',
'plotfn': self.plot_line,
'xvals': cdf_xs[0],
'yvals': cdf_ys[0],
'setlabel': label_0,
'xrange': xra,
'line_kws': {'color': 'C0', 'alpha': 0.3},
'marker': '',
'xlabel': eff_voltage_label,
'xunit': eff_voltage_unit,
'ylabel': 'Cumulative Counts',
'yunit': 'norm.',
'do_legend': True,
}
self.plot_dicts['cdf_shots_1'] = {
'ax_id': 'cdf',
'plotfn': self.plot_line,
'xvals': cdf_xs[1],
'yvals': cdf_ys[1],
'setlabel': label_1,
'line_kws': {'color': 'C3', 'alpha': 0.3},
'marker': '',
'xlabel': eff_voltage_label,
'xunit': eff_voltage_unit,
'ylabel': 'Cumulative Counts',
'yunit': 'norm.',
'do_legend': True,
}
# Vlines for thresholds
th_raw = self.proc_data_dict['threshold_raw']
threshs = [th_raw, ]
if self.do_fitting:
threshs.append(self.proc_data_dict['threshold_fit'])
threshs.append(self.proc_data_dict['threshold_discr'])
for ax in ['1D_histogram', 'cdf']:
self.plot_dicts[ax+'_vlines_thresh'] = {
'ax_id': ax,
'plotfn': self.plot_vlines_auto,
'xdata': threshs,
'linestyles': ['--', '-.', ':'],
'labels': ['$th_{raw}$', '$th_{fit}$', '$th_{d}$'],
'colors': ['0.3', '0.5', '0.2'],
'do_legend': True,
}
# 2D Histograms
if two_dim_data:
iq_centers = None
if 'IQ_pos' in self.proc_data_dict and self.proc_data_dict['IQ_pos'] is not None:
iq_centers = self.proc_data_dict['IQ_pos']
peak_marker_2D = {
'plotfn': self.plot_line,
'xvals': iq_centers[0],
'yvals': iq_centers[1],
'xlabel': x_volt_label,
'xunit': x_volt_unit,
'ylabel': y_volt_label,
'yunit': y_volt_unit,
'marker': 'x',
'aspect': 'equal',
'linestyle': '',
'color': 'black',
#'line_kws': {'markersize': 1, 'color': 'black', 'alpha': 1},
'setlabel': 'Peaks',
'do_legend': True,
}
peak_marker_2D_rot = deepcopy(peak_marker_2D)
peak_marker_2D_rot['xvals'] = iq_centers[0]
peak_marker_2D_rot['yvals'] = iq_centers[1]
self.plot_dicts['2D_histogram_0'] = {
'title': 'Raw '+label_0+' Binned Shot Counts' + title,
'ax_id': '2D_histogram_0',
# 'plotfn': self.plot_colorxy,
'plotfn': plot_2D_ssro_histogram,
'xvals': self.proc_data_dict['2D_histogram_x'],
'yvals': self.proc_data_dict['2D_histogram_y'],
'zvals': self.proc_data_dict['2D_histogram_z'][0].T,
'xlabel': x_volt_label,
'xunit': x_volt_unit,
'ylabel': y_volt_label,
'yunit': y_volt_unit,
'zlabel': z_hist_label,
'zunit': '-',
'cmap': 'Blues',
}
if iq_centers is not None:
dp = deepcopy(peak_marker_2D)
dp['ax_id'] = '2D_histogram_0'
self.plot_dicts['2D_histogram_0_marker'] = dp
self.plot_dicts['2D_histogram_1'] = {
'title': 'Raw '+label_1+' Binned Shot Counts' + title,
'ax_id': '2D_histogram_1',
# 'plotfn': self.plot_colorxy,
'plotfn': plot_2D_ssro_histogram,
'xvals': self.proc_data_dict['2D_histogram_x'],
'yvals': self.proc_data_dict['2D_histogram_y'],
'zvals': self.proc_data_dict['2D_histogram_z'][1].T,
'xlabel': x_volt_label,
'xunit': x_volt_unit,
'ylabel': y_volt_label,
'yunit': y_volt_unit,
'zlabel': z_hist_label,
'zunit': '-',
'cmap': 'Reds',
}
if iq_centers is not None:
dp = deepcopy(peak_marker_2D)
dp['ax_id'] = '2D_histogram_1'
self.plot_dicts['2D_histogram_1_marker'] = dp
# Scatter Shots
volts = self.proc_data_dict['all_channel_int_voltages']
v_flat = np.concatenate(np.concatenate(volts))
plot_range = (np.min(v_flat), np.max(v_flat))
vxr = plot_range
vyr = plot_range
self.plot_dicts['2D_shots_0'] = {
'title': 'Raw Shots' + title,
'ax_id': '2D_shots',
'aspect': 'equal',
'plotfn': self.plot_line,
'xvals': volts[0][0],
'yvals': volts[0][1],
'range': [vxr, vyr],
'xrange': vxr,
'yrange': vyr,
'xlabel': x_volt_label,
'xunit': x_volt_unit,
'ylabel': y_volt_label,
'yunit': y_volt_unit,
'zlabel': z_hist_label,
'marker': 'o',
'linestyle': '',
'color': 'C0',
'line_kws': {'markersize': 0.25, 'color': 'C0', 'alpha': 0.5},
'setlabel': label_0,
'do_legend': True,
}
self.plot_dicts['2D_shots_1'] = {
'ax_id': '2D_shots',
'plotfn': self.plot_line,
'xvals': volts[1][0],
'yvals': volts[1][1],
'aspect': 'equal',
'range': [vxr, vyr],
'xrange': vxr,
'yrange': vyr,
'xlabel': x_volt_label,
'xunit': x_volt_unit,
'ylabel': y_volt_label,
'yunit': y_volt_unit,
'zlabel': z_hist_label,
'marker': 'o',
'linestyle': '',
'color': 'C3',
'line_kws': {'markersize': 0.25, 'color': 'C3', 'alpha': 0.5},
'setlabel': label_1,
'do_legend': True,
}
if iq_centers is not None:
dp = deepcopy(peak_marker_2D)
dp['ax_id'] = '2D_shots'
self.plot_dicts['2D_shots_marker'] = dp
# The cumulative histograms
#####################################
# Adding the fits to the figures #
#####################################
if self.do_fitting:
# todo: add separate fits for residual and main gaussians
x = np.linspace(bin_x[0], bin_x[-1], 150)
para_hist_tmp = self.fit_res['shots_all_hist'].best_values
para_cdf = self.fit_res['shots_all'].best_values
para_hist = para_cdf
para_hist['A_amplitude'] = para_hist_tmp['A_amplitude']
para_hist['B_amplitude'] = para_hist_tmp['B_amplitude']
ro_g = ro_gauss(x=[x, x], **para_hist)
self.plot_dicts['new_fit_shots_0'] = {
'ax_id': '1D_histogram',
'plotfn': self.plot_line,
'xvals': x,
'yvals': ro_g[0],
'setlabel': 'Fit '+label_0,
'line_kws': {'color': 'C0'},
'marker': '',
'do_legend': True,
}
self.plot_dicts['new_fit_shots_1'] = {
'ax_id': '1D_histogram',
'plotfn': self.plot_line,
'xvals': x,
'yvals': ro_g[1],
'marker': '',
'setlabel': 'Fit '+label_1,
'line_kws': {'color': 'C3'},
'do_legend': True,
}
self.plot_dicts['cdf_fit_shots_0'] = {
'ax_id': 'cdf',
'plotfn': self.plot_line,
'xvals': x,
'yvals': self._CDF_0(x),
'setlabel': 'Fit '+label_0,
'line_kws': {'color': 'C0', 'alpha': 0.8},
'linestyle': ':',
'marker': '',
'do_legend': True,
}
self.plot_dicts['cdf_fit_shots_1'] = {
'ax_id': 'cdf',
'plotfn': self.plot_line,
'xvals': x,
'yvals': self._CDF_1(x),
'marker': '',
'linestyle': ':',
'setlabel': 'Fit '+label_1,
'line_kws': {'color': 'C3', 'alpha': 0.8},
'do_legend': True,
}
##########################################
# Add textbox (eg.g Thresholds, fidelity #
# information, number of shots etc) #
##########################################
if not self.presentation_mode:
fit_text = 'Thresholds:'
fit_text += '\nName | Level | Fidelity'
thr, th_unit = SI_val_to_msg_str(
self.proc_data_dict['threshold_raw'],
eff_voltage_unit, return_type=float)
raw_th_msg = (
'\n>raw | ' +
'{:.2f} {} | '.format(thr, th_unit) +
'{:.1f}%'.format(
self.proc_data_dict['F_assignment_raw']*100))
fit_text += raw_th_msg
if self.do_fitting:
thr, th_unit = SI_val_to_msg_str(
self.proc_data_dict['threshold_fit'],
eff_voltage_unit, return_type=float)
fit_th_msg = (
'\n>fit | ' +
'{:.2f} {} | '.format(thr, th_unit) +
'{:.1f}%'.format(
self.proc_data_dict['F_assignment_fit']*100))
fit_text += fit_th_msg
thr, th_unit = SI_val_to_msg_str(
self.proc_data_dict['threshold_discr'],
eff_voltage_unit, return_type=float)
fit_th_msg = (
'\n>dis | ' +
'{:.2f} {} | '.format(thr, th_unit) +
'{:.1f}%'.format(
self.proc_data_dict['F_discr']*100))
fit_text += fit_th_msg
snr = self.fit_res['shots_all'].params['SNR']
fit_text += format_value_string('\nSNR (fit)', lmfit_par=snr)
fr = self.fit_res['shots_all']
bv = fr.params
a_sp = bv['A_spurious']
fit_text += '\n\nSpurious Excitations:'
fit_text += format_value_string('\n$p(e|0)$', lmfit_par=a_sp)
b_sp = bv['B_spurious']
fit_text += format_value_string('\n$p(g|\\pi)$',
lmfit_par=b_sp)
if two_dim_data:
offs = self.proc_data_dict['raw_offset']
fit_text += '\n\nRotated by ${:.1f}^\\circ$'.format(
(offs[2]*180/np.pi) % 180)
auto_rot = self.options_dict.get('auto_rotation_angle', True)
fit_text += '(auto)' if auto_rot else '(man.)'
else:
fit_text += '\n\n(Single quadrature data)'
fit_text += '\n\nTotal shots: %d+%d' % (*self.proc_data_dict['nr_shots'],)
if self.predict_qubit_temp:
h = 6.62607004e-34
kb = 1.38064852e-23
res_exc = a_sp.value
effective_temp = h*self.qubit_freq/(kb*np.log((1-res_exc)/res_exc))
fit_text += '\n\nQubit '+'$T_{eff}$'+\
' = {:.2f} mK\n@{:.0f}'.format(effective_temp*1e3,
self.qubit_freq)
for ax in ['cdf', '1D_histogram']:
self.plot_dicts['text_msg_' + ax] = {
'ax_id': ax,
'xpos': 1.05,
'horizontalalignment': 'left',
'plotfn': self.plot_text,
'box_props': 'fancy',
'text_string': fit_text,
}
class Dispersive_shift_Analysis(ba.BaseDataAnalysis):
'''
Analysis for dispersive shift.
Designed to be used with <CCL_Transmon>.measure_dispersive_shift_pulsed
'''
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', do_fitting: bool = True,
data_file_path: str=None,
options_dict: dict=None, auto=True,
**kw):
'''
Extract ground and excited state timestamps
'''
if (t_start is None) and (t_stop is None):
ground_ts = a_tools.return_last_n_timestamps(1, contains='Resonator_scan_off')
excited_ts= a_tools.return_last_n_timestamps(1, contains='Resonator_scan_on')
elif (t_start is None) ^ (t_stop is None):
raise ValueError('Must provide either none or both timestamps.')
else:
ground_ts = t_start # t_start is assigned to ground state
excited_ts= t_stop # t_stop is assigned to excited state
super().__init__(t_start=ground_ts, t_stop=excited_ts,
label='Resonator_scan', do_fitting=do_fitting,
data_file_path=data_file_path,
options_dict=options_dict,
**kw)
self.params_dict = {'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'
}
self.numeric_params = []
#self.proc_data_dict = OrderedDict()
if auto:
self.run_analysis()
def process_data(self):
'''
Processing data
'''
# Frequency sweep range in the ground/excited state
self.proc_data_dict['data_freqs_ground'] = \
self.raw_data_dict['sweep_points'][0]
self.proc_data_dict['data_freqs_excited'] = \
self.raw_data_dict['sweep_points'][1]
# S21 mag (transmission) in the ground/excited state
self.proc_data_dict['data_S21_ground'] = \
self.raw_data_dict['measured_values'][0][0]
self.proc_data_dict['data_S21_excited'] = \
self.raw_data_dict['measured_values'][1][0]
#self.proc_data_dict['f0_ground'] = self.raw_data_dict['f0'][0]
#############################
# Find resonator dips
#############################
pk_rep_ground = a_tools.peak_finder( \
self.proc_data_dict['data_freqs_ground'],
self.proc_data_dict['data_S21_ground'],
window_len=5)
pk_rep_excited= a_tools.peak_finder( \
self.proc_data_dict['data_freqs_excited'],
self.proc_data_dict['data_S21_excited'],
window_len=5)
min_idx_ground = np.argmin(pk_rep_ground['dip_values'])
min_idx_excited= np.argmin(pk_rep_excited['dip_values'])
min_freq_ground = pk_rep_ground['dips'][min_idx_ground]
min_freq_excited= pk_rep_excited['dips'][min_idx_excited]
min_S21_ground = pk_rep_ground['dip_values'][min_idx_ground]
min_S21_excited= pk_rep_excited['dip_values'][min_idx_excited]
dispersive_shift = min_freq_excited-min_freq_ground
self.proc_data_dict['Res_freq_ground'] = min_freq_ground
self.proc_data_dict['Res_freq_excited']= min_freq_excited
self.proc_data_dict['Res_S21_ground'] = min_S21_ground
self.proc_data_dict['Res_S21_excited']= min_S21_excited
self.proc_data_dict['quantities_of_interest'] = \
{'dispersive_shift': dispersive_shift}
self.qoi = self.proc_data_dict['quantities_of_interest']
def prepare_plots(self):
x_range = [min(self.proc_data_dict['data_freqs_ground'][0],
self.proc_data_dict['data_freqs_excited'][0]) ,
max(self.proc_data_dict['data_freqs_ground'][-1],
self.proc_data_dict['data_freqs_excited'][-1])]
y_range = [0, max(max(self.proc_data_dict['data_S21_ground']),
max(self.proc_data_dict['data_S21_excited']))]
x_label = self.raw_data_dict['xlabel'][0]
y_label = self.raw_data_dict['value_names'][0][0]
x_unit = self.raw_data_dict['xunit'][0][0]
y_unit = self.raw_data_dict['value_units'][0][0]
title = 'Transmission in the ground and excited state'
self.plot_dicts['S21_ground'] = {
'title': title,
'ax_id': 'Transmission_axis',
'xvals': self.proc_data_dict['data_freqs_ground'],
'yvals': self.proc_data_dict['data_S21_ground'],
'xrange': x_range,
'yrange': y_range,
'xlabel': x_label,
'xunit': x_unit,
'ylabel': y_label,
'yunit': y_unit,
'plotfn': self.plot_line,
'line_kws': {'color': 'C0', 'alpha': 1},
'marker': ''
}
self.plot_dicts['S21_excited'] = {
'title': title,
'ax_id': 'Transmission_axis',
'xvals': self.proc_data_dict['data_freqs_excited'],
'yvals': self.proc_data_dict['data_S21_excited'],
'xrange': x_range,
'yrange': y_range,
'xlabel': x_label,
'xunit': x_unit,
'ylabel': y_label,
'yunit': y_unit,
'plotfn': self.plot_line,
'line_kws': {'color': 'C1', 'alpha': 1},
'marker': ''
}
####################################
# Plot arrow
####################################
min_freq_ground = self.proc_data_dict['Res_freq_ground']
min_freq_excited= self.proc_data_dict['Res_freq_excited']
yval = y_range[1]/2
dispersive_shift = int((min_freq_excited-min_freq_ground)*1e-4)*1e-2
txt_str = r'$2\chi/2\pi=$' + str(dispersive_shift) + ' MHz'
self.plot_dicts['Dispersive_shift_line'] = {
'ax_id': 'Transmission_axis',
'xvals': [min_freq_ground , min_freq_excited] ,
'yvals': [yval, yval] ,
'plotfn': self.plot_line,
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
self.plot_dicts['Dispersive_shift_vline'] = {
'ax_id': 'Transmission_axis',
'ymin': y_range[0],
'ymax': y_range[1],
'x': [min_freq_ground, min_freq_excited],
'xrange': x_range,
'yrange': y_range,
'plotfn': self.plot_vlines,
'line_kws': {'color': 'black', 'alpha': 0.5}
}
self.plot_dicts['Dispersive_shift_rmarker'] = {
'ax_id': 'Transmission_axis',
'xvals': [min_freq_ground] ,
'yvals': [yval] ,
'plotfn': self.plot_line,
'line_kws': {'color': 'black', 'alpha': 1},
'marker': 5
}
self.plot_dicts['Dispersive_shift_lmarker'] = {
'ax_id': 'Transmission_axis',
'xvals': [min_freq_excited] ,
'yvals': [yval] ,
'plotfn': self.plot_line,
'line_kws': {'color': 'black', 'alpha': 1},
'marker': 4
}
self.plot_dicts['Dispersive_shift_text'] = {
'ax_id': 'Transmission_axis',
'plotfn': self.plot_text,
'xpos': .5,
'ypos': .5,
'horizontalalignment': 'center',
'verticalalignment': 'bottom',
'text_string': txt_str,
'box_props': dict(boxstyle='round', pad=.4,
facecolor='white', alpha=0.)
}
class RO_acquisition_delayAnalysis(ba.BaseDataAnalysis):
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', do_fitting: bool = True,
data_file_path: str=None,
qubit_name = '',
options_dict: dict=None, auto=True,
**kw):
super().__init__(t_start=t_start, t_stop=t_stop,
label=label, do_fitting=do_fitting,
data_file_path=data_file_path,
options_dict=options_dict,
**kw)
self.single_timestamp = True
self.qubit_name = qubit_name
self.params_dict = {'ro_pulse_length': '{}.ro_pulse_length'.format(self.qubit_name),
'xlabel': 'sweep_name',
'xunit': 'sweep_unit',
'sweep_points': 'sweep_points',
'value_names': 'value_names',
'value_units': 'value_units',
'measured_values': 'measured_values'
}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
Processing data
"""
self.Times = self.raw_data_dict['sweep_points']
self.I_data_UHF = self.raw_data_dict['measured_values'][0]
self.Q_data_UHF = self.raw_data_dict['measured_values'][1]
self.pulse_length = float(self.raw_data_dict['ro_pulse_length'])
#######################################
# Determine the start of the pulse
#######################################
def get_pulse_start(x, y, tolerance=2):
'''
The start of the pulse is estimated in three steps:
1. Evaluate signal standard deviation in a certain interval as
function of time: f(t).
2. Calculate the derivative of the aforementioned data: f'(t).
3. Evaluate when the derivative exceeds a threshold. This
threshold is defined as max(f'(t))/5.
This approach is more tolerant to noisy signals.
'''
pulse_baseline = np.mean(y) # get pulse baseline
pulse_std = np.std(y) # get pulse standard deviation
nr_points_interval = 200 # number of points in the interval
aux = int(nr_points_interval/2)
iteration_idx = np.arange(-aux, len(y)+aux) # mask for circular array
aux_list = [ y[i%len(y)] for i in iteration_idx] # circular array
# Calculate standard deviation for each interval
y_std = []
for i in range(len(y)):
interval = aux_list[i : i+nr_points_interval]
y_std.append( np.std(interval) )
y_std_derivative = np.gradient(y_std[:-aux])# calculate derivative
threshold = max(y_std_derivative)/5 # define threshold
start_index = np.where( y_std_derivative > threshold )[0][0] + aux
return start_index-tolerance
#######################################
# Determine the end of depletion
#######################################
def get_pulse_length(x, y):
'''
Similarly to get_pulse_start, the end of depletion is
set when the signal goes below 5% of its standard dev.
'''
pulse_baseline = np.mean(y)
threshold = 0.05*np.std(y)
pulse_std = threshold+1
i = 0
while pulse_std > threshold:
pulse_std = np.std(y[i:]-pulse_baseline)
i += 1
end_index = i-1
return end_index
Amplitude_I = max(abs(self.I_data_UHF))
baseline_I = np.mean(self.I_data_UHF)
start_index_I = get_pulse_start(self.Times, self.I_data_UHF)
end_index_I = get_pulse_length(self.Times, self.I_data_UHF)
Amplitude_Q = max(abs(self.Q_data_UHF))
baseline_Q = np.mean(self.Q_data_UHF)
start_index_Q = get_pulse_start(self.Times, self.Q_data_UHF)
end_index_Q = get_pulse_length(self.Times, self.Q_data_UHF)
self.proc_data_dict['I_Amplitude'] = Amplitude_I
self.proc_data_dict['I_baseline'] = baseline_I
self.proc_data_dict['I_pulse_start_index'] = start_index_I
self.proc_data_dict['I_pulse_end_index'] = end_index_I
self.proc_data_dict['I_pulse_start'] = self.Times[start_index_I]
self.proc_data_dict['I_pulse_end'] = self.Times[end_index_I]
self.proc_data_dict['Q_Amplitude'] = Amplitude_Q
self.proc_data_dict['Q_baseline'] = baseline_Q
self.proc_data_dict['Q_pulse_start_index'] = start_index_Q
self.proc_data_dict['Q_pulse_end_index'] = end_index_Q
self.proc_data_dict['Q_pulse_start'] = self.Times[start_index_Q]
self.proc_data_dict['Q_pulse_end'] = self.Times[end_index_Q]
def prepare_plots(self):
I_start_line_x = [self.proc_data_dict['I_pulse_start'],
self.proc_data_dict['I_pulse_start']]
I_pulse_line_x = [self.proc_data_dict['I_pulse_start']+self.pulse_length,
self.proc_data_dict['I_pulse_start']+self.pulse_length]
I_end_line_x = [self.proc_data_dict['I_pulse_end'],
self.proc_data_dict['I_pulse_end']]
Q_start_line_x = [self.proc_data_dict['Q_pulse_start'],
self.proc_data_dict['Q_pulse_start']]
Q_pulse_line_x = [self.proc_data_dict['Q_pulse_start']+self.pulse_length,
self.proc_data_dict['Q_pulse_start']+self.pulse_length]
Q_end_line_x = [self.proc_data_dict['Q_pulse_end'],
self.proc_data_dict['Q_pulse_end']]
Amplitude = max(self.proc_data_dict['I_Amplitude'],
self.proc_data_dict['Q_Amplitude'])
vline_y = np.array([1.1*Amplitude, -1.1*Amplitude])
x_range= [self.Times[0], self.Times[-1]]
y_range= [vline_y[1], vline_y[0]]
I_title = str(self.qubit_name)+' Measured transients $I_{quadrature}$'
Q_title = str(self.qubit_name)+' Measured transients $Q_{quadrature}$'
##########################
# Transients
##########################
self.plot_dicts['I_transients'] = {
'title': I_title,
'ax_id': 'I_axis',
'xvals': self.Times,
'yvals': self.I_data_UHF,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'I Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'line_kws': {'color': 'C0', 'alpha': 1},
'marker': ''
}
self.plot_dicts['Q_transients'] = {
'title': Q_title,
'ax_id': 'Q_axis',
'xvals': self.Times,
'yvals': self.Q_data_UHF,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'Q Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'line_kws': {'color': 'C0', 'alpha': 1},
'marker': ''
}
##########################
# Vertical lines
##########################
# I quadrature
self.plot_dicts['I_pulse_start'] = {
'ax_id': 'I_axis',
'xvals': I_start_line_x,
'yvals': vline_y,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'I Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'linestyle': '--',
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
self.plot_dicts['I_pulse_end'] = {
'ax_id': 'I_axis',
'xvals': I_pulse_line_x,
'yvals': vline_y,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'I Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'linestyle': '--',
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
self.plot_dicts['I_depletion_end'] = {
'ax_id': 'I_axis',
'xvals': I_end_line_x,
'yvals': vline_y,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'I Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'linestyle': '--',
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
# Q quadrature
self.plot_dicts['Q_pulse_start'] = {
'ax_id': 'Q_axis',
'xvals': Q_start_line_x,
'yvals': vline_y,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'Q Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'linestyle': '--',
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
self.plot_dicts['Q_pulse_end'] = {
'ax_id': 'Q_axis',
'xvals': Q_pulse_line_x,
'yvals': vline_y,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'Q Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'linestyle': '--',
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
self.plot_dicts['Q_depletion_end'] = {
'ax_id': 'Q_axis',
'xvals': Q_end_line_x,
'yvals': vline_y,
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'Q Amplitude',
'yunit': 'V',
'plotfn': self.plot_line,
'linestyle': '--',
'line_kws': {'color': 'black', 'alpha': 1},
'marker': ''
}
########################
# Plot pulse windows
########################
I_pulse_bin = np.array([self.proc_data_dict['I_pulse_start'],
self.proc_data_dict['I_pulse_start']+self.pulse_length])
I_depletion_bin = np.array([self.proc_data_dict['I_pulse_start']
+self.pulse_length, self.proc_data_dict['I_pulse_end']])
Q_pulse_bin = np.array([self.proc_data_dict['Q_pulse_start'],
self.proc_data_dict['Q_pulse_start']+self.pulse_length])
Q_depletion_bin = np.array([self.proc_data_dict['Q_pulse_start']
+self.pulse_length, self.proc_data_dict['Q_pulse_end']])
self.plot_dicts['I_pulse_length'] = {
'ax_id': 'I_axis',
'xvals': I_pulse_bin,
'yvals': vline_y,
'xwidth': self.pulse_length,
'ywidth': self.proc_data_dict['I_Amplitude'],
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'I Amplitude',
'yunit': 'V',
'plotfn': self.plot_bar,
'bar_kws': { 'alpha': .25, 'facecolor': 'C0'}
}
self.plot_dicts['I_pulse_depletion'] = {
'ax_id': 'I_axis',
'xvals': I_depletion_bin,
'yvals': vline_y,
'xwidth': self.pulse_length,
'ywidth': self.proc_data_dict['I_Amplitude'],
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'I Amplitude',
'yunit': 'V',
'plotfn': self.plot_bar,
'bar_kws': { 'alpha': .25, 'facecolor': 'C1'}
}
self.plot_dicts['Q_pulse_length'] = {
'ax_id': 'Q_axis',
'xvals': Q_pulse_bin,
'yvals': vline_y,
'xwidth': self.pulse_length,
'ywidth': self.proc_data_dict['Q_Amplitude'],
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'Q Amplitude',
'yunit': 'V',
'plotfn': self.plot_bar,
'bar_kws': { 'alpha': .25, 'facecolor': 'C0'}
}
self.plot_dicts['Q_pulse_depletion'] = {
'ax_id': 'Q_axis',
'grid': True,
'grid_kws': {'alpha': .25, 'linestyle': '--'},
'xvals': Q_depletion_bin,
'yvals': vline_y,
'xwidth': self.pulse_length,
'ywidth': self.proc_data_dict['Q_Amplitude'],
'xrange': x_range,
'yrange': y_range,
'xlabel': self.raw_data_dict['xlabel'],
'xunit': 's',
'ylabel': 'Q Amplitude',
'yunit': 'V',
'plotfn': self.plot_bar,
'bar_kws': { 'alpha': .25, 'facecolor': 'C1'}
}
class Readout_landspace_Analysis(sa.Basic2DInterpolatedAnalysis):
'''
Analysis for Readout landscapes using adaptive sampling.
Stores maximum fidelity parameters in quantities of interest dict as:
- <analysis_object>.qoi['Optimal_parameter_X']
- <analysis_object>.qoi['Optimal_parameter_Y']
'''
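    # Hedged usage sketch (attribute names taken from the docstring above;
    # the label string is a placeholder):
    #   a = Readout_landspace_Analysis(label='RO_landscape')
    #   x_opt = a.qoi['Optimal_parameter_X']
    #   y_opt = a.qoi['Optimal_parameter_Y']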
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='', data_file_path: str=None,
interp_method: str = 'linear',
options_dict: dict=None, auto=True,
**kw):
super().__init__(t_start = t_start, t_stop = t_stop,
label = label,
data_file_path = data_file_path,
options_dict = options_dict,
auto = auto,
interp_method=interp_method,
**kw)
if auto:
self.run_analysis()
def process_data(self):
super().process_data()
# Extract maximum interpolated fidelity
idx = [i for i, s in enumerate(self.proc_data_dict['value_names']) \
if 'F_a' in s][0]
X = self.proc_data_dict['x_int']
Y = self.proc_data_dict['y_int']
Z = self.proc_data_dict['interpolated_values'][idx]
max_idx = np.unravel_index(np.argmax(Z), (len(X),len(Y)) )
self.proc_data_dict['Max_F_a_idx'] = max_idx
self.proc_data_dict['Max_F_a'] = Z[max_idx[1],max_idx[0]]
self.proc_data_dict['quantities_of_interest'] = {
'Optimal_parameter_X': X[max_idx[1]],
'Optimal_parameter_Y': Y[max_idx[0]]
}
def prepare_plots(self):
# assumes that value names are unique in an experiment
for i, val_name in enumerate(self.proc_data_dict['value_names']):
zlabel = '{} ({})'.format(val_name,
self.proc_data_dict['value_units'][i])
# Plot interpolated landscape
self.plot_dicts[val_name] = {
'ax_id': val_name,
'plotfn': a_tools.color_plot,
'x': self.proc_data_dict['x_int'],
'y': self.proc_data_dict['y_int'],
'z': self.proc_data_dict['interpolated_values'][i],
'xlabel': self.proc_data_dict['xlabel'],
'x_unit': self.proc_data_dict['xunit'],
'ylabel': self.proc_data_dict['ylabel'],
'y_unit': self.proc_data_dict['yunit'],
'zlabel': zlabel,
'title': '{}\n{}'.format(
self.timestamp, self.proc_data_dict['measurementstring'])
}
# Plot sampled values
self.plot_dicts[val_name+str('_sampled_values')] = {
'ax_id': val_name,
'plotfn': scatter_pnts_overlay,
'x': self.proc_data_dict['x'],
'y': self.proc_data_dict['y'],
'xlabel': self.proc_data_dict['xlabel'],
'x_unit': self.proc_data_dict['xunit'],
'ylabel': self.proc_data_dict['ylabel'],
'y_unit': self.proc_data_dict['yunit'],
'alpha': .75,
'setlabel': 'Sampled points',
'do_legend': True
}
# Plot maximum fidelity point
self.plot_dicts[val_name+str('_max_fidelity')] = {
'ax_id': val_name,
'plotfn': self.plot_line,
'xvals': [self.proc_data_dict['x_int']\
[self.proc_data_dict['Max_F_a_idx'][1]]],
'yvals': [self.proc_data_dict['y_int']\
[self.proc_data_dict['Max_F_a_idx'][0]]],
'xlabel': self.proc_data_dict['xlabel'],
'xunit': self.proc_data_dict['xunit'],
'ylabel': self.proc_data_dict['ylabel'],
'yunit': self.proc_data_dict['yunit'],
'marker': 'x',
'linestyle': '',
'color': 'red',
'setlabel': 'Max fidelity',
'do_legend': True,
'legend_pos': 'upper right'
}
class Multiplexed_Readout_Analysis_deprecated(ba.BaseDataAnalysis):
"""
For two qubits. To make this an n-qubit mux readout experiment,
we should vectorize this analysis.
TODO: This needs to be rewritten/debugged!
Suggestion:
Use N*(N-1)/2 instances of Singleshot_Readout_Analysis,
run them without saving the plots and then merge together the
plot_dicts as in the cross_dephasing_analysis.
"""
def __init__(self, t_start: str=None, t_stop: str=None,
label: str='',
data_file_path: str=None,
options_dict: dict=None, extract_only: bool=False,
nr_of_qubits: int = 2,
qubit_names: list=None,
do_fitting: bool=True, auto=True):
"""
Inherits from BaseDataAnalysis.
Extra arguments of interest
qubit_names (list) : used to label the experiments, names of the
qubits. LSQ is last name in the list. If not specified will
set qubit_names to [qN, ..., q1, q0]
"""
self.nr_of_qubits = nr_of_qubits
if qubit_names is None:
self.qubit_names = list(reversed(['q{}'.format(i)
for i in range(nr_of_qubits)]))
else:
self.qubit_names = qubit_names
super().__init__(t_start=t_start, t_stop=t_stop,
label=label,
data_file_path=data_file_path,
options_dict=options_dict,
extract_only=extract_only, do_fitting=do_fitting)
self.single_timestamp = False
self.params_dict = {
'measurementstring': 'measurementstring',
'measured_values': 'measured_values',
'value_names': 'value_names',
'value_units': 'value_units'}
self.numeric_params = []
if auto:
self.run_analysis()
def process_data(self):
"""
Responsible for creating the histograms based on the raw data
"""
# Determine the shape of the data to extract whether to rotate or not
nr_bins = int(self.options_dict.get('nr_bins', 100))
# self.proc_data_dict['shots_0'] = [''] * nr_expts
# self.proc_data_dict['shots_1'] = [''] * nr_expts
#################################################################
# Separating data into shots for the different prepared states #
#################################################################
self.proc_data_dict['nr_of_qubits'] = self.nr_of_qubits
self.proc_data_dict['qubit_names'] = self.qubit_names
self.proc_data_dict['ch_names'] = self.raw_data_dict['value_names'][0]
for ch_name, shots in self.raw_data_dict['measured_values_ord_dict'].items():
self.proc_data_dict[ch_name] = shots[0] # only 1 dataset
self.proc_data_dict[ch_name +
' all'] = self.proc_data_dict[ch_name]
min_sh = np.min(self.proc_data_dict[ch_name])
max_sh = np.max(self.proc_data_dict[ch_name])
self.proc_data_dict['nr_shots'] = len(self.proc_data_dict[ch_name])
base = 2
number_of_experiments = base ** self.nr_of_qubits
combinations = [int2base(
i, base=base, fixed_length=self.nr_of_qubits) for i in
range(number_of_experiments)]
self.proc_data_dict['combinations'] = combinations
for i, comb in enumerate(combinations):
# No post selection implemented yet
self.proc_data_dict['{} {}'.format(ch_name, comb)] = \
self.proc_data_dict[ch_name][i::number_of_experiments]
#####################################
# Binning data into 1D histograms #
#####################################
hist_name = 'hist {} {}'.format(
ch_name, comb)
self.proc_data_dict[hist_name] = np.histogram(
self.proc_data_dict['{} {}'.format(
ch_name, comb)],
bins=nr_bins, range=(min_sh, max_sh))
# Cumulative histograms #
chist_name = 'c'+hist_name
# the cumulative histograms are normalized to ensure the right
# fidelities can be calculated
self.proc_data_dict[chist_name] = np.cumsum(
self.proc_data_dict[hist_name][0])/(
completion: np.sum(self.proc_data_dict[hist_name][0])
api: numpy.sum
"""Spectral analysis methods for SciPy linear operators."""
from typing import Tuple
from numpy import exp, inner, linspace, log, ndarray, pi, sqrt, zeros, zeros_like
from numpy.linalg import norm
from numpy.random import randn
from scipy.linalg import eigh, eigh_tridiagonal
from scipy.sparse import diags
from scipy.sparse.linalg import LinearOperator, eigsh
def fast_lanczos(
A: LinearOperator, ncv: int, use_eigh_tridiagonal: bool = False
) -> Tuple[ndarray, ndarray]:
"""Lanczos iterations for large-scale problems (no reorthogonalization step).
Algorithm 2 of papyan2020traces.
Args:
A: Symmetric linear operator.
ncv: Number of Lanczos vectors.
use_eigh_tridiagonal: Whether to use eigh_tridiagonal to eigen-decompose the
tri-diagonal matrix. Default: ``False``. Setting this value to ``True``
results in faster eigen-decomposition, but is less stable.
Returns:
Eigenvalues and eigenvectors of the tri-diagonal matrix built up during
Lanczos iterations. ``evecs[:, i]`` is normalized eigenvector of ``evals[i]``.
"""
alphas, betas = zeros(ncv), zeros(ncv - 1)
dim = A.shape[1]
v, v_prev = None, None
for m in range(ncv):
if m == 0:
v = randn(dim)
v /=
completion: norm(v)
api: numpy.linalg.norm
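# Hedged, self-contained sketch of the Lanczos tridiagonalization that the
# fast_lanczos docstring above describes (standard three-term recurrence
# without reorthogonalization; written independently, so details may differ
# from the truncated original, and no breakdown handling is included).
import numpy as np
from scipy.linalg import eigh_tridiagonal
from scipy.sparse.linalg import LinearOperator

def lanczos_sketch(A: LinearOperator, ncv: int):
    dim = A.shape[1]
    alphas, betas = np.zeros(ncv), np.zeros(ncv - 1)
    v = np.random.randn(dim)
    v /= np.linalg.norm(v)
    v_prev = np.zeros(dim)
    beta_prev = 0.0
    for m in range(ncv):
        w = A.matvec(v)                   # w = A v_m
        alphas[m] = v @ w                 # diagonal entry alpha_m
        w = w - alphas[m] * v - beta_prev * v_prev
        if m < ncv - 1:
            betas[m] = np.linalg.norm(w)  # off-diagonal entry beta_m
            v_prev, v = v, w / betas[m]
            beta_prev = betas[m]
    # eigen-decompose the tridiagonal matrix built from the recurrence
    evals, evecs = eigh_tridiagonal(alphas, betas)
    return evals, evecs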
#!/usr/bin/env python3
# command line program
import argparse
# deepcopy
import copy
# numpy
import numpy as np
import scipy as sp
import scipy.integrate
# test
import matplotlib.pyplot as plt
# internal modules
import libpost
BIN_RANGE = (-40.0, 40.0)#(-10.0, 10.0)
BIN_NB = 128#64#'auto'
def parse():
parser = argparse.ArgumentParser(description='Computes statistics of the lagrangian gradients matrix (computed along particle trajectories)')
return parser.parse_args()
def main_gradients():
print("INFO: Post processing flow velocity gradients statistics sampled by lagrangian objects.", flush=True)
object_names = libpost.get_object_names()
print("INFO: Object names are:", " ".join(object_names), flush=True)
# get gradient matrix
print("INFO: Reading objects properties...", flush=True)
time = libpost.get_time();
objects_j_0_0 = libpost.get_objects_properties(["j_0_0"], object_names)
print("INFO: Done.", flush=True)
print("INFO: Computing f(t) using pdfs...", flush=True)
average_f = {object_name:{"value":np.empty((objects_j_0_0[object_name]["value"].shape[0], 2)), "info":["f", "95CLI"]} for object_name in objects_j_0_0}
average_integral_f = {object_name:{"value":np.empty((objects_j_0_0[object_name]["value"].shape[0], 2)), "info":["integral_f", "95CLI"]} for object_name in objects_j_0_0}
average_tau = {object_name:{"value":np.empty(2), "info":["tau", "95CLI"]} for object_name in objects_j_0_0}
fig = plt.figure() # PLT
fig.add_axes(plt.gca()) # PLT
ax = fig.get_axes()[0] # PLT
ax.set_aspect('equal', 'box') # PLT
for object_name in objects_j_0_0:
print("INFO: Processing " + object_name + "...", flush=True)
# init
average_f[object_name]["value"][0, 0] = 1.0
average_f[object_name]["value"][0, 1] = 0.0
average_integral_f[object_name]["value"][0, 0] = 0.0
average_integral_f[object_name]["value"][0, 1] = 0.0
for k in range(1, objects_j_0_0[object_name]["value"].shape[0]):
j_t = objects_j_0_0[object_name]["value"][:-k, :].flatten()
j_0 = objects_j_0_0[object_name]["value"][k:, :].flatten()
pdf, edges = np.histogramdd((
j_0,
j_t
), bins=(np.histogram_bin_edges(j_0, bins=BIN_NB, range=BIN_RANGE), np.histogram_bin_edges(j_t, bins=BIN_NB, range=BIN_RANGE)), density=True)
if k < 64: # PLT
ax.clear() # PLT
ax.pcolormesh(edges[0], edges[1], pdf) # PLT
fig.savefig("pdf_{0}_{1:03d}.png".format(object_name, k)) # PLT
# compute f
## remove zeros
sum__j_t__p_j_t_j_0 = np.sum(pdf * 0.5 * (edges[1][1:] + edges[1][:-1]) * np.diff(edges[1]), axis=1)
j_0_p_j_0 = np.sum(pdf * np.diff(edges[1]), axis=1) * 0.5 * (edges[0][1:] + edges[0][:-1])
mask = (j_0_p_j_0 != 0.0)
f_value = sum__j_t__p_j_t_j_0[mask] / j_0_p_j_0[mask]
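# f_value is the binned estimate E[j(t) | j(0)] / j(0); averaging it over the
# j(0) bins below estimates the normalized correlation f(t), assuming
# <j(t) | j(0)> = f(t) * j(0).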
## computation
average_f[object_name]["value"][k, 0] = np.average(f_value)
average_f[object_name]["value"][k, 1] = 1.96 * np.std(f_value) / np.sqrt(f_value.size)
# compute integral f
average_integral_f[object_name]["value"][0, 0] = 0.0
average_integral_f[object_name]["value"][1:, 0] = sp.integrate.cumtrapz(average_f[object_name]["value"][:, 0], x=time, axis=0)
average_integral_f[object_name]["value"][:, 1] = -1.0
# compute tau
average_tau[object_name]["value"][0] = np.trapz(average_integral_f[object_name]["value"][:, 0], x=time, axis=0) / time[-1]
average_tau[object_name]["value"][1] = -1.0
print("INFO: " + object_name + " done.", flush=True)
print("INFO: Done.", flush=True)
# save
print("INFO: Saving...", flush=True)
libpost.savet(time, average_f, "f_0_0")
libpost.savet(time, average_integral_f, "integral_f_0_0")
libpost.save(average_tau, "tau_f_0_0")
print("INFO: Done.", flush=True)
def main_velocity():
print("INFO: Post processing flow velocity statistics sampled by lagrangian objects.", flush=True)
object_names = libpost.get_object_names()
print("INFO: Object names are:", " ".join(object_names), flush=True)
# get gradient matrix
print("INFO: Reading objects properties...", flush=True)
time = libpost.get_time();
# gradients
objects_j_0_0 = libpost.get_objects_properties(["j_0_0"], object_names)
# objects_j_0_1 = libpost.get_objects_properties(["j_0_1"], object_names)
# objects_j_0_2 = libpost.get_objects_properties(["j_0_2"], object_names)
# objects_j_1_0 = libpost.get_objects_properties(["j_1_0"], object_names)
# objects_j_1_1 = libpost.get_objects_properties(["j_1_1"], object_names)
# objects_j_1_2 = libpost.get_objects_properties(["j_1_2"], object_names)
# objects_j_2_0 = libpost.get_objects_properties(["j_2_0"], object_names)
# objects_j_2_1 = libpost.get_objects_properties(["j_2_1"], object_names)
# objects_j_2_2 = libpost.get_objects_properties(["j_2_2"], object_names)
# velocity
objects_u_0 = libpost.get_objects_properties(["u_0"], object_names)
# objects_u_1 = libpost.get_objects_properties(["u_1"], object_names)
# objects_u_2 = libpost.get_objects_properties(["u_2"], object_names)
# position
objects_pos_0 = libpost.get_objects_properties(["pos_0"], object_names)
# objects_pos_1 = libpost.get_objects_properties(["pos_1"], object_names)
# objects_pos_2 = libpost.get_objects_properties(["pos_2"], object_names)
print("INFO: Done.", flush=True)
# print("INFO: Reconstructing gradients, velocity and position.", flush=True)
# gradients_value = {}
# velocity_value = {}
# position_value = {}
# for object_name in objects_j_0_0:
# # gradients
# gradients_value[object_name] = np.empty((objects_j_0_0[object_name]["value"].shape[0], objects_j_0_0[object_name]["value"].shape[1], 3, 3))
# gradients_value[object_name][:, :, 0, 0] = objects_j_0_0[object_name]["value"]
# gradients_value[object_name][:, :, 0, 1] = objects_j_0_1[object_name]["value"]
# gradients_value[object_name][:, :, 0, 2] = objects_j_0_2[object_name]["value"]
# gradients_value[object_name][:, :, 1, 0] = objects_j_1_0[object_name]["value"]
# gradients_value[object_name][:, :, 1, 1] = objects_j_1_1[object_name]["value"]
# gradients_value[object_name][:, :, 1, 2] = objects_j_1_2[object_name]["value"]
# gradients_value[object_name][:, :, 2, 0] = objects_j_2_0[object_name]["value"]
# gradients_value[object_name][:, :, 2, 1] = objects_j_2_1[object_name]["value"]
# gradients_value[object_name][:, :, 2, 2] = objects_j_2_2[object_name]["value"]
# # velocity
# velocity_value[object_name] = np.empty((objects_u_0[object_name]["value"].shape[0], objects_u_0[object_name]["value"].shape[1], 3))
# velocity_value[object_name][:, :, 0] = np.empty((objects_u_0[object_name]["value"].shape[0], objects_u_0[object_name]["value"].shape[1], 3))
# velocity_value[object_name][:, :, 1] = np.empty((objects_u_1[object_name]["value"].shape[0], objects_u_1[object_name]["value"].shape[1], 3))
# velocity_value[object_name][:, :, 2] = np.empty((objects_u_2[object_name]["value"].shape[0], objects_u_2[object_name]["value"].shape[1], 3))
# # position
# position_value[object_name] = np.empty((objects_pos_0[object_name]["value"].shape[0], objects_pos_0[object_name]["value"].shape[1], 3))
# position_value[object_name][:, :, 0] = np.empty((objects_pos_0[object_name]["value"].shape[0], objects_pos_0[object_name]["value"].shape[1], 3))
# position_value[object_name][:, :, 1] = np.empty((objects_pos_1[object_name]["value"].shape[0], objects_pos_1[object_name]["value"].shape[1], 3))
# position_value[object_name][:, :, 2] = np.empty((objects_pos_2[object_name]["value"].shape[0], objects_pos_2[object_name]["value"].shape[1], 3))
# print("INFO: Done.", flush=True)
print("INFO: Computing f(t) using pdfs...", flush=True)
average_f = {object_name:{"value":np.empty((objects_j_0_0[object_name]["value"].shape[0], 2)), "info":["f", "95CLI"]} for object_name in objects_j_0_0}
average_integral_f = {object_name:{"value":np.empty((objects_j_0_0[object_name]["value"].shape[0], 2)), "info":["integral_f", "95CLI"]} for object_name in objects_j_0_0}
average_tau = {object_name:{"value":np.empty(2), "info":["tau", "95CLI"]} for object_name in objects_j_0_0}
for object_name in objects_j_0_0:
print("INFO: Processing " + object_name + "...", flush=True)
# init
average_f[object_name]["value"][0, 0] = 1.0
average_f[object_name]["value"][0, 1] = 0.0
average_integral_f[object_name]["value"][0, 0] = 0.0
average_integral_f[object_name]["value"][0, 1] = 0.0
for k in range(1, objects_j_0_0[object_name]["value"].shape[0]):
u_t = np.concatenate((objects_u_0[object_name]["value"][:-k, :].flatten(), objects_u_0[object_name]["value"][k:, :].flatten()))
x_t = np.concatenate((objects_pos_0[object_name]["value"][:-k, :].flatten(), objects_pos_0[object_name]["value"][k:, :].flatten()))
j_0 = np.concatenate((objects_j_0_0[object_name]["value"][k:, :].flatten(), objects_j_0_0[object_name]["value"][:-k, :].flatten()))
pdf_j_u, edges_j_u = np.histogramdd((
j_0,
u_t
), bins=(
completion: np.histogram_bin_edges(j_0, bins=BIN_NB)
api: numpy.histogram_bin_edges
# -*- coding: utf-8 -*-
#
# This file is part of the pyFDA project hosted at https://github.com/chipmuenk/pyfda
#
# Copyright © pyFDA Project Contributors
# Licensed under the terms of the MIT License
# (see file LICENSE in root directory for details)
"""
Widget for plotting poles and zeros
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import logging
logger = logging.getLogger(__name__)
from ..compat import (QWidget, QLabel, QCheckBox, QFrame, QDial, QHBoxLayout,
pyqtSlot, pyqtSignal)
import numpy as np
import scipy.signal as sig
import pyfda.filterbroker as fb
from pyfda.pyfda_rc import params
from pyfda.pyfda_lib import unique_roots
from pyfda.plot_widgets.mpl_widget import MplWidget
from matplotlib import patches
class Plot_PZ(QWidget):
# incoming, connected in sender widget (locally connected to self.process_sig_rx() )
sig_rx = pyqtSignal(object)
def __init__(self, parent):
super(Plot_PZ, self).__init__(parent)
self.needs_draw = True # flag whether plot needs to be updated
self.needs_redraw = True # flag whether plot needs to be redrawn
self.tool_tip = "Pole / zero plan"
self.tab_label = "P / Z"
self._construct_UI()
def _construct_UI(self):
"""
Intitialize the widget, consisting of:
- Matplotlib widget with NavigationToolbar
- Frame with control elements
"""
self.chkHf = QCheckBox("Show |H(f)|", self)
self.chkHf.setToolTip("<span>Display |H(f)| around unit circle.</span>")
self.chkHf.setEnabled(True)
self.chkHfLog = QCheckBox("Log. Scale", self)
self.chkHfLog.setToolTip("<span>Log. scale for |H(f)|.</span>")
self.chkHfLog.setEnabled(True)
self.diaRad_Hf = QDial(self)
self.diaRad_Hf.setRange(2., 10.)
self.diaRad_Hf.setValue(2)
self.diaRad_Hf.setTracking(False) # produce less events when turning
self.diaRad_Hf.setFixedHeight(30)
self.diaRad_Hf.setFixedWidth(30)
self.diaRad_Hf.setWrapping(False)
self.diaRad_Hf.setToolTip("<span>Set max. radius for |H(f)| plot.</span>")
self.lblRad_Hf = QLabel("Radius", self)
self.chkFIR_P = QCheckBox("Plot FIR Poles", self)
self.chkFIR_P.setToolTip("<span>Show FIR poles at the origin.</span>")
self.chkFIR_P.setChecked(True)
layHControls = QHBoxLayout()
layHControls.addWidget(self.chkHf)
layHControls.addWidget(self.chkHfLog)
layHControls.addWidget(self.diaRad_Hf)
layHControls.addWidget(self.lblRad_Hf)
layHControls.addStretch(10)
layHControls.addWidget(self.chkFIR_P)
#----------------------------------------------------------------------
# ### frmControls ###
#
# This widget encompasses all control subwidgets
#----------------------------------------------------------------------
self.frmControls = QFrame(self)
self.frmControls.setObjectName("frmControls")
self.frmControls.setLayout(layHControls)
#----------------------------------------------------------------------
# ### mplwidget ###
#
# main widget, encompassing the other widgets
#----------------------------------------------------------------------
self.mplwidget = MplWidget(self)
self.mplwidget.layVMainMpl.addWidget(self.frmControls)
self.mplwidget.layVMainMpl.setContentsMargins(*params['wdg_margins'])
self.setLayout(self.mplwidget.layVMainMpl)
self.init_axes()
self.draw() # calculate and draw poles and zeros
#----------------------------------------------------------------------
# GLOBAL SIGNALS & SLOTs
#----------------------------------------------------------------------
self.sig_rx.connect(self.process_sig_rx)
#----------------------------------------------------------------------
# LOCAL SIGNALS & SLOTs
#----------------------------------------------------------------------
self.mplwidget.mplToolbar.sig_tx.connect(self.process_sig_rx)
self.chkHf.clicked.connect(self.draw)
self.chkHfLog.clicked.connect(self.draw)
self.diaRad_Hf.valueChanged.connect(self.draw)
self.chkFIR_P.clicked.connect(self.draw)
#------------------------------------------------------------------------------
def process_sig_rx(self, dict_sig=None):
"""
Process signals coming from the navigation toolbar and from sig_rx
"""
logger.debug("Processing {0} | needs_draw = {1}, visible = {2}"\
.format(dict_sig, self.needs_draw, self.isVisible()))
if self.isVisible():
if 'data_changed' in dict_sig or 'home' in dict_sig or self.needs_draw:
self.draw()
self.needs_draw = False
self.needs_redraw = False
elif 'ui_changed' in dict_sig and dict_sig['ui_changed'] == 'resized'\
or self.needs_redraw:
self.redraw()
self.needs_redraw = False
elif 'view_changed' in dict_sig:
self.update_view()
else:
if 'data_changed' in dict_sig or 'view_changed' in dict_sig:
self.needs_draw = True
elif 'ui_changed' in dict_sig and dict_sig['ui_changed'] == 'resized':
self.needs_redraw = True
#------------------------------------------------------------------------------
def init_axes(self):
"""
Initialize and clear the axes
"""
if self.chkHf.isChecked():
self.ax = self.mplwidget.fig.add_subplot(111)
else:
self.ax = self.mplwidget.fig.add_subplot(111)
self.ax.clear()
self.ax.get_xaxis().tick_bottom() # remove axis ticks on top
self.ax.get_yaxis().tick_left() # remove axis ticks right
#------------------------------------------------------------------------------
def update_view(self):
"""
Draw the figure with new limits, scale, etc. without recalculating H(f)
-- not yet implemented, just use draw() for the moment
"""
self.draw()
#------------------------------------------------------------------------------
def draw(self):
self.chkFIR_P.setVisible(fb.fil[0]['ft']=='FIR')
self.init_axes()
self.draw_pz()
#------------------------------------------------------------------------------
def draw_pz(self):
"""
(re)draw P/Z plot
"""
p_marker = params['P_Marker']
z_marker = params['Z_Marker']
zpk = fb.fil[0]['zpk']
# add antiCausals if they exist (must take reciprocal to plot)
if 'rpk' in fb.fil[0]:
zA = fb.fil[0]['zpk'][0]
zA = np.conj(1./zA)
pA = fb.fil[0]['zpk'][1]
pA = np.conj(1./pA)
zC = np.append(zpk[0],zA)
pC = np.append(zpk[1],pA)
zpk[0] = zC
zpk[1] = pC
self.ax.clear()
[z,p,k] = self.zplane(z = zpk[0], p = zpk[1], k = zpk[2], plt_ax = self.ax,
plt_poles=self.chkFIR_P.isChecked() or fb.fil[0]['ft'] == 'IIR',
mps = p_marker[0], mpc = p_marker[1], mzs = z_marker[0], mzc = z_marker[1])
self.ax.set_title(r'Pole / Zero Plot')
self.ax.set_xlabel('Real axis')
self.ax.set_ylabel('Imaginary axis')
self.draw_Hf(r = self.diaRad_Hf.value())
self.redraw()
#------------------------------------------------------------------------------
def redraw(self):
"""
Redraw the canvas when e.g. the canvas size has changed
"""
self.mplwidget.redraw()
#------------------------------------------------------------------------------
def zplane(self, b=None, a=1, z=None, p=None, k=1, pn_eps=1e-3, analog=False,
plt_ax = None, plt_poles=True, style='square', anaCircleRad=0, lw=2,
mps = 10, mzs = 10, mpc = 'r', mzc = 'b', plabel = '', zlabel = ''):
"""
Plot the poles and zeros in the complex z-plane either from the
coefficients (`b`, `a`) of a discrete transfer function `H(z)` (zpk = False)
or directly from the zeros and poles (z,p) (zpk = True).
When only b is given, an FIR filter with all poles at the origin is assumed.
Parameters
----------
b : array_like
Numerator coefficients (transversal part of filter)
When b is not None, poles and zeros are determined from the coefficients
b and a
a : array_like (optional, default = 1 for FIR-filter)
Denominator coefficients (recursive part of filter)
z : array_like, default = None
Zeros
When b is None, poles and zeros are taken directly from z and p
p : array_like, default = None
Poles
analog : boolean (default: False)
When True, create a P/Z plot suitable for the s-plane, i.e. suppress
the unit circle (unless anaCircleRad > 0) and scale the plot for
a good display of all poles and zeros.
pn_eps : float (default: 1e-3)
Tolerance for separating close poles or zeros
plt_ax : handle to axes for plotting (default: None)
When no axes is specified, the current axes is determined via plt.gca()
plt_poles : Boolean (default : True)
Plot poles. This can be used to suppress poles for FIR systems
where all poles are at the origin.
style : string (default: 'square')
Style of the plot, for style == 'square' make scale of x- and y-
axis equal.
mps : integer (default: 10)
Size for pole marker
mzs : integer (default: 10)
Size for zero marker
mpc : char (default: 'r')
Pole marker colour
mzc : char (default: 'b')
Zero marker colour
lw : integer (default: 2)
Linewidth for unit circle
plabel, zlabel : string (default: '')
This string is passed to the plot command for poles and zeros and
can be displayed by legend()
Returns
-------
z, p, k : ndarray
Notes
-----
"""
# TODO:
# - polar option
# - add keywords for color of circle -> **kwargs
# - add option for multi-dimensional arrays and zpk data
# make sure that all inputs are arrays
b = np.atleast_1d(b)
a = np.atleast_1d(a)
z = np.atleast_1d(z) # make sure that p, z are arrays
p = np.atleast_1d(p)
if b.any(): # coefficients were specified
if len(b) < 2 and len(a) < 2:
logger.error('No proper filter coefficients: both b and a are scalars!')
return z, p, k
# If the largest coefficient exceeds 1, normalize the coefficients
if np.max(b) > 1:
kn = np.max(b)
b = b / float(kn)
else:
kn = 1.
if
|
np.max(a)
|
numpy.max
|
import copy
import itertools
import time
import numpy as np
import pandas as pd
import pybnb
import scipy.sparse as sp
from pysat.examples.rc2 import RC2
from pysat.formula import WCNF
import scphylo as scp
rec_num = 0
def bnb(df_input, bounding, time_limit=86400):
"""Solving using PhISCS-BnB.
PhISCS-BnB: a fast branch and bound algorithm for the perfect tumor phylogeny
reconstruction problem
:cite:`PhISCS-BnB`.
Parameters
----------
df_input : :class:`pandas.DataFrame`
Input genotype matrix in which rows are cells and columns are mutations.
Values inside this matrix show the presence (1), absence (0) and missing
entries (3).
bounding : :obj:`str`
The bounding strategy {'simulated', 'real'}
time_limit : :obj:`int`, optional
Time limit of the BnB core running in seconds, by default 86400 (one day)
Returns
-------
:class:`pandas.DataFrame`
A conflict-free matrix in which rows are cells and columns are mutations.
Values inside this matrix show the presence (1) and absence (0).
"""
_, mpi4py_is_not_imporeted = scp.ul.import_mpi4py()
if mpi4py_is_not_imporeted:
scp.logg.error("Unable to import a package!")
if bounding not in ["simulated", "real"]:
scp.logg.error("Wrong choice of bounding!")
scp.logg.info(f"running BnB with bounding={bounding}")
matrix_input = df_input.values
matrix_output = matrix_input.copy()
na_value = 3
bounding_algs = {
"real": TwoSatBounding(
heuristic_setting=None,
n_levels=2,
compact_formulation=False,
na_value=na_value,
), # Real Data
"simulated": TwoSatBounding(
heuristic_setting=[True, True, False, True, True],
n_levels=1,
compact_formulation=True,
na_value=na_value,
), # Simulation
}
bounding_alg = bounding_algs[bounding]
s_time = time.time()
flips = solve_by_BnB(matrix_input, na_value, bounding_alg, time_limit)
e_time = time.time()
running_time = e_time - s_time
for k in flips:
matrix_output[k] = 1
matrix_output[np.where(matrix_output == na_value)] = 0
df_output = pd.DataFrame(matrix_output)
df_output.columns = df_input.columns
df_output.index = df_input.index
df_output.index.name = "cellIDxmutID"
scp.ul.stat(df_input, df_output, 0.1, 0.000000001, running_time)
return df_output
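# Illustrative usage sketch (the toy matrix below is an assumption, not taken from the
# original tests): rows are cells, columns are mutations, 3 marks a missing entry.
#   df_in = pd.DataFrame([[1, 0, 3],
#                         [0, 1, 0],
#                         [1, 3, 1]])
#   df_out = bnb(df_in, bounding="simulated")  # conflict-free 0/1 matrix of the same shape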
def solve_by_BnB(matrix_in, na_value, bounding_alg, time_limit):
result = bnb_solve(
matrix_in,
bounding_algorithm=bounding_alg,
na_value=na_value,
time_limit=time_limit,
)
matrix_output = result[0]
flips = []
zero_one_flips = np.where((matrix_in != matrix_output) & (matrix_in != na_value))
for i in range(len(zero_one_flips[0])):
flips.append((zero_one_flips[0][i], zero_one_flips[1][i]))
na_one_flips = np.where((matrix_output == 1) & (matrix_in == na_value))
for i in range(len(na_one_flips[0])):
flips.append((na_one_flips[0][i], na_one_flips[1][i]))
return flips
def all_None(*args):
return args.count(None) == len(args)
def calculate_column_intersections(matrix, for_loop=False, row_by_row=False):
ret = np.empty((matrix.shape[1], matrix.shape[1]), dtype=bool)  # np.bool alias was removed in NumPy 1.24
mask_1 = matrix == 1
if for_loop:
for p in range(matrix.shape[1]):
# even though the diagonals are not necessary, I keep it for ease of
# debugging
for q in range(p, matrix.shape[1]):
ret[p, q] = np.any(np.logical_and(mask_1[:, p], mask_1[:, q]))
ret[q, p] = ret[p, q]
elif row_by_row:
ret[:, :] = 0
for r in range(matrix.shape[0]):
one_columns = mask_1[r]
ret[np.ix_(one_columns, one_columns)] = True
return ret
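# Example (illustrative): for matrix = np.array([[1, 1, 0], [0, 0, 1]]), columns 0 and 1
# "intersect" because both carry a 1 in row 0, while neither of them intersects column 2.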
def zero_or_na(vec, na_value=-1):
return np.logical_or(vec == 0, vec == na_value)
def make_sure_variable_exists(
memory_matrix, row, col, num_var_F, map_f2ij, var_list, na_value
):
if memory_matrix[row, col] < 0:
num_var_F += 1
map_f2ij[num_var_F] = (row, col)
memory_matrix[row, col] = num_var_F
var_list.append(num_var_F)
return num_var_F
def get_effective_matrix(I_mtr, delta01, delta_na_to_1, change_na_to_0=False):
x = np.array(I_mtr + delta01, dtype=np.int8)
if delta_na_to_1 is not None:
na_indices = delta_na_to_1.nonzero()
x[na_indices] = 1 # should have been (but does not accept):
# x[na_indices] = delta_na_to_1[na_indices]
if change_na_to_0:
x[np.logical_and(x != 0, x != 1)] = 0
return x
def make_twosat_model_from_np(
constraints,
F,
zero_vars,
na_vars,
eps=None,
heuristic_setting=None,
compact_formulation=True,
):
if eps is None:
eps = 1 / (len(zero_vars) + len(na_vars))
if heuristic_setting is None:
rc2 = RC2(WCNF())
else:
assert len(heuristic_setting) == 5
rc2 = RC2(
WCNF(),
adapt=heuristic_setting[0],
exhaust=heuristic_setting[1],
incr=heuristic_setting[2],
minz=heuristic_setting[3],
trim=heuristic_setting[4],
)
if not compact_formulation:
# hard constraints Z_a,p or Z_b,q
for constr_ind in range(constraints[0].shape[0]):
constraint = constraints[0][constr_ind]
a, p, b, q = constraint.flat
# print(constraint, F.shape)
# print(a, p, b, q)
rc2.add_clause([F[a, p], F[b, q]])
if len(constraints) >= 2:
# hard constraints Z_a,p or Z_b,q or -Z_c,d
for constr_ind in range(constraints[1].shape[0]):
constraint = constraints[1][constr_ind]
a, p, b, q, c, d = constraint.flat
# print(a, p, b, q, c, d)
rc2.add_clause([F[a, p], F[b, q], -F[c, d]])
else:
# hard constraints Z_a,p or (sign) b_pq
for constr_ind in range(constraints[0].shape[0]):
constraint = constraints[0][constr_ind]
row, col, b_pq, sign = constraint.flat
rc2.add_clause([F[row, col], sign * b_pq])
if len(constraints) >= 2:
# hard constraints Z_a,p or Z_b,q or -Z_c,d
for constr_ind in range(constraints[1].shape[0]):
constraint = constraints[1][constr_ind]
row, col, c_pq0, c_pq1 = constraint.flat
# if Z_rc is True at least one of p, q should become active
# E.g., c_pq0 be False
rc2.add_clause([-F[row, col], -c_pq0, -c_pq1])
# if c_pq0 is False then Z_rc has to be flipped
rc2.add_clause([F[row, col], c_pq0])
# soft constraints for zero variables
for var in zero_vars:
rc2.add_clause([-var], weight=1)
if eps > 0:
# soft constraints for zero variables
for var in na_vars:
rc2.add_clause([-var], weight=eps)
return rc2
def twosat_solver(
matrix,
cluster_rows=False,
cluster_cols=False,
only_descendant_rows=False,
na_value=None,
leave_nas_if_zero=False,
return_lb=False,
heuristic_setting=None,
n_levels=2,
eps=0,
compact_formulation=False,
):
global rec_num
rec_num += 1
assert not cluster_rows, "Not implemented yet"
assert not cluster_cols, "Not implemented yet"
assert not only_descendant_rows, "Not implemented yet"
model_time = 0
opt_time = 0
start_time = time.time()
return_value = make_constraints_np_matrix(
matrix,
n_levels=n_levels,
na_value=na_value,
compact_formulation=compact_formulation,
)
model_time += time.time() - start_time
F, map_f2ij, zero_vars, na_vars, hard_constraints, col_pair = (
return_value.F,
return_value.map_f2ij,
return_value.zero_vars,
return_value.na_vars,
return_value.hard_constraints,
return_value.col_pair,
)
if col_pair is not None:
icf = False
elif return_value.complete_version:
icf = True
else:
icf = None
final_output = None
lower_bound = 0
if icf:
final_output, _ = matrix.copy(), 0
else:
start_time = time.time()
rc2 = make_twosat_model_from_np(
hard_constraints,
F,
zero_vars,
na_vars,
eps,
heuristic_setting,
compact_formulation=compact_formulation,
)
model_time += time.time() - start_time
a = time.time()
variables = rc2.compute()
b = time.time()
opt_time += b - a
output_matrix = matrix.copy()
output_matrix = output_matrix.astype(np.int8)
for var_ind in range(len(variables)):
if (
0 < variables[var_ind] and variables[var_ind] in map_f2ij
): # if 0 or 2 make it one
output_matrix[map_f2ij[variables[var_ind]]] = 1
if matrix[map_f2ij[variables[var_ind]]] != na_value:
lower_bound += 1
# I don't change 2s to 0s here; keep them as 2 for next time.
# For the recursion I turn off all sparsification parameters.
# I also want na->0 to stay na for the recursion, regardless of the original
# input for leave_nas_if_zero.
# I am also not passing eps here, to wrap up the recursion sooner.
Orec, rec_model_time, rec_opt_time = twosat_solver(
output_matrix,
na_value=na_value,
heuristic_setting=None,
n_levels=n_levels,
leave_nas_if_zero=True,
compact_formulation=compact_formulation,
)
model_time += rec_model_time
opt_time += rec_opt_time
if not leave_nas_if_zero:
Orec[Orec == na_value] = 0
final_output = Orec
if return_lb:
return final_output, model_time, opt_time, lower_bound
else:
return final_output, model_time, opt_time
def make_constraints_np_matrix(
matrix,
constraints=None,
n_levels=2,
na_value=None,
row_coloring=None,
col_coloring=None,
probability_threshold=None,
fn_rate=None,
column_intersection=None,
compact_formulation=True,
):
"""
Return a "C x 2 x 2" matrix where C is the number of extracted constraints.
Each constraint is of the form:
((r1, c1), (r2, c2)) and corresponds to Z_{r1, c1} or Z_{r2, c2}
:param matrix: A binary matrix cellsXmutations
:param constraints: If not None instead of evaluating the whole matrix it will
only look at potential constraints
:param level: The type of constraints to add
:param na_value:
:param row_coloring: Only constraints that have the same row coloring will be used
:param col_coloring: Only constraints that have the same column coloring will be used
:param probability_threshold:
:param fn_rate:
:return:
"""
# TD: Take descendance analysis out of here?
# TD: how to reuse constraints input
from collections import namedtuple
assert (probability_threshold is None) == (fn_rate is None)
# descendance_analysis = probability_threshold is not None
assert 1 <= n_levels <= 2, "not implemented yet"
# means none of the sparsification ideas have been used
complete_version = all_None(
row_coloring, col_coloring, probability_threshold, fn_rate
)
# soft_cnst_num = 0
hard_constraints = [[] for _ in range(n_levels)] # an empty list each level
# if descendance_analysis:
# # dictionary for lazy calculation of descendance:
# descendent_dict = dict()
# variables for each zero
F = -np.ones(matrix.shape, dtype=np.int64)
num_var_F = 0
map_f2ij = {}
zero_vars = []
na_vars = []
if compact_formulation:
B_vars_offset = matrix.shape[0] * matrix.shape[1] + 1
num_var_B = 0
# map_b2ij = dict()
if n_levels >= 2:
C_vars_offset = B_vars_offset + matrix.shape[1] * matrix.shape[1] + 1
num_var_C = 0
# map_c2ij = dict()
col_pair = None
pair_cost = 0
if column_intersection is None:
column_intersection = calculate_column_intersections(matrix, row_by_row=True)
# column_intersection = calculate_column_intersections(matrix, for_loop=True)
for p in range(matrix.shape[1]):
for q in range(p + 1, matrix.shape[1]):
if column_intersection[p, q]: # p and q has intersection
# TD: check col_coloring here
r01 = np.nonzero(
np.logical_and(
zero_or_na(matrix[:, p], na_value=na_value), matrix[:, q] == 1
)
)[0]
r10 = np.nonzero(
np.logical_and(
matrix[:, p] == 1, zero_or_na(matrix[:, q], na_value=na_value)
)
)[0]
cost = min(len(r01), len(r10))
if cost > pair_cost: # keep best pair to return as auxiliary info
col_pair = (p, q)
pair_cost = cost
if cost > 0: # don't do anything if one of r01 or r10 is empty
if (
not compact_formulation
): # len(r01) * len(r10) many constraints will be added
for a, b in itertools.product(r01, r10):
# TD: check row_coloring
for row, col in [
(a, p),
(b, q),
]: # make sure the variables for this are made
var_list = (
zero_vars if matrix[row, col] == 0 else na_vars
)
num_var_F = make_sure_variable_exists(
F, row, col, num_var_F, map_f2ij, var_list, na_value
)
hard_constraints[0].append(
[[a, p], [b, q]]
) # at least one of them should be flipped
else: # compact formulation: (r01 + r10) number of new constraints
# will be added; define a new B variable
b_pq = B_vars_offset + num_var_B
num_var_B += 1
for row_list, col, sign in zip((r01, r10), (p, q), (1, -1)):
for row in row_list:
var_list = (
zero_vars if matrix[row, col] == 0 else na_vars
)
num_var_F = make_sure_variable_exists(
F, row, col, num_var_F, map_f2ij, var_list, na_value
)
hard_constraints[0].append([row, col, b_pq, sign])
# this will be translated to (Z_ap or (sign)B_pq)
elif n_levels >= 2:
r01 = np.nonzero(
np.logical_and(
zero_or_na(matrix[:, p], na_value=na_value), matrix[:, q] == 1
)
)[0]
r10 = np.nonzero(
np.logical_and(
matrix[:, p] == 1, zero_or_na(matrix[:, q], na_value=na_value)
)
)[0]
cost = min(len(r01), len(r10))
if cost > 0: # don't do anything if one of r01 or r10 is empty
if not compact_formulation:
# len(r01) * len(r10) * (len(r01) * len(r10)) many constraints
# will be added
x = np.empty((r01.shape[0] + r10.shape[0], 2), dtype=int)  # np.int alias was removed in NumPy 1.24
x[: len(r01), 0] = r01
x[: len(r01), 1] = p
x[-len(r10) :, 0] = r10 # noqa
x[-len(r10) :, 1] = q # noqa
for a, b, ind in itertools.product(r01, r10, range(x.shape[0])):
for row, col in [
(a, p),
(b, q),
(x[ind, 0], x[ind, 1]),
]: # make sure the variables for this are made
# print(row, col)
var_list = (
zero_vars if matrix[row, col] == 0 else na_vars
)
num_var_F = make_sure_variable_exists(
F, row, col, num_var_F, map_f2ij, var_list, na_value
)
row = [[a, p], [b, q], [x[ind, 0], x[ind, 1]]]
if not np.array_equal(
row[0], row[2]
) and not np.array_equal(row[1], row[2]):
hard_constraints[1].append(
[[a, p], [b, q], [x[ind, 0], x[ind, 1]]]
)
else: # if compact_formulation: 2(r01 + r10) will be added
# define two new C variables
c_pq0 = C_vars_offset + num_var_C
num_var_C += 1
c_pq1 = C_vars_offset + num_var_C
num_var_C += 1
for row_list, col, sign in zip((r01, r10), (p, q), (1, -1)):
for row in row_list:
var_list = (
zero_vars if matrix[row, col] == 0 else na_vars
)
num_var_F = make_sure_variable_exists(
F, row, col, num_var_F, map_f2ij, var_list, na_value
)
if sign == 1:
hard_constraints[1].append([row, col, c_pq0, c_pq1])
# this will be translated to
# (~Z_ap or ~c_pq0 or ~c_pq1) and (Z_ap or c_pq0)
else:
hard_constraints[1].append([row, col, c_pq1, c_pq0])
# this will be translated to
# (~Z_ap or ~c_pq0 or ~c_pq1) (the same)
# and (Z_ap or c_pq1) (different)
# TD: when using this make sure to put an if to say if the model is small and
return_type = namedtuple(
"ReturnType",
"F map_f2ij zero_vars na_vars hard_constraints col_pair complete_version",
)
for ind in range(n_levels):
hard_constraints[ind] = np.array(hard_constraints[ind], dtype=int)  # np.int alias was removed in NumPy 1.24
return return_type(
F, map_f2ij, zero_vars, na_vars, hard_constraints, col_pair, complete_version
)
def is_conflict_free_gusfield_and_get_two_columns_in_coflicts(I_mtr, na_value):
def sort_bin(a):
b = np.transpose(a)
b_view = np.ascontiguousarray(b).view(
np.dtype((np.void, b.dtype.itemsize * b.shape[1]))
)
idx = np.argsort(b_view.ravel())[::-1]
c = b[idx]
return np.transpose(c), idx
Ip = I_mtr.copy()
Ip[Ip == na_value] = 0
O, idx = sort_bin(Ip)
# TD: delete duplicate columns
# print(O, '\n')
Lij = np.zeros(O.shape, dtype=int)
for i in range(O.shape[0]):
maxK = 0
for j in range(O.shape[1]):
if O[i, j] == 1:
Lij[i, j] = maxK
maxK = j + 1
# print(Lij, '\n')
Lj = np.amax(Lij, axis=0)
# print(Lj, '\n')
for i in range(O.shape[0]):
for j in range(O.shape[1]):
if O[i, j] == 1:
if Lij[i, j] != Lj[j]:
return False, (idx[j], idx[Lj[j] - 1])
return True, (None, None)
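# Illustrative note (three-gametes rule): two columns p, q are in conflict exactly when
# rows containing the pairs (1, 0), (0, 1) and (1, 1) all occur, e.g.
#   np.array([[1, 0],
#             [0, 1],
#             [1, 1]])   # -> not conflict-free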
class BoundingAlgAbstract:
def __init__(self):
"""[summary]."""
self.matrix = None
self._extra_info = None
self._extraInfo = {}
self._times = {}
self.na_support = False
def reset(self, matrix):
scp.logg.error("The method not implemented")
def get_bound(self, delta):
"""
Include the flips done so far too.
delta: a sparse matrix with flipped ones
"""
scp.logg.error("The method not implemented")
def get_name(self):
return type(self).__name__
def get_state(self):
return None
def set_state(self, state):
assert state is None
def get_extra_info(self):
"""
Provide extra information after calling bounding.
E.g.,
return {"icf":True, "bestPair":(a,b)}
"""
return copy.copy(self._extraInfo)
def get_priority(self, till_here, this_step, after_here, icf=False):
return -after_here
def get_times(self):
return self._times
def get_init_node(self):
return None
class TwoSatBounding(BoundingAlgAbstract):
def __init__(
self,
priority_version=-1,
cluster_rows=False,
cluster_cols=False,
only_descendant_rows=False,
na_value=None,
heuristic_setting=None,
n_levels=2,
eps=0,
compact_formulation=False,
):
"""
TwoSatBounding.
:param priority_version:
"""
super().__init__()
assert not cluster_rows, "Not implemented yet"
assert not cluster_cols, "Not implemented yet"
assert not only_descendant_rows, "Not implemented yet"
self.priority_version = priority_version
self.na_support = True
self.na_value = na_value
self.matrix = None
self._times = None
self.next_lb = None
self.heuristic_setting = heuristic_setting
self.n_levels = n_levels
self.eps = eps # only for upperbound
self.compact_formulation = compact_formulation
self.cluster_rows = cluster_rows
self.cluster_cols = cluster_cols
self.only_descendant_rows = only_descendant_rows
def get_name(self):
params = [
type(self).__name__,
self.priority_version,
self.heuristic_setting,
self.n_levels,
self.eps,
self.compact_formulation,
]
params_str = map(str, params)
return "_".join(params_str)
def reset(self, matrix):
self.matrix = matrix # TD: make the model here and do small alterations later
# self.na_value = infer_na_value(matrix)
self._times = {"model_preparation_time": 0, "optimization_time": 0}
def get_init_node(self):
node = pybnb.Node()
solution, model_time, opt_time, lb = twosat_solver(
self.matrix,
cluster_rows=self.cluster_rows,
cluster_cols=self.cluster_cols,
only_descendant_rows=self.only_descendant_rows,
na_value=self.na_value,
leave_nas_if_zero=True,
return_lb=True,
heuristic_setting=None,
n_levels=self.n_levels,
eps=self.eps,
compact_formulation=self.compact_formulation,
)
self._times["model_preparation_time"] += model_time
self._times["optimization_time"] += opt_time
nodedelta = sp.lil_matrix(np.logical_and(solution == 1, self.matrix == 0))
node_na_delta = sp.lil_matrix(
np.logical_and(solution == 1, self.matrix == self.na_value)
)
node.state = (
nodedelta,
True,
None,
nodedelta.count_nonzero(),
self.get_state(),
node_na_delta,
)
node.queue_priority = self.get_priority(
till_here=-1, this_step=-1, after_here=-1, icf=True
)
self.next_lb = lb
return node
def get_bound(self, delta, delta_na=None):
# make this dynamic when more nodes were getting explored
if self.next_lb is not None:
lb = self.next_lb
self.next_lb = None
return lb
self._extraInfo = None
current_matrix = get_effective_matrix(self.matrix, delta, delta_na)
has_na = np.any(current_matrix == self.na_value)
model_time = time.time()
return_value = make_constraints_np_matrix(
current_matrix,
n_levels=self.n_levels,
na_value=self.na_value,
compact_formulation=self.compact_formulation,
)
F, map_f2ij, zero_vars, na_vars, hard_constraints, col_pair = (
return_value.F,
return_value.map_f2ij,
return_value.zero_vars,
return_value.na_vars,
return_value.hard_constraints,
return_value.col_pair,
)
if col_pair is not None:
icf = False
elif return_value.complete_version:
icf = True
else:
icf = None # not sure
rc2 = make_twosat_model_from_np(
hard_constraints,
F,
zero_vars,
na_vars,
eps=0,
heuristic_setting=self.heuristic_setting,
compact_formulation=self.compact_formulation,
)
model_time = time.time() - model_time
self._times["model_preparation_time"] += model_time
opt_time = time.time()
variables = rc2.compute()
opt_time = time.time() - opt_time
self._times["optimization_time"] += opt_time
result = 0
for var_ind in range(len(variables)):
if (
variables[var_ind] > 0
and abs(variables[var_ind]) in map_f2ij
and self.matrix[map_f2ij[abs(variables[var_ind])]] == 0
):
result += 1
assert has_na or ((result == 0) == (col_pair is None)), f"{result}_{col_pair}"
self._extraInfo = {
"icf": icf,
"one_pair_of_columns": col_pair,
}
ret = result + delta.count_nonzero()
return ret
def get_priority(self, till_here, this_step, after_here, icf=False):
if icf:
return self.matrix.shape[0] * self.matrix.shape[1] + 10
else:
sgn = np.sign(self.priority_version)
pv_abs = self.priority_version * sgn
if pv_abs == 1:
return sgn * (till_here + this_step + after_here)
elif pv_abs == 2:
return sgn * (this_step + after_here)
elif pv_abs == 3:
return sgn * (after_here)
elif pv_abs == 4:
return sgn * (till_here + after_here)
elif pv_abs == 5:
return sgn * (till_here)
elif pv_abs == 6:
return sgn * (till_here + this_step)
elif pv_abs == 7:
return 0
scp.logg.error("get_priority did not return anything!")
class BnB(pybnb.Problem):
def __init__(self, I_mtr, boundingAlg: BoundingAlgAbstract, na_value=None):
"""[summary].
Parameters
----------
I_mtr : [type]
[description]
boundingAlg : BoundingAlgAbstract
[description]
na_value : [type], optional
[description], by default None
"""
self.na_value = na_value
self.has_na = np.any(I_mtr == self.na_value)
self.I_mtr = I_mtr
self.delta = sp.lil_matrix(
I_mtr.shape, dtype=np.int8
) # this can be coo_matrix too
self.boundingAlg = boundingAlg
self.delta_na = None
if self.has_na:
assert (
boundingAlg.na_support
), "Input has N/A coordinates but bounding algorithm doesn't support it."
self.delta_na = sp.lil_matrix(
I_mtr.shape, dtype=np.int8
) # the coordinates with na that are decided to be 1
(
self.icf,
self.colPair,
) = is_conflict_free_gusfield_and_get_two_columns_in_coflicts(
self.I_mtr, na_value
)
self.boundingAlg.reset(I_mtr)
self.node_to_add = self.boundingAlg.get_init_node()
self.bound_value = self.boundingAlg.get_bound(self.delta)
def sense(self):
return pybnb.minimize
def objective(self):
if self.icf:
return self.delta.count_nonzero()
else:
return pybnb.Problem.infeasible_objective(self)
def bound(self):
return self.bound_value
def save_state(self, node):
node.state = (
self.delta,
self.icf,
self.colPair,
self.bound_value,
self.boundingAlg.get_state(),
self.delta_na,
)
def load_state(self, node):
(
self.delta,
self.icf,
self.colPair,
self.bound_value,
boundingAlgState,
self.delta_na,
) = node.state
self.boundingAlg.set_state(boundingAlgState)
def get_current_matrix(self):
return get_effective_matrix(self.I_mtr, self.delta, self.delta_na)
def branch(self):
if self.icf:
return
need_for_new_nodes = True
if self.node_to_add is not None:
newnode = self.node_to_add
self.node_to_add = None
if (
newnode.state[0].count_nonzero() == self.bound_value
): # current_obj == lb => no need to explore
need_for_new_nodes = False
assert (
newnode.queue_priority is not None
), "Right before adding a node its priority in the queue is not set!"
yield newnode
if need_for_new_nodes:
p, q = self.colPair
nf01 = None
current_matrix = self.get_current_matrix()
for col, colp in [(q, p), (p, q)]:
node = pybnb.Node()
nodedelta = copy.deepcopy(self.delta)
node_na_delta = copy.deepcopy(self.delta_na)
col1 = np.array(current_matrix[:, col], dtype=np.int8).reshape(-1)
col2 =
|
np.array(current_matrix[:, colp], dtype=np.int8)
|
numpy.array
|
#!/usr/bin/env python3
# coding=utf-8
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = "MIT"
import os as os
import sys as sys
import traceback as trb
import argparse as argp
import json as json
import datetime as dt
import socket as skt
import random as rand
import string as string
import logging as logging
import logging.config as logconf
import numpy as np
import numpy.random as rng
logger = logging.getLogger()
BITS_PER_GB = 1024 * 1024 * 1024 * 8
def parse_command_line():
"""
:return:
"""
script_full_path = os.path.realpath(__file__)
script_dir = os.path.dirname(script_full_path)
log_config_default_path = os.path.join(script_dir, 'configs', 'log_config.json')
if not os.path.isfile(log_config_default_path):
script_root = os.path.split(script_dir)[0]
log_config_default_path = os.path.join(script_root, 'configs', 'log_config.json')
if not os.path.isfile(log_config_default_path):
log_config_default_path = ''
parser = argp.ArgumentParser(add_help=True, allow_abbrev=False)
parser.add_argument('--debug', '-d', action='store_true', dest='debug',
help='Print progress messages (by default: to stderr).')
parser.add_argument('--use-logger', '-ul', type=str,
default='default', dest='use_logger',
help='Name of logger to use (default).')
parser.add_argument('--log-config', '-lc', type=str,
default=log_config_default_path, dest='log_config',
help='Full path to JSON file containing '
'configuration parameters for the '
'loggers to use. A logger named "debug" '
'must be present in the configuration file.')
parser.add_argument('--output-folder', '-of', type=str, required=True, dest='outdir',
help='Path to store temp data.')
parser.add_argument('--repeat', '-r', type=int, default=10, dest='repeat',
help='Repeat measurements this many times')
parser.add_argument('--data-size', '-ds', type=int, default=4, dest='datasize',
help='Size of the test data file in GiB.')
args = parser.parse_args()
return args
def init_logger(cli_args):
"""
:param cli_args:
:return:
"""
if not os.path.isfile(cli_args.log_config):
return
with open(cli_args.log_config, 'r') as log_config:
config = json.load(log_config)
if 'debug' not in config['loggers']:
raise ValueError('Logger named "debug" must be present '
'in log config JSON: {}'.format(cli_args.log_config))
logconf.dictConfig(config)
global logger
if cli_args.debug:
logger = logging.getLogger('debug')
else:
logger = logging.getLogger(cli_args.use_logger)
logger.debug('Logger initialized')
return
def main():
"""
:return:
"""
args = parse_command_line()
init_logger(args)
logger.debug('Starting performance test')
os.makedirs(args.outdir, exist_ok=True)
hostname = skt.gethostname()
rand_string = ''.join(rand.sample(string.ascii_lowercase, 8))
file_path = os.path.join(args.outdir, 'tmp_io-perf_{}_{}.npy'.format(hostname, rand_string))
logger.info('Running on host: {}'.format(hostname))
logger.info('Writing temp data to file: {}'.format(file_path))
logger.info('Repeating measurements {} times'.format(args.repeat))
timings = []
speeds = []
# assuming numpy default float64 dtype
num_floats = int(args.datasize * BITS_PER_GB / 64)
logger.debug('Generating {} random floats per iteration'.format(num_floats))
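# Worked example (with the default --data-size of 4): BITS_PER_GB = 2**33, so
# num_floats = 4 * 2**33 / 64 = 536,870,912 float64 values, i.e. ~4 GiB per iteration.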
for idx in range(args.repeat):
rand_data = rng.random(num_floats)
data_size_bytes = rand_data.nbytes
data_size_mbytes = data_size_bytes / 1024 / 1024
logger.debug(
'Iteration {}: random data of size {} B (~ {} MB) generated'.format(
idx + 1, data_size_bytes, data_size_mbytes
)
)
logger.debug('Writing data...')
with open(file_path, 'wb') as dump:
start = dt.datetime.now()
np.save(dump, rand_data, allow_pickle=False)
end = dt.datetime.now()
diff_in_sec = (end - start).total_seconds()
mb_per_sec = data_size_mbytes / diff_in_sec
timings.append(diff_in_sec)
speeds.append(mb_per_sec)
os.unlink(file_path)
logger.debug('Iter complete')
timings = np.array(timings, dtype=np.float64)
timings = timings.round(2)
speeds = np.array(speeds, dtype=np.float64)
speeds = speeds.round(2)
logger.info('Timings in seconds between I/O start and end')
logger.info('Min.: {} s'.format(timings.min()))
logger.info('Avg.: {} s'.format(timings.mean()))
logger.info('Median: {} s'.format(np.median(timings)))
logger.info('Max.: {} s'.format(timings.max()))
logger.info('======================')
logger.info('Write speed in MB/sec')
logger.info('Min.: {} MB/s'.format(speeds.min()))
logger.info('Avg.: {} MB/s'.format(speeds.mean()))
logger.info('Median: {} MB/s'.format(
|
np.median(speeds)
|
numpy.median
|
"""Functions for point cloud data augmentation"""
import numpy as np
###########################################
# numpy based functions
###########################################
def rotate_point_cloud(pc):
"""
Rotate the point cloud about the up (y) axis by a random angle.
Input:
pc: Nx3 array of original point clouds
Return:
rotated_pc: Nx3 array of point clouds after rotation
"""
angle = np.random.uniform(0, np.pi * 2)
cosval = np.cos(angle)
sinval = np.sin(angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
rotated_pc = np.dot(pc, rotation_matrix)
return rotated_pc
def jitter_point_cloud(pc, sigma=0.01, clip=0.05):
"""
Randomly jitter point cloud per point.
Input:
pc: Nx3 array of original point clouds
Return:
jittered_pc: Nx3 array of point clouds after jitter
"""
N, C = pc.shape
assert clip > 0
jittered_pc = np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)
jittered_pc += pc
return jittered_pc
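# Illustrative usage (the random cloud below is an assumption, not part of the original module):
#   pc = np.random.rand(1024, 3)                         # N x 3 point cloud
#   pc_aug = jitter_point_cloud(rotate_point_cloud(pc))  # rotate, then add clipped noise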
def translate_point_cloud(pc):
xyz1 =
|
np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
|
numpy.random.uniform
|
# opt_utils: Contains various helper functions to extract data from
# optimized Pyomo models.
# Author: <NAME>
# Contact: <EMAIL>
# Date: 02.08.2018
import numpy as np
import sys
import os
import matplotlib as mpl
if sys.platform == 'linux':
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
else:
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
import pickle
from os.path import exists
from pathlib import Path
from datetime import datetime
from ada.Loggermixin import *
# use self.logger from Loggermixin within classes
# use h_logger outside classes
h_logger=Loggermixin.get_default_logger()
# Discount orders based on their lateness
def discount_order_dict(model):
horizon_limit = 1 + model.K + model.sim_time
order_dict_disc = {}
for n in model.order_dict.keys():
planned_gi_time = model.order_dict[n][
model.order_cols.index('planned_gi_time')]
vsm = model.order_dict[n][
model.order_cols.index('var_std_margin')]
order_dict_disc[n] = {t:
[model.order_dict[n][model.order_cols.index(idx)]
if idx != 'var_std_margin'
else vsm * (1 - model.alpha/100*(t-planned_gi_time))
if t >= planned_gi_time else 0
for idx in model.order_cols]
for t in range(model.sim_time, horizon_limit)}
return order_dict_disc
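# Worked example (illustrative): with model.alpha = 5 and planned_gi_time = 10, an order
# delivered at t = 12 keeps vsm * (1 - 0.05 * 2) = 90% of its variable standard margin,
# while any t < 10 contributes 0.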
# Convert schedule to dict of binary values
def schedule_to_binary(env, model, schedule=None):
if schedule is not None:
current_schedule_vector = [int(i)
for i in schedule[:, env.sched_indices['gmid']]]
current_schedule_dict = {(i, t): 1 if i==current_schedule_vector[t]
else 0
for i in model.i
for t in range(len(current_schedule_vector))}
else:
current_schedule_dict = None
return current_schedule_dict
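# Example (illustrative): if the schedule's 'gmid' column reads [2, 2, 3], the result maps
# (i, t) -> 1 exactly when product i is scheduled in slot t, e.g. (2, 0) -> 1, (3, 0) -> 0,
# (3, 2) -> 1, for every i in model.i and t in 0..2.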
# Assign binaries to production slots according to schedule
def convert_schedule_to_vars(env, model, schedule=None):
current_schedule = schedule_to_binary(env, model, schedule=schedule)
# Fix future y[0, t>env.sim_time] == 0 to prevent the model from choosing
# to shutdown
for t in model.t:
if t >= env.sim_time:
model.y[0, t].fix(0)
# Scheduling constraint forces the schedule to be maintained for a
# given slot
if schedule is not None:
for t in model.t:
for i in model.i:
if t < 0:
# Fix values before simulation to 0
model.y[i, t].fix(0)
else:
try:
model.y[i, t].fix(current_schedule[i, t])
except KeyError:
pass
# If schedule is None, only fix the pre-simulation slots (t < 0) to 0
else:
current_schedule = {}
for t in model.t:
for i in model.i:
if t < 0:
model.y[i, t].fix(0)
current_schedule[i, t] = 0
model.current_schedule = current_schedule
return model
# Assign binaries to production slots for each scenario in the stoch prog
def convert_schedule_to_stochastic_vars(env, model, schedule=None):
current_schedule = schedule_to_binary(env, model, schedule=schedule)
# Fix future y[0, t>env.sim_time] == 0 to prevent the model from choosing
# to shutdown
for s in model.s:
for t in model.t:
if t >= env.sim_time:
model.y[0, t, s].fix(0)
# Scheduling constraint forces the schedule to be maintained for a
# given slot
if schedule is not None:
for s in model.s:
for t in model.t:
for i in model.i:
if t < 0:
# Fix values before simulation to 0
model.y[i, t, s].fix(0)
else:
try:
model.y[i, t, s].fix(current_schedule[i, t])
except KeyError:
pass
# If schedule is None, only fix the pre-simulation slots (t < 0) to 0
else:
current_schedule = {}
for s in model.s:
for t in model.t:
for i in model.i:
if t < 0:
model.y[i, t, s].fix(0)
current_schedule[i, t] = 0
model.current_schedule = current_schedule
return model
# Builds dictionary of orders
def build_order_dict(env, order_book):
order_dict = {}
order_cols = ['gmid', 'order_qty', 'var_std_margin',
'planned_gi_time', 'shipped']
for col in order_cols:
col_idx = env.ob_indices[col]
for n in order_book[:, env.ob_indices['doc_num']]:
order_index =
|
np.where(order_book[:, env.ob_indices['doc_num']]==n)
|
numpy.where
|
# 0 = normal, 1 = pneumonia
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from scipy.io import savemat
import scipy.optimize as opt
def sigmoide(X):
#print(np.ravel(X)[np.argmax(X)])
return 1/(1+np.exp(-X))
def pesosAleatorios(L_in, L_out):
eini = np.sqrt(6)/np.sqrt(L_in + L_out)
aux = np.random.uniform(-eini,eini,(L_in+1)*L_out)
return np.reshape(aux, (L_out,L_in + 1))
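# Note: this matches the Glorot/Xavier-style uniform initialisation; the returned
# (L_out, L_in + 1) weight matrix includes one extra column for the bias term.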
def forwprop(theta1, theta2, X):
a1 = X
z2 = np.dot(theta1, np.transpose(a1))
a2 = sigmoide(z2)
a2 = np.vstack((np.ones(np.shape(a2)[1]), a2))
z3 = np.dot(theta2, a2)
a3 = sigmoide(z3)
return a2.transpose(), a3.transpose()
def coste(theta1, theta2, m, y, lda, H):
aux = (-y*np.log((H + 1e-10))) - ((1-y)*np.log((1-H + 1e-10)))
aux = (1 / m) * np.sum(aux)
aux2 = np.sum(theta1[:,1:] ** 2) + np.sum(theta2[:,1:] ** 2)
aux2 = (aux2*lda)/(2*m)
c = aux + aux2
print(c)
return c
def backprop_rec(params_rn, num_entradas, num_ocultas, num_etiquetas, X, y, reg):
theta1 = np.reshape(params_rn[: (num_ocultas * (num_entradas + 1))], (num_ocultas, (num_entradas+1)))
theta2 = np.reshape(params_rn[num_ocultas * (num_entradas + 1):], (num_etiquetas, (num_ocultas + 1)))
m = X.shape[0]
a1 = np.hstack([np.ones([m, 1]), X])
a2, h = forwprop(theta1, theta2, a1)
cost = coste(theta1, theta2, m, y, reg, h)
delta3 = h - y
delta2 = np.dot(theta2.transpose(), delta3.transpose()).transpose() * (a2 * (1-a2))
delta2 = delta2[:,1:]
inc1 = np.dot(delta2.transpose(), a1)
inc2 = np.dot(delta3.transpose(), a2)
D1 = inc1/m
D1[:,1:] = D1[:,1:] + (reg/m)*theta1[:,1:]
D2 = inc2/m
D2[:,1:] = D2[:,1:] + (reg/m)*theta2[:,1:]
#print(cost)
return cost, np.concatenate((np.ravel(D1), np.ravel(D2)))
def fun(h, etiq):
return np.argmax(h) == etiq
def calculate_precision(theta1, theta2, X, Y):
a1 = np.hstack([np.ones([len(X), 1]), X])
_ , h = forwprop(theta1, theta2, a1)
aux = [fun(h[i], Y[i][0]) for i in range(len(X))]
return np.sum(aux)/len(X)
def codificaY(Y, num_etiquetas):
Yp = np.zeros((Y.shape[0], num_etiquetas))
Yp[[
|
np.arange(Y.shape[0])
|
numpy.arange
|
import dexplo as dx
import numpy as np
from numpy import nan, array
import pytest
from dexplo.testing import assert_frame_equal, assert_array_equal
class TestSortValues(object):
def test_sort_values_one(self):
data = {'a': [4, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]}
df = dx.DataFrame(data)
df1 = df.sort_values('a')
df2 = dx.DataFrame(data={'a': [2.0, 3.0, 3.0, 4.0, 6.0, nan],
'b': ['er', 'f', 'd', None, 'f', 'd'],
'c': [7.0, 444.0, 1.0, 12.0, 5.0, -5.6]})
assert_frame_equal(df1, df2)
df1 = df.sort_values('b')
df2 = dx.DataFrame({'a': [nan, 3.0, 2.0, 3.0, 6.0, 4.0],
'b': ['d', 'd', 'er', 'f', 'f', None],
'c': [-5.6, 1.0, 7.0, 444.0, 5.0, 12.0]})
assert_frame_equal(df1, df2)
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.sort_values('b', ascending=False)
df2 = dx.DataFrame({'a': [3.0, 6.0, 2.0, nan, 3.0, 2.0],
'b': ['f', 'f', 'er', 'd', 'd', None],
'c': [444.0, 5.0, 7.0, -5.6, 1.0, 12.0]})
assert_frame_equal(df1, df2)
df1 = df.sort_values('a', ascending=False)
df2 = dx.DataFrame({'a': [6.0, 3.0, 3.0, 2.0, 2.0, nan],
'b': ['f', 'f', 'd', None, 'er', 'd'],
'c': [5.0, 444.0, 1.0, 12.0, 7.0, -5.6]})
assert_frame_equal(df1, df2)
def test_sort_values_multiple(self):
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.sort_values(['a', 'b'], ascending=False)
df2 = dx.DataFrame({'a': [6.0, 3.0, 3.0, 2.0, 2.0, nan],
'b': ['f', 'f', 'd', 'er', None, 'd'],
'c': [5.0, 444.0, 1.0, 7.0, 12.0, -5.6]})
assert_frame_equal(df1, df2)
df1 = df.sort_values(['a', 'b'], ascending=True)
df2 = dx.DataFrame({'a': [2.0, 2.0, 3.0, 3.0, 6.0, nan],
'b': ['er', None, 'd', 'f', 'f', 'd'],
'c': [7.0, 12.0, 1.0, 444.0, 5.0, -5.6]})
assert_frame_equal(df1, df2)
df1 = df.sort_values(['a', 'b'], ascending=[True, False])
df2 = dx.DataFrame({'a': [2.0, 2.0, 3.0, 3.0, 6.0, nan],
'b': ['er', None, 'f', 'd', 'f', 'd'],
'c': [7.0, 12.0, 444.0, 1.0, 5.0, -5.6]})
assert_frame_equal(df1, df2)
df1 = df.sort_values(['a', 'b'], ascending=[False, True])
df2 = dx.DataFrame({'a': [6.0, 3.0, 3.0, 2.0, 2.0, nan],
'b': ['f', 'd', 'f', 'er', None, 'd'],
'c': [5.0, 1.0, 444.0, 7.0, 12.0, -5.6]})
assert_frame_equal(df1, df2)
df1 = df.sort_values(['b', 'a'], ascending=[False, True])
df2 = dx.DataFrame({'a': [3.0, 6.0, 2.0, 3.0, nan, 2.0],
'b': ['f', 'f', 'er', 'd', 'd', None],
'c': [444.0, 5.0, 7.0, 1.0, -5.6, 12.0]})
assert_frame_equal(df1, df2)
class TestRank:
def test_rank_min(self):
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.rank()
df2 = dx.DataFrame({'a': [1.0, 3.0, nan, 5.0, 3.0, 1.0],
'b': [nan, 4.0, 1.0, 4.0, 1.0, 3.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(na_option='top')
df2 = dx.DataFrame({'a': [2, 4, 1, 6, 4, 2], 'b': [1, 5, 2, 5, 2, 4],
'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(na_option='bottom')
df2 = dx.DataFrame({'a': [1, 3, 6, 5, 3, 1], 'b': [6, 4, 1, 4, 1, 3],
'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(na_option='keep', ascending=False)
df2 = dx.DataFrame({'a': [4.0, 2.0, nan, 1.0, 2.0, 4.0],
'b': [nan, 1.0, 4.0, 1.0, 4.0, 3.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(na_option='top', ascending=False)
df2 = dx.DataFrame({'a': [5, 3, 1, 2, 3, 5], 'b': [1, 2, 5, 2, 5, 4],
'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
df1 = df.rank(na_option='bottom', ascending=False)
df2 = dx.DataFrame(
{'a': [4, 2, 6, 1, 2, 4], 'b': [6, 1, 4, 1, 4, 3], 'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
def test_rank_max(self):
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.rank(method='max', na_option='keep', ascending=True)
df2 = dx.DataFrame({'a': [2.0, 4.0, nan, 5.0, 4.0, 2.0],
'b': [nan, 5.0, 2.0, 5.0, 2.0, 3.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='max', na_option='keep', ascending=False)
df2 = dx.DataFrame({'a': [5.0, 3.0, nan, 1.0, 3.0, 5.0],
'b': [nan, 2.0, 5.0, 2.0, 5.0, 3.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='max', na_option='top', ascending=True)
df2 = dx.DataFrame({'a': [3, 5, 1, 6, 5, 3], 'b': [1, 6, 3, 6, 3, 4],
'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='max', na_option='top', ascending=False)
df2 = dx.DataFrame({'a': [6, 4, 1, 2, 4, 6], 'b': [1, 3, 6, 3, 6, 4],
'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='max', na_option='bottom', ascending=True)
df2 = dx.DataFrame({'a': [2, 4, 6, 5, 4, 2], 'b': [6, 5, 2, 5, 2, 3],
'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='max', na_option='bottom', ascending=False)
df2 = dx.DataFrame({'a': [5, 3, 6, 1, 3, 5], 'b': [6, 2, 5, 2, 5, 3],
'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
def test_rank_dense(self):
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.rank(method='dense', na_option='keep', ascending=True)
df2 = dx.DataFrame({'a': [1.0, 2.0, nan, 3.0, 2.0, 1.0],
'b': [nan, 3.0, 1.0, 3.0, 1.0, 2.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='dense', na_option='keep', ascending=False)
df2 = dx.DataFrame({'a': [3.0, 2.0, nan, 1.0, 2.0, 3.0],
'b': [nan, 1.0, 3.0, 1.0, 3.0, 2.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='dense', na_option='top', ascending=True)
df2 = dx.DataFrame({'a': [2, 3, 1, 4, 3, 2], 'b': [1, 4, 2, 4, 2, 3],
'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='dense', na_option='top', ascending=False)
df2 = dx.DataFrame(
{'a': [4, 3, 1, 2, 3, 4], 'b': [1, 2, 4, 2, 4, 3], 'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='dense', na_option='bottom', ascending=True)
df2 = dx.DataFrame(
{'a': [1, 2, 4, 3, 2, 1], 'b': [4, 3, 1, 3, 1, 2], 'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='dense', na_option='bottom', ascending=False)
df2 = dx.DataFrame(
{'a': [3, 2, 4, 1, 2, 3], 'b': [4, 1, 3, 1, 3, 2], 'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
def test_rank_first(self):
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.rank(method='first', na_option='keep', ascending=True)
df2 = dx.DataFrame({'a': [1.0, 3.0, nan, 5.0, 4.0, 2.0],
'b': [nan, 4.0, 1.0, 5.0, 2.0, 3.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='first', na_option='keep', ascending=False)
df2 = dx.DataFrame({'a': [4.0, 2.0, nan, 1.0, 3.0, 5.0],
'b': [nan, 1.0, 4.0, 2.0, 5.0, 3.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='first', na_option='top', ascending=True)
df2 = dx.DataFrame(
{'a': [2, 4, 1, 6, 5, 3], 'b': [1, 5, 2, 6, 3, 4], 'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='first', na_option='top', ascending=False)
df2 = dx.DataFrame(
{'a': [5, 3, 1, 2, 4, 6], 'b': [1, 2, 5, 3, 6, 4], 'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='first', na_option='bottom', ascending=True)
df2 = dx.DataFrame(
{'a': [1, 3, 6, 5, 4, 2], 'b': [6, 4, 1, 5, 2, 3], 'c': [5, 6, 1, 3, 2, 4]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='first', na_option='bottom', ascending=False)
df2 = dx.DataFrame(
{'a': [4, 2, 6, 1, 3, 5], 'b': [6, 1, 4, 2, 5, 3], 'c': [2, 1, 6, 4, 5, 3]})
assert_frame_equal(df1, df2)
def test_rank_average(self):
df = dx.DataFrame({'a': [2, 3, nan, 6, 3, 2],
'b': [None, 'f', 'd', 'f', 'd', 'er'],
'c': [12, 444, -5.6, 5, 1, 7]})
df1 = df.rank(method='average', na_option='keep', ascending=True)
df2 = dx.DataFrame({'a': [1.5, 3.5, nan, 5.0, 3.5, 1.5],
'b': [nan, 4.5, 1.5, 4.5, 1.5, 3.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='average', na_option='keep', ascending=False)
df2 = dx.DataFrame({'a': [4.5, 2.5, nan, 1.0, 2.5, 4.5],
'b': [nan, 1.5, 4.5, 1.5, 4.5, 3.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='average', na_option='top', ascending=True)
df2 = dx.DataFrame({'a': [2.5, 4.5, 1.0, 6.0, 4.5, 2.5],
'b': [1.0, 5.5, 2.5, 5.5, 2.5, 4.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='average', na_option='top', ascending=False)
df2 = dx.DataFrame({'a': [5.5, 3.5, 1.0, 2.0, 3.5, 5.5],
'b': [1.0, 2.5, 5.5, 2.5, 5.5, 4.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='average', na_option='bottom', ascending=True)
df2 = dx.DataFrame({'a': [1.5, 3.5, 6.0, 5.0, 3.5, 1.5],
'b': [6.0, 4.5, 1.5, 4.5, 1.5, 3.0],
'c': [5.0, 6.0, 1.0, 3.0, 2.0, 4.0]})
assert_frame_equal(df1, df2)
df1 = df.rank(method='average', na_option='bottom', ascending=False)
df2 = dx.DataFrame({'a': [4.5, 2.5, 6.0, 1.0, 2.5, 4.5],
'b': [6.0, 1.5, 4.5, 1.5, 4.5, 3.0],
'c': [2.0, 1.0, 6.0, 4.0, 5.0, 3.0]})
assert_frame_equal(df1, df2)
class TestStreak:
def test_streak(self):
df = dx.DataFrame(
{'AIRLINE': ['AA', 'AA', 'AA', 'UA', 'DL', 'DL', 'WN', 'WN', 'WN', 'AS', None],
'DAY_OF_WEEK': [2, 3, 6, 6, 6, 6, 4, 4, 1, 2, 2],
'DEPARTURE_DELAY': [nan, nan, -1.0, -1.0, -1.0, 22.0, 3.0, 3.0, 21.0,
-2.0, nan]})
arr1 = df.streak('AIRLINE')
arr2 = array([1, 2, 3, 1, 1, 2, 1, 2, 3, 1, 1])
assert_array_equal(arr1, arr2)
arr1 = df.streak('DAY_OF_WEEK')
arr2 = array([1, 1, 1, 2, 3, 4, 1, 2, 1, 1, 2])
assert_array_equal(arr1, arr2)
arr1 = df.streak('DEPARTURE_DELAY')
arr2 = array([1, 1, 1, 2, 3, 1, 1, 2, 1, 1, 1])
assert_array_equal(arr1, arr2)
def test_streak_value(self):
df = dx.DataFrame(
{'AIRLINE': ['AA', 'AA', 'AA', 'UA', 'DL', 'DL', 'WN', 'WN', 'WN', 'AS', None],
'DAY_OF_WEEK': [2, 3, 6, 6, 6, 6, 4, 4, 1, 2, 2],
'DEPARTURE_DELAY': [nan, nan, -1.0, -1.0, -1.0, 22.0, 3.0, 3.0, 21.0,
-2.0, nan]})
with pytest.raises(TypeError):
df.streak('DEPARTURE_DELAY', 'AA')
arr1 = df.streak('DEPARTURE_DELAY', -1)
arr2 = array([0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0])
assert_array_equal(arr1, arr2)
arr1 = df.streak('DAY_OF_WEEK', 6)
arr2 =
|
array([0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
|
numpy.array
|
from abc import ABC, abstractmethod
from fastquant import get_stock_data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
#Base model class
class Model(ABC):
# params should be a dict of your parameters that you want to pass to the model
# name should be a string (used for saving results)
# params dict *must* include 'name':name within it
def __init__(self, params):
self.model = None
self.name = params['name']
# wrapper model function for collecting fastquant data
def get_data(self, ticker, start_date, end_date):
return get_stock_data(ticker, start_date, end_date)
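# e.g. (illustrative, assuming a ticker supported by fastquant):
#   self.get_data("TSLA", "2020-01-01", "2020-12-31") returns a pandas DataFrame of daily
#   prices (including the 'close' column used in plot_continuous below).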
# plotting function for standardized plot results
def plot_results(self, preds, actual, title):
fig, ax = plt.subplots(figsize=(15,5))
ax.set_title(title)
time = range(len(preds))
ax.plot(time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='forecast')
ax.plot(time,actual,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual')
ax.set_xlabel('time')
ax.set_ylabel('stock price ($)')
ax.set_xticks(np.arange(0,len(preds)+10,10))
ax.set_xlim(0,len(preds)+10)
ax.xaxis.grid(True,ls='--')
ax.yaxis.grid(True,ls='--')
ax.legend()
plt.savefig(f'../imgs/{title}.png')
# plotting function for training data + prediction + actual
def plot_continuous(self, preds, train, actual, title):
last_50 = train['close'].values[-50:]
last = np.append(last_50, actual[0])
fig, ax = plt.subplots(figsize=(15,5))
ax.set_title(title)
pred_time = range(len(last_50),len(last_50)+len(preds))
train_time = range(0,len(last_50)+1)
ax.plot(pred_time,preds,color='tab:red',marker='s',markersize=2,linestyle='-',linewidth=1,label='forecast')
ax.plot(train_time,last,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1,label='actual')
ax.plot(pred_time,actual,color='tab:blue',marker='s',markersize=2,linestyle='-',linewidth=1)
ax.set_xlabel('time')
ax.set_ylabel('stock price ($)')
ax.set_xticks(np.arange(0,len(pred_time)+len(last_50)+10,10))
ax.set_xlim(0,len(pred_time)+len(last_50)+10)
ax.xaxis.grid(True,ls='--')
ax.yaxis.grid(True,ls='--')
ax.legend()
plt.savefig(f'../imgs/{title}.png')
# function to get error of the model based on preds and true values
def mean_abs_percent_error(self, y_pred, y_true):
return (1.0)/(len(y_pred))*((np.abs(y_pred-y_true)/
|
np.abs(y_true)
|
numpy.abs
|
'''
Created on 24-May-2017
@author: aii32199
'''
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from mlxtend.classifier import StackingClassifier
from sklearn import model_selection as cross_validation  # sklearn.cross_validation was removed; model_selection provides the same names
import numpy as np
from sklearn.tree import DecisionTreeClassifier
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def CalculateAccuracy(y_test,pred_label):
nnz = np.shape(y_test)[0] - np.count_nonzero(pred_label - y_test)
acc = 100*nnz/float(
|
np.shape(y_test)
|
numpy.shape
|
# -*- coding: UTF-8 -*-
# !/usr/bin/python
import numpy as np
import joblib
x_mean = np.array([
83.8996, 97.0520, 36.8055, 126.2240, 86.2907,
66.2070, 18.7280, 33.7373, -3.1923, 22.5352,
0.4597, 7.3889, 39.5049, 96.8883, 103.4265,
22.4952, 87.5214, 7.7210, 106.1982, 1.5961,
0.6943, 131.5327, 2.0262, 2.0509, 3.5130,
4.0541, 1.3423, 5.2734, 32.1134, 10.5383,
38.9974, 10.5585, 286.5404, 198.6777,
60.8711, 0.5435, 0.0615, 0.0727, -59.6769, 28.4551])
x_std = np.array([
17.6494, 3.0163, 0.6895, 24.2988, 16.6459,
14.0771, 4.7035, 11.0158, 3.7845, 3.1567,
6.2684, 0.0710, 9.1087, 3.3971, 430.3638,
19.0690, 81.7152, 2.3992, 4.9761, 2.0648,
1.9926, 45.4816, 1.6008, 0.3793, 1.3092,
0.5844, 2.5511, 20.4142, 6.4362, 2.2302,
29.8928, 7.0606, 137.3886, 96.8997,
16.1887, 0.4981, 0.7968, 0.8029, 160.8846, 29.5367])
mFMax = []
fSum_pre = []
fmax = []
fmin = []
All_grad1 = []
All_grad12 = []
All_grad24 = []
def lgb_features_filter(alpha):
importance_index = np.load('importance_lightgbm.npy')
feature_length = len(importance_index[0])
pick_feature_len = int(alpha * feature_length)
importance_index = np.array(importance_index)[:, feature_length - pick_feature_len:]
f_index = np.unique(importance_index)
f_index = np.sort(f_index)
return f_index
def get_sepsis_score(feature, model):
feature = genFeature(feature)
# f_index = utils.lgb_features_filter(0.5)
# feature = feature[:, f_index]
# generate predictions
label = model.predict(feature)
prob = model.predict_proba(feature)
return prob[0][1], label
def load_sepsis_model():
clf = joblib.load('EasyEnsembleLightGBM.pkl')
return clf
def imputer_missing_mean_numpy(testFtr, start=0):
imr = np.load('imputer_mean_numpy.npy')
h, w = testFtr.shape
for i in range(h):
for j in range(w):
if np.isnan(testFtr[i, j]):
testFtr[i, j] = imr[j + start]
return testFtr
def imputer_missing_median_numpy(testFtr, start=0):
imr = np.load('imputer_median_numpy.npy')
h, w = testFtr.shape
for i in range(h):
for j in range(w):
if np.isnan(testFtr[i, j]):
testFtr[i, j] = imr[j + start]
return testFtr
# Input: all the data features
def genFeature(data):
global All_grad1, All_grad12, All_grad24
exlen = 34
# feature = data[:, :-1]
feature = data
h, w = feature.shape
if h == 1:
All_grad1 = []
All_grad12 = []
All_grad24 = []
mFMax = []
fSum_pre = []
fmax = []
fmin = []
for j in range(w):
for i in range(h):
if np.isnan(feature[i, j]):
feature[i, j] = searchNearValue(i, feature[:, j], h)
for j in range(w):
for i in range(h):
if np.isnan(feature[i, j]):
feature[i, j] = x_mean[j]
# norm = data_norm(feature)
res = residual_value(feature[:, :exlen])
rMax = np.nanmax(res[-1])
rMin = np.nanmin(res[-1])
rMean = np.nanmean(res[-1])
rSum = np.nansum(res[-1])
rStat = np.hstack((rMax, rMin, rMean, rSum))
rStat = np.reshape(rStat, (1, len(rStat)))
grad1 = Grad1(res[:, :exlen])
grad12 = Grad12(res[:, :exlen])
grad24 = Grad24(res[:, :exlen])
grad = np.hstack((grad1, grad12, grad24))
grad = np.reshape(grad, (1, len(grad)))
gMax = np.nanmax(grad, axis=1)
gMin = np.nanmin(grad, axis=1)
gMean = np.nanmean(grad, axis=1)
gSum = np.nansum(grad, axis=1)
gStat = np.hstack((gMax, gMin, gMean, gSum))
gStat = np.reshape(gStat, (1, len(gStat)))
All_grad1.append(grad[0, :exlen])
All_grad12.append(grad[0, exlen:2 * exlen])
All_grad24.append(grad[0, 2 * exlen:3 * exlen])
hess1 = Grad1(np.array(All_grad1))
hess12 = Grad12(np.array(All_grad12))
hess24 = Grad24(np.array(All_grad24))
hess = np.hstack((hess1, hess12, hess24))
hess = np.reshape(hess, (1, len(hess)))
hMax = np.nanmax(hess, axis=1)
hMin = np.nanmin(hess, axis=1)
hMean = np.nanmean(hess, axis=1)
hSum = np.nansum(hess, axis=1)
hStat = np.hstack((hMax, hMin, hMean, hSum))
hStat = np.reshape(hStat, (1, len(hStat)))
mutation = mFactor(res)
mutationMax = mFactorMax(res)
mutation12 = mFactor12(res)
mutation12h = mFactor12h(res)
mu = np.hstack((mutation, mutationMax, mutation12, mutation12h))
mMax = np.nanmax(mu, axis=1)
mMin = np.nanmin(mu, axis=1)
mMean = np.nanmean(mu, axis=1)
mSum = np.nansum(mu, axis=1)
mStat = np.hstack((mMax, mMin, mMean, mSum))
mStat = np.reshape(mStat, (1, len(mStat)))
fSum = f_sum(res[:, :exlen])
fSum8 = f_sum8h(res[:, :exlen])
fMax = f_max(res[:, :exlen])
fMin = f_min(res[:, :exlen])
fMean = f_mean(res[:, :exlen])
stat = np.hstack((fSum, fSum8, fMax, fMin, fMean))
# fCov = cov_1d(feature[:, :exlen], [1, 2, 1])
fCov = covFilter(feature[:, :exlen])
norm = normalization(feature[:, :exlen])
kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])
fCov2 = cov_2d(norm, kernel / 16.0)
laplace_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
fCov_laplace = cov_2d(norm, laplace_kernel)
cov = np.hstack((fCov, fCov2, fCov_laplace))
f = np.hstack((feature[h - 1:h, :], res[h - 1:h, :], grad, hess, mu, stat, cov, rStat, gStat, hStat, mStat))
f[:, 0:34] = imputer_missing_mean_numpy(f[:, 0:34])
f[:, 34:] = imputer_missing_median_numpy(f[:, 34:], 34)
f_index = kind_feature_filter([11, 25])
f = f[:, f_index]
f_index = lgb_features_filter(0.5)
f = np.array(f[:, f_index]).astype('float32')
return f
def kind_feature_filter(pos, dele=True):
origin_f = 0
res_f = 40
grad1_f = 74
grad12_f = 108
grad24_f = 142
hess1_f = 176
hess12_f = 210
hess24_f = 244
mutation_f = 278
mutation_max_f = 312
mutation12_f = 346
mutation12h_f = 380
sum_f = 414
sum8_f = 448
max_f = 482
min_f = 516
mean_f = 550
cov1_f = 584
cov2_f = 618
cov_la_f = 652
r_stat_f = 686
g_stat_f = 690
h_stat_f = 694
m_stat_f = 698
gnb_f = 702
f_length = 704
f_index = []
f_temp = []
for i in range(len(pos)):
id = pos[i]
if id == 1:
for k in range(40):
f_temp.append(k)
elif id < 21:
for k in range(34):
f_temp.append((id - 1) * 34 + 6 + k)
elif id < 25:
for k in range(4):
f_temp.append(686 + 4 * (id - 21) + k)
else:
f_temp.append(702)
f_temp.append(703)
if dele:
for j in range(f_length):
if not np.isin(j, np.array(f_temp)):
f_index.append(j)
else:
f_index = f_temp
f_index = np.sort(f_index)
return np.array(f_index)
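# kind_feature_filter indexes into the flat 704-column feature row built in genFeature:
# columns 0-39 hold the raw measurements, followed by 34-column blocks for each derived
# group (residuals, 1/12/24-hour gradients and Hessians, mutation factors, sums,
# max/min/mean, convolution features), 4-column summary blocks from column 686 on, and
# two trailing columns; `pos` selects groups to drop (dele=True) or to keep (dele=False).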
def searchNearValue(index, values, span):
    # Walk backwards from `index` (at most `span` rows) and return the nearest non-NaN
    # entry of `values`; fall back to the original (NaN) entry if none is found.
    indexL = index
    indexH = index
    while indexL >= max(index - span, 0) and indexH < min(index + span, len(values)):
        if not np.isnan(values[indexL]):
            return values[indexL]
        indexL = indexL - 1
    return values[index]
fMax = np.array([
280.00, 100.00, 50.00, 300.00, 300.00, 300.00, 100.00, 100.00,
100.00, 55.00, 4000.00, 7.93, 100.00, 100.00, 9961.00, 268.00,
3833.00, 27.90, 145.00, 46.60, 37.50, 988.00, 31.00, 9.80,
18.80, 27.50, 49.60, 440.00, 71.70, 32.00, 250.00, 440.00,
1760.00, 2322.00, 100.00, 1.00, 1.00, 1.00, 23.99, 336.00
])
fMin = np.array([
20.00, 20.00, 20.90, 20.00, 20.00, 20.00, 1.00, 10.00,
-32.00, 0.00, -50.00, 6.62, 10.00, 23.00, 3.00, 1.00,
7.00, 1.00, 26.00, 0.10, 0.01, 10.00, 0.20, 0.20,
0.20, 1.00, 0.10, 0.01, 5.50, 2.20, 12.50, 0.10,
34.00, 1.00, 14.00, 0.00, 0.00, 0.00, -5366.86, 1.00
])
x_mean = np.array([
83.8996, 97.0520, 36.8055, 126.2240, 86.2907,
66.2070, 18.7280, 33.7373, -3.1923, 22.5352,
0.4597, 7.3889, 39.5049, 96.8883, 103.4265,
22.4952, 87.5214, 7.7210, 106.1982, 1.5961,
0.6943, 131.5327, 2.0262, 2.0509, 3.5130,
4.0541, 1.3423, 5.2734, 32.1134, 10.5383,
38.9974, 10.5585, 286.5404, 198.6777,
60.8711, 0.5435, 0.0615, 0.0727, -59.6769, 28.4551])
x_std = np.array([
17.6494, 3.0163, 0.6895, 24.2988, 16.6459,
14.0771, 4.7035, 11.0158, 3.7845, 3.1567,
6.2684, 0.0710, 9.1087, 3.3971, 430.3638,
19.0690, 81.7152, 2.3992, 4.9761, 2.0648,
1.9926, 45.4816, 1.6008, 0.3793, 1.3092,
0.5844, 2.5511, 20.4142, 6.4362, 2.2302,
29.8928, 7.0606, 137.3886, 96.8997,
16.1887, 0.4981, 0.7968, 0.8029, 160.8846, 29.5367])
fN = fMax - fMin
def normalization(item):
h, w = item.shape
global fMin, fMax, fN
temp = (item - fMin[:w]) / fN[:w]
norm = np.ones_like(temp)
lower = np.zeros_like(temp)
temp = np.minimum(temp, norm)
result = np.maximum(temp, lower)
return result
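# normalization() is a clipped min-max scaling against the physiological ranges fMin/fMax:
# each column is mapped to (x - fMin) / (fMax - fMin) and clamped to [0, 1], so for
# example a value of 150 in a column with range [20, 280] becomes (150 - 20) / 260 = 0.5.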
def data_norm(data):
norm = (data[-1] - x_mean) / x_std
return np.reshape(norm, (1, len(norm)))
def Grad1(data):
h, w = data.shape
grad = np.zeros(w)
grad[:] = np.nan
if h > 1:
grad = data[-1, :] - data[-2, :]
return grad
def covFilter(feature):
    # Causal [1, 2, 1]/4 smoothing of each column over the last three hourly rows,
    # zero-padding two rows at the top so the filter is defined from the first record.
    h, w = feature.shape
    f = np.full((h + 2, w), 0.0)  # float fill; an integer array here would truncate the values
    f[2:, :] = feature
    result = np.full((1, w), np.nan)
    result[0, :] = (f[-3, :] + f[-2, :] * 2 + f[-1, :]) / 4
    return np.reshape(result, (1, w))
def Grad12(data):
h, w = data.shape
grad = np.zeros(w)
grad[:] = np.nan
if h >= 13:
grad = data[-1, :] - data[-13, :]
elif h >= 7:
grad = data[-1, :] - data[0, :]
return grad
def Grad24(data):
h, w = data.shape
grad = np.zeros(w)
grad[:] = np.nan
if h >= 25:
grad = data[-1, :] - data[-25, :]
elif h >= 16:
grad = data[-1, :] - data[0, :]
return grad
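# Grad1/Grad12/Grad24 take first differences of each feature over the last 1, 12 and 24
# hourly records; when the history is shorter they fall back to the difference against
# the first record (once at least 7 or 16 rows exist, respectively) and otherwise stay
# NaN. genFeature stacks these per-hour gradients and differentiates the stacks again to
# form the "hess" features.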
def mFCac(data):
h, w = data.shape
m_t =
|
np.nanmean(data, axis=0)
|
numpy.nanmean
|
#-*- coding: utf-8 -*-
import numpy as n
from scipy.io import wavfile as w
H=n.hstack
V=n.vstack
f_a = 44100. # Hz, sampling frequency
############## 2.2.1 Lookup table (LUT)
Lambda_tilde=Lt=1024*16 # table length, kept integer so it can be used as an array size
# Sine wave
foo=n.linspace(0,2*n.pi,Lt,endpoint=False)
S_i=n.sin(foo) # one period of the sine wave, Lt samples
# Square wave:
Q_i=n.hstack( ( n.ones(Lt//2)*-1 , n.ones(Lt//2) ) )
# Triangle wave:
foo=n.linspace(-1,1,Lt//2,endpoint=False)
Tr_i=n.hstack( ( foo , foo*-1 ) )
# Sawtooth:
D_i=n.linspace(-1,1,Lt)
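# The arrays above (S_i, Q_i, Tr_i, D_i) are lookup tables: one period of each waveform
# sampled at Lt points. In table-lookup synthesis a note of frequency f is produced by
# stepping through such a table at roughly f*Lt/f_a positions per output sample and
# reading it at the accumulated index modulo Lt; v() below appears to build a note of
# duration d this way, with fv, nu and tabv presumably controlling a vibrato LFO.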
def v(f=200,d=2.,tab=S_i,fv=2.,nu=2.,tabv=S_i):
Lambda=n.floor(f_a*d)
ii=
|
n.arange(Lambda)
|
numpy.arange
|
# Copyright 2021 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
from .perturb import TextPerturb
import numpy as np
import cv2
from skimage import segmentation
from skimage.future import graph
from .effects import TextEffects
import inspect
import logging
class TextSynthesis(TextPerturb):
def __init__(self, color_model_path=None):
self.image = None
self.image_path = None
self.text_mask = None
self.list_text = []
self.list_bound = []
self.list_char_box = []
self.list_base_line = []
self.effects = TextEffects(color_model_path=color_model_path)
def _set_image(self, img):
"""
set the image to place text on
        :param img: np.ndarray or str (path to an image file)
"""
assert isinstance(img, (np.ndarray, str))
if isinstance(img, np.ndarray):
assert img.ndim == 3 # RGB
self.image = img
elif isinstance(img, str):
self.image = cv2.imread(img, cv2.IMREAD_COLOR)
if self.image is None:
raise Exception('''Failed reading the image file "%s"''' % img)
self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
self.image_path = img
self.image = self.image.astype(np.uint8)
self.text_mask = np.zeros(self.image.shape[:2])
def _pt_transform(self, src_size, dst_size, pts):
pts = np.array(pts)
pts_shape = pts.shape
pts = pts.reshape((-1, 2))
pts[:, 0] = pts[:, 0] * dst_size[1] / src_size[1]
pts[:, 1] = pts[:, 1] * dst_size[0] / src_size[0]
pts = pts.reshape(pts_shape)
return pts.astype(np.int32)
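    # _pt_transform rescales point coordinates when an image patch is resized: x is scaled
    # by dst_width/src_width and y by dst_height/src_height, which keeps the character
    # boxes and baselines recorded at render resolution aligned with the resized text.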
def _get_text_regions(self, num_regions=5, is_top_left_origin=True, ensure_num_regions=False, check_collision=True):
"""
get smooth quadrilateral regions for placing text
        :param num_regions: int, number of regions required, 1 to 10
        :param is_top_left_origin: whether to keep the first point of a region vector as the top-left corner
:param ensure_num_regions: sometimes, there are not enough suitable regions for placing text
True: force to get num_regions regions
False: only return suitable regions, may less than num_regions
:param check_collision: bool, whether check collision of regions
:return: list, [4x2 array, ...], each item denotes four corners of a quadrilateral region
"""
def top_left_origin(pts):
dist, pts_new_order = [], []
n_pts = len(pts)
for pt in pts:
dist.append(np.linalg.norm(pt))
idx_top_left = int(np.argmin(dist))
for idx in range(idx_top_left, idx_top_left + n_pts):
pts_new_order.append(pts[idx % n_pts])
return np.array(pts_new_order)
# assert isinstance(num_regions, (int, float)) and 1 <= num_regions <= 30
num_regions = round(num_regions)
""" constraints """
min_seg_img_area_ratio = .01
min_seg_bbox_area_ratio = .6
""" segmentation """
h, w = self.image.shape[:2]
n_segments = min(max(1, int(h * w / 128. ** 2)), 10)
segments, labels, areas = TextSynthesis.image_segmentation(
self.image, n_segments=n_segments)
""" get smooth regions """
# TODO: region_filter
""" define quadrilateral for text """
indices = np.arange(len(labels)).astype(np.int32)
np.random.shuffle(indices)
text_regions = []
h, w = self.image.shape[:2]
collision_mask = np.zeros((h, w))
trash_regions = []
cnt = 0
for idx in indices:
if cnt >= num_regions:
break
if float(areas[idx]) / (h * w) < min_seg_img_area_ratio:
continue
""" min bounding rectangle """
ys, xs = np.where(segments == labels[idx])
coords = np.c_[xs, ys].astype('float32')
# hull = cv2.convexHull(coords, clockwise=False, returnPoints=True)
# hull = np.array(hull).squeeze()
rect = cv2.minAreaRect(coords)
if float(areas[idx]) / (rect[1][0] * rect[1][1]) < min_seg_bbox_area_ratio:
continue
box = np.array(cv2.boxPoints(rect))
""" shrink the rectangle """
# mask_center = np.array([np.mean(xs), np.mean(ys)])
# shift = np.abs(mask_center - np.array(rect[0]))
# rect = (tuple(mask_center), tuple((np.array(rect[1]) - shift)), rect[-1])
""" perspective transformation according to depth info """
# TODO:
""" fit inside of the image """
box[box < 0] = 0
box[box[:, 0] > w - 1, 0] = w - 1
box[box[:, 1] > h - 1, 1] = h - 1
""" check collision """
if check_collision:
mask = cv2.drawContours(
np.zeros((h, w)), [box.astype(np.int32)], 0, 1, thickness=cv2.FILLED)
if np.sum(mask * collision_mask):
# shrink
continue
else:
collision_mask += mask
else:
collision_mask = cv2.drawContours(
collision_mask, [box.astype(np.int32)], 0, 1, thickness=cv2.FILLED)
""" arrange the corners to keep the first corner is the top-left corner """
if is_top_left_origin:
box = top_left_origin(box)
if rect[1][0] * rect[1][1] / float(h * w) < min_seg_img_area_ratio:
trash_regions.append([box, rect[1][0] * rect[1][1]])
continue
text_regions.append(box)
cnt += 1
if cnt < num_regions and ensure_num_regions:
trash_regions = sorted(trash_regions, key=lambda x: x[-1], reverse=True)
for r in trash_regions:
text_regions.append(r[0])
cnt += 1
if cnt >= num_regions:
break
return text_regions
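    # Region proposal in brief: over-segment the image, keep segments that are both large
    # relative to the image and well covered by their minimum-area rectangle, clip each
    # candidate quadrilateral to the image, drop (or record) it via the collision mask,
    # and only fall back to the undersized "trash" regions when ensure_num_regions
    # requires topping the list up to num_regions.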
def _place_text_on_region(self, text, font, region, effects=None, is_keep_ratio=True):
"""
place text to self.image
:param region: 4x2 array
:param text: str
:param font: str
:param effects: dict, text effect types and parameters, all items are optional
'layer_text' : [feather (True/False), text color (RGB/None/'rand'), opacity (.75~1)]
'layer_border' : [is_rand_color (True/False), color (RGB/None)]
'layer_shadow' : [theta (float/None), shift (float/None), opacity (.75~1/None)]
'layer_background': color (RGB), default None (original background)
'mix_bg' : whether mix background
'text_interval' : text interval
'text_size' : text size
        :param is_keep_ratio: whether to keep the aspect ratio of the rendered text
"""
""" map region patch to rectangle for placing text """
w, h = int(max(region[:, 0]) - min(region[:, 0])
), int(max(region[:, 1]) - min(region[:, 1]))
region_rect = np.float32([
(0, 0),
(w, 0),
(w, h),
(0, h)
])
region = np.float32(region)
M = cv2.getPerspectiveTransform(region, region_rect)
region_rect_rgb = cv2.warpPerspective(self.image, M, (w, h)).astype(np.uint8)
""" render text """
# if effects is not None and 'text_size' in effects:
# size = effects['text_size'] if effects['text_size'] is not None else (h * 96)
# else:
# size = h * 96
size = 96 * 256
if effects is not None and 'text_interval' in effects:
interval = effects['text_interval'] if effects['text_interval'] is not None else 1
else:
interval = 1
text_arr = self.perturb_text_from_render(
perturb_type_params={}, text=text, font=font, size=size, bg=0, interval=interval)
if text_arr is None:
return False
txt_h, txt_w = text_arr.shape[:2]
if txt_h == 0 or txt_w == 0:
return False
text_mask = self.fg_mask
""" fit rendered text to region """
img_h, img_w = region_rect_rgb.shape[:2]
r_h, r_w = float(img_h) / txt_h, float(img_w) / txt_w
if is_keep_ratio:
r_w = min(r_h, r_w)
r_h = r_w if r_h / r_w < 10 else (r_w * 2)
text_arr = cv2.resize(text_arr, (int(txt_w * r_w), int(txt_h * r_h)))
text_mask = cv2.resize(text_mask, (int(txt_w * r_w), int(txt_h * r_h)))
# text_arr = self._pt_scale(text_arr, [.5 * r_h, .5 * r_w, 0])
else:
text_arr = cv2.resize(text_arr, (img_w, img_h), interpolation=cv2.INTER_NEAREST)
text_mask = cv2.resize(text_mask, (img_w, img_h), interpolation=cv2.INTER_NEAREST)
# text_arr = self._pt_scale(text_arr, [.5 * r_h, .5 * r_w, 0])
txt_h_new, txt_w_new = text_arr.shape[:2]
self.char_box = self._pt_transform((txt_h, txt_w), (txt_h_new, txt_w_new), self.char_box)
self.base_line = self._pt_transform((txt_h, txt_w), (txt_h_new, txt_w_new), self.base_line)
self.bound_quadrilateral = self._pt_transform(
(txt_h, txt_w), (txt_h_new, txt_w_new), self.bound_quadrilateral)
""" offset bounding quadrilateral, character box, and baseline """
txt_h, txt_w = text_arr.shape[:2]
delta_w = img_w - txt_w
delta_h = img_h - txt_h
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
text_arr = cv2.copyMakeBorder(text_arr, top=top, bottom=bottom, left=left, right=right,
borderType=cv2.BORDER_CONSTANT, value=0)
text_mask = cv2.copyMakeBorder(text_mask, top=top, bottom=bottom, left=left, right=right,
borderType=cv2.BORDER_CONSTANT, value=0)
self.bound_quadrilateral = np.array(self.bound_quadrilateral) + np.array([left, top])
self.char_box = np.array(self.char_box) + np.array([left, top])
self.base_line = np.array(self.base_line) + np.array([left, top])
""" blend text and region """
min_char_h = min([np.linalg.norm(d)
for d in self.char_box[:, 0, :] - self.char_box[:, -1, :]])
if effects is not None and 'mix_bg' in effects:
is_mix_bg = effects['mix_bg']
else:
is_mix_bg = False
patch_blend = self.effects(text_arr=text_arr, bg_arr=region_rect_rgb, min_char_h=min_char_h,
layer_type_params=effects, is_mix_bg=is_mix_bg)
""" map blended patch back to image """
M = cv2.getPerspectiveTransform(region_rect, region)
blend_region = cv2.warpPerspective(patch_blend, M, self.image.shape[:2][::-1],
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
blend_mask = cv2.warpPerspective(np.zeros(patch_blend.shape[:2]), M, self.image.shape[:2][::-1],
borderMode=cv2.BORDER_CONSTANT, borderValue=1)
text_mask = cv2.warpPerspective(text_mask, M, self.image.shape[:2][::-1],
borderMode=cv2.BORDER_CONSTANT, borderValue=0)
self.image = (self.image * blend_mask[:, :, None] + blend_region).astype(np.uint8)
self.text_mask = np.clip(self.text_mask + text_mask, 0, 255)
""" map bounding quadrilateral and baseline back to image """
self.bound_quadrilateral = self._perspective_warp_pts_with_M(
M, self.bound_quadrilateral).astype(np.int32)
self.char_box = self._perspective_warp_pts_with_M(M, self.char_box).astype(np.int32)
self.base_line = self._perspective_warp_pts_with_M(M, self.base_line).astype(np.int32)
return True
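    # Per-region placement pipeline: warp the quadrilateral to an axis-aligned rectangle,
    # render the text, resize/pad it to that rectangle (optionally preserving aspect
    # ratio), blend it with the background through TextEffects, then warp the blended
    # patch, the text mask and the annotation geometry (bounding quadrilateral, character
    # boxes, baseline) back into full-image coordinates.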
def _place_text_on_image(self, text_list, font_list, region_list=None, effect_list=None,
is_top_left_origin=True, ensure_num_regions=True, is_keep_ratio=True):
"""
        place multiple text instances on an image
        :param text_list: list of str
        :param font_list: list of font file paths
        :param region_list: list of regions; a region is a list of 4 corners in x-y coordinates
        :param effect_list: list of effects; an effect is a dict, please refer to self._place_text_on_region()
        :param is_top_left_origin: whether to keep the first point of a region vector as the top-left corner
        :param ensure_num_regions: whether to ensure the requested number of text instances is placed on the image
        :param is_keep_ratio: whether to keep the aspect ratio of the rendered text
:return: RGB image with text on it, and annotations, i.e., bounding box, character box, baseline, and true text
"""
n_text, n_font = len(text_list), len(font_list)
# assert 1 <= n_text <= 10
self.list_text = []
self.list_bound = []
self.list_char_box = []
self.list_base_line = []
""" get regions """
if region_list is None:
region_list = self._get_text_regions(
num_regions=n_text, is_top_left_origin=is_top_left_origin, ensure_num_regions=ensure_num_regions)
""" match text length to regains """
sorted_idx_region = np.argsort([max(r[:, 0]) - min(r[:, 0]) for r in region_list])
sorted_idx_text = np.argsort([len(t) for t in text_list])
else:
if not len(region_list) == n_text:
logging.info('Mismatched length between region_list and text_list')
return None
""" match text length to regains """
sorted_idx_region =
|
np.arange(n_text)
|
numpy.arange
|
import os
import shutil
import numpy as np
import pytest
import scipy.spatial
import autoarray as aa
from autoarray import exc
from autoarray.structures import grids
@pytest.fixture(name="grid")
def make_grid():
mask = aa.mask.manual(
np.array([[True, False, True], [False, False, False], [True, False, True]]),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
return aa.masked.grid.from_mask(mask=mask)
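# Pytest fixture: a plus-shaped 3x3 mask (corners masked, unit pixel scale) converted to
# a masked grid and exposed as `grid` to any test in this module that requests it.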
class TestGridAPI:
class TestManual:
def test__grid__makes_scaled_grid_with_pixel_scale(self):
grid = aa.grid.manual_2d(
grid=[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]],
pixel_scales=1.0,
)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.manual_1d(
grid=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
shape_2d=(2, 2),
pixel_scales=1.0,
origin=(0.0, 1.0),
)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
grid = aa.grid.manual_1d(
grid=[[1.0, 2.0], [3.0, 4.0]],
shape_2d=(2, 1),
pixel_scales=(2.0, 3.0),
store_in_1d=True,
)
assert type(grid) == grids.Grid
assert (grid == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (grid.in_2d == np.array([[[1.0, 2.0]], [[3.0, 4.0]]])).all()
assert (grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert grid.pixel_scales == (2.0, 3.0)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.manual_1d(
grid=[[1.0, 2.0], [3.0, 4.0]],
shape_2d=(2, 1),
pixel_scales=(2.0, 3.0),
store_in_1d=False,
)
assert type(grid) == grids.Grid
assert (grid == np.array([[[1.0, 2.0]], [[3.0, 4.0]]])).all()
assert (grid.in_2d == np.array([[[1.0, 2.0]], [[3.0, 4.0]]])).all()
assert (grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert grid.pixel_scales == (2.0, 3.0)
assert grid.origin == (0.0, 0.0)
def test__grid__makes_scaled_sub_grid_with_pixel_scale_and_sub_size(self):
grid = aa.grid.manual_2d(
grid=[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]],
pixel_scales=1.0,
sub_size=1,
)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
grid.in_2d_binned
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d_binned
== np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 0.0)
assert grid.sub_size == 1
grid = aa.grid.manual_1d(
grid=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
shape_2d=(1, 1),
pixel_scales=1.0,
sub_size=2,
origin=(0.0, 1.0),
store_in_1d=True,
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.in_2d_binned == np.array([[[4.0, 5.0]]])).all()
assert (grid.in_1d_binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
grid = aa.grid.manual_1d(
grid=[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
shape_2d=(1, 1),
pixel_scales=1.0,
sub_size=2,
origin=(0.0, 1.0),
store_in_1d=False,
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.in_2d_binned == np.array([[[4.0, 5.0]]])).all()
assert (grid.in_1d_binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
class TestManualYAndX:
def test__grid__makes_scaled_grid_with_pixel_scale(self):
grid = aa.grid.manual_yx_1d(
y=[1.0, 3.0, 5.0, 7.0],
x=[2.0, 4.0, 6.0, 8.0],
shape_2d=(2, 2),
pixel_scales=1.0,
origin=(0.0, 1.0),
store_in_1d=False,
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
grid = aa.grid.manual_yx_2d(
y=[[1.0], [3.0]],
x=[[2.0], [4.0]],
pixel_scales=(2.0, 3.0),
store_in_1d=True,
)
assert type(grid) == grids.Grid
assert (grid == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert (grid.in_2d == np.array([[[1.0, 2.0]], [[3.0, 4.0]]])).all()
assert (grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0]])).all()
assert grid.pixel_scales == (2.0, 3.0)
assert grid.origin == (0.0, 0.0)
def test__grid__makes_scaled_sub_grid_with_pixel_scale_and_sub_size(self):
grid = aa.grid.manual_yx_1d(
y=[1.0, 3.0, 5.0, 7.0],
x=[2.0, 4.0, 6.0, 8.0],
shape_2d=(1, 1),
pixel_scales=1.0,
sub_size=2,
origin=(0.0, 1.0),
store_in_1d=True,
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.in_2d_binned == np.array([[[4.0, 5.0]]])).all()
assert (grid.in_1d_binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
grid = aa.grid.manual_yx_2d(
y=[[1.0, 3.0], [5.0, 7.0]],
x=[[2.0, 4.0], [6.0, 8.0]],
pixel_scales=1.0,
sub_size=2,
origin=(0.0, 1.0),
store_in_1d=False,
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.in_2d_binned == np.array([[[4.0, 5.0]]])).all()
assert (grid.in_1d_binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
class TestGridUniform:
def test__grid_uniform__makes_scaled_grid_with_pixel_scale(self):
grid = aa.grid.uniform(shape_2d=(2, 2), pixel_scales=2.0)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [-1.0, 1.0]]])
).all()
assert (
grid.in_1d
== np.array([[1.0, -1.0], [1.0, 1.0], [-1.0, -1.0], [-1.0, 1.0]])
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.uniform(shape_2d=(2, 2), pixel_scales=2.0, origin=(1.0, 1.0))
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[2.0, 0.0], [2.0, 2.0]], [[0.0, 0.0], [0.0, 2.0]]])
).all()
assert (
grid.in_1d == np.array([[2.0, 0.0], [2.0, 2.0], [0.0, 0.0], [0.0, 2.0]])
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.origin == (1.0, 1.0)
grid = aa.grid.uniform(shape_2d=(2, 1), pixel_scales=(2.0, 1.0))
assert type(grid) == grids.Grid
assert (grid.in_2d == np.array([[[1.0, 0.0]], [[-1.0, 0.0]]])).all()
assert (grid.in_1d == np.array([[1.0, 0.0], [-1.0, 0.0]])).all()
assert grid.pixel_scales == (2.0, 1.0)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.uniform(
shape_2d=(2, 2), pixel_scales=2.0, origin=(1.0, 1.0), store_in_1d=True
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[2.0, 0.0], [2.0, 2.0], [0.0, 0.0], [0.0, 2.0]])
).all()
assert (
grid.in_2d
== np.array([[[2.0, 0.0], [2.0, 2.0]], [[0.0, 0.0], [0.0, 2.0]]])
).all()
assert (
grid.in_1d == np.array([[2.0, 0.0], [2.0, 2.0], [0.0, 0.0], [0.0, 2.0]])
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.origin == (1.0, 1.0)
grid = aa.grid.uniform(
shape_2d=(2, 1), pixel_scales=(2.0, 1.0), store_in_1d=False
)
assert type(grid) == grids.Grid
assert (grid == np.array([[[1.0, 0.0]], [[-1.0, 0.0]]])).all()
assert (grid.in_2d == np.array([[[1.0, 0.0]], [[-1.0, 0.0]]])).all()
assert (grid.in_1d == np.array([[1.0, 0.0], [-1.0, 0.0]])).all()
assert grid.pixel_scales == (2.0, 1.0)
assert grid.origin == (0.0, 0.0)
def test__grid__makes_scaled_sub_grid_with_pixel_scale_and_sub_size(self):
grid = aa.grid.uniform(shape_2d=(2, 2), pixel_scales=2.0, sub_size=1)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [-1.0, 1.0]]])
).all()
assert (
grid.in_1d
== np.array([[1.0, -1.0], [1.0, 1.0], [-1.0, -1.0], [-1.0, 1.0]])
).all()
assert (
grid.in_2d_binned
== np.array([[[1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [-1.0, 1.0]]])
).all()
assert (
grid.in_1d_binned
== np.array([[1.0, -1.0], [1.0, 1.0], [-1.0, -1.0], [-1.0, 1.0]])
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.origin == (0.0, 0.0)
assert grid.sub_size == 1
grid = aa.grid.uniform(
shape_2d=(2, 2), pixel_scales=2.0, sub_size=1, origin=(1.0, 1.0)
)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[2.0, 0.0], [2.0, 2.0]], [[0.0, 0.0], [0.0, 2.0]]])
).all()
assert (
grid.in_1d == np.array([[2.0, 0.0], [2.0, 2.0], [0.0, 0.0], [0.0, 2.0]])
).all()
assert (
grid.in_2d_binned
== np.array([[[2.0, 0.0], [2.0, 2.0]], [[0.0, 0.0], [0.0, 2.0]]])
).all()
assert (
grid.in_1d_binned
== np.array([[2.0, 0.0], [2.0, 2.0], [0.0, 0.0], [0.0, 2.0]])
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.origin == (1.0, 1.0)
assert grid.sub_size == 1
grid = aa.grid.uniform(shape_2d=(2, 1), pixel_scales=1.0, sub_size=2)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array(
[
[[0.75, -0.25], [0.75, 0.25]],
[[0.25, -0.25], [0.25, 0.25]],
[[-0.25, -0.25], [-0.25, 0.25]],
[[-0.75, -0.25], [-0.75, 0.25]],
]
)
).all()
assert (
grid.in_1d
== np.array(
[
[0.75, -0.25],
[0.75, 0.25],
[0.25, -0.25],
[0.25, 0.25],
[-0.25, -0.25],
[-0.25, 0.25],
[-0.75, -0.25],
[-0.75, 0.25],
]
)
).all()
assert (grid.in_2d_binned == np.array([[[0.5, 0.0]], [[-0.5, 0.0]]])).all()
assert (grid.in_1d_binned == np.array([[0.5, 0.0], [-0.5, 0.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 0.0)
assert grid.sub_size == 2
class TestGridBoundingBox:
def test__grid_bounding_box__align_at_corners__grid_corner_is_at_bounding_box_corner(
self
):
grid = aa.grid.bounding_box(
bounding_box=[-2.0, 2.0, -2.0, 2.0],
shape_2d=(3, 3),
buffer_around_corners=False,
)
assert grid.in_1d == pytest.approx(
np.array(
[
[1.3333, -1.3333],
[1.3333, 0.0],
[1.3333, 1.3333],
[0.0, -1.3333],
[0.0, 0.0],
[0.0, 1.3333],
[-1.3333, -1.3333],
[-1.3333, 0.0],
[-1.3333, 1.3333],
]
),
1.0e-4,
)
assert grid.pixel_scales == pytest.approx((1.33333, 1.3333), 1.0e-4)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.bounding_box(
bounding_box=[-2.0, 2.0, -2.0, 2.0],
shape_2d=(2, 3),
buffer_around_corners=False,
)
assert grid.in_1d == pytest.approx(
np.array(
[
[1.0, -1.3333],
[1.0, 0.0],
[1.0, 1.3333],
[-1.0, -1.3333],
[-1.0, 0.0],
[-1.0, 1.3333],
]
),
1.0e-4,
)
            assert grid.pixel_scales == pytest.approx((2.0, 1.33333), 1.0e-4)
assert grid.origin == (0.0, 0.0)
def test__grid_bounding_box__uniform_box__buffer_around_corners__makes_grid_with_correct_pixel_scales_and_origin(
self
):
grid = aa.grid.bounding_box(
bounding_box=[-2.0, 2.0, -2.0, 2.0],
shape_2d=(3, 3),
buffer_around_corners=True,
)
assert (
grid.in_1d
== np.array(
[
[2.0, -2.0],
[2.0, 0.0],
[2.0, 2.0],
[0.0, -2.0],
[0.0, 0.0],
[0.0, 2.0],
[-2.0, -2.0],
[-2.0, 0.0],
[-2.0, 2.0],
]
)
).all()
assert grid.pixel_scales == (2.0, 2.0)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.bounding_box(
bounding_box=[-2.0, 2.0, -2.0, 2.0],
shape_2d=(2, 3),
buffer_around_corners=True,
)
assert (
grid.in_1d
== np.array(
[
[2.0, -2.0],
[2.0, 0.0],
[2.0, 2.0],
[-2.0, -2.0],
[-2.0, 0.0],
[-2.0, 2.0],
]
)
).all()
assert grid.pixel_scales == (4.0, 2.0)
assert grid.origin == (0.0, 0.0)
grid = aa.grid.bounding_box(
bounding_box=[8.0, 10.0, -2.0, 3.0],
shape_2d=(3, 3),
store_in_1d=True,
buffer_around_corners=True,
)
assert grid == pytest.approx(
np.array(
[
[10.0, -2.0],
[10.0, 0.5],
[10.0, 3.0],
[9.0, -2.0],
[9.0, 0.5],
[9.0, 3.0],
[8.0, -2.0],
[8.0, 0.5],
[8.0, 3.0],
]
),
1.0e-4,
)
assert grid.in_1d == pytest.approx(
np.array(
[
[10.0, -2.0],
[10.0, 0.5],
[10.0, 3.0],
[9.0, -2.0],
[9.0, 0.5],
[9.0, 3.0],
[8.0, -2.0],
[8.0, 0.5],
[8.0, 3.0],
]
),
1.0e-4,
)
assert grid.pixel_scales == (1.0, 2.5)
assert grid.origin == (9.0, 0.5)
grid = aa.grid.bounding_box(
bounding_box=[8.0, 10.0, -2.0, 3.0],
shape_2d=(3, 3),
store_in_1d=False,
buffer_around_corners=True,
)
assert grid.in_2d == pytest.approx(
np.array(
[
[[10.0, -2.0], [10.0, 0.5], [10.0, 3.0]],
[[9.0, -2.0], [9.0, 0.5], [9.0, 3.0]],
[[8.0, -2.0], [8.0, 0.5], [8.0, 3.0]],
]
),
1.0e-4,
)
assert grid.in_1d == pytest.approx(
np.array(
[
[10.0, -2.0],
[10.0, 0.5],
[10.0, 3.0],
[9.0, -2.0],
[9.0, 0.5],
[9.0, 3.0],
[8.0, -2.0],
[8.0, 0.5],
[8.0, 3.0],
]
),
1.0e-4,
)
assert grid.pixel_scales == (1.0, 2.5)
assert grid.origin == (9.0, 0.5)
class TestGridMaskedAPI:
class TestManual:
def test__grid__makes_scaled_grid_with_pixel_scale(self):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0)
grid = aa.masked.grid.manual_2d(
grid=[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], mask=mask
)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 0.0)
mask = aa.mask.manual(
[[True, False], [False, False]], pixel_scales=1.0, origin=(0.0, 1.0)
)
grid = aa.masked.grid.manual_1d(
grid=[[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]], mask=mask
)
assert type(grid) == grids.Grid
assert (
grid.in_2d
== np.array([[[0.0, 0.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]])
).all()
assert (grid.in_1d == np.array([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
mask = aa.mask.manual(
[[False], [True]], sub_size=2, pixel_scales=1.0, origin=(0.0, 1.0)
)
grid = aa.masked.grid.manual_2d(
grid=[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 7.0]],
],
mask=mask,
store_in_1d=True,
)
assert type(grid) == grids.Grid
assert (
grid == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (
grid.in_2d
== np.array(
[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[0.0, 0.0], [0.0, 0.0]],
[[0.0, 0.0], [0.0, 0.0]],
]
)
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.in_2d_binned == np.array([[[4.0, 5.0]], [[0.0, 0.0]]])).all()
assert (grid.in_1d_binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
grid = aa.masked.grid.manual_2d(
grid=[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 7.0]],
],
mask=mask,
store_in_1d=False,
)
assert type(grid) == grids.Grid
assert (
grid
== np.array(
[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[0.0, 0.0], [0.0, 0.0]],
[[0.0, 0.0], [0.0, 0.0]],
]
)
).all()
assert (
grid.in_2d
== np.array(
[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[0.0, 0.0], [0.0, 0.0]],
[[0.0, 0.0], [0.0, 0.0]],
]
)
).all()
assert (
grid.in_1d == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
).all()
assert (grid.in_2d_binned == np.array([[[4.0, 5.0]], [[0.0, 0.0]]])).all()
assert (grid.in_1d_binned == np.array([[4.0, 5.0]])).all()
assert grid.pixel_scales == (1.0, 1.0)
assert grid.origin == (0.0, 1.0)
assert grid.sub_size == 2
def test__exception_raised_if_input_grid_is_2d_and_not_sub_shape_of_mask(self):
with pytest.raises(exc.GridException):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0, sub_size=1)
aa.masked.grid.manual_2d(grid=[[[1.0, 1.0], [3.0, 3.0]]], mask=mask)
with pytest.raises(exc.GridException):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0, sub_size=2)
aa.masked.grid.manual_2d(
grid=[[[1.0, 1.0], [2.0, 2.0]], [[3.0, 3.0], [4.0, 4.0]]], mask=mask
)
with pytest.raises(exc.GridException):
mask = aa.mask.unmasked(shape_2d=(2, 2), pixel_scales=1.0, sub_size=2)
aa.masked.grid.manual_2d(
grid=[
[[1.0, 1.0], [2.0, 2.0]],
[[3.0, 3.0], [4.0, 4.0]],
[[5.0, 5.0], [6.0, 6.0]],
],
mask=mask,
)
def test__exception_raised_if_input_grid_is_not_number_of_masked_sub_pixels(
self
):
with pytest.raises(exc.GridException):
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]], sub_size=1
)
aa.masked.grid.manual_1d(
grid=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]], mask=mask
)
with pytest.raises(exc.GridException):
mask = aa.mask.manual(
mask_2d=[[False, False], [True, False]], sub_size=1
)
aa.masked.grid.manual_1d(grid=[[1.0, 1.0], [2.0, 2.0]], mask=mask)
with pytest.raises(exc.GridException):
mask = aa.mask.manual(mask_2d=[[False, True], [True, True]], sub_size=2)
aa.masked.grid.manual_2d(
grid=[[[1.0, 1.0], [2.0, 2.0], [4.0, 4.0]]], mask=mask
)
with pytest.raises(exc.GridException):
mask = aa.mask.manual(mask_2d=[[False, True], [True, True]], sub_size=2)
aa.masked.grid.manual_2d(
grid=[[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0]]],
mask=mask,
)
class TestFromMask:
def test__from_mask__compare_to_array_util(self):
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=1)
grid_via_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=mask, sub_size=1, pixel_scales=(2.0, 2.0)
)
grid = aa.masked.grid.from_mask(mask=mask)
assert type(grid) == grids.Grid
assert grid == pytest.approx(grid_via_util, 1e-4)
assert grid.pixel_scales == (2.0, 2.0)
assert grid.interpolator == None
grid_2d = mask.mapping.grid_stored_2d_from_sub_grid_1d(sub_grid_1d=grid)
assert (grid.in_2d == grid_2d).all()
mask = np.array(
[[True, True, True], [True, False, False], [True, True, False]]
)
mask = aa.mask.manual(mask, pixel_scales=(3.0, 3.0), sub_size=2)
grid_via_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=mask, pixel_scales=(3.0, 3.0), sub_size=2
)
grid = aa.masked.grid.from_mask(mask=mask, store_in_1d=True)
assert len(grid.shape) == 2
assert grid == pytest.approx(grid_via_util, 1e-4)
grid = aa.masked.grid.from_mask(mask=mask, store_in_1d=False)
assert len(grid.shape) == 3
def test__grid__from_mask_method_same_as_masked_grid(self):
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=1)
grid_via_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=mask, sub_size=1, pixel_scales=(2.0, 2.0)
)
grid = aa.grid.from_mask(mask=mask)
assert type(grid) == grids.Grid
assert grid == pytest.approx(grid_via_util, 1e-4)
assert grid.pixel_scales == (2.0, 2.0)
assert grid.interpolator == None
grid_2d = mask.mapping.grid_stored_2d_from_sub_grid_1d(sub_grid_1d=grid)
assert (grid.in_2d == grid_2d).all()
class TestGrid:
def test__blurring_grid_from_mask__compare_to_array_util(self):
mask = np.array(
[
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, False, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=2)
blurring_mask_util = aa.util.mask.blurring_mask_2d_from_mask_2d_and_kernel_shape_2d(
mask_2d=mask, kernel_shape_2d=(3, 5)
)
blurring_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=blurring_mask_util, pixel_scales=(2.0, 2.0), sub_size=1
)
grid = aa.masked.grid.from_mask(mask=mask)
blurring_grid = grid.blurring_grid_from_kernel_shape(kernel_shape_2d=(3, 5))
assert blurring_grid == pytest.approx(blurring_grid_util, 1e-4)
assert blurring_grid.pixel_scales == (2.0, 2.0)
def test__blurring_grid_from_kernel_shape__compare_to_array_util(self):
mask = np.array(
[
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, False, True, True, True, False, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, False, True, True, True, False, True, True],
[True, True, True, True, True, True, True, True, True],
[True, True, True, True, True, True, True, True, True],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=2)
blurring_mask_util = aa.util.mask.blurring_mask_2d_from_mask_2d_and_kernel_shape_2d(
mask_2d=mask, kernel_shape_2d=(3, 5)
)
blurring_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=blurring_mask_util, pixel_scales=(2.0, 2.0), sub_size=1
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=2)
blurring_grid = grids.Grid.blurring_grid_from_mask_and_kernel_shape(
mask=mask, kernel_shape_2d=(3, 5)
)
assert blurring_grid == pytest.approx(blurring_grid_util, 1e-4)
assert blurring_grid.pixel_scales == (2.0, 2.0)
def test__masked_shape_2d_arcsec(self):
mask = aa.mask.circular(
shape_2d=(3, 3), radius=1.0, pixel_scales=(1.0, 1.0), sub_size=1
)
grid = grids.Grid(grid=np.array([[1.5, 1.0], [-1.5, -1.0]]), mask=mask)
assert grid.shape_2d_scaled == (3.0, 2.0)
grid = grids.Grid(
grid=np.array([[1.5, 1.0], [-1.5, -1.0], [0.1, 0.1]]), mask=mask
)
assert grid.shape_2d_scaled == (3.0, 2.0)
grid = grids.Grid(
grid=np.array([[1.5, 1.0], [-1.5, -1.0], [3.0, 3.0]]), mask=mask
)
assert grid.shape_2d_scaled == (4.5, 4.0)
grid = grids.Grid(
grid=np.array([[1.5, 1.0], [-1.5, -1.0], [3.0, 3.0], [7.0, -5.0]]),
mask=mask,
)
assert grid.shape_2d_scaled == (8.5, 8.0)
def test__flipped_property__returns_grid_as_x_then_y(self):
grid = aa.grid.manual_2d(
grid=[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], pixel_scales=1.0
)
assert (
grid.in_1d_flipped
== np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0], [8.0, 7.0]])
).all()
assert (
grid.in_2d_flipped
== np.array([[[2.0, 1.0], [4.0, 3.0]], [[6.0, 5.0], [8.0, 7.0]]])
).all()
grid = aa.grid.manual_2d(
grid=[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]], pixel_scales=1.0
)
assert (
grid.in_1d_flipped == np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]])
).all()
assert (
grid.in_2d_flipped == np.array([[[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]]])
).all()
def test__in_radians(self):
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0))
grid = aa.masked.grid.from_mask(mask=mask)
assert grid.in_radians[0, 0] == pytest.approx(0.00000969627362, 1.0e-8)
assert grid.in_radians[0, 1] == pytest.approx(0.00000484813681, 1.0e-8)
assert grid.in_radians[0, 0] == pytest.approx(
2.0 * np.pi / (180 * 3600), 1.0e-8
)
assert grid.in_radians[0, 1] == pytest.approx(
1.0 * np.pi / (180 * 3600), 1.0e-8
)
def test__yticks(self):
mask = aa.mask.circular(
shape_2d=(3, 3), radius=1.0, pixel_scales=(1.0, 1.0), sub_size=1
)
grid = grids.Grid(grid=np.array([[1.5, 1.0], [-1.5, -1.0]]), mask=mask)
assert grid.yticks == pytest.approx(np.array([-1.5, -0.5, 0.5, 1.5]), 1e-3)
grid = grids.Grid(grid=np.array([[3.0, 1.0], [-3.0, -1.0]]), mask=mask)
assert grid.yticks == pytest.approx(np.array([-3.0, -1, 1.0, 3.0]), 1e-3)
grid = grids.Grid(grid=np.array([[5.0, 3.5], [2.0, -1.0]]), mask=mask)
assert grid.yticks == pytest.approx(np.array([2.0, 3.0, 4.0, 5.0]), 1e-3)
def test__xticks(self):
mask = aa.mask.circular(
shape_2d=(3, 3), radius=1.0, pixel_scales=(1.0, 1.0), sub_size=1
)
grid = grids.Grid(grid=np.array([[1.0, 1.5], [-1.0, -1.5]]), mask=mask)
assert grid.xticks == pytest.approx(np.array([-1.5, -0.5, 0.5, 1.5]), 1e-3)
grid = grids.Grid(grid=np.array([[1.0, 3.0], [-1.0, -3.0]]), mask=mask)
assert grid.xticks == pytest.approx(np.array([-3.0, -1, 1.0, 3.0]), 1e-3)
grid = grids.Grid(grid=np.array([[3.5, 2.0], [-1.0, 5.0]]), mask=mask)
assert grid.xticks == pytest.approx(np.array([2.0, 3.0, 4.0, 5.0]), 1e-3)
def test__new_grid__with_interpolator__returns_grid_with_interpolator(self):
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0))
grid = aa.masked.grid.from_mask(mask=mask)
grid_with_interp = grid.new_grid_with_interpolator(
pixel_scale_interpolation_grid=1.0
)
assert (grid[:, :] == grid_with_interp[:, :]).all()
assert (grid.mask == grid_with_interp.mask).all()
interpolator_manual = grids.Interpolator.from_mask_grid_and_pixel_scale_interpolation_grids(
mask=mask, grid=grid, pixel_scale_interpolation_grid=1.0
)
assert (grid.interpolator.vtx == interpolator_manual.vtx).all()
assert (grid.interpolator.wts == interpolator_manual.wts).all()
def test__new_grid__with_binned__returns_grid_with_binned(self):
mask = np.array(
[
[True, True, False, False],
[True, False, True, True],
[True, True, False, False],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0))
grid = aa.masked.grid.from_mask(mask=mask)
grid.new_grid_with_binned_grid(binned_grid=1)
assert grid.binned == 1
def test__padded_grid_from_kernel_shape__matches_grid_2d_after_padding(self):
grid = grids.Grid.uniform(shape_2d=(4, 4), pixel_scales=3.0, sub_size=1)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(3, 3))
padded_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=np.full((6, 6), False), pixel_scales=(3.0, 3.0), sub_size=1
)
assert padded_grid.shape == (36, 2)
assert (padded_grid.mask == np.full(fill_value=False, shape=(6, 6))).all()
assert (padded_grid == padded_grid_util).all()
assert padded_grid.interpolator is None
grid = grids.Grid.uniform(shape_2d=(4, 5), pixel_scales=2.0, sub_size=1)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(3, 3))
padded_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=np.full((6, 7), False), pixel_scales=(2.0, 2.0), sub_size=1
)
assert padded_grid.shape == (42, 2)
assert (padded_grid == padded_grid_util).all()
grid = grids.Grid.uniform(shape_2d=(5, 4), pixel_scales=1.0, sub_size=1)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(3, 3))
padded_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=np.full((7, 6), False), pixel_scales=(1.0, 1.0), sub_size=1
)
assert padded_grid.shape == (42, 2)
assert (padded_grid == padded_grid_util).all()
grid = grids.Grid.uniform(shape_2d=(5, 5), pixel_scales=8.0, sub_size=1)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(2, 5))
padded_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=np.full((6, 9), False), pixel_scales=(8.0, 8.0), sub_size=1
)
assert padded_grid.shape == (54, 2)
assert (padded_grid == padded_grid_util).all()
mask = aa.mask.manual(
mask_2d=np.full((5, 4), False), pixel_scales=(2.0, 2.0), sub_size=2
)
grid = aa.masked.grid.from_mask(mask=mask)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(3, 3))
padded_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=np.full((7, 6), False), pixel_scales=(2.0, 2.0), sub_size=2
)
assert padded_grid.shape == (168, 2)
assert (padded_grid.mask == np.full(fill_value=False, shape=(7, 6))).all()
assert padded_grid == pytest.approx(padded_grid_util, 1e-4)
assert padded_grid.interpolator is None
mask = aa.mask.manual(
mask_2d=np.full((2, 5), False), pixel_scales=(8.0, 8.0), sub_size=4
)
grid = aa.masked.grid.from_mask(mask=mask)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(5, 5))
padded_grid_util = aa.util.grid.grid_1d_via_mask_2d(
mask_2d=np.full((6, 9), False), pixel_scales=(8.0, 8.0), sub_size=4
)
assert padded_grid.shape == (864, 2)
assert (padded_grid.mask == np.full(fill_value=False, shape=(6, 9))).all()
assert padded_grid == pytest.approx(padded_grid_util, 1e-4)
def test__padded_grid_from_kernel_shape__has_interpolator_grid_if_had_one_before(
self
):
grid = grids.Grid.uniform(shape_2d=(4, 4), pixel_scales=3.0, sub_size=1)
grid = grid.new_grid_with_interpolator(pixel_scale_interpolation_grid=0.1)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(3, 3))
assert padded_grid.interpolator is not None
assert padded_grid.interpolator.pixel_scale_interpolation_grid == 0.1
mask = aa.mask.unmasked(shape_2d=(6, 6), pixel_scales=(3.0, 3.0), sub_size=1)
interpolator = grids.Interpolator.from_mask_grid_and_pixel_scale_interpolation_grids(
mask=mask, grid=padded_grid, pixel_scale_interpolation_grid=0.1
)
assert (padded_grid.interpolator.vtx == interpolator.vtx).all()
assert (padded_grid.interpolator.wts == interpolator.wts).all()
mask = aa.mask.manual(
mask_2d=np.full((5, 4), False), pixel_scales=(2.0, 2.0), sub_size=2
)
grid = aa.masked.grid.from_mask(mask=mask)
grid = grid.new_grid_with_interpolator(pixel_scale_interpolation_grid=0.1)
padded_grid = grid.padded_grid_from_kernel_shape(kernel_shape_2d=(3, 3))
assert padded_grid.interpolator is not None
assert padded_grid.interpolator.pixel_scale_interpolation_grid == 0.1
mask = aa.mask.unmasked(shape_2d=(7, 6), pixel_scales=(2.0, 2.0), sub_size=2)
interpolator = grids.Interpolator.from_mask_grid_and_pixel_scale_interpolation_grids(
mask=mask, grid=padded_grid, pixel_scale_interpolation_grid=0.1
)
assert (padded_grid.interpolator.vtx == interpolator.vtx).all()
assert (padded_grid.interpolator.wts == interpolator.wts).all()
def test__sub_border_1d_indexes__compare_to_array_util(self):
mask = np.array(
[
[False, False, False, False, False, False, False, True],
[False, True, True, True, True, True, False, True],
[False, True, False, False, False, True, False, True],
[False, True, False, True, False, True, False, True],
[False, True, False, False, False, True, False, True],
[False, True, True, True, True, True, False, True],
[False, False, False, False, False, False, False, True],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=2)
sub_border_1d_indexes_util = aa.util.mask.sub_border_pixel_1d_indexes_from_mask_2d_and_sub_size(
mask_2d=mask, sub_size=2
)
grid = aa.masked.grid.from_mask(mask=mask)
assert grid.regions._sub_border_1d_indexes == pytest.approx(
sub_border_1d_indexes_util, 1e-4
)
def test__square_distance_from_coordinate_array(self):
mask = aa.mask.manual(
[[True, False], [False, False]], pixel_scales=1.0, origin=(0.0, 1.0)
)
grid = aa.masked.grid.manual_1d(
grid=[[1.0, 1.0], [2.0, 3.0], [1.0, 2.0]], mask=mask
)
square_distances = grid.squared_distances_from_coordinate(coordinate=(0.0, 0.0))
assert (square_distances.in_1d == np.array([2.0, 13.0, 5.0])).all()
assert (square_distances.mask == mask).all()
square_distances = grid.squared_distances_from_coordinate(coordinate=(0.0, 1.0))
assert (square_distances.in_1d == np.array([1.0, 8.0, 2.0])).all()
assert (square_distances.mask == mask).all()
def test__distance_from_coordinate_array(self):
mask = aa.mask.manual(
[[True, False], [False, False]], pixel_scales=1.0, origin=(0.0, 1.0)
)
grid = aa.masked.grid.manual_1d(
grid=[[1.0, 1.0], [2.0, 3.0], [1.0, 2.0]], mask=mask
)
square_distances = grid.distances_from_coordinate(coordinate=(0.0, 0.0))
assert (
square_distances.in_1d
== np.array([np.sqrt(2.0), np.sqrt(13.0), np.sqrt(5.0)])
).all()
assert (square_distances.mask == mask).all()
square_distances = grid.distances_from_coordinate(coordinate=(0.0, 1.0))
assert (
square_distances.in_1d == np.array([1.0, np.sqrt(8.0), np.sqrt(2.0)])
).all()
assert (square_distances.mask == mask).all()
class TestGridBorder:
def test__sub_border_grid_for_simple_mask(self):
mask = np.array(
[
[False, False, False, False, False, False, False, True],
[False, True, True, True, True, True, False, True],
[False, True, False, False, False, True, False, True],
[False, True, False, True, False, True, False, True],
[False, True, False, False, False, True, False, True],
[False, True, True, True, True, True, False, True],
[False, False, False, False, False, False, False, True],
]
)
mask = aa.mask.manual(mask_2d=mask, pixel_scales=(2.0, 2.0), sub_size=2)
grid = aa.masked.grid.from_mask(mask=mask)
assert (
grid.sub_border_grid
== np.array(
[
[6.5, -7.5],
[6.5, -5.5],
[6.5, -3.5],
[6.5, -0.5],
[6.5, 1.5],
[6.5, 3.5],
[6.5, 5.5],
[4.5, -7.5],
[4.5, 5.5],
[2.5, -7.5],
]
)
).all()
def test__inside_border_no_relocations(self):
mask = aa.mask.circular(
shape_2d=(30, 30), radius=1.0, pixel_scales=(0.1, 0.1), sub_size=1
)
grid = aa.masked.grid.from_mask(mask=mask)
grid_to_relocate = grids.Grid(
grid=np.array([[0.1, 0.1], [0.3, 0.3], [-0.1, -0.2]]), mask=mask
)
relocated_grid = grid.relocated_grid_from_grid(grid=grid_to_relocate)
assert (
relocated_grid == np.array([[0.1, 0.1], [0.3, 0.3], [-0.1, -0.2]])
).all()
assert (relocated_grid.mask == mask).all()
assert relocated_grid.sub_size == 1
mask = aa.mask.circular(
shape_2d=(30, 30), radius=1.0, pixel_scales=(0.1, 0.1), sub_size=2
)
grid = aa.masked.grid.from_mask(mask=mask)
grid_to_relocate = grids.Grid(
grid=np.array([[0.1, 0.1], [0.3, 0.3], [-0.1, -0.2]]), mask=mask
)
relocated_grid = grid.relocated_grid_from_grid(grid=grid_to_relocate)
assert (
relocated_grid == np.array([[0.1, 0.1], [0.3, 0.3], [-0.1, -0.2]])
).all()
assert (relocated_grid.mask == mask).all()
assert relocated_grid.sub_size == 2
def test__outside_border_are_relocations(self):
mask = aa.mask.circular(
shape_2d=(30, 30), radius=1.0, pixel_scales=(0.1, 0.1), sub_size=1
)
grid = aa.masked.grid.from_mask(mask=mask)
grid_to_relocate = grids.Grid(
grid=np.array([[10.1, 0.0], [0.0, 10.1], [-10.1, -10.1]]), mask=mask
)
relocated_grid = grid.relocated_grid_from_grid(grid=grid_to_relocate)
assert relocated_grid == pytest.approx(
np.array([[0.95, 0.0], [0.0, 0.95], [-0.7017, -0.7017]]), 0.1
)
assert (relocated_grid.mask == mask).all()
assert relocated_grid.sub_size == 1
mask = aa.mask.circular(
shape_2d=(30, 30), radius=1.0, pixel_scales=(0.1, 0.1), sub_size=2
)
grid = aa.masked.grid.from_mask(mask=mask)
grid_to_relocate = grids.Grid(
grid=np.array([[10.1, 0.0], [0.0, 10.1], [-10.1, -10.1]]), mask=mask
)
relocated_grid = grid.relocated_grid_from_grid(grid=grid_to_relocate)
assert relocated_grid == pytest.approx(
np.array([[0.9778, 0.0], [0.0, 0.97788], [-0.7267, -0.7267]]), 0.1
)
assert (relocated_grid.mask == mask).all()
assert relocated_grid.sub_size == 2
def test__outside_border_are_relocations__positive_origin_included_in_relocate(
self
):
mask = aa.mask.circular(
shape_2d=(60, 60),
radius=1.0,
pixel_scales=(0.1, 0.1),
centre=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
grid_to_relocate = grids.Grid(
grid=np.array([[11.1, 1.0], [1.0, 11.1], [-11.1, -11.1]]),
sub_size=1,
mask=mask,
)
relocated_grid = grid.relocated_grid_from_grid(grid=grid_to_relocate)
assert relocated_grid == pytest.approx(
np.array(
[[2.0, 1.0], [1.0, 2.0], [1.0 - np.sqrt(2) / 2, 1.0 - np.sqrt(2) / 2]]
),
0.1,
)
assert (relocated_grid.mask == mask).all()
assert relocated_grid.sub_size == 1
mask = aa.mask.circular(
shape_2d=(60, 60),
radius=1.0,
pixel_scales=(0.1, 0.1),
centre=(1.0, 1.0),
sub_size=2,
)
grid = aa.masked.grid.from_mask(mask=mask)
grid_to_relocate = grids.Grid(
grid=np.array([[11.1, 1.0], [1.0, 11.1], [-11.1, -11.1]]), mask=mask
)
relocated_grid = grid.relocated_grid_from_grid(grid=grid_to_relocate)
assert relocated_grid == pytest.approx(
np.array(
[
[1.9263, 1.0 - 0.0226],
[1.0 - 0.0226, 1.9263],
[1.0 - 0.7267, 1.0 - 0.7267],
]
),
0.1,
)
assert (relocated_grid.mask == mask).all()
assert relocated_grid.sub_size == 2
class TestGridIrregular:
def test__pixelization_grid__attributes(self):
pix_grid = grids.GridIrregular(
grid=np.array([[1.0, 1.0], [2.0, 2.0]]),
nearest_pixelization_1d_index_for_mask_1d_index=np.array([0, 1]),
)
assert type(pix_grid) == grids.GridIrregular
assert (pix_grid == np.array([[1.0, 1.0], [2.0, 2.0]])).all()
assert (
pix_grid.nearest_pixelization_1d_index_for_mask_1d_index == np.array([0, 1])
).all()
def test__from_unmasked_sparse_shape_and_grid(self):
mask = aa.mask.manual(
mask_2d=np.array(
[[True, False, True], [False, False, False], [True, False, True]]
),
pixel_scales=(0.5, 0.5),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(10, 10), grid=grid
)
pixelization_grid = grids.GridIrregular.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(10, 10), grid=grid
)
assert (sparse_grid.sparse == pixelization_grid).all()
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== pixelization_grid.nearest_pixelization_1d_index_for_mask_1d_index
).all()
class TestSparseGrid:
class TestUnmaskedShape:
def test__properties_consistent_with_util(self):
mask = aa.mask.manual(
mask_2d=np.array(
[[True, False, True], [False, False, False], [True, False, True]]
),
pixel_scales=(0.5, 0.5),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(10, 10), grid=grid
)
unmasked_sparse_grid_util = aa.util.grid.grid_1d_via_shape_2d(
shape_2d=(10, 10),
pixel_scales=(0.15, 0.15),
sub_size=1,
origin=(0.0, 0.0),
)
unmasked_sparse_grid_pixel_centres = aa.util.grid.grid_pixel_centres_1d_from_grid_scaled_1d_shape_2d_and_pixel_scales(
grid_scaled_1d=unmasked_sparse_grid_util,
shape_2d=grid.mask.shape,
pixel_scales=grid.pixel_scales,
).astype(
"int"
)
total_sparse_pixels = aa.util.mask.total_sparse_pixels_from_mask_2d(
mask_2d=mask,
unmasked_sparse_grid_pixel_centres=unmasked_sparse_grid_pixel_centres,
)
regular_to_unmasked_sparse_util = aa.util.grid.grid_pixel_indexes_1d_from_grid_scaled_1d_shape_2d_and_pixel_scales(
grid_scaled_1d=grid,
shape_2d=(10, 10),
pixel_scales=(0.15, 0.15),
origin=(0.0, 0.0),
).astype(
"int"
)
unmasked_sparse_for_sparse_util = aa.util.sparse.unmasked_sparse_for_sparse_from_mask_2d_and_pixel_centres(
total_sparse_pixels=total_sparse_pixels,
mask_2d=mask,
unmasked_sparse_grid_pixel_centres=unmasked_sparse_grid_pixel_centres,
).astype(
"int"
)
sparse_for_unmasked_sparse_util = aa.util.sparse.sparse_for_unmasked_sparse_from_mask_2d_and_pixel_centres(
mask_2d=mask,
unmasked_sparse_grid_pixel_centres=unmasked_sparse_grid_pixel_centres,
total_sparse_pixels=total_sparse_pixels,
).astype(
"int"
)
sparse_1d_index_for_mask_1d_index_util = aa.util.sparse.sparse_1d_index_for_mask_1d_index_from_sparse_mappings(
regular_to_unmasked_sparse=regular_to_unmasked_sparse_util,
sparse_for_unmasked_sparse=sparse_for_unmasked_sparse_util,
)
sparse_grid_util = aa.util.sparse.sparse_grid_from_unmasked_sparse_grid(
unmasked_sparse_grid=unmasked_sparse_grid_util,
unmasked_sparse_for_sparse=unmasked_sparse_for_sparse_util,
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== sparse_1d_index_for_mask_1d_index_util
).all()
assert (sparse_grid.sparse == sparse_grid_util).all()
def test__sparse_grid_overlaps_mask_perfectly__masked_pixels_in_masked_sparse_grid(
self
):
mask = aa.mask.manual(
mask_2d=np.array(
[[True, False, True], [False, False, False], [True, False, True]]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(3, 3), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4])
).all()
assert (
sparse_grid.sparse
== np.array(
[[1.0, 0.0], [0.0, -1.0], [0.0, 0.0], [0.0, 1.0], [-1.0, 0.0]]
)
).all()
def test__same_as_above_but_4x3_grid_and_mask(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, False, True],
[False, False, False],
[False, False, False],
[True, False, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(4, 3), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4, 5, 6, 7])
).all()
assert (
sparse_grid.sparse
== np.array(
[
[1.5, 0.0],
[0.5, -1.0],
[0.5, 0.0],
[0.5, 1.0],
[-0.5, -1.0],
[-0.5, 0.0],
[-0.5, 1.0],
[-1.5, 0.0],
]
)
).all()
def test__same_as_above_but_3x4_grid_and_mask(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, False, True, True],
[False, False, False, False],
[True, False, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(3, 4), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4, 5])
).all()
assert (
sparse_grid.sparse
== np.array(
[
[1.0, -0.5],
[0.0, -1.5],
[0.0, -0.5],
[0.0, 0.5],
[0.0, 1.5],
[-1.0, -0.5],
]
)
).all()
def test__mask_with_offset_centre__origin_of_sparse_grid_moves_to_give_same_pairings(
self
):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, False, True],
[True, True, False, False, False],
[True, True, True, False, True],
[True, True, True, True, True],
[True, True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
# Without a change in origin, only the central 3 pixels are paired as the unmasked sparse grid overlaps
# the central (3x3) pixels only.
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(3, 3), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4])
).all()
assert (
sparse_grid.sparse
== np.array(
[[2.0, 1.0], [1.0, 0.0], [1.0, 1.0], [1.0, 2.0], [0.0, 1.0]]
)
).all()
def test__same_as_above_but_different_offset(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, True, True, True],
[True, True, True, False, True],
[True, True, False, False, False],
[True, True, True, False, True],
[True, True, True, True, True],
]
),
pixel_scales=(2.0, 2.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
# Without a change in origin, only the central 3 pixels are paired as the unmasked sparse grid overlaps
# the central (3x3) pixels only.
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(3, 3), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4])
).all()
assert (
sparse_grid.sparse
== np.array(
[[2.0, 2.0], [0.0, 0.0], [0.0, 2.0], [0.0, 4.0], [-2.0, 2.0]]
)
).all()
def test__from_grid_and_unmasked_shape__sets_up_with_correct_shape_and_pixel_scales(
self, mask_7x7
):
grid = aa.masked.grid.from_mask(mask=mask_7x7)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
grid=grid, unmasked_sparse_shape=(3, 3)
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
).all()
assert (
sparse_grid.sparse
== np.array(
[
[1.0, -1.0],
[1.0, 0.0],
[1.0, 1.0],
[0.0, -1.0],
[0.0, 0.0],
[0.0, 1.0],
[-1.0, -1.0],
[-1.0, 0.0],
[-1.0, 1.0],
]
)
).all()
def test__same_as_above__but_4x3_image(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, False, True],
[False, False, False],
[False, False, False],
[True, False, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(4, 3), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4, 5, 6, 7])
).all()
assert (
sparse_grid.sparse
== np.array(
[
[1.5, 0.0],
[0.5, -1.0],
[0.5, 0.0],
[0.5, 1.0],
[-0.5, -1.0],
[-0.5, 0.0],
[-0.5, 1.0],
[-1.5, 0.0],
]
)
).all()
def test__same_as_above__but_3x4_image(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, False, True, True],
[False, False, False, False],
[True, False, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(3, 4), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4, 5])
).all()
assert (
sparse_grid.sparse
== np.array(
[
[1.0, -0.5],
[0.0, -1.5],
[0.0, -0.5],
[0.0, 0.5],
[0.0, 1.5],
[-1.0, -0.5],
]
)
).all()
def test__from_grid_and_shape__offset_mask__origin_shift_corrects(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[True, True, False, False, False],
[True, True, False, False, False],
[True, True, False, False, False],
[True, True, True, True, True],
[True, True, True, True, True],
]
),
pixel_scales=(1.0, 1.0),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
sparse_grid = grids.SparseGrid.from_grid_and_unmasked_2d_grid_shape(
unmasked_sparse_shape=(3, 3), grid=grid
)
assert (
sparse_grid.sparse_1d_index_for_mask_1d_index
== np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
).all()
assert (
sparse_grid.sparse
== np.array(
[
[2.0, 0.0],
[2.0, 1.0],
[2.0, 2.0],
[1.0, 0.0],
[1.0, 1.0],
[1.0, 2.0],
[0.0, 0.0],
[0.0, 1.0],
[0.0, 2.0],
]
)
).all()
class TestUnmaskedShapeAndWeightImage:
def test__weight_map_all_ones__kmeans_grid_is_grid_overlapping_image(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
]
),
pixel_scales=(0.5, 0.5),
sub_size=1,
)
grid = aa.masked.grid.from_mask(mask=mask)
weight_map = np.ones(mask.pixels_in_mask)
sparse_grid_weight = grids.SparseGrid.from_total_pixels_grid_and_weight_map(
total_pixels=8,
grid=grid,
weight_map=weight_map,
n_iter=10,
max_iter=20,
seed=1,
)
assert (
sparse_grid_weight.sparse
== np.array(
[
[-0.25, 0.25],
[0.5, -0.5],
[0.75, 0.5],
[0.25, 0.5],
[-0.5, -0.25],
[-0.5, -0.75],
[-0.75, 0.5],
[-0.25, 0.75],
]
)
).all()
assert (
sparse_grid_weight.sparse_1d_index_for_mask_1d_index
== np.array([1, 1, 2, 2, 1, 1, 3, 3, 5, 4, 0, 7, 5, 4, 6, 6])
).all()
def test__weight_map_changed_from_above(self):
mask = aa.mask.manual(
mask_2d=np.array(
[
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
]
),
pixel_scales=(0.5, 0.5),
sub_size=2,
)
grid = aa.masked.grid.from_mask(mask=mask)
weight_map =
|
np.ones(mask.pixels_in_mask)
|
numpy.ones
|
import torch
import numpy as np
import torch.nn.functional as F
import argparse
import cv2
import matplotlib.pyplot as plt
import os
from utils.utils import get_device, get_priorBox_2d, draw_bbox_label
from model.model import Net
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def detect(image_3dArray, model):
priorBox_2d = get_priorBox_2d()
image_4dArray = np.expand_dims(
|
np.array(image_3dArray)
|
numpy.array
|
#
# Copyright (c) 2017-18 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Utilities for retrieving and processing CFS reanalysis and reforecast data using XArray.
For now, we only implement the regularly-gridded 0.5- and 2.5-degree data. Support for variables on the native
Gaussian ~0.5 degree grid may come in the future.
"""
import os
import warnings
import itertools as it
import numpy as np
import netCDF4 as nc
import pandas as pd
import xarray as xr
from scipy.interpolate import RectBivariateSpline
from datetime import datetime, timedelta
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
try:
import pygrib
except ImportError:
warnings.warn("module 'pygrib' not found; processing of raw CFS data unavailable.")
# ==================================================================================================================== #
# Universal parameters and functions
# ==================================================================================================================== #
def _check_exists(file_name, path=False):
if os.path.exists(file_name):
exists = True
local_file = file_name
else:
exists = False
local_file = None
if path:
return exists, local_file
else:
return exists
# For some reason, multiprocessing.Pool.map is placing arguments passed to the function inside another length-1 tuple.
# Much clearer programming would have required arguments of obj, m, month, *args here so that the user knows to include
# the CFS object, month index, month dates, and other arguments correctly.
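# As a concrete illustration (mirroring the call sites further down in this module),
# the parallel dispatch looks like
#     pool.map(call_fetch, zip(it.repeat(self), self.raw_files, it.repeat(verbose)))
# so each worker receives a single tuple (obj, file, verbose), which the wrapper
# unpacks into obj._fetch(file, verbose).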
def call_process_month(args):
obj = args[0]
obj._process_month(*args[1:])
def call_fetch(args):
obj = args[0]
obj._fetch(*args[1:])
# Format strings for files to read/write
grib_dir_format = '%Y/%Y%m/%Y%m%d'
grib_file_format = 'pgb{:s}{:s}.gdas.%Y%m%d%H.grb2'
reforecast_dir_format = '{:s}/%Y%m'
reforecast_file_format = '{:s}_f.01.{:s}.{:s}.{:s}.grb2'
# Start and end dates of available data
data_start_date = datetime(1979, 1, 1)
data_end_date = datetime(2011, 3, 31)
reforecast_start_date = datetime(1999, 1, 1)
reforecast_end_date = datetime(2009, 12, 31, 18)
# Parameter tables for GRIB data. Should be included in repository.
dir_path = os.path.dirname(os.path.realpath(__file__))
grib2_table = np.genfromtxt('%s/cfsr_pgb_grib_table.csv' % dir_path, dtype='str', delimiter=',')
# netCDF fill value
fill_value = np.array(nc.default_fillvals['f4']).astype(np.float32)
# ==================================================================================================================== #
# CFSReanalysis object class
# ==================================================================================================================== #
class CFSReanalysis(object):
"""
Class for manipulating CFS Reanalysis data with xarray. Class methods include functions to download,
process, and export data. Currently only works with pressure-level data ('pgb').
"""
def __init__(self, root_directory=None, resolution='l', run_type='06', fill_hourly=True, file_id=''):
"""
Initialize an instance of the CFSReanalysis class.
:param root_directory: str: local directory where raw files are stored. If None, defaults to ~/.cfsr
:param resolution: str: 'h' corresponds to the high-res 0.5-degree grid; 'l' the low-res 2.5-degree grid
:param run_type: str: one of the forecast hours or the analysis: ['01', '02', '03', '04', '05', '06', 'nl']
:param fill_hourly: bool: if True, automatically add in 6-hourly time steps even if only 00Z dates are given
:param file_id: str: appended to the processed file names. Useful if files for the same dates will be created
with different parameters, i.e., hours or variables or levels.
"""
self.raw_files = []
self.dataset_dates = []
self.dataset_variables = []
if root_directory is None:
self._root_directory = '%s/.cfsr' % os.path.expanduser('~')
else:
self._root_directory = root_directory
self._resolution = resolution
if resolution == 'h':
self._ny = 361
self._nx = 720
            self._root_url = 'https://nomads.ncdc.noaa.gov/modeldata/cmd_pgbh'
elif resolution == 'l':
self._ny = 73
self._nx = 144
self._root_url = 'https://nomads.ncdc.noaa.gov/modeldata/cmd_grblow'
else:
raise ValueError("resolution must be 'h' or 'l'")
if run_type not in ['01', '02', '03', '04', '05', '06', 'nl']:
raise ValueError("run_type must be 'nl' or a 2-digit forecast hour from '01' to '06'")
else:
self._run_type = run_type
self._fill_hourly = fill_hourly
self._file_id = file_id
self.level_coord = [1, 2, 3, 5, 7, 10, 20, 30, 50, 70, 100, 125, 150, 175, 200, 225, 250, 300, 350, 400, 450,
500, 550, 600, 650, 700, 750] + list(range(775, 1001, 25))
self.inverse_lat = True
# Data
self.Dataset = None
self.basemap = None
self._lat_array = None
self._lon_array = None
@property
def lat(self):
if self._lat_array is not None:
return self._lat_array
try:
lat = self.Dataset.variables['lat'][:]
if len(lat.shape) > 2:
self._lat_array = lat[0, ...].values
return self._lat_array
else:
self._lat_array = lat.values
return self._lat_array
except AttributeError:
raise AttributeError('Call to lat method is only valid after data are opened.')
except KeyError:
return
@property
def lon(self):
if self._lon_array is not None:
return self._lon_array
try:
lon = self.Dataset.variables['lon'][:]
if len(lon.shape) > 2:
self._lon_array = lon[0, ...].values
return self._lon_array
else:
self._lon_array = lon.values
return self._lon_array
except AttributeError:
raise AttributeError('Call to lon method is only valid after data are opened.')
except KeyError:
return
def set_dates(self, dates):
"""
Set the CFSReanalysis object's dataset_dates attribute, a list of datetime objects which determines which
datetimes are retrieved and processed. This attribute is set automatically when using the method 'retrieve',
but may be used when 'retrieve' is not desired or as an override.
:param dates: list of datetime objects.
:return:
"""
self.dataset_dates = sorted(dates)
if self._fill_hourly:
day_set = sorted(set([datetime(d.year, d.month, d.day) for d in self.dataset_dates]))
new_dates = []
for day in day_set:
new_dates.extend((day, day.replace(hour=6), day.replace(hour=12), day.replace(hour=18)))
self.dataset_dates = new_dates
def set_levels(self, levels):
"""
Set the CFSReanalysis object's level_coord attribute, a list of integer height levels which determines which
levels are processed and written to netCDF files. This attribute is set to a default, but may be overriden.
Note that any further processing or reading of data must use the same level coordinate, i.e., choose wisely!
:param levels: list of integer pressure height levels (mb / hPa)
:return:
"""
self.level_coord = sorted([l for l in levels if 0 < int(l) <= 1000])
def closest_lat_lon(self, lat, lon):
"""
Find the grid-point index of the closest point to the specified latitude and longitude values in loaded
CFS reanalysis data.
:param lat: float or int: latitude in degrees
:param lon: float or int: longitude in degrees
:return:
"""
if lon < 0.:
lon += 360.
distance = (self.lat - lat) ** 2 + (self.lon - lon) ** 2
min_dist = 2.5 if self._resolution == 'l' else 1.
if np.min(distance) > min_dist:
            raise ValueError('no latitude/longitude points within %s degrees of requested lat/lon!' % min_dist)
return np.unravel_index(np.argmin(distance, axis=None), distance.shape)
def retrieve(self, dates, n_proc=4, verbose=False):
"""
Retrieves CFS reanalysis data for the given datetimes, and writes them to the local directory. The same
directory structure (%Y/%Y%m/%Y%m%d/file_name) is used locally as on the server. Creates subdirectories if
necessary. File types retrieved are given by the object's init parameters.
:param dates: list or tuple: date or datetime objects of of analysis times. May be 'all', in which case
all dates in the object's 'dataset_dates' attributes are retrieved.
:param n_proc: int: if >1, fetches files in parallel. This speeds up performance but may not scale well if
internet I/O is the bottleneck. Set to 0 to use all available threads.
:param verbose: bool: include progress print statements
:return: None
"""
# Check if any parameter is a single value
if dates == 'all':
dates = self.dataset_dates
else:
self.set_dates(dates)
dates = self.dataset_dates
# Determine the files to retrieve
if verbose:
print('CFSReanalysis.retrieve: beginning data retrieval\n')
self.raw_files = []
for dt in dates:
if dt < data_start_date or dt > data_end_date:
print('* Warning: doing nothing for date %s, out of valid data range (%s to %s)' %
(dt, data_start_date, data_end_date))
continue
if dt not in self.dataset_dates:
self.dataset_dates.append(dt)
# Create local directory
grib_file_dir = datetime.strftime(dt, grib_dir_format)
os.makedirs('%s/%s' % (self._root_directory, grib_file_dir), exist_ok=True)
# Add GRIB file to listing
grib_file_name = datetime.strftime(dt, grib_file_format)
grib_file_name = '%s/%s' % (grib_file_dir, grib_file_name.format(self._resolution, self._run_type))
if grib_file_name not in self.raw_files:
self.raw_files.append(grib_file_name)
if n_proc == 0 or n_proc > 1:
try:
import multiprocessing
if n_proc == 0:
n_proc = multiprocessing.cpu_count()
except ImportError:
warnings.warn("'multiprocessing' module not available; falling back to serial")
n_proc = 1
if n_proc == 1:
for file in self.raw_files:
call_fetch((self, file, verbose))
else:
pool = multiprocessing.Pool(processes=n_proc)
pool.map(call_fetch, zip(it.repeat(self), self.raw_files, it.repeat(verbose)))
pool.close()
pool.terminate()
pool.join()
def _fetch(self, f, verbose):
pid = os.getpid()
local_file = '%s/%s' % (self._root_directory, f)
if _check_exists(local_file):
if verbose:
print('PID %s: local file %s exists; omitting' % (pid, local_file))
return
remote_file = '%s/%s' % (self._root_url, f)
if verbose:
print('PID %s: downloading %s' % (pid, remote_file))
try:
response = urlopen(remote_file)
with open(local_file, 'wb') as fd:
fd.write(response.read())
except BaseException as e:
print('warning: failed to download %s, retrying' % remote_file)
try:
response = urlopen(remote_file)
with open(local_file, 'wb') as fd:
fd.write(response.read())
except BaseException as e:
print('warning: failed to download %s' % remote_file)
print('* Reason: "%s"' % str(e))
def write(self, variables='all', dates='all', levels='all', write_into_existing=True, omit_existing=False,
delete_raw_files=False, n_proc=4, verbose=False):
"""
Reads raw CFS reanalysis files for the given dates (list or tuple form) and specified variables and levels and
writes the data to reformatted netCDF files. Processed files are saved under self._root_directory/processed;
one file per month is created.
:param variables: list: list of variables to retrieve from data or 'all'
:param dates: list or tuple of datetime: date or datetime objects of model initialization; may be 'all', in
which case, all the dates in the object's dataset_dates attribute are used (these are set when calling
self.retrieve() or self.set_dates())
:param levels: list or tuple of int: list of pressure levels as int (in mb); must be compatible with existing
processed files; may be 'all', using the object's level_coord attribute
:param write_into_existing: bool: if True, checks for existing files and appends if they exist. If False,
overwrites any existing files.
:param omit_existing: bool: if True, then if a processed file exists, skip it. Only useful if existing data
are known to be complete.
:param delete_raw_files: bool: if True, deletes the original data files from which the processed versions were
made
:param n_proc: int: if >1, runs write tasks in parallel, one per month of data. This speeds up performance but
may not scale well if disk I/O is the bottleneck. Set to 0 to use all available threads.
:param verbose: bool: include progress print statements
:return:
"""
# Parameter checks
if variables == 'all':
variables = list(grib2_table[:, 0])
if dates == 'all':
dates = self.dataset_dates
else:
self.set_dates(dates)
dates = self.dataset_dates
if levels == 'all':
levels = [l for l in self.level_coord]
else:
self.set_levels(levels)
levels = self.level_coord
if len(variables) == 0:
print('CFSReanalysis.write: no variables specified; will do nothing.')
return
if len(dates) == 0:
print('CFSReanalysis.write: no dates specified; will do nothing.')
return
if len(levels) == 0:
print('CFSReanalysis.write: no pressure levels specified; will do nothing.')
return
if int(n_proc) < 0:
raise ValueError("'multiprocess' must be an integer >= 0")
self.dataset_variables = list(variables)
# Generate monthly batches of dates
dates_index = pd.DatetimeIndex(dates).sort_values()
months = dates_index.to_period('M')
unique_months = months.unique()
month_list = []
for nm in range(len(unique_months)):
month_list.append(list(dates_index[months == unique_months[nm]].to_pydatetime()))
if n_proc == 0 or n_proc > 1:
try:
import multiprocessing
if n_proc == 0:
n_proc = multiprocessing.cpu_count()
except ImportError:
warnings.warn("'multiprocessing' module not available; falling back to serial")
n_proc = 1
if n_proc == 1:
for nm, month in enumerate(month_list):
call_process_month((self, nm, month, unique_months, variables, levels, write_into_existing,
omit_existing, delete_raw_files, verbose))
else:
pool = multiprocessing.Pool(processes=n_proc)
pool.map(call_process_month, zip(it.repeat(self), range(len(month_list)), month_list,
it.repeat(unique_months), it.repeat(variables), it.repeat(levels),
it.repeat(write_into_existing), it.repeat(omit_existing),
it.repeat(delete_raw_files), it.repeat(verbose)))
pool.close()
pool.terminate()
pool.join()
# Define a function for multi-processing
def _process_month(self, m, month, unique_months, variables, levels, write_into_existing, omit_existing,
delete_raw_files, verbose):
# Define some data reading functions that also write to the output
def read_write_grib_lat_lon(file_name, nc_fid):
exists, exists_file_name = _check_exists(file_name, path=True)
if not exists:
raise IOError('File %s not found.' % file_name)
grib_data = pygrib.open(file_name)
try:
lats = np.array(grib_data[1]['latitudes'], dtype=np.float32)
lons = np.array(grib_data[1]['longitudes'], dtype=np.float32)
shape = grib_data[1].values.shape
lat = lats.reshape(shape)[:, 0]
lon = lons.reshape(shape)[0, :]
except BaseException:
print('* Warning: cannot get lat/lon from grib file %s' % exists_file_name)
raise
if verbose:
print('PID %s: Writing latitude and longitude' % pid)
nc_var = nc_fid.createVariable('lat', np.float32, ('lat',), zlib=True)
nc_var.setncatts({
'long_name': 'Latitude',
'units': 'degrees_north'
})
nc_fid.variables['lat'][:] = lat
nc_var = nc_fid.createVariable('lon', np.float32, ('lon',), zlib=True)
nc_var.setncatts({
'long_name': 'Longitude',
'units': 'degrees_east'
})
nc_fid.variables['lon'][:] = lon
grib_data.close()
def read_write_grib(file_name, time_index, nc_fid):
exists, exists_file_name = _check_exists(file_name, path=True)
if not exists:
print('* Warning: file %s not found' % file_name)
return
if verbose:
print('PID %s: Reading %s' % (pid, exists_file_name))
grib_data = pygrib.open(file_name)
# Have to do this the hard way, because grib_index doesn't work on these 'multi-field' files
grib_index = []
grib_index_no_level = []
for grb in grib_data:
try:
grib_index.append([int(grb.discipline), int(grb.parameterCategory),
int(grb.parameterNumber), int(grb.level)])
grib_index_no_level.append([int(grb.discipline), int(grb.parameterCategory),
int(grb.parameterNumber)])
except RuntimeError:
grib_index.append([])
grib_index_no_level.append([])
if verbose:
print('PID %s: Variables to fetch: %s' % (pid, variables))
for row in range(grib2_table.shape[0]):
var = grib2_table[row, 0]
if var in variables:
if var not in nc_fid.variables.keys():
if verbose:
print('PID %s: Creating variable %s' % (pid, var))
if grib2_table[row, 6] == 'isobaricInhPa':
nc_var = nc_fid.createVariable(var, np.float32, ('time', 'level', 'lat', 'lon'), zlib=True)
else:
nc_var = nc_fid.createVariable(var, np.float32, ('time', 'lat', 'lon'), zlib=True)
nc_var.setncatts({
'long_name': grib2_table[row, 4],
'units': grib2_table[row, 5],
'_FillValue': fill_value
})
if grib2_table[row, 6] == 'isobaricInhPa':
for level_index, level in enumerate(levels):
try:
if verbose:
print('PID %s: Writing %s at level %d' % (pid, var, level))
# Match a list containing discipline, parameterCategory, parameterNumber, level.
# Add one because grib indexing starts at 1.
grib_key = grib_index.index([int(grib2_table[row, 1]), int(grib2_table[row, 2]),
int(grib2_table[row, 3]), int(level)]) + 1
if verbose:
print(' found %s' % grib_data[grib_key])
data = np.array(grib_data[grib_key].values, dtype=np.float32)
nc_fid.variables[var][time_index, level_index, ...] = data
except OSError: # missing index gives an OS read error
print('* Warning: grib variable %s not found in file %s' % (var, file_name))
pass
except BaseException as e:
print("* Warning: failed to write %s to netCDF file ('%s')" % (var, str(e)))
else:
try:
if verbose:
print('PID %s: Writing %s' % (pid, var))
# Match a list containing discipline, parameterCategory, parameterNumber, level.
# Add one because grib indexing starts at 1.
grib_key = grib_index_no_level.index([int(grib2_table[row, 1]), int(grib2_table[row, 2]),
int(grib2_table[row, 3])]) + 1
if verbose:
print(' found %s' % grib_data[grib_key])
data =
|
np.array(grib_data[grib_key].values, dtype=np.float32)
|
numpy.array
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
import numpy
import h5py
import tempfile
import scipy
# sort by similarity has problem which flips the ordering of eigenvalues when
# the initial guess is closed to excited state. In this situation, function
# _sort_by_similarity may mark the excited state as the first eigenvalue and
# freeze the first eigenvalue.
SORT_EIG_BY_SIMILARITY = False
# Projecting out converged eigenvectors has problems when conv_tol is loose.
# In this situation, the converged eigenvectors may be updated in the
# following iterations. Projecting out the converged eigenvectors may lead to
# large errors to the yet converged eigenvectors.
PROJECT_OUT_CONV_EIGS = False
# default max_memory 2000 MB
def davidson(
aop,
x0,
precond,
tol=1e-12,
max_cycle=50,
max_space=12,
lindep=1e-14,
max_memory=2000,
dot=numpy.dot,
callback=None,
nroots=1,
lessio=False,
follow_state=False,
):
e, x = davidson1(
lambda xs: [aop(x) for x in xs],
x0,
precond,
tol,
max_cycle,
max_space,
lindep,
max_memory,
dot,
callback,
nroots,
lessio,
follow_state,
)[1:]
if nroots == 1:
return e[0], x[0]
else:
return e, x
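# Example usage (a sketch, not from the original source): find the lowest eigenpair of
# a symmetric, diagonally dominant matrix A. `aop` applies A to a single vector and
# `precond` is the usual diagonal (Davidson) preconditioner.
#
#     n = 100
#     A = numpy.diag(numpy.arange(1.0, n + 1.0)) + 1e-3 * numpy.ones((n, n))
#     aop = lambda x: A.dot(x)
#     precond = lambda r, e0, x0: r / (A.diagonal() - e0 + 1e-12)
#     x0 = numpy.zeros(n)
#     x0[0] = 1.0
#     e0, c0 = davidson(aop, x0, precond, nroots=1)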
def davidson1(
aop,
x0,
precond,
tol=1e-12,
max_cycle=50,
max_space=12,
lindep=1e-14,
max_memory=2000,
dot=numpy.dot,
callback=None,
nroots=1,
lessio=False,
follow_state=False,
):
toloose = numpy.sqrt(tol)
# print('tol %g toloose %g', tol, toloose)
if (not isinstance(x0, list)) and x0.ndim == 1:
x0 = [x0]
# max_cycle = min(max_cycle, x0[0].size)
max_space = max_space + nroots * 3
# max_space*2 for holding ax and xs, nroots*2 for holding axt and xt
_incore = max_memory * 1e6 / x0[0].nbytes > max_space * 2 + nroots * 3
lessio = lessio and not _incore
# print('max_cycle %d max_space %d max_memory %d incore %s',
# max_cycle, max_space, max_memory, _incore)
heff = None
fresh_start = True
e = 0
v = None
conv = [False] * nroots
emin = None
for icyc in range(max_cycle):
if fresh_start:
if _incore:
xs = []
ax = []
else:
xs = _Xlist()
ax = _Xlist()
space = 0
# Orthogonalize xt space because the basis of subspace xs must be orthogonal
# but the eigenvectors x0 might not be strictly orthogonal
xt = None
xt, x0 = _qr(x0, dot), None
max_dx_last = 1e9
if SORT_EIG_BY_SIMILARITY:
conv = numpy.array([False] * nroots)
elif len(xt) > 1:
xt = _qr(xt, dot)
xt = xt[:40] # 40 trial vectors at most
axt = aop(xt)
for k, xi in enumerate(xt):
xs.append(xt[k])
ax.append(axt[k])
rnow = len(xt)
head, space = space, space + rnow
if heff is None: # Lazy initilize heff to determine the dtype
heff = numpy.empty(
(max_space + nroots, max_space + nroots), dtype=ax[0].dtype
)
else:
heff = numpy.asarray(heff, dtype=ax[0].dtype)
elast = e
vlast = v
conv_last = conv
for i in range(space):
if head <= i < head + rnow:
for k in range(i - head + 1):
heff[head + k, i] = dot(xt[k].conj(), axt[i - head])
heff[i, head + k] = heff[head + k, i].conj()
else:
for k in range(rnow):
heff[head + k, i] = dot(xt[k].conj(), ax[i])
heff[i, head + k] = heff[head + k, i].conj()
w, v = scipy.linalg.eigh(heff[:space, :space])
if SORT_EIG_BY_SIMILARITY:
e, v = _sort_by_similarity(w, v, nroots, conv, vlast, emin)
if elast.size != e.size:
de = e
else:
de = e - elast
else:
e = w[:nroots]
v = v[:, :nroots]
x0 = _gen_x0(v, xs)
if lessio:
ax0 = aop(x0)
else:
ax0 = _gen_x0(v, ax)
if SORT_EIG_BY_SIMILARITY:
dx_norm = [0] * nroots
xt = [None] * nroots
for k, ek in enumerate(e):
if not conv[k]:
xt[k] = ax0[k] - ek * x0[k]
dx_norm[k] = numpy.sqrt(dot(xt[k].conj(), xt[k]).real)
if abs(de[k]) < tol and dx_norm[k] < toloose:
# print('root %d converged |r|= %4.3g e= %s max|de|= %4.3g',
# k, dx_norm[k], ek, de[k])
conv[k] = True
else:
elast, conv_last = _sort_elast(elast, conv_last, vlast, v, fresh_start)
de = e - elast
dx_norm = []
xt = []
conv = [False] * nroots
for k, ek in enumerate(e):
xt.append(ax0[k] - ek * x0[k])
dx_norm.append(numpy.sqrt(dot(xt[k].conj(), xt[k]).real))
conv[k] = abs(de[k]) < tol and dx_norm[k] < toloose
# if conv[k] and not conv_last[k]:
# print('root %d converged |r|= %4.3g e= %s max|de|= %4.3g',
# k, dx_norm[k], ek, de[k])
ax0 = None
max_dx_norm = max(dx_norm)
ide = numpy.argmax(abs(de))
if all(conv):
# print('converge %d %d |r|= %4.3g e= %s max|de|= %4.3g',
# icyc, space, max_dx_norm, e, de[ide])
break
elif (
follow_state
and max_dx_norm > 1
and max_dx_norm / max_dx_last > 3
and space > nroots * 1
):
# print('davidson %d %d |r|= %4.3g e= %s max|de|= %4.3g lindep= %4.3g',
# icyc, space, max_dx_norm, e, de[ide], norm_min)
# print('Large |r| detected, restore to previous x0')
x0 = _gen_x0(vlast, xs)
fresh_start = True
continue
if SORT_EIG_BY_SIMILARITY:
if any(conv) and e.dtype == numpy.double:
emin = min(e)
# remove subspace linear dependency
if any(((not conv[k]) and n ** 2 > lindep) for k, n in enumerate(dx_norm)):
for k, ek in enumerate(e):
if (not conv[k]) and dx_norm[k] ** 2 > lindep:
xt[k] = precond(xt[k], e[0], x0[k])
xt[k] *= 1 / numpy.sqrt(dot(xt[k].conj(), xt[k]).real)
else:
xt[k] = None
else:
for k, ek in enumerate(e):
if dx_norm[k] ** 2 > lindep:
xt[k] = precond(xt[k], e[0], x0[k])
xt[k] *= 1 / numpy.sqrt(dot(xt[k].conj(), xt[k]).real)
else:
xt[k] = None
xt = [xi for xi in xt if xi is not None]
for i in range(space):
xsi = xs[i]
for xi in xt:
xi -= xsi * dot(xsi.conj(), xi)
norm_min = 1
for i, xi in enumerate(xt):
norm = numpy.sqrt(dot(xi.conj(), xi).real)
if norm ** 2 > lindep:
xt[i] *= 1 / norm
norm_min = min(norm_min, norm)
else:
xt[i] = None
xt = [xi for xi in xt if xi is not None]
xi = None
# print('davidson %d %d |r|= %4.3g e= %s max|de|= %4.3g lindep= %4.3g',
# icyc, space, max_dx_norm, e, de[ide], norm_min)
if len(xt) == 0:
# print('Linear dependency in trial subspace. |r| for each state %s', dx_norm)
conv = [conv[k] or (norm < toloose) for k, norm in enumerate(dx_norm)]
break
max_dx_last = max_dx_norm
fresh_start = space + nroots > max_space
if callable(callback):
callback(locals())
return conv, e, x0
def _qr(xs, dot):
norm = numpy.sqrt(dot(xs[0].conj(), xs[0]).real)
qs = [xs[0] / norm]
for i in range(1, len(xs)):
xi = xs[i].copy()
for j in range(len(qs)):
xi -= qs[j] * dot(qs[j].conj(), xi)
norm = numpy.sqrt(dot(xi.conj(), xi).real)
if norm > 1e-7:
qs.append(xi / norm)
return qs
def _gen_x0(v, xs):
space, nroots = v.shape
x0 = []
for k in range(nroots):
x0.append(xs[space - 1] * v[space - 1, k])
for i in reversed(range(space - 1)):
xsi = xs[i]
for k in range(nroots):
x0[k] += v[i, k] * xsi
return x0
def _sort_by_similarity(w, v, nroots, conv, vlast, emin=None, heff=None):
if not any(conv) or vlast is None:
return w[:nroots], v[:, :nroots]
head, nroots = vlast.shape
conv = numpy.asarray(conv[:nroots])
ovlp = vlast[:, conv].T.conj().dot(v[:head])
ovlp =
|
numpy.einsum("ij,ij->j", ovlp, ovlp)
|
numpy.einsum
|
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.contrib.download import download_testdata
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import networkx as nx
from dgl import DGLGraph
from dgl.data import load_data
from dgl.nn.pytorch import GraphConv
from collections import namedtuple
import timeit
########################
# Load graph data
def load_dataset(dataset="cora"):
args = namedtuple("args", ["dataset"])
data = load_data(args(dataset))
# Remove self-loops to avoid duplicate passing of a node's feature to itself
g = data.graph
g.remove_edges_from(nx.selfloop_edges(g))
# Add self-loops
g.add_edges_from(zip(g.nodes, g.nodes))
return g, data
dataset = "cora"
g, data = load_dataset(dataset)
num_layers = 1
num_hidden = 16
infeat_dim = data.features.shape[1]
num_classes = data.num_labels
features = torch.FloatTensor(data.features)
dgl_g = DGLGraph(g)
########################
# Define GCN in DGL-PyTorch
class GCN(nn.Module):
def __init__(self,
g,
n_infeat,
n_hidden,
n_classes,
n_layers,
activation):
super(GCN, self).__init__()
self.g = g
self.layers = nn.ModuleList()
self.layers.append(GraphConv(n_infeat, n_hidden, activation=activation))
for i in range(n_layers - 1):
self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
self.layers.append(GraphConv(n_hidden, n_classes))
def forward(self, features):
h = features
for i, layer in enumerate(self.layers):
h = layer(self.g, h)
return h
torch_model = GCN(dgl_g,
infeat_dim,
num_hidden,
num_classes,
num_layers,
F.relu)
########################
# Load pretrained parameters
model_url = "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_%s.torch"%(dataset)
# OrderedDict(["layers.i.weight",tensor],["layers.i.bias",tensor])
model_path = download_testdata(model_url, "gcn_%s.pickle"%(dataset), module='gcn_model')
torch_model_state_dict = torch.load(model_path)
torch_model.load_state_dict(torch_model_state_dict)
########################
# Evaluate the DGL-Pytorch model
def test_dgl():
torch_model.eval()
with torch.no_grad():
logits_torch = torch_model(features)
return logits_torch
def evaluate(data, logits):
test_mask = data.test_mask # the test set which isn't included in the training phase
pred = logits.argmax(axis=1)
acc = ((pred == data.labels) * test_mask).sum() / test_mask.sum()
return acc
logits_torch = test_dgl()
print("Print the first five outputs from DGL-PyTorch execution\n", logits_torch[:5])
acc = evaluate(data, logits_torch.numpy())
print("Test accuracy of DGL results: {:.2%}".format(acc))
repeat_times = 5
dgl_running_time = timeit.timeit(setup='from __main__ import test_dgl',
                                 stmt='test_dgl()',
                                 number=repeat_times)
print("DGL running time: {:.2f} ms".format(dgl_running_time / repeat_times * 1000))
########################
# Define gcn in TVM Relay
def GraphConv(layer_name,
input_dim,
output_dim,
adj,
inputs,
norm=None,
bias=True,
activation=None):
if norm is not None:
inputs = relay.multiply(inputs, norm)
weight = relay.var(layer_name + ".weight", shape=(input_dim, output_dim))
weight_t = relay.transpose(weight)
dense = relay.nn.dense(weight_t, inputs)
output = relay.nn.sparse_dense(dense, adj)
output_t = relay.transpose(output)
if norm is not None:
output_t = relay.multiply(output_t, norm)
if bias is True:
_bias = relay.var(layer_name + ".bias", shape=(output_dim,))
output_t = relay.nn.bias_add(output_t, _bias, axis=-1)
if activation is not None:
output_t = activation(output_t)
return output_t
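# In matrix form (a clarifying note, not part of the original tutorial), the layer above
# computes roughly norm * (adj @ ((norm * inputs) @ weight)) + bias, i.e. the usual
# symmetrically normalised GCN propagation D^-1/2 A D^-1/2 X W; the transposes are only
# there because relay.nn.dense and relay.nn.sparse_dense contract over the last axis of
# both of their operands.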
########################
# Load parameters
params = {}
params['infeats'] = data.features.astype('float32') # Only support float32 as feature for now
# Generate adjacency matrix
adjacency = nx.to_scipy_sparse_matrix(g)
params['g_data'] = adjacency.data.astype('float32')
params['indices'] = adjacency.indices.astype('int32')
params['indptr'] = adjacency.indptr.astype('int32')
# Normalization w.r.t. node degrees
degs = [g.in_degree[i] for i in range(g.number_of_nodes())]
params['norm'] =
|
np.power(degs, -0.5)
|
numpy.power
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import scipy
import theano
import theano.tensor as tt
from ..ode import utils
from ..exceptions import ShapeError, DtypeError
_log = logging.getLogger('pymc3')
floatX = theano.config.floatX
class DifferentialEquation(theano.Op):
"""
Specify an ordinary differential equation
.. math::
\dfrac{dy}{dt} = f(y,t,p) \quad y(t_0) = y_0
Parameters
----------
func: callable
Function specifying the differential equation. Must take arguments y (n_states,), t (scalar), p (n_theta,)
times: array
Array of times at which to evaluate the solution of the differential equation.
n_states: int
Dimension of the differential equation. For scalar differential equations, n_states=1.
For vector valued differential equations, n_states = number of differential equations in the system.
n_theta: int
Number of parameters in the differential equation.
t0: float
Time corresponding to the initial condition
.. code-block:: python
def odefunc(y, t, p):
#Logistic differential equation
return p[0] * y[0] * (1 - y[0])
times = np.arange(0.5, 5, 0.5)
ode_model = DifferentialEquation(func=odefunc, times=times, n_states=1, n_theta=1, t0=0)
"""
_itypes = [
tt.TensorType(floatX, (False,)), # y0 as 1D floatX vector
tt.TensorType(floatX, (False,)) # theta as 1D floatX vector
]
_otypes = [
tt.TensorType(floatX, (False, False)), # model states as floatX of shape (T, S)
tt.TensorType(floatX, (False, False, False)), # sensitivities as floatX of shape (T, S, len(y0) + len(theta))
]
__props__ = ("func", "times", "n_states", "n_theta", "t0")
def __init__(self, func, times, *, n_states, n_theta, t0=0):
if not callable(func):
raise ValueError("Argument func must be callable.")
if n_states < 1:
raise ValueError("Argument n_states must be at least 1.")
if n_theta <= 0:
raise ValueError("Argument n_theta must be positive.")
# Public
self.func = func
self.t0 = t0
self.times = tuple(times)
self.n_times = len(times)
self.n_states = n_states
self.n_theta = n_theta
self.n_p = n_states + n_theta
# Private
self._augmented_times = np.insert(times, 0, t0).astype(floatX)
self._augmented_func = utils.augment_system(func, self.n_states, self.n_theta)
self._sens_ic = utils.make_sens_ic(self.n_states, self.n_theta, floatX)
# Cache symbolic sensitivities by the hash of inputs
self._apply_nodes = {}
self._output_sensitivities = {}
def _system(self, Y, t, p):
"""This is the function that will be passed to odeint. Solves both ODE and sensitivities.
Args:
Y: augmented state vector (n_states + n_states + n_theta)
t: current time
p: parameter vector (y0, theta)
"""
dydt, ddt_dydp = self._augmented_func(Y[:self.n_states], t, p, Y[self.n_states:])
derivatives = np.concatenate([dydt, ddt_dydp])
return derivatives
def _simulate(self, y0, theta):
# Initial condition comprised of state initial conditions and raveled sensitivity matrix
s0 =
|
np.concatenate([y0, self._sens_ic])
|
numpy.concatenate
|
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.metrics import f1_score,accuracy_score
import math
split_sequences=True
word2idx = {}
tag2idx = {}
pos2idx = {}
word_idx = 0
tag_idx = 0
pos_idx = 0
Xtrain = []
Ytrain = []
Ptrain=[]
currentX = []
currentY = []
currentP=[]
for line in open('train1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word, tag, pos = r
if word not in word2idx:
word2idx[word] = word_idx
word_idx += 1
currentX.append(word2idx[word])
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
currentY.append(tag2idx[tag])
if pos not in pos2idx:
pos2idx[pos] = pos_idx
pos_idx += 1
currentP.append(pos2idx[pos])
elif split_sequences:
Xtrain.append(currentX)
Ytrain.append(currentY)
Ptrain.append(currentP)
currentX = []
currentY = []
currentP=[]
if not split_sequences:
Xtrain = currentX
Ytrain = currentY
Ptrain=currentP
V = len(word2idx) + 1
M = max(max(p) for p in Ptrain) + 1
A = np.ones((M, M))
pi = np.ones(M)
for p in Ptrain:
pi[p[0]] += 1
for i in range(len(p)-1):
A[p[i], p[i+1]] += 1
A /= A.sum(axis=1, keepdims=True)
pi /= pi.sum()
# find the observation matrix
B = np.ones((M, V)) # add-one smoothing
for x, p in zip(Xtrain, Ptrain):
for xi, pii in zip(x, p):
B[pii, xi] += 1
B /= B.sum(axis=1, keepdims=True)
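# In formula form (a clarifying note), the add-one smoothed estimates built above are
#   pi[i]  proportional to  count(first tag = i) + 1
#   A[i,j] proportional to  count(tag i followed by tag j) + 1
#   B[i,w] proportional to  count(tag i emits word w) + 1
# with each row normalised to sum to 1.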
class HMM:
def __init__(self, M,A,B,C,C1,pi,SUFF,SUFF1,word2idx):
self.M = M # number of hidden states
self.A=A
self.B=B
self.C=C
self.C1=C1
self.pi=pi
self.SUFF=SUFF
self.SUFF1=SUFF1
self.word2idx=word2idx
def get_state_sequence(self, x):
# returns the most likely state sequence given observed sequence x
# using the Viterbi algorithm
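        # Recurrence implemented below (a clarifying note, not in the original code):
        #   delta[t, j] = max_i(delta[t-1, i] + log A[i, j]) + log B[j, x[t]]
        #   psi[t, j]   = argmax_i(delta[t-1, i] + log A[i, j])
        # with the suffix-based emission matrices C (2-character suffix) and C1
        # (1-character suffix) used as fallbacks when B has no entry for the word.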
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
try:
delta[0] = np.log(self.pi) + np.log(self.B[:,x[0]])
except IndexError:
try:
delta[0] = np.log(self.pi) + np.log(self.C[:,SUFF.index([*word2idx][x[0]][:2])])
except IndexError:
delta[0] = np.log(self.pi)
except ValueError:
try:
delta[0] = np.log(self.pi) + np.log(self.C1[:,SUFF1.index([*word2idx][x[0]][:1])])
except ValueError:
delta[0] = np.log(self.pi)
for t in range(1, T):
for j in range(self.M):
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.B[j, x[t]])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C[j, SUFF.index([*word2idx][x[t]][:2])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
# backtrack
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in range(T-2, -1, -1):
states[t] = psi[t+1, states[t+1]]
return states
SUFF=[]
SUFF1=[]
for w in [*word2idx]:
SUFF.append(w[:2])
SUFF1.append(w[:1])
suff_pos = defaultdict(list)
suff_pos1 = defaultdict(list)
idx=0
for suf in SUFF:
suff_pos[suf].append(idx)
idx+=1
idx=0
for suf in SUFF1:
suff_pos1[suf].append(idx)
idx+=1
C=np.ones((M,V))
C1=np.ones((M,V))
for l in suff_pos.values():
C[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
for l in suff_pos1.values():
C1[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
word_idx = len(word2idx)
w_known = len(word2idx) - 1  # largest training-word index; indices above this are unknown test words
word2idx_test={}
Xtest = []
currentX = []
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word = r[0]
if word not in word2idx:
word2idx_test[word] = word_idx
word2idx[word]= word_idx
word_idx += 1
else:
word2idx_test[word]=word2idx[word]
currentX.append(word2idx_test[word])
elif split_sequences:
Xtest.append(currentX)
currentX = []
hmm = HMM(M,A,B,C,C1,pi,SUFF,SUFF1,word2idx)
P1test = []
for x in Xtest:
p = hmm.get_state_sequence(x)
P1test.append(p)
Ptest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
tag = r[2]
list1.append(pos2idx[tag])
elif split_sequences:
Ptest.append(list1)
list1 = []
Ytest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
tag = r[1]
list1.append(tag2idx[tag])
elif split_sequences:
Ytest.append(list1)
list1 = []
def accuracy(T, Y):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y in zip(T, Y):
n_correct += np.sum(t == y)
n_total += len(y)
return float(n_correct) / n_total
def accuracy_unknown(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi>w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def accuracy_known(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi<=w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def total_f1_score(T, Y):
# inputs are lists of lists
T = np.concatenate(T)
Y = np.concatenate(Y)
return f1_score(T, Y, average=None).mean()
print("test accuracy:", accuracy(P1test, Ptest))
accuracy=accuracy(P1test, Ptest)
print("test f1:", total_f1_score(P1test, Ptest))
f1=total_f1_score(P1test, Ptest)
print("test accuracy for unknown words:",accuracy_unknown(P1test, Ptest,Xtest))
unknown_ac=accuracy_unknown(P1test, Ptest,Xtest)
print("test accuracy for known words:",accuracy_known(P1test, Ptest,Xtest))
known_ac=accuracy_known(P1test, Ptest,Xtest)
Y = np.concatenate(Ytest)
P = np.concatenate(Ptest)
Z = np.concatenate(P1test)
X= np.concatenate(Xtest)
print("accuracy score for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]]))
a11= accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]]))
a12= accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]]))
a13=accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]]))
a14=accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[4]+" :",accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]]))
a15=accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]]))
a16=accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]]))
a17=accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]]))
a18=accuracy_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]]))
a19= accuracy_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]]))
a110= accuracy_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]]))
a111= accuracy_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]]))
a112= accuracy_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]]))
a113= accuracy_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]]))
a114= accuracy_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]])
print("test f1 for tag "+list(tag2idx.keys())[0]+" :", f1_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]], average=None).mean())
a21= f1_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[1]+" :", f1_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]], average=None).mean())
a22= f1_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[2]+" :", f1_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]], average=None).mean())
a23=f1_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[3]+" :", f1_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]], average=None).mean())
a24=f1_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[4]+" :", f1_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]], average=None).mean())
a25=f1_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[5]+" :", f1_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]], average=None).mean())
a26=f1_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[6]+" :", f1_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]], average=None).mean())
a27=f1_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[7]+" :", f1_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]], average=None).mean())
a28=f1_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[8]+" :", f1_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]], average=None).mean())
a29= f1_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[9]+" :", f1_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]], average=None).mean())
a210= f1_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[10]+" :", f1_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]], average=None).mean())
a211= f1_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[11]+" :", f1_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]], average=None).mean())
a212= f1_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[12]+" :", f1_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]], average=None).mean())
a213= f1_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[13]+" :", f1_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]], average=None).mean())
a214= f1_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]], average=None).mean()
print("accuracy for unknown words for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]]))
a31= accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[0]+" :",len(set(np.where(X[np.where(Y==0)[0]]>w_known)[0])))
a41= len(set(np.where(X[np.where(Y==0)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]]))
a32= accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[1]+" :",len(set(np.where(X[np.where(Y==1)[0]]>w_known)[0])))
a42= len(set(np.where(X[np.where(Y==1)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]]))
a33= accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[2]+" :",len(set(np.where(X[np.where(Y==2)[0]]>w_known)[0])))
a43= len(set(np.where(X[np.where(Y==2)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]]))
a34= accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[3]+" :",len(set(np.where(X[np.where(Y==3)[0]]>w_known)[0])))
a44= len(set(np.where(X[np.where(Y==3)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[4]+" :", accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]]))
a35= accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[4]+" :",len(set(np.where(X[np.where(Y==4)[0]]>w_known)[0])))
a45= len(set(np.where(X[np.where(Y==4)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]]))
a36= accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[5]+" :",len(set(np.where(X[np.where(Y==5)[0]]>w_known)[0])))
a46= len(set(np.where(X[np.where(Y==5)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]]))
a37= accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[6]+" :",len(set(np.where(X[np.where(Y==6)[0]]>w_known)[0])))
a47= len(set(np.where(X[np.where(Y==6)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]]))
a38= accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[7]+" :",len(set(np.where(X[np.where(Y==7)[0]]>608)[0])))
a48= len(set(np.where(X[np.where(Y==7)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]]))
a39= accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[8]+" :",len(set(np.where(X[np.where(Y==8)[0]]>w_known)[0])))
a49= len(set(np.where(X[np.where(Y==8)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]]))
a310= accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[9]+" :",len(set(np.where(X[np.where(Y==9)[0]]>w_known)[0])))
a410= len(set(np.where(X[np.where(Y==9)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]]))
a311=accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[10]+" :",len(set(np.where(X[np.where(Y==10)[0]]>w_known)[0])))
a411= len(set(np.where(X[np.where(Y==10)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]]))
a312= accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[11]+" :",len(set(np.where(X[np.where(Y==11)[0]]>w_known)[0])))
a412= len(set(np.where(X[np.where(Y==11)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]]))
a313= accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[12]+" :",len(set(np.where(X[np.where(Y==12)[0]]>w_known)[0])))
a413= len(set(np.where(X[np.where(Y==12)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]]))
a314= accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[13]+" :",len(set(np.where(X[np.where(Y==13)[0]]>w_known)[0])))
a414= len(set(np.where(X[np.where(Y==13)[0]]>w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]]))
a51= accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[0]+" :",len(set(np.where(X[np.where(Y==0)[0]]<=w_known)[0])))
a61= len(set(np.where(X[np.where(Y==0)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]]))
a52= accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[1]+" :",len(set(np.where(X[np.where(Y==1)[0]]<=w_known)[0])))
a62= len(set(np.where(X[np.where(Y==1)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]]))
a53= accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[2]+" :",len(set(np.where(X[np.where(Y==2)[0]]<=w_known)[0])))
a63= len(set(np.where(X[np.where(Y==2)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]]))
a54= accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[3]+" :",len(set(np.where(X[np.where(Y==3)[0]]<=w_known)[0])))
a64=len(set(np.where(X[np.where(Y==3)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[4]+" :", accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]]))
a55= accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[4]+" :",len(set(np.where(X[np.where(Y==4)[0]]<=w_known)[0])))
a65=len(set(np.where(X[np.where(Y==4)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]]))
a56= accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[5]+" :",len(set(np.where(X[np.where(Y==5)[0]]<=w_known)[0])))
a66=len(set(np.where(X[np.where(Y==5)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0][X[
|
np.where(Y==6)
|
numpy.where
|
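The completion above fills in a call to numpy.where. A minimal sketch of the pattern used throughout this prompt (np.where(Y == k)[0] selects the positions of all tokens whose gold tag index is k); the small arrays here are illustrative, not from the original data:

import numpy as np

# Positions where the tag index equals 6; the prompt then indexes the
# gold labels Z and predictions P with these positions.
Y = np.array([0, 6, 6, 2, 6])
idx = np.where(Y == 6)[0]
print(idx)  # [1 2 4]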
import os
import math
import numbers
import random
import logging
import numpy as np
import tensorflow as tf
import cv2 as cv
import tfrecord_creator
import torch
from torch.utils.data import Dataset
from torch.nn import functional as F
from torchvision import transforms
from torch.utils.data import BatchSampler, SequentialSampler
from utils import safe_crop, parse_args, maybe_random_interp, InvariantSampler
from config import unknown_code, fg_path, bg_path, a_path, num_valid, valid_ratio
global args
args = parse_args()
interp_list = [cv.INTER_NEAREST, cv.INTER_LINEAR, cv.INTER_CUBIC, cv.INTER_LANCZOS4]
def return_raw_image(dataset):
dataset_raw = []
for image_features in dataset:
image_raw = image_features['image'].numpy()
image = tf.image.decode_jpeg(image_raw)
dataset_raw.append(image)
return dataset_raw
fg_dataset = tfrecord_creator.read("fg", "./data/tfrecord/")
bg_dataset = tfrecord_creator.read("bg", "./data/tfrecord/")
a_dataset = tfrecord_creator.read("a", "./data/tfrecord/")
fg_dataset = list(fg_dataset)
bg_dataset = list(bg_dataset)
a_dataset = list(a_dataset)
# fg_raw = return_raw_image(fg_dataset)
# bg_raw = return_raw_image(bg_dataset)
# a_raw = return_raw_image(a_dataset)
def get_raw(type_of_dataset, count):
if type_of_dataset == 'fg':
temp = fg_dataset[count]['image']
channels=3
elif type_of_dataset == 'bg':
temp = bg_dataset[count]['image']
channels=3
else :
temp = a_dataset[count]['image']
channels=0
temp = tf.image.decode_jpeg(temp, channels=channels)
temp = np.asarray(temp)
return temp
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
with open('Combined_Dataset/Training_set/training_fg_names.txt') as f:
fg_files = f.read().splitlines()
with open('Combined_Dataset/Training_set/training_bg_names.txt') as f:
bg_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_fg_names.txt') as f:
fg_test_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_bg_names.txt') as f:
bg_test_files = f.read().splitlines()
def maybe_random_interp(cv_interp):
if args.random_interp:
return np.random.choice(interp_list)
else:
return cv_interp
def gen_trimap(alpha):
k_size = random.choice(range(1, 5))
iterations = np.random.randint(1, 20)
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (k_size, k_size))
dilated = cv.dilate(alpha, kernel, iterations)
eroded = cv.erode(alpha, kernel, iterations)
trimap = np.zeros(alpha.shape)
trimap.fill(128)
trimap[eroded >= 255] = 255
trimap[dilated <= 0] = 0
return trimap
class RandomAffine(object):
"""
Random affine translation
"""
def __init__(self, degrees, translate=None, scale=None, shear=None, flip=None, resample=False, fillcolor=0):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
"degrees should be a list or tuple and it must be of length 2."
self.degrees = degrees
if translate is not None:
assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
"translate should be a list or tuple and it must be of length 2."
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
"scale should be a list or tuple and it must be of length 2."
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
if isinstance(shear, numbers.Number):
if shear < 0:
raise ValueError("If shear is a single number, it must be positive.")
self.shear = (-shear, shear)
else:
assert isinstance(shear, (tuple, list)) and len(shear) == 2, \
"shear should be a list or tuple and it must be of length 2."
self.shear = shear
else:
self.shear = shear
self.resample = resample
self.fillcolor = fillcolor
self.flip = flip
@staticmethod
def get_params(degrees, translate, scale_ranges, shears, flip, img_size):
"""Get parameters for affine transformation
Returns:
sequence: params to be passed to the affine transformation
"""
angle = random.uniform(degrees[0], degrees[1])
if translate is not None:
max_dx = translate[0] * img_size[0]
max_dy = translate[1] * img_size[1]
translations = (np.round(random.uniform(-max_dx, max_dx)),
np.round(random.uniform(-max_dy, max_dy)))
else:
translations = (0, 0)
if scale_ranges is not None:
scale = (random.uniform(scale_ranges[0], scale_ranges[1]),
random.uniform(scale_ranges[0], scale_ranges[1]))
else:
scale = (1.0, 1.0)
if shears is not None:
shear = random.uniform(shears[0], shears[1])
else:
shear = 0.0
if flip is not None:
flip = (np.random.rand(2) < flip).astype(np.int) * 2 - 1
return angle, translations, scale, shear, flip
def __call__(self, sample):
fg = sample['fg']
alpha = sample['alpha']
# fg, alpha = sample['fg'], sample['alpha']
rows, cols, ch = fg.shape
if np.maximum(rows, cols) < 1024:
params = self.get_params((0, 0), self.translate, self.scale, self.shear, self.flip, fg.size)
else:
params = self.get_params(self.degrees, self.translate, self.scale, self.shear, self.flip, fg.size)
center = (cols * 0.5 + 0.5, rows * 0.5 + 0.5)
M = self._get_inverse_affine_matrix(center, *params)
M = np.array(M).reshape((2, 3))
fg = cv.warpAffine(fg, M, (cols, rows),
flags=maybe_random_interp(cv.INTER_NEAREST) + cv.WARP_INVERSE_MAP)
alpha = cv.warpAffine(alpha, M, (cols, rows),
flags=maybe_random_interp(cv.INTER_NEAREST) + cv.WARP_INVERSE_MAP)
sample['fg'], sample['alpha'] = fg, alpha
return sample
@ staticmethod
def _get_inverse_affine_matrix(center, angle, translate, scale, shear, flip):
# Helper method to compute inverse matrix for affine transformation
# As it is explained in PIL.Image.rotate
# We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RSS is rotation with scale and shear matrix
# It is different from the original function in torchvision
# The order are changed to flip -> scale -> rotation -> shear
# x and y have different scale factors
# RSS(shear, a, scale, f) = [ cos(a + shear)*scale_x*f -sin(a + shear)*scale_y 0]
# [ sin(a)*scale_x*f cos(a)*scale_y 0]
# [ 0 0 1]
# Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1
angle = math.radians(angle)
shear = math.radians(shear)
scale_x = 1.0 / scale[0] * flip[0]
scale_y = 1.0 / scale[1] * flip[1]
# Inverted rotation matrix with scale and shear
d = math.cos(angle + shear) * math.cos(angle) + math.sin(angle + shear) * math.sin(angle)
matrix = [
math.cos(angle) * scale_x, math.sin(angle + shear) * scale_x, 0,
-math.sin(angle) * scale_y, math.cos(angle + shear) * scale_y, 0
]
matrix = [m / d for m in matrix]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (-center[1] - translate[1])
matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (-center[1] - translate[1])
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += center[0]
matrix[5] += center[1]
return matrix
class RandomJitter(object):
"""
Random change the hue of the image
"""
def __call__(self, sample):
fg, alpha = sample['fg'], sample['alpha']
# if alpha is all 0 skip
if np.all(alpha==0):
return sample
# convert to HSV space, convert to float32 image to keep precision during space conversion.
fg = cv.cvtColor(fg.astype(np.float32)/255.0, cv.COLOR_BGR2HSV)
# Hue noise
hue_jitter = np.random.randint(-40, 40)
fg[:, :, 0] = np.remainder(fg[:, :, 0].astype(np.float32) + hue_jitter, 360)
# Saturation noise
sat_bar = fg[:, :, 1][alpha > 0].mean()
sat_jitter = np.random.rand()*(1.1 - sat_bar)/5 - (1.1 - sat_bar) / 10
sat = fg[:, :, 1]
sat = np.abs(sat + sat_jitter)
sat[sat>1] = 2 - sat[sat>1]
fg[:, :, 1] = sat
# Value noise
val_bar = fg[:, :, 2][alpha > 0].mean()
val_jitter = np.random.rand()*(1.1 - val_bar)/5-(1.1 - val_bar) / 10
val = fg[:, :, 2]
val = np.abs(val + val_jitter)
val[val>1] = 2 - val[val>1]
fg[:, :, 2] = val
# convert back to BGR space
fg = cv.cvtColor(fg, cv.COLOR_HSV2BGR)
sample['fg'] = fg*255
return sample
class RandomHorizontalFlip(object):
"""
Random flip image and label horizontally
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, sample):
fg, alpha = sample['fg'], sample['alpha']
if np.random.uniform(0, 1) < self.prob:
fg = cv.flip(fg, 1)
alpha = cv.flip(alpha, 1)
sample['fg'], sample['alpha'] = fg, alpha
return sample
class RandomCrop(object):
"""
Crop randomly the image in a sample, retain the center 1/4 images, and resize to 'output_size'
:param output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self):
self.logger = logging.getLogger("Logger")
def __call__(self, sample):
crop_size = sample['size']
self.output_size = crop_size
self.margin = self.output_size[0] // 2
fg, alpha, trimap = sample['fg'], sample['alpha'], sample['trimap']
bg = sample['bg']
h, w = trimap.shape
bg = cv.resize(bg, (w, h), interpolation=maybe_random_interp(cv.INTER_CUBIC))
if w < self.output_size[0]+1 or h < self.output_size[1]+1:
ratio = 1.1*self.output_size[0]/h if h < w else 1.1*self.output_size[1]/w
# self.logger.warning("Size of {} is {}.".format(name, (h, w)))
while h < self.output_size[0]+1 or w < self.output_size[1]+1:
fg = cv.resize(fg, (int(w*ratio), int(h*ratio)), interpolation=maybe_random_interp(cv.INTER_NEAREST))
alpha = cv.resize(alpha, (int(w*ratio), int(h*ratio)),
interpolation=maybe_random_interp(cv.INTER_NEAREST))
trimap = cv.resize(trimap, (int(w*ratio), int(h*ratio)), interpolation=cv.INTER_NEAREST)
bg = cv.resize(bg, (int(w*ratio), int(h*ratio)), interpolation=maybe_random_interp(cv.INTER_CUBIC))
h, w = trimap.shape
small_trimap = cv.resize(trimap, (w//4, h//4), interpolation=cv.INTER_NEAREST)
unknown_list = list(zip(*np.where(small_trimap[self.margin//4:(h-self.margin)//4,
self.margin//4:(w-self.margin)//4] == 128)))
unknown_num = len(unknown_list)
if len(unknown_list) < 10:
# self.logger.warning("{} does not have enough unknown area for crop.".format(name))
left_top = (np.random.randint(0, h-self.output_size[0]+1), np.random.randint(0, w-self.output_size[1]+1))
else:
idx = np.random.randint(unknown_num)
left_top = (unknown_list[idx][0]*4, unknown_list[idx][1]*4)
fg_crop = fg[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1],:]
alpha_crop = alpha[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1]]
bg_crop = bg[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1],:]
trimap_crop = trimap[left_top[0]:left_top[0]+self.output_size[0], left_top[1]:left_top[1]+self.output_size[1]]
if len(np.where(trimap==128)[0]) == 0:
self.logger.error("Does not have enough unknown area for crop. Resized to target size."
"left_top: {}".format(left_top))
fg_crop = cv.resize(fg, self.output_size[::-1], interpolation=maybe_random_interp(cv.INTER_NEAREST))
alpha_crop = cv.resize(alpha, self.output_size[::-1], interpolation=maybe_random_interp(cv.INTER_NEAREST))
trimap_crop = cv.resize(trimap, self.output_size[::-1], interpolation=cv.INTER_NEAREST)
bg_crop = cv.resize(bg, self.output_size[::-1], interpolation=maybe_random_interp(cv.INTER_CUBIC))
# cv.imwrite('../tmp/tmp.jpg', fg.astype(np.uint8))
# cv.imwrite('../tmp/tmp.png', (alpha*255).astype(np.uint8))
# cv.imwrite('../tmp/tmp2.png', trimap.astype(np.uint8))
# raise ValueError("{} does not have enough unknown area for crop.".format(name))
sample['fg'], sample['alpha'], sample['trimap'] = fg_crop.copy(), alpha_crop.copy(), trimap_crop.copy()
sample['bg'] = bg_crop.copy()
return sample
class Rescale(object):
"""
Rescale the image in a sample to a given size.
:param output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
image, alpha, trimap = sample['image'], sample['alpha'], sample['trimap']
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
image = cv.resize(image, (new_w, new_h), interpolation=cv.INTER_LINEAR)
trimap = cv.resize(trimap, (new_w, new_h), interpolation=cv.INTER_NEAREST)
alpha = cv.resize(alpha, (new_w, new_h), interpolation=cv.INTER_LINEAR)
sample['image'], sample['alpha'], sample['trimap'] = image, alpha, trimap
return sample
class OriginScale(object):
def __call__(self, sample):
h, w = sample["alpha_shape"]
# sample['origin_trimap'] = sample['trimap']
# # if h % 32 == 0 and w % 32 == 0:
# # return sample
# # target_h = h - h % 32
# # target_w = w - w % 32
# target_h = 32 * ((h - 1) // 32 + 1)
# target_w = 32 * ((w - 1) // 32 + 1)
# sample['image'] = cv.resize(sample['image'], (target_w, target_h), interpolation=cv.INTER_CUBIC)
# sample['trimap'] = cv.resize(sample['trimap'], (target_w, target_h), interpolation=cv.INTER_NEAREST)
if h % 32 == 0 and w % 32 == 0:
return sample
target_h = 32 * ((h - 1) // 32 + 1)
target_w = 32 * ((w - 1) // 32 + 1)
pad_h = target_h - h
pad_w = target_w - w
padded_image = np.pad(sample['image'], ((0,pad_h), (0, pad_w), (0,0)), mode="reflect")
padded_trimap = np.pad(sample['trimap'], ((0,pad_h), (0, pad_w)), mode="reflect")
sample['image'] = padded_image
sample['trimap'] = padded_trimap
return sample
class GenTrimap(object):
def __init__(self):
self.erosion_kernels = [None] + [cv.getStructuringElement(cv.MORPH_ELLIPSE, (size, size)) for size in range(1,30)]
def __call__(self, sample):
alpha = sample['alpha']
# Adobe 1K
fg_width = np.random.randint(1, 30)
bg_width = np.random.randint(1, 30)
fg_mask = (alpha + 1e-5).astype(np.int).astype(np.uint8)
bg_mask = (1 - alpha + 1e-5).astype(np.int).astype(np.uint8)
fg_mask = cv.erode(fg_mask, self.erosion_kernels[fg_width])
bg_mask = cv.erode(bg_mask, self.erosion_kernels[bg_width])
trimap = np.ones_like(alpha) * 128
trimap[fg_mask == 1] = 255
trimap[bg_mask == 1] = 0
sample['trimap'] = trimap
return sample
class Composite(object):
def __call__(self, sample):
fg, bg, alpha = sample['fg'], sample['bg'], sample['alpha']
alpha[alpha < 0 ] = 0
alpha[alpha > 1] = 1
fg[fg < 0 ] = 0
fg[fg > 255] = 255
bg[bg < 0 ] = 0
bg[bg > 255] = 255
image = fg * alpha[:, :, None] + bg * (1 - alpha[:, :, None])
sample['image'] = image
return sample
class ToTensor(object):
"""
Convert ndarrays in sample to Tensors with normalization.
"""
def __init__(self, split='train'):
self.split = split
def __call__(self, sample):
image = sample['image']
image = transforms.ToTensor()(image)
image = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(image)
sample['image'] = image
return sample
class DIMDataset(Dataset):
def __init__(self, split="train", test_scale="resize"):
self.split = split
if args.data_augumentation:
train_trans = [
RandomAffine(degrees=30, scale=[0.8, 1.25], shear=10, flip=0.5),
GenTrimap(),
RandomCrop(),
RandomJitter(),
Composite(),
ToTensor(split="train"),]
else:
train_trans = [ GenTrimap(),
RandomCrop(),
Composite(),
ToTensor(split="train") ]
if test_scale.lower() == "origin":
test_trans = [ OriginScale(), ToTensor() ]
elif test_scale.lower() == "resize":
test_trans = [ Rescale((320,320)), ToTensor() ]
elif test_scale.lower() == "crop":
test_trans = [ RandomCrop(), ToTensor() ]
else:
raise NotImplementedError("test_scale {} not implemented".format(test_scale))
self.transform = {
'train':
transforms.Compose(train_trans),
'valid':
transforms.Compose( [
GenTrimap(),
RandomCrop(),
Composite(),
ToTensor(split="train"),]),
'test':
transforms.Compose(test_trans)
}[split]
self.erosion_kernels = [None] + [cv.getStructuringElement(cv.MORPH_ELLIPSE, (size, size)) for size in range(1,20)]
self.split = split
names_train, names_valid = split_name()
if self.split == "train":
self.fgs = names_train
else:
self.fgs = names_valid
self.fg_num_unique = len(self.fgs)
self.fgs = np.repeat(self.fgs, args.batch_size * 8)
print(len(self.fgs))
self.fg_num = len(self.fgs)
self.current_index = -1
self.current_fg = None
self.current_alpha = None
self.is_resize = False
def __getitem__(self, i):
fcount = self.fgs[i]
if i % args.batch_size == 0:
self.current_index = fcount
alpha = get_raw("a", fcount)
alpha = np.reshape(alpha, (alpha.shape[0], alpha.shape[1])).astype(np.float32) / 255.
fg = get_raw("fg", fcount)
if args.data_augumentation:
fg, alpha = self._composite_fg(fg, alpha, i)
self.current_fg = fg
self.current_alpha = alpha
different_sizes = [(320, 320), (480, 480), (640, 640), (512, 512)]
crop_size = random.choice(different_sizes)
self.crop_size = crop_size
# self.is_resize = True if np.random.rand() < 0.25 else False
else:
fg = self.current_fg
alpha = self.current_alpha
crop_size = self.crop_size
bcount =
|
np.random.randint(num_bgs)
|
numpy.random.randint
|
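The completion for this row is a call to numpy.random.randint, which draws one background index per composite. A minimal sketch under the assumption that num_bgs is the number of background images referenced by the truncated prompt:

import numpy as np

num_bgs = 100                        # assumed count, not from the original data
bcount = np.random.randint(num_bgs)  # uniform integer in [0, num_bgs)
print(0 <= bcount < num_bgs)         # True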
from typing import List
import cv2
import numpy as np
from .textblock import TextBlock
from .utils.imgproc_utils import expand_textwindow, union_area
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
LANG_ENG = 0
LANG_JPN = 1
REFINEMASK_INPAINT = 0
REFINEMASK_ANNOTATION = 1
def get_topk_color(color_list, bins, k=3, color_var=10, bin_tol=0.001):
idx = np.argsort(bins * -1)
color_list, bins = color_list[idx], bins[idx]
top_colors = [color_list[0]]
bin_tol = np.sum(bins) * bin_tol
if len(color_list) > 1:
for color, bin in zip(color_list[1:], bins[1:]):
if np.abs(np.array(top_colors) - color).min() > color_var:
top_colors.append(color)
if len(top_colors) >= k or bin < bin_tol:
break
return top_colors
def minxor_thresh(threshed, mask, dilate=False):
neg_threshed = 255 - threshed
e_size = 1
if dilate:
element = cv2.getStructuringElement(cv2.MORPH_RECT, (2 * e_size + 1, 2 * e_size + 1), (e_size, e_size))
neg_threshed = cv2.dilate(neg_threshed, element, iterations=1)
threshed = cv2.dilate(threshed, element, iterations=1)
neg_xor_sum = cv2.bitwise_xor(neg_threshed, mask).sum()
xor_sum = cv2.bitwise_xor(threshed, mask).sum()
if neg_xor_sum < xor_sum:
return neg_threshed, neg_xor_sum
else:
return threshed, xor_sum
def get_otsuthresh_masklist(img, pred_mask, per_channel=False) -> List[np.ndarray]:
channels = [img[..., 0], img[..., 1], img[..., 2]]
mask_list = []
for c in channels:
_, threshed = cv2.threshold(c, 1, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
threshed, xor_sum = minxor_thresh(threshed, pred_mask, dilate=False)
mask_list.append([threshed, xor_sum])
mask_list.sort(key=lambda x: x[1])
if per_channel:
return mask_list
else:
return [mask_list[0]]
def get_topk_masklist(im_grey, pred_mask):
if len(im_grey.shape) == 3 and im_grey.shape[-1] == 3:
im_grey = cv2.cvtColor(im_grey, cv2.COLOR_BGR2GRAY)
msk = np.ascontiguousarray(pred_mask)
candidate_grey_px = im_grey[np.where(cv2.erode(msk, np.ones((3, 3), np.uint8), iterations=1) > 127)]
bin, his = np.histogram(candidate_grey_px, bins=255)
topk_color = get_topk_color(his, bin, color_var=10, k=3)
color_range = 30
mask_list = list()
for ii, color in enumerate(topk_color):
c_top = min(color + color_range, 255)
c_bottom = c_top - 2 * color_range
threshed = cv2.inRange(im_grey, c_bottom, c_top)
threshed, xor_sum = minxor_thresh(threshed, msk)
mask_list.append([threshed, xor_sum])
return mask_list
def merge_mask_list(mask_list, pred_mask, blk: TextBlock = None, pred_thresh=30, text_window=None,
filter_with_lines=False, refine_mode=REFINEMASK_INPAINT):
mask_list.sort(key=lambda x: x[1])
linemask = None
if blk is not None and filter_with_lines:
linemask = np.zeros_like(pred_mask)
lines = blk.lines_array(dtype=np.int64)
for line in lines:
line[..., 0] -= text_window[0]
line[..., 1] -= text_window[1]
cv2.fillPoly(linemask, [line], 255)
linemask = cv2.dilate(linemask, np.ones((3, 3), np.uint8), iterations=3)
if pred_thresh > 0:
e_size = 1
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * e_size + 1, 2 * e_size + 1), (e_size, e_size))
pred_mask = cv2.erode(pred_mask, element, iterations=1)
_, pred_mask = cv2.threshold(pred_mask, 60, 255, cv2.THRESH_BINARY)
connectivity = 8
mask_merged = np.zeros_like(pred_mask)
for ii, (candidate_mask, xor_sum) in enumerate(mask_list):
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(candidate_mask, connectivity,
cv2.CV_16U)
for label_index, stat, centroid in zip(range(num_labels), stats, centroids):
if label_index != 0: # skip background label
x, y, w, h, area = stat
if w * h < 3:
continue
x1, y1, x2, y2 = x, y, x + w, y + h
label_local = labels[y1: y2, x1: x2]
label_cordinates = np.where(label_local == label_index)
tmp_merged =
|
np.zeros_like(label_local, np.uint8)
|
numpy.zeros_like
|
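Here the completion is numpy.zeros_like, used to start an empty mask with the same shape as the connected-component label patch. A minimal sketch with a toy label array (not from the original data):

import numpy as np

label_local = np.array([[0, 1], [1, 2]], dtype=np.int32)
tmp_merged = np.zeros_like(label_local, np.uint8)  # same shape, uint8, all zeros
print(tmp_merged.shape, tmp_merged.dtype)          # (2, 2) uint8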
""" Code to optimize the BLOSUM-like scoring matrix """
# Imports
import numpy as np
import pandas as pd
# Constants
# The list of amino acids
ALPHABET = [
'A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L',
'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V', 'B', 'Z',
'X', '*']
WEIGHTS = 'top_k' # Weigh examples with a top_k, triangle, or flat dist
TOP_K = 10 # Number of elements to take for top_k
# Functions
def score_matrix_objective(positive_scores, negative_scores):
""" Objective function for the optimizer
:param positive_scores:
The scores for positive examples
:param negative_scores:
The scores for negative examples
:returns:
The sum of true-positive rates for a set of false-positive levels in
the range [0.0, 4.0]
"""
positive_scores = np.array(positive_scores)
negative_scores = np.array(negative_scores)
assert positive_scores.ndim == 1
assert negative_scores.ndim == 1
cutoffs = np.percentile(negative_scores, [100, 90, 80, 70])
positive_len = positive_scores.shape[0]
score = 0.0
for cutoff in cutoffs:
score += np.sum(positive_scores >= cutoff) / positive_len
return score
def calc_distribution(alignments, weights=None):
""" Calculate an empirical distribution of aligned bases
:param alignments:
A list of (seq1, seq2) pairs where seq1 and seq2 were aligned with
the ``smith_waterman()`` function
:returns:
The log2 odds of those sequence counts occurring in the set of
alignments
"""
if weights is None:
weights = np.ones((len(alignments), ))
assert len(alignments) == len(weights)
# Counter with a pseudo-count for every pairing
# This prevents taking log(a number < 1)
counts = np.ones((len(ALPHABET), len(ALPHABET)))
counts = pd.DataFrame(counts, columns=ALPHABET, index=ALPHABET)
# Accumulate counts for every paired base in the empirical alignments
for (seq1, seq2), weight in zip(alignments, weights):
# Ignore anything with a weight that doesn't matter
if weight < 1e-2:
continue
assert len(seq1) == len(seq2)
seq1 = seq1.replace('-', '*') # BLOSUM uses * not -
seq2 = seq2.replace('-', '*')
for a1, a2 in zip(seq1, seq2):
# TODO: This double counts the diagonals
# ...not sure if that's a good thing or a bad thing
# TODO: Should we normalize by sequence length?
counts.loc[a1, a2] += weight
counts.loc[a2, a1] += weight
return np.log2(counts)
def triangle_weight(pos_scores, neg_scores):
""" Trangle weighting of positive and negative scores
:param pos_scores:
The array of scores for positive examples
:param neg_scores:
The array of scores for negative examples
:returns:
A tuple of pos_weights, neg_weights
With 1 indicating weigh this alignment most, 0 ignore this alignment
"""
# We actually don't care about anything other than the negative
# scores that are larger than the smallest positive score and
# vice versa.
# Hence weight the updates to emphasize alignments close to the
# boundary.
min_pos_score = np.min(pos_scores)
max_pos_score = np.max(pos_scores)
max_neg_score = np.max(neg_scores)
min_neg_score = np.min(neg_scores)
# Work out which scores are inliers and which are outliers
score_std = np.std(np.concatenate([pos_scores, neg_scores]))
# Make a triangle function that gives high weight to positive scores near
# or below the decision boundary
pos_mask = pos_scores >= max_neg_score
pos_weights =
|
np.ones_like(pos_scores, dtype=np.float)
|
numpy.ones_like
|
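The completion here initialises every positive-example weight to 1 with numpy.ones_like before the triangle weighting rescales scores near the decision boundary. A minimal sketch (note that np.float in the stored completion is a deprecated alias; plain float behaves the same):

import numpy as np

pos_scores = np.array([3.0, 5.5, 7.2])
pos_weights = np.ones_like(pos_scores, dtype=float)
print(pos_weights)  # [1. 1. 1.]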
import cv2
import numpy as np
import json
import os
import matplotlib.pyplot as plt
import open3d as o3d
import matplotlib.image as mpimg
##########################################################################################
def ResizeToMaxSize(Im,MaxSize):
h=Im.shape[0]
w=Im.shape[1]
r=np.min([MaxSize/h,MaxSize/w])
if r<1:
Im=cv2.resize(Im,(int(r*w),int(r*h)))
return Im
##########################################################################################
def ResizeToScreen(Im):
h=Im.shape[0]
w=Im.shape[1]
r=np.min([1000/h,1800/w])
Im=cv2.resize(Im,(int(r*w),int(r*h)))
return Im
########################################################################################
def showcv2(Im,txt=""):
cv2.destroyAllWindows()
# print("IM text")
# print(txt)
cv2.imshow(txt,ResizeToScreen(Im.astype(np.uint8)))
# cv2.moveWindow(txt, 1, 1);
ch=cv2.waitKey()
cv2.destroyAllWindows()
# cv2.destroyAllWindows()
return ch
########################################################################################
def show(Im,txt=""):
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
if np.ndim(Im)==3:
plt.imshow(Im[:, :, ::-1].astype(np.uint8))
else:
plt.imshow(Im.astype(np.uint8))
plt.title(txt)
plt.show()
########################################################################################
def trshow(Im,txt=""):
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.imshow((Im.data.cpu().numpy()).astype(np.uint8))
plt.title(txt)
plt.show()
#############################################################################3
def GreyScaleToRGB(Img):
I=np.expand_dims(Img,2)
rgb=
|
np.concatenate([I,I,I],axis=2)
|
numpy.concatenate
|
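The completion stacks a single-channel image three times along the last axis with numpy.concatenate, which is how GreyScaleToRGB produces an (H, W, 3) pseudo-RGB image. A minimal sketch with a blank toy image:

import numpy as np

Img = np.zeros((4, 5), dtype=np.uint8)
I = np.expand_dims(Img, 2)               # (4, 5, 1)
rgb = np.concatenate([I, I, I], axis=2)  # (4, 5, 3)
print(rgb.shape)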
import imgaug
import torch
import numpy as np
from mmcv.parallel import DataContainer as DC
from musket_core.utils import save, load
import os
from musket_core.datasets import PredictionBlend, PredictionItem, DataSet, CompressibleWriteableDS
from musket_core.losses import SMOOTH
from mmdet.apis import show_result
from mmdet.datasets.custom import CustomDataset
from mmdet.datasets.utils import to_tensor, random_scale
from mmdet.core.post_processing.merge_augs import merge_aug_bboxes, merge_aug_masks
from typing import Callable
import networkx as nx
import imageio
from mmdetection_pipeline.callbacks import imdraw_det_bboxes
# import time
class MMdetWritableDS(CompressibleWriteableDS):
def __init__(self,orig,name,dsPath, withMasks, threshold=0.5, count = 0,asUints=True,scale=255):
super().__init__(orig,name,dsPath, count,False,scale)
self.withMasks = withMasks
self.threshold = threshold
# def __getitem__(self, item):
# res = super().__getitem__(item)
# if isinstance(item, slice):
# for pi in res:
# self.processPredictionItem(pi)
# else:
# self.processPredictionItem(res)
# return res
#
# def processPredictionItem(self, pi):
# pred = pi.prediction
# tresholdedPrediction = applyTresholdToPrediction(pred,self.withMasks,self.threshold)
# pi.prediction = tresholdedPrediction
def saveItem(self, path:str, item):
# t0 = time.time()
wm = self.withMasks
dire = os.path.dirname(path)
if not os.path.exists(dire):
os.mkdir(dire)
labels = item[0]
probabilities = item[1]
bboxes = item[2]
# t1 = time.time()
if wm:
masks = item[3]
if self.asUints:
if self.scale <= 255:
masks = (masks * self.scale).astype(np.uint8)
else:
masks = (masks * self.scale).astype(np.uint16)
# t2 = time.time()
np.savez_compressed(file=path, labels=labels, probabilities=probabilities, bboxes=bboxes, masks=masks)
# t3 = time.time()
# print(f"MMdetWritableDS.saveItem prepare: {t1 - t0}")
# print(f"MMdetWritableDS.saveItem convert masks: {t2 - t1}")
# print(f"MMdetWritableDS.saveItem save zip: {t3 - t2}")
else:
np.savez_compressed(file=path, labels=labels, probabilities=probabilities, bboxes=bboxes)
def loadItem(self, path:str):
# t0 = time.time()
npzFile = np.load(path,allow_pickle=True)
# t1 = time.time()
labels = npzFile['labels']
# t2 = time.time()
probabilities = npzFile['probabilities']
# t3 = time.time()
bboxes = npzFile['bboxes']
# t4 = time.time()
if self.withMasks:
masks = npzFile['masks']
if self.asUints:
masks=masks.astype(np.float32)/self.scale
# t5 = time.time()
# print(f"MMdetWritableDS.loadItem load file: {t1 - t0}")
# print(f"MMdetWritableDS.loadItem get labels: {t2 - t1}")
# print(f"MMdetWritableDS.loadItem get probabilities: {t3 - t2}")
# print(f"MMdetWritableDS.loadItem get bboxes: {t4 - t3}")
# print(f"MMdetWritableDS.loadItem get masks: {t5 - t4}")
return (labels, probabilities, bboxes, masks)
else:
return (labels, probabilities, bboxes)
class MusketPredictionItemWrapper(object):
def __init__(self, ind: int, ds: DataSet):
self.ind = ind
self.ds = ds
self.callbacks: [Callable[[PredictionItem], None]] = []
def getPredictionItem(self) -> PredictionItem:
predictionItem = self.ds[self.ind]
for x in self.callbacks:
x(predictionItem)
return predictionItem
def addCallback(self, cb: Callable[[PredictionItem], None]):
self.callbacks.append(cb)
class MusketInfo(object):
def __init__(self, predictionItemWrapper: MusketPredictionItemWrapper):
self.initialized = False
self.predictionItemWrapper = predictionItemWrapper
self.predictionItemWrapper.addCallback(self.initializer)
def checkInit(self):
if not self.initialized:
self.getPredictionItem()
def getPredictionItem(self) -> PredictionItem:
result = self.predictionItemWrapper.getPredictionItem()
return result
def initializer(self, pi: PredictionItem):
self._initializer(pi)
self.initialized = True
def _initializer(self, pi: PredictionItem):
raise ValueError("Not implemented")
def dispose(self):
self._free()
self.initialized = False
def _free(self):
raise ValueError("Not implemented")
class MusketImageInfo(MusketInfo):
def __init__(self, piw: MusketPredictionItemWrapper):
super().__init__(piw)
self.ann = MusketAnnotationInfo(piw)
self.img = None
self.id = None
def image(self) -> np.ndarray:
pi = self.getPredictionItem()
self.img = pi.x
self.id = pi.id
return self.img
def __getitem__(self, key):
if key == "height":
self.checkInit()
return self.height
elif key == "width":
self.checkInit()
return self.width
elif key == "ann":
return self.ann
elif key == "file_name" or key == "id":
return self.id
elif key == 'scale_factor':
return 1.0
elif key == 'flip':
return False
elif key == 'img_shape':
return (self.height, self.width)
return None
def _initializer(self, pi: PredictionItem):
img = pi.x
self.width = img.shape[1]
self.height = img.shape[0]
def _free(self):
self.img = None
self.ann._free()
class MusketAnnotationInfo(MusketInfo):
def _initializer(self, pi: PredictionItem):
y = pi.y
if y is not None:
self.labels = y[0]
self.bboxes = y[1]
self.masks = y[2] if len(y) > 2 else None
self.bboxes_ignore = np.zeros(shape=(0, 4), dtype=np.float32)
self.labels_ignore = np.zeros((0), dtype=np.int64)
def __getitem__(self, key):
if key == "bboxes":
self.checkInit()
return self.bboxes
elif key == "labels":
self.checkInit()
return self.labels
elif key == "bboxes_ignore":
self.checkInit()
return self.bboxes_ignore
elif key == 'labels_ignore':
self.checkInit()
return self.labels_ignore
elif key == "masks":
self.checkInit()
return self.masks
return None
def _free(self):
self.masks = None
class DataSetAdapter(CustomDataset):
def __init__(self, ds: DataSet, aug=None, transforms=None, **kwargs):
self.ds = ds
self.aug = aug
self.transforms = transforms
args = kwargs.copy()
if 'type' in args:
args.pop('type')
self.type = 'VOCDataset'
self.img_infos = []
super().__init__(**args)
self.with_crowd = True
def __len__(self):
return len(self.ds)
def augmentor(self, isTrain) -> imgaug.augmenters.Augmenter:
allAug = []
if isTrain:
allAug = allAug + self.aug
allAug = allAug + self.transforms
aug = imgaug.augmenters.Sequential(allAug)
return aug
def _set_group_flag(self):
self.flag = np.zeros(len(self), dtype=np.uint8)
def load_annotations(self, ann_file):
img_infos = []
for idx in range(len(self.ds)):
piw = MusketPredictionItemWrapper(idx, self.ds)
img_info = MusketImageInfo(piw)
img_infos.append(img_info)
return img_infos
def _filter_imgs(self, min_size=32):
print("filter_images")
return list(range(len(self)))
def prepare_train_img(self, idx):
try:
img_info = self.img_infos[idx]
# load image
img = img_info.image() # mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
# load proposals if necessary
if self.proposals is not None:
proposals = self.proposals[idx][:self.num_max_proposals]
# TODO: Handle empty proposals properly. Currently images with
# no proposals are just ignored, but they can be used for
# training in concept.
if len(proposals) == 0:
return None
if not (proposals.shape[1] == 4 or proposals.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposals.shape))
if proposals.shape[1] == 5:
scores = proposals[:, 4, None]
proposals = proposals[:, :4]
else:
scores = None
ann = self.get_ann_info(idx)
gt_bboxes = ann['bboxes']
gt_labels = ann['labels']
gt_masks = None
gt_bboxes_ignore = None
if self.with_mask:
gt_masks = ann['masks']
if self.with_crowd:
gt_bboxes_ignore = ann['bboxes_ignore']
# dumpData(f"d:/ttt/{img_info.id}_tmp_bbox.jpg", f"d:/ttt/{img_info.id}_tmp_mask.jpg", img, gt_labels-1,
# gt_bboxes, gt_masks, self.CLASSES)
img, gt_bboxes, gt_masks, gt_bboxes_ignore = self.applyAugmentations(img, gt_bboxes, gt_masks,
gt_bboxes_ignore, True)
# dumpData(f"d:/ttt/{img_info.id}_tmp_bbox_aug.jpg", f"d:/ttt/{img_info.id}_tmp_mask_aug.jpg", img, gt_labels-1,
# gt_bboxes, gt_masks, self.CLASSES)
# skip the image if there is no valid gt bbox
if len(gt_bboxes) == 0:
return None
# extra augmentation
if self.extra_aug is not None:
# img = self.extra_aug(img)
img, gt_bboxes, gt_labels = self.extra_aug(img, gt_bboxes,
gt_labels)
# apply transforms
flip = True if np.random.rand() < self.flip_ratio else False
# randomly sample a scale
img_scale = random_scale(self.img_scales, self.multiscale_mode)
img, img_shape, pad_shape, scale_factor = self.img_transform(
img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
img = img.copy()
if self.with_seg:
# gt_seg = mmcv.imread(
# osp.join(self.seg_prefix, img_info['file_name'].replace(
# 'jpg', 'png')),
# flag='unchanged')
# gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
# gt_seg = mmcv.imrescale(
# gt_seg, self.seg_scale_factor, interpolation='nearest')
# gt_seg = gt_seg[None, ...]
pass
if self.proposals is not None:
proposals = self.bbox_transform(proposals, img_shape, scale_factor,
flip)
proposals = np.hstack(
[proposals, scores]) if scores is not None else proposals
gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
flip)
if self.with_crowd:
gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
scale_factor, flip)
if self.with_mask:
gt_masks = self.mask_transform(gt_masks, pad_shape,
scale_factor, flip)
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
id=img_info['id'],
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
# imgt = img.transpose(1, 2, 0)
# imgt -= np.min(imgt)
# imgt *= (255 / np.max(imgt))
# imgt = imgt.astype(np.uint8)
# dumpData(f"d:/ttt/{img_info.id}_tmp_bbox_aug1.jpg", f"d:/ttt/{img_info.id}_tmp_mask_aug1.jpg", imgt,
# gt_labels - 1,
# gt_bboxes, gt_masks, self.CLASSES)
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True),
gt_bboxes=DC(to_tensor(gt_bboxes)))
if self.proposals is not None:
data['proposals'] = DC(to_tensor(proposals))
if self.with_label:
data['gt_labels'] = DC(to_tensor(gt_labels))
if self.with_crowd:
data['gt_bboxes_ignore'] = DC(to_tensor(gt_bboxes_ignore))
if self.with_mask:
data['gt_masks'] = DC(gt_masks, cpu_only=True)
# if self.with_seg:
# data['gt_semantic_seg'] = DC(to_tensor(gt_seg), stack=True)
return data
finally:
img_info.dispose()
def applyAugmentations(self, img, gt_bboxes, gt_masks, gt_bboxes_ignore, isTrain):
bboxesDType = gt_bboxes.dtype
masksDType = gt_masks.dtype
bbox_split = len(gt_bboxes)
all_bboxes = np.concatenate((gt_bboxes, gt_bboxes_ignore), axis=0)
imgaugBBoxes = [imgaug.BoundingBox(x[0], x[1], x[2], x[3]) for x in all_bboxes]
imgaugBBoxesOnImage = imgaug.BoundingBoxesOnImage(imgaugBBoxes, img.shape)
imgaugSegmentationMapsOnImage = imgaug.SegmentationMapsOnImage(gt_masks.transpose(1, 2, 0),
tuple(gt_masks.shape[1:]))
batch = imgaug.Batch(images=[img], segmentation_maps=imgaugSegmentationMapsOnImage,
bounding_boxes=imgaugBBoxesOnImage)
aug = self.augmentor(isTrain)
augmentedBatch = aug.augment_batch(batch)
img_aug = augmentedBatch.images_aug[0]
all_bboxes_aug = [np.array([bbox.x1, bbox.y1, bbox.x2, bbox.y2], dtype=bboxesDType) for bbox in
augmentedBatch.bounding_boxes_aug.bounding_boxes]
all_bboxes_aug = np.array(all_bboxes_aug, dtype=bboxesDType)
gt_bboxes_aug = all_bboxes_aug[:bbox_split]
gt_bboxes_ignore_aug = all_bboxes_aug[bbox_split:]
masks_aug = augmentedBatch.segmentation_maps_aug.arr.transpose(2, 0, 1).astype(masksDType)
return img_aug, gt_bboxes_aug, masks_aug, gt_bboxes_ignore_aug
def augmentBoundingBoxes(self, aug, gt_bboxes, img):
imgaugBBoxes = [imgaug.BoundingBox(x[0], x[1], x[2], x[3]) for x in gt_bboxes]
imgaugBBoxesOnImage = imgaug.BoundingBoxesOnImage(imgaugBBoxes, img.shape)
imgaugBBoxesOnImageAug = aug.augment_bounding_boxes(imgaugBBoxesOnImage)
dtype = gt_bboxes.dtype
shape = gt_bboxes.shape
gt_bboxes = [np.array([bbox.x1, bbox.y1, bbox.x2, bbox.y2], dtype=dtype) for bbox in
imgaugBBoxesOnImageAug.bounding_boxes]
gt_bboxes = np.array(gt_bboxes, dtype=dtype).reshape(shape)
return gt_bboxes
def prepare_test_img(self, idx):
"""Prepare an image for testing (multi-scale and flipping)"""
try:
img_info = self.img_infos[idx]
img = img_info.image() # mmcv.imread(osp.join(self.img_prefix, img_info['filename']))
if self.proposals is not None:
proposal = self.proposals[idx][:self.num_max_proposals]
if not (proposal.shape[1] == 4 or proposal.shape[1] == 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
'but found {}'.format(proposal.shape))
else:
proposal = None
def prepare_single(img, scale, flip, proposal=None):
_img, img_shape, pad_shape, scale_factor = self.img_transform(
img, scale, flip, keep_ratio=self.resize_keep_ratio)
_img = to_tensor(_img)
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
if proposal is not None:
if proposal.shape[1] == 5:
score = proposal[:, 4, None]
proposal = proposal[:, :4]
else:
score = None
_proposal = self.bbox_transform(proposal, img_shape,
scale_factor, flip)
_proposal = np.hstack(
[_proposal, score]) if score is not None else _proposal
_proposal = to_tensor(_proposal)
else:
_proposal = None
return _img, _img_meta, _proposal
imgs = []
img_metas = []
proposals = []
for scale in self.img_scales:
_img, _img_meta, _proposal = prepare_single(
img, scale, False, proposal)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
proposals.append(_proposal)
if self.flip_ratio > 0:
_img, _img_meta, _proposal = prepare_single(
img, scale, True, proposal)
imgs.append(_img)
img_metas.append(DC(_img_meta, cpu_only=True))
proposals.append(_proposal)
data = dict(img=imgs, img_meta=img_metas)
if self.proposals is not None:
data['proposals'] = proposals
return data
finally:
img_info.dispose()
def show(self, img, result):
show_result(img, result, self.CLASSES)
class InstanceSegmentationPredictionBlend(PredictionBlend):
def blend_predictions(self, item):
LABEL_INDEX = 0
CONF_INDEX = 1
BBOX_INDEX = 2
MASK_INDEX = 3
byClazz = {}
items = [ds[item] for ds in self.predictions]
for dsInd in range(len(items)):
pi = items[dsInd]
pred = pi.prediction
labels = pred[LABEL_INDEX]
masks = pred[MASK_INDEX]
for i in range(len(labels)):
label = labels[i]
mask = masks[i]
if np.max(mask) == 0:
continue
key = str(label)
if not key in byClazz:
arr = []
byClazz[key] = arr
else:
arr = byClazz[key]
arr.append((dsInd,i,label))
resultLabels = []
resultConfidences = []
resultBBoxes = []
resultMasks = []
for key in byClazz:
arr = byClazz[key]
l = len(arr)
iouMatrix = np.eye(l,dtype=np.float)
for i in range(l):
for j in range(i+1,l):
dsInd_i = arr[i][0]
objInd_i = arr[i][1]
dsInd_j = arr[j][0]
objInd_j = arr[j][1]
# mask_i = items[dsInd_i].prediction[MASK_INDEX][objInd_i]
# mask_j = items[dsInd_j].prediction[MASK_INDEX][objInd_j]
bbox_i = items[dsInd_i].prediction[BBOX_INDEX][objInd_i]
bbox_j = items[dsInd_j].prediction[BBOX_INDEX][objInd_j]
intersection = np.zeros(bbox_i.shape,dtype = bbox_i.dtype)
intersection[:2] = np.maximum(bbox_i[:2], bbox_j[:2])
intersection[2:] =
|
np.minimum(bbox_i[2:], bbox_j[2:])
|
numpy.minimum
|
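The completion uses numpy.minimum (paired with np.maximum two lines earlier) to compute the intersection rectangle of two [x1, y1, x2, y2] boxes for the IoU-based blending above. A minimal sketch with two toy boxes:

import numpy as np

bbox_i = np.array([0.0, 0.0, 10.0, 10.0])
bbox_j = np.array([5.0, 5.0, 15.0, 15.0])
intersection = np.zeros_like(bbox_i)
intersection[:2] = np.maximum(bbox_i[:2], bbox_j[:2])  # top-left of overlap
intersection[2:] = np.minimum(bbox_i[2:], bbox_j[2:])  # bottom-right of overlap
print(intersection)  # [ 5.  5. 10. 10.]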
'''
trimesh.util: utility functions
Only imports from numpy and the standard library are allowed in this file.
'''
import numpy as np
import logging
import hashlib
import base64
from collections import defaultdict, deque
from sys import version_info
if version_info.major >= 3:
basestring = str
log = logging.getLogger('trimesh')
log.addHandler(logging.NullHandler())
# included here so util has only standard library imports
_TOL_ZERO = 1e-12
def unitize(points, check_valid=False):
'''
Turn a list of vectors into a list of unit vectors.
Arguments
---------
points: (n,m) or (j) input array of vectors.
For 1D arrays, points is treated as a single vector
For 2D arrays, each row is treated as a vector
check_valid: boolean, if True enables valid output and checking
Returns
---------
unit_vectors: (n,m) or (j) length array of unit vectors
valid: (n) boolean array, output only if check_valid.
True for all valid (nonzero length) vectors, thus m=sum(valid)
'''
points = np.asanyarray(points)
axis = len(points.shape) - 1
length = np.sum(points ** 2, axis=axis) ** .5
if check_valid:
valid = np.greater(length, _TOL_ZERO)
if axis == 1:
unit_vectors = (points[valid].T / length[valid]).T
elif len(points.shape) == 1 and valid:
unit_vectors = points / length
else:
unit_vectors = np.array([])
return unit_vectors, valid
else:
unit_vectors = (points.T / length).T
return unit_vectors
def transformation_2D(offset=[0.0, 0.0], theta=0.0):
'''
2D homogeneous transformation matrix
'''
T = np.eye(3)
s = np.sin(theta)
c = np.cos(theta)
T[0, 0:2] = [c, s]
T[1, 0:2] = [-s, c]
T[0:2, 2] = offset
return T
def euclidean(a, b):
'''
Euclidean distance between vectors a and b
'''
return np.sum((np.array(a) - b) ** 2) ** .5
def is_file(obj):
return hasattr(obj, 'read')
def is_string(obj):
return isinstance(obj, basestring)
def is_dict(obj):
return isinstance(obj, dict)
def is_sequence(obj):
'''
Returns True if obj is a sequence.
'''
seq = (not hasattr(obj, "strip") and
hasattr(obj, "__getitem__") or
hasattr(obj, "__iter__"))
seq = seq and not isinstance(obj, dict)
# numpy sometimes returns objects that are single float64 values
# but sure look like sequences, so we check the shape
if hasattr(obj, 'shape'):
seq = seq and obj.shape != ()
return seq
def is_shape(obj, shape):
'''
Compare the shape of a numpy.ndarray to a target shape,
with any value less than zero being considered a wildcard
Arguments
---------
obj: np.ndarray to check the shape of
shape: list or tuple of shape.
Any negative term will be considered a wildcard
Any tuple term will be evaluated as an OR
Returns
---------
shape_ok: bool, True if shape of obj matches query shape
Examples
------------------------
In [1]: a = np.random.random((100,3))
In [2]: a.shape
Out[2]: (100, 3)
In [3]: trimesh.util.is_shape(a, (-1,3))
Out[3]: True
In [4]: trimesh.util.is_shape(a, (-1,3,5))
Out[4]: False
In [5]: trimesh.util.is_shape(a, (100,-1))
Out[5]: True
In [6]: trimesh.util.is_shape(a, (-1,(3,4)))
Out[6]: True
In [7]: trimesh.util.is_shape(a, (-1,(4,5)))
Out[7]: False
'''
if (not hasattr(obj, 'shape') or
len(obj.shape) != len(shape)):
return False
for i, target in zip(obj.shape, shape):
# check if current field has multiple acceptable values
if is_sequence(target):
if i in target:
continue
else:
return False
# check if current field is a wildcard
if target < 0:
if i == 0:
return False
else:
continue
# since we have a single target and a single value,
# if they are not equal we have an answer
if target != i:
return False
# since none of the checks failed, the two shapes are the same
return True
def make_sequence(obj):
'''
Given an object, if it is a sequence return, otherwise
add it to a length 1 sequence and return.
Useful for wrapping functions which sometimes return single
objects and other times return lists of objects.
'''
if is_sequence(obj):
return np.array(obj)
else:
return np.array([obj])
def vector_to_spherical(cartesian):
'''
Convert a set of cartesian points to (n,2) spherical vectors
'''
x, y, z = np.array(cartesian).T
# cheat on divide by zero errors
x[np.abs(x) < _TOL_ZERO] = _TOL_ZERO
spherical = np.column_stack((np.arctan(y / x),
np.arccos(z)))
return spherical
def spherical_to_vector(spherical):
"""
Convert a set of nx2 spherical vectors to nx3 vectors
:param spherical:
:return:
author: revised by weiwei
date: 20210120
"""
spherical = np.asanyarray(spherical, dtype=np.float64)
if not is_shape(spherical, (-1, 2)):
raise ValueError('spherical coordinates must be (n, 2)!')
theta, phi = spherical.T
st, ct = np.sin(theta), np.cos(theta)
sp, cp = np.sin(phi), np.cos(phi)
vectors = np.column_stack((ct * sp,
st * sp,
cp))
return vectors
def diagonal_dot(a, b):
'''
Dot product by row of a and b.
Same as np.diag(np.dot(a, b.T)) but without the monstrous
intermediate matrix.
'''
result = (np.array(a) * b).sum(axis=1)
return result
def three_dimensionalize(points, return_2D=True):
'''
Given a set of (n,2) or (n,3) points, return them as (n,3) points
Arguments
----------
points: (n, 2) or (n,3) points
return_2D: boolean flag
Returns
----------
if return_2D:
is_2D: boolean, True if points were (n,2)
points: (n,3) set of points
else:
points: (n,3) set of points
'''
points = np.asanyarray(points)
shape = points.shape
if len(shape) != 2:
raise ValueError('Points must be 2D array!')
if shape[1] == 2:
points = np.column_stack((points, np.zeros(len(points))))
is_2D = True
elif shape[1] == 3:
is_2D = False
else:
raise ValueError('Points must be (n,2) or (n,3)!')
if return_2D:
return is_2D, points
return points
def grid_arange_2D(bounds, step):
'''
Return a 2D grid with specified spacing
Arguments
---------
bounds: (2,2) list of [[minx, miny], [maxx, maxy]]
step: float, separation between points
Returns
-------
grid: (n, 2) list of 2D points
'''
x_grid = np.arange(*bounds[:, 0], step=step)
y_grid = np.arange(*bounds[:, 1], step=step)
grid = np.dstack(np.meshgrid(x_grid, y_grid)).reshape((-1, 2))
return grid
def grid_linspace_2D(bounds, count):
'''
Return a count*count 2D grid
Arguments
---------
bounds: (2,2) list of [[minx, miny], [maxx, maxy]]
count: int, number of elements on a side
Returns
-------
grid: (count**2, 2) list of 2D points
'''
x_grid = np.linspace(*bounds[:, 0], count=count)
y_grid = np.linspace(*bounds[:, 1], count=count)
grid = np.dstack(np.meshgrid(x_grid, y_grid)).reshape((-1, 2))
return grid
def replace_references(data, reference_dict):
# Replace references in place
view = np.array(data).view().reshape((-1))
for i, value in enumerate(view):
if value in reference_dict:
view[i] = reference_dict[value]
return view
def multi_dict(pairs):
'''
Given a set of key value pairs, create a dictionary.
If a key occurs multiple times, stack the values into an array.
Can be called like the regular dict(pairs) constructor
Arguments
----------
pairs: (n,2) array of key, value pairs
Returns
----------
result: dict, with all values stored (rather than last with regular dict)
'''
result = defaultdict(list)
for k, v in pairs:
result[k].append(v)
return result
def tolist_dict(data):
def tolist(item):
if hasattr(item, 'tolist'):
return item.tolist()
else:
return item
result = {k: tolist(v) for k, v in data.items()}
return result
def is_binary_file(file_obj, probe_sz=1024):
'''
Returns True if file has non-ASCII characters (> 0x7F, or 127)
Should work in both Python 2 and 3
'''
try:
start = file_obj.tell()
fbytes = file_obj.read(probe_sz)
file_obj.seek(start)
is_str = isinstance(fbytes, str)
for fbyte in fbytes:
if is_str:
code = ord(fbyte)
else:
code = fbyte
if code > 127: return True
except UnicodeDecodeError:
return True
return False
def decimal_to_digits(decimal, min_digits=None):
digits = abs(int(np.log10(decimal)))
if min_digits is not None:
digits = np.clip(digits, min_digits, 20)
return digits
def md5_object(obj):
'''
If an object is hashable, return the hex string of the MD5.
'''
hasher = hashlib.md5()
hasher.update(obj)
hashed = hasher.hexdigest()
return hashed
def attach_to_log(log_level=logging.DEBUG,
blacklist=['TerminalIPythonApp', 'PYREADLINE']):
'''
Attach a stream handler to all loggers.
'''
try:
from colorlog import ColoredFormatter
formatter = ColoredFormatter(
("%(log_color)s%(levelname)-8s%(reset)s " +
"%(filename)17s:%(lineno)-4s %(blue)4s%(message)s"),
datefmt=None,
reset=True,
log_colors={'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red'})
except ImportError:
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)-7s (%(filename)s:%(lineno)3s) %(message)s",
"%Y-%m-%d %H:%M:%S")
handler_stream = logging.StreamHandler()
handler_stream.setFormatter(formatter)
handler_stream.setLevel(log_level)
for logger in logging.Logger.manager.loggerDict.values():
if (logger.__class__.__name__ != 'Logger' or
logger.name in blacklist):
continue
logger.addHandler(handler_stream)
logger.setLevel(log_level)
np.set_printoptions(precision=5, suppress=True)
def tracked_array(array, dtype=None):
'''
Properly subclass a numpy ndarray to track changes.
'''
result = np.ascontiguousarray(array).view(TrackedArray)
if dtype is None:
return result
return result.astype(dtype)
class TrackedArray(np.ndarray):
'''
Track changes in a numpy ndarray.
Methods
----------
md5: returns hexadecimal string of md5 of array
'''
def __array_finalize__(self, obj):
'''
Sets a modified flag on every TrackedArray
This flag will be set on every change, as well as during copies
and certain types of slicing.
'''
self._modified = True
if isinstance(obj, type(self)):
obj._modified = True
def md5(self):
'''
Return an MD5 hash of the current array in hexadecimal string form.
This is quite fast; on a modern i7 desktop a (1000000,3) floating point
array was hashed reliably in .03 seconds.
This is only recomputed if a modified flag is set which may have false
positives (forcing an unnecessary recompute) but will not have false
negatives which would return an incorrect hash.
'''
if self._modified or not hasattr(self, '_hashed'):
self._hashed = md5_object(self)
self._modified = False
return self._hashed
def __hash__(self):
'''
Hash is required to return an int, so we convert the hex string to int.
'''
return int(self.md5(), 16)
def __setitem__(self, i, y):
self._modified = True
super(self.__class__, self).__setitem__(i, y)
def __setslice__(self, i, j, y):
self._modified = True
super(self.__class__, self).__setslice__(i, j, y)
class Cache:
"""
Class to cache values until an id function changes.
"""
def __init__(self, id_function=None):
if id_function is None:
self._id_function = lambda: None
else:
self._id_function = id_function
self.id_current = None
self._lock = 0
self.cache = {}
def decorator(self, function):
name = function.__name__
if name in self.cache:
return self.cache[name]
result = function()
self.cache[name] = result
return result
def get(self, key):
"""
Get a key from cache.
If the key is unavailable or the cache has been invalidated returns None.
:param key:
:return:
author: revised by weiwei
date: 20201201
"""
self.verify()
if key in self.cache:
return self.cache[key]
return None
def verify(self):
"""
Verify that the cached values are still for the same value of id_function,
and delete all stored items if the value of id_function has changed.
:return:
author: revised by weiwei
date: 20201201
"""
id_new = self._id_function()
if (self._lock == 0) and (id_new != self.id_current):
if len(self.cache) > 0:
log.debug('%d items cleared from cache: %s',
len(self.cache),
str(self.cache.keys()))
self.clear()
self.id_set()
def clear(self, exclude=None):
"""
Remove all elements in the cache.
:param exclude:
:return:
author: revised by weiwei
date: 20201201
"""
if exclude is None:
self.cache = {}
else:
self.cache = {k: v for k, v in self.cache.items() if k in exclude}
def update(self, items):
"""
Update the cache with a set of key, value pairs without checking id_function.
:param items:
:return:
author: revised by weiwei
date: 20201201
"""
self.cache.update(items)
self.id_set()
def id_set(self):
self.id_current = self._id_function()
def set(self, key, value):
self.verify()
self.cache[key] = value
return value
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.set(key, value)
def __contains__(self, key):
self.verify()
return key in self.cache
def __enter__(self):
self._lock += 1
def __exit__(self, *args):
self._lock -= 1
self.id_current = self._id_function()
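# Example (minimal sketch): cached values survive only while the id_function
# value is unchanged; once the id changes, verify() clears the cache.
#   counter = {'n': 0}
#   cache = Cache(id_function=lambda: counter['n'])
#   cache.set('area', 42.0)
#   assert cache.get('area') == 42.0
#   counter['n'] += 1        # id changes, next access clears the cache
#   assert cache.get('area') is None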
class DataStore:
@property
def mutable(self):
if not hasattr(self, '_mutable'):
self._mutable = True
return self._mutable
@mutable.setter
def mutable(self, value):
value = bool(value)
for i in self.data.values():
i.flags.writeable = value
self._mutable = value
def is_empty(self):
if len(self.data) == 0:
return True
for v in self.data.values():
if is_sequence(v):
if len(v) > 0:
return False
else:
if bool(np.isreal(v)):
return False
return True
def __init__(self):
self.data = {}
def clear(self):
self.data = {}
def __getitem__(self, key):
try:
return self.data[key]
except KeyError:
return None
def __setitem__(self, key, data):
self.data[key] = tracked_array(data)
def __len__(self):
return len(self.data)
def values(self):
return self.data.values()
def md5(self):
md5 = ''
for key in np.sort(list(self.data.keys())):
md5 += self.data[key].md5()
return md5
def stack_lines(indices):
return np.column_stack((indices[:-1],
indices[1:])).reshape((-1, 2))
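# Example (minimal sketch): consecutive indices become line segments, e.g.
#   stack_lines(np.array([0, 1, 2, 3]))  ->  [[0, 1], [1, 2], [2, 3]]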
def append_faces(vertices_seq, faces_seq):
'''
Given a sequence of zero-indexed faces and vertices,
combine them into a single (n,3) list of faces and (m,3) vertices
Arguments
---------
vertices_seq: (n) sequence of (m,d) vertex arrays
faces_seq: (n) sequence of (p,j) faces, zero-indexed
and referencing their corresponding vertices
'''
vertices_len = np.array([len(i) for i in vertices_seq])
face_offset = np.append(0, np.cumsum(vertices_len)[:-1])
for offset, faces in zip(face_offset, faces_seq):
faces += offset
vertices = np.vstack(vertices_seq)
faces = np.vstack(faces_seq)
return vertices, faces
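# Example (minimal sketch): two single-triangle meshes are merged; the second
# face is re-indexed by the length of the first vertex array.
#   va = np.zeros((3, 3)); vb = np.ones((3, 3))
#   fa = np.array([[0, 1, 2]]); fb = np.array([[0, 1, 2]])
#   v, f = append_faces([va, vb], [fa, fb])
#   # v.shape == (6, 3) and f is [[0, 1, 2], [3, 4, 5]]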
def array_to_encoded(array, dtype=None, encoding='base64'):
'''
Export a numpy array to a compact serializable dictionary.
Arguments
---------
array: numpy array
dtype: optional, what dtype should array be encoded with.
encoding: str, 'base64' or 'binary'
Returns
---------
encoded: dict with keys:
dtype: string of dtype
shape: int tuple of shape
base64: base64 encoded string of flat array
'''
array =
|
np.asanyarray(array)
|
numpy.asanyarray
|
import numpy as np
def sample_state_v0(x0, P, n_steps=10):
x = x0
P_cumulative = P.cumsum(axis=1)
P_cumulative[:, -1] = 1
step_i = 0
while step_i < n_steps:
states = set(x)
for state in states:
mask = x == state
u = np.random.rand(mask.sum())
new_states = np.searchsorted(P_cumulative[state, :], u, side='left')
x[mask] = new_states
step_i += 1
return x
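# Example (minimal sketch): a two-state chain where state 0 moves to state 1
# (almost surely) and state 1 is absorbing, so every walker ends in state 1.
#   P = np.array([[0.0, 1.0],
#                 [0.0, 1.0]])
#   x0 = np.zeros(5, dtype=int)
#   x = sample_state_v0(x0, P, n_steps=3)    # -> array of ones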
def sample_state_differencedP_v0(x0, P, n_steps=10, self_loop_col=-1):
if self_loop_col < 0:
assert P.shape[1] % 2, 'no center because differencedP has even number of columns'
self_loop_col = int((P.shape[1]-1)/2)
x = x0
P_cumulative = P.cumsum(axis=1)
P_cumulative[:, -1] = 1
step_i = 0
while step_i < n_steps:
states = set(x)
for state in states:
mask = x == state
u = np.random.rand(mask.sum())
new_state_difs = np.searchsorted(P_cumulative[state, :], u, side='left')
x[mask] += new_state_difs - self_loop_col
step_i += 1
return x
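# Example (minimal sketch): with a 3-column differenced P the columns encode
# state differences {-1, 0, +1} around self_loop_col = 1, so a row of
# [0, 0, 1] (almost surely) steps the state up by one each iteration.
#   P = np.tile(np.array([0.0, 0.0, 1.0]), (10, 1))
#   x0 = np.zeros(4, dtype=int)
#   x = sample_state_differencedP_v0(x0, P, n_steps=3)    # -> array of threes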
def sample_state_history_v0(x0, P, n_steps=10):
x = x0
history = [x.copy()]
P_cumulative = P.cumsum(axis=1)
P_cumulative[:, -1] = 1
step_i = 0
while step_i < n_steps:
states = set(x)
for state in states:
mask = x == state
u = np.random.rand(mask.sum())
new_states = np.searchsorted(P_cumulative[state, :], u, side='left')
x[mask] = new_states
step_i += 1
history.append(x.copy())
return x, history
def sample_state_history_differencedP_v0(x0, P, n_steps=10, self_loop_col=-1):
if self_loop_col < 0:
assert P.shape[1] % 2, 'no center because differencedP has even number of columns'
self_loop_col = int((P.shape[1]-1)/2)
x = x0
history = [x.copy()]
P_cumulative = P.cumsum(axis=1)
P_cumulative[:, -1] = 1
step_i = 0
while step_i < n_steps:
states = set(x)
for state in states:
mask = x == state
u = np.random.rand(mask.sum())
new_state_difs = np.searchsorted(P_cumulative[state, :], u, side='left')
x[mask] += new_state_difs - self_loop_col # where col of P is the *difference* in the state
step_i += 1
history.append(x.copy())
return x, history
def sample_state_obs_weighting_v0(x0, P, obs, log_ll_fcn, w0=(), n_steps=10):
# assume observations start at the second step; x0 is already weighted/sampled from the first
x = x0
w = w0 if len(w0) > 0 else np.zeros_like(x, dtype=float)
P_cumulative = P.cumsum(axis=1)
P_cumulative[:, -1] = 1
step_i = 0
while step_i < n_steps:
states = set(x)
for state in states:
mask = x == state
u = np.random.rand(mask.sum())
new_states =
|
np.searchsorted(P_cumulative[state, :], u, side='left')
|
numpy.searchsorted
|
#!/usr/bin/env python
###############################################################################
"""
@author: vxr131730 - <NAME>
This code simulates the bicycle dynamics of a car, steering it with NMPC -
nonlinear model predictive control (multiple shooting technique) - and
estimating the state with an unscented Kalman filter (UKF). This code uses the
CARLA simulator.
CARLA SIMULATOR VERSION - 0.9.10
PYTHON VERSION - 3.6.8
VISUAL STUDIO VERSION - 2017
UNREAL ENGINE VERSION - 4.24.3
This script is tested in Python 3.6.8, Windows 10, 64-bit
(C) <NAME>, 2020. Email: <EMAIL>
This program is a free software: you can redistribute it and/or modify it
under the terms of the GNU lesser General Public License, either version
3.7, or any later version. This program is distributed in the hope that it
will be useful, but WITHOUT ANY WARRANTY.
"""
###############################################################################
####################### Import all the required libraries #####################
###############################################################################
import glob
import os
import sys
import random
import math
import time
import casadi as ca
import numpy as np
import cv2
from casadi import *
from casadi.tools import *
from numpy import linalg as LA
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
###############################################################################
####################### Define The Global Variable ############################
###############################################################################
IM_WIDTH = 640 # Width of the simulation screen
IM_HEIGHT = 480 # Height of the simulation screen
actor_list = [] # List of actors in the simulation world
l_r = 1.415 # Distance from center of gravity to rear wheels
l_f = 1.6 # Distance from center of gravity to front wheels
L = l_r + l_f # Total length of the vehicle
T = 0.08 # Sampling Time (s)
IM_WIDTH = 640
IM_HEIGHT = 480
red = carla.Color(255, 0, 0)
green = carla.Color(0, 255, 0)
blue = carla.Color(47, 210, 231)
cyan = carla.Color(0, 255, 255)
yellow = carla.Color(255, 255, 0)
orange = carla.Color(255, 162, 0)
white = carla.Color(255, 255, 255)
trail_life_time = 50
waypoint_separation = 5
def draw_transform(debug, trans, col=carla.Color(255, 0, 0), lt=-1):
debug.draw_arrow(
trans.location, trans.location + trans.get_forward_vector(),
thickness=0.05, arrow_size=0.1, color=col, life_time=lt)
def draw_waypoint_union(debug, w0, w1, color=carla.Color(255, 0, 0), lt=5):
debug.draw_line(
w0.transform.location + carla.Location(z=0.25),
w1.transform.location + carla.Location(z=0.25),
thickness=0.1, color=color, life_time=lt, persistent_lines=False)
debug.draw_point(w1.transform.location + carla.Location(z=0.25), 0.1, color, lt, False)
###############################################################################
###############################################################################
################## UNSCENTED KALMAN FILTER IMPLEMENTATION #####################
###############################################################################
###############################################################################
def UKF(ukf_parameters):
# Unbox the input parameters
zMean = ukf_parameters["x_hat"]
u_k = ukf_parameters["u_k"]
zCovar = ukf_parameters["SigmaE"]
n_z = ukf_parameters["n_z"]
SigmaW = ukf_parameters["SigmaW"]
SigmaV = ukf_parameters["SigmaV"]
y_k = ukf_parameters["y_k"]
# Define the global variables
alpha = 1.0
beta = 2.0
n = n_z
kappa = 10 - n
lambda_ = alpha**2 * (n + kappa) - n
num_sigma_pts = 2*n + 1
# Initialize Van der Merwe's weighting matrix
Wc = np.zeros((num_sigma_pts, 1))
Wm = np.zeros((num_sigma_pts, 1))
# Compute the Van der Merwe's weighting matrix values
for i in range(num_sigma_pts):
if i == 0:
Wc[i,:] = lambda_ / (n + lambda_) + (1 - alpha**2 + beta)
Wm[i,:] = lambda_ / (n + lambda_)
continue
Wc[i,:] = 1/(2*(n + lambda_))
Wm[i,:] = 1/(2*(n + lambda_))
# Define the direction matrix
U = LA.cholesky((n + lambda_)*zCovar)
# Generate the sigma points using Van der Merwe algorithm
# Define Place holder for all sigma points
sigmaPoints = np.zeros((n, num_sigma_pts))
# First SigmaPoint is always the mean
sigmaPoints[:,0] = zMean.T
# Generate sigmapoints symmetrically around the mean
for k in range(n):
sigmaPoints[:, k+1] = sigmaPoints[:,0] + U[:, k]
sigmaPoints[:, k+n+1] = sigmaPoints[:,0] - U[:, k]
###################### Apriori Update #####################################
# Compute the apriori output
aprioriOutput = PredictionStep(u_k, T, sigmaPoints, Wm, Wc, SigmaW)
# Unbox the apriori output
aprioriMean = aprioriOutput["mean"]
aprioriCovar = aprioriOutput["Covar"]
aprioriPoints = aprioriOutput["aprioriPoints"]
###########################################################################
###################### Aposteriori Update ################################
# Compute the aposteriori output
aposterioriOutput = UpdateStep(aprioriPoints, Wm, Wc, SigmaV)
# Unbox the aposteriori output
aposterioriMean = aposterioriOutput["mean"]
aposterioriCovar = aposterioriOutput["Covar"]
aposterioriPoints = aposterioriOutput["aposterioriPoints"]
# Compute the residual yStar
yStar = y_k - aposterioriMean.reshape(-1,1)
# Prepare dictionary to compute cross covariance matrix
funParam = {"input1": aprioriPoints,
"input2": aposterioriPoints,
"input1Mean": aprioriMean,
"input2Mean": aposterioriMean,
"weightMatrix": Wc}
# Compute the cross covariance matrix
crossCovarMatrix = ComputeCrossCovariance(funParam)
# Compute Unscented Kalman Gain
uKFGain = np.dot(crossCovarMatrix, LA.inv(aposterioriCovar))
# Compute Aposteriori State Update and Covariance Update
x_hat = aprioriMean.reshape(-1,1) + uKFGain @ yStar
SigmaE = aprioriCovar - uKFGain @ aposterioriCovar @ uKFGain.T
# Prepare Output Dictionary
ukfOutput = {"x_hat": x_hat, "SigmaE": SigmaE}
return ukfOutput
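# Example (minimal sketch of the expected input dictionary, illustrative values):
#   ukf_parameters = {"x_hat": np.zeros(4),              # current state estimate
#                     "u_k": np.array([1.0, 0.1]),       # applied [accel, steer]
#                     "SigmaE": 0.0001*np.identity(4),   # state covariance
#                     "n_z": 4,                          # number of states
#                     "SigmaW": np.diag([0.0005, 0.0005, 0, 0]),
#                     "SigmaV": 0.0001*np.identity(2),
#                     "y_k": np.array([[10.0], [0.5]])}  # range/bearing measurement
#   ukf_output = UKF(ukf_parameters)
#   x_hat, SigmaE = ukf_output["x_hat"], ukf_output["SigmaE"]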
###############################################################################
def PredictionStep(u_k, T, sigmaPoints, Wm, Wc, SigmaW):
# Get the shape of sigmaPoints
ro, co = np.shape(sigmaPoints)
# Create the data structure to hold the transformed points
aprioriPoints = np.zeros((ro, co))
# Loop through and pass each and every sigmapoint
for i in range(co):
aprioriPoints[:, i] = MotionModel(sigmaPoints[:, i], u_k, T)
# Compute the mean and covariance of the transformed points
aprioriOutput = ComputeStatistics(aprioriPoints, Wm, Wc, SigmaW)
# Add the aprioriPoints to output
aprioriOutput["aprioriPoints"] = aprioriPoints
return aprioriOutput
###############################################################################
def UpdateStep(sigmaPoints, Wm, Wc, SigmaV):
aprioriPoints = sigmaPoints
# Get the shape of aprioriPoints
ro, M = np.shape(aprioriPoints)
# Get the number of outputs
num_outputs = SigmaV.shape[0]
# Create the data structure to hold the transformed points
aposterioriPoints = np.zeros((num_outputs, M)) #4 states, 2 outputs
# Loop through and pass each and every sigmapoint
for i in range(M):
aposterioriPoints[:, i] = MeasurementModel(aprioriPoints[:, i])
# Compute the mean and covariance of the transformed points
aposterioriOutput = ComputeStatistics(aposterioriPoints, Wm, Wc, SigmaV)
# Add the aposterioriPoints to the output dictionary
aposterioriOutput["aposterioriPoints"] = aposterioriPoints
return aposterioriOutput
###############################################################################
def MotionModel(oldState, u, T):
newState = oldState + [T*oldState[3]*np.cos(oldState[2]),
T*oldState[3]*np.sin(oldState[2]),
T*(oldState[3]/L)*np.tan(u[1]),
T*u[0]]
return newState
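# The update above is the discrete-time kinematic bicycle model with state
# [x, y, theta, v] and input u = [a, delta]:
#   x_{k+1}     = x_k     + T * v_k * cos(theta_k)
#   y_{k+1}     = y_k     + T * v_k * sin(theta_k)
#   theta_{k+1} = theta_k + T * (v_k / L) * tan(delta_k)
#   v_{k+1}     = v_k     + T * a_k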
###############################################################################
def MeasurementModel(newState):
output = [math.sqrt(newState[0]**2 + newState[1]**2),
math.atan2(newState[1], newState[0])]
return output
###############################################################################
def ComputeCrossCovariance(funParam):
# Compute the crossCovarMatrix
input1Shape = np.shape(funParam["input1"])
input2Shape = np.shape(funParam["input2"])
P = np.zeros((input1Shape[0], input2Shape[0]))
for k in range(input1Shape[1]):
diff1 = funParam["input1"][:,k] - funParam["input1Mean"]
diff2 = funParam["input2"][:,k] - funParam["input2Mean"]
P += funParam["weightMatrix"][k] * np.outer(diff1, diff2)
return P
###############################################################################
def ComputeStatistics(inputPoints, Wm, Wc, noiseCov):
# Compute the weighted mean
inputPointsMean = np.dot(Wm[:,0], inputPoints.T)
# Compute the weighted covariance
inputShape = np.shape(inputPoints)
P = np.zeros((inputShape[0], inputShape[0]))
# Find the weighted covariance
for k in range(inputShape[1]):
y = inputPoints[:, k] - inputPointsMean
P = P + Wc[k] * np.outer(y, y)
# Add the noise covariance
P += noiseCov
# Box the Output data
statsOutput = {"mean": inputPointsMean, "Covar": P}
return statsOutput
###############################################################################
###############################################################################
def TransformTheta(theta):
if theta < 0:
theta = 360 - abs(theta)
return theta
###############################################################################
###############################################################################
def Get_Carla_Steer_Input(steer_angle):
"""
Given a steering angle in radians, returns the steering input between [-1,1]
so that it can be applied to the car in the CARLA simulator.
Max steering angle = 70 degrees = 1.22 radians
Ref: https://github.com/carla-simulator/ros-bridge/blob/master/carla_ackermann_control/src/carla_ackermann_control/carla_control_physics.py
Input:
steer_angle: steering angle in radians
Output:
steer_input: steering input between [-1,1]
"""
steer_input = (1/1.22)*steer_angle
steer_input = np.fmax(np.fmin(steer_input, 1.0), -1.0)
return steer_input
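# Example (minimal sketch): 0.61 rad (half the 1.22 rad maximum) maps to a
# normalized command of 0.5, and anything beyond +/-1.22 rad saturates.
#   Get_Carla_Steer_Input(0.61)    # -> 0.5
#   Get_Carla_Steer_Input(2.0)     # -> 1.0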
###############################################################################
###############################################################################
def Get_Carla_Throttle_Input(accel):
"""
Given an acceleration in m/s^2, returns the throttle input between [0,1]
so that it can be applied to the car in the CARLA simulator.
Max acceleration = 3.0 m/s^2
Ref: https://github.com/carla-simulator/ros-bridge/blob/master/carla_ackermann_control/src/carla_ackermann_control/carla_control_physics.py
Input:
accel: acceleration in m/s^2
Output:
throttle_input: steering input between [0,1]
"""
throttle_input = (1/3)*accel
throttle_input = np.fmax(np.fmin(throttle_input, 1.0), 0.0)
return throttle_input
###############################################################################
###############################################################################
def Prepare_CARLA_Controls(u_k):
throttle_input = Get_Carla_Throttle_Input(u_k[0])
steer_input = Get_Carla_Steer_Input(u_k[1])
car_controls = [throttle_input, steer_input]
return car_controls
###############################################################################
###############################################################################
####################### MAIN NMPC STEERING CODE ##############################
###############################################################################
###############################################################################
def main():
# Define Simulation Parameters
N = 30 # Prediction Horizon
num_ctrls = 2 # Number of controls
num_states = 4 # Number of states
num_outputs = 2 # Number of outputs
# CONTROL BOUNDS
min_accel = -8 # minimum throttle
max_accel = 8 # maximum throttle
min_steer = -1.22 # minimum steering angle
max_steer = 1.22 # maximum steering angle
min_pos = -300 # minimum position
max_pos = 300 # maximum position
# Initiate an instance of opti class of casadi
opti = ca.Opti()
# control variables, linear velocity v and angle velocity omega
opt_controls = opti.variable(N, num_ctrls)
opt_states = opti.variable(N+1, num_states)
accel = opt_controls[:, 0]
steer = opt_controls[:, 1]
x = opt_states[:, 0]
y = opt_states[:, 1]
theta = opt_states[:, 2]
v = opt_states[:, 3]
# Define the State and Control Penalty matrices
Q = 0.1*np.diag([3,3,1,2]) # np.diag([3600,3600,1900,2])
R = 0.1*np.diag([1,1]) # np.diag([1,8000])
# Define the noise means
mu_w = np.zeros(num_states) # Mean of process noises
mu_v = np.zeros(num_outputs) # Mean of sensor noises
# Define Covariance Matrices
SigmaW = np.diag([0.0005, 0.0005, 0, 0]) # Process Noise Covariance
SigmaV = 0.0001*np.identity(num_outputs) # Sensor Noise Covariance
SigmaE = 0.0001*
|
np.identity(num_states)
|
numpy.identity
|