prompt (string, lengths 15–655k) | completion (string, lengths 3–32.4k) | api (string, lengths 8–52)
---|---|---|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
import zmq
import pyarrow
from gym import spaces
import deepdrive_api.methods as m
import deepdrive_api.constants as c
from deepdrive_api import logs
log = logs.get_log(__name__)
def deserialize_space(resp):
if resp['type'] == "<class 'gym.spaces.box.Box'>":
ret = spaces.Box(resp['low'], resp['high'], dtype=resp['dtype'])
else:
raise RuntimeError('Unsupported space type: %s' % resp['type'])
return ret
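# Illustrative sketch (added; not part of the original module): the server is
# expected to reply with a plain dict describing the space, which
# deserialize_space() turns back into a gym.spaces.Box. The field values below
# are made up for the example.
def _example_deserialize_space():
    resp = {'type': "<class 'gym.spaces.box.Box'>",
            'low': np.array([-1.0, -1.0]),
            'high': np.array([1.0, 1.0]),
            'dtype': np.float32}
    return deserialize_space(resp)  # -> a 2-dimensional Box with the given bounds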
class Client(object):
"""
A Client object acts as a remote proxy to the deepdrive gym environment.
Methods that you would call on the env, like step() are also called on
this object, with communication over the network -
rather than over shared memory (for observations) and network
(for transactions like reset) as is the case with the locally run
sim/gym_env.py.
This allows the agent and environment to run on separate machines, but
with the same API as a local agent, namely the gym API.
The local gym environment is then run by api/server.py which proxies
RPC's from this client to the local environment.
All network communication happens over ZMQ to take advantage of its
highly optimized cross-language / cross-OS sockets.
NOTE: This will obviously run more slowly than a local agent which
communicates sensor data over shared memory.
"""
def __init__(self, **kwargs):
"""
:param kwargs['client_render'] (bool): Whether to render on this
side of the client server connection.
Passing kwargs['render'] = True will cause the server to render
an MJPG stream at http://localhost:5000
"""
self.socket = None
self.last_obz = None
self.create_socket()
self.should_render = kwargs.get('client_render', False)
self.is_open = True
kwargs['cameras'] = kwargs.get('cameras', [c.DEFAULT_CAM])
log.info('Waiting for sim to start on server...')
# TODO: Fix connecting to an open sim
self._send(m.START, kwargs=kwargs)
self.is_open = True
log.info('===========> Deepdrive sim started')
def _send(self, method, args=None, kwargs=None):
if method != m.START and not self.is_open:
log.warning('Not sending, env is closed')
return None
args = args or []
kwargs = kwargs or {}
try:
msg = pyarrow.serialize([method, args, kwargs]).to_buffer()
self.socket.send(msg)
return pyarrow.deserialize(self.socket.recv())
except zmq.error.Again:
log.info('Waiting for Deepdrive API server...')
self.create_socket()
return None
def create_socket(self):
if self.socket:
self.socket.close()
context = zmq.Context()
socket = context.socket(zmq.PAIR)
# Creating a new socket on timeout is not working when other ZMQ
# connections are present in the process.
# socket.RCVTIMEO = c.API_TIMEOUT_MS
# socket.SNDTIMEO = c.API_TIMEOUT_MS
connection_str = 'tcp://%s:%s' % (c.SIM_HOST, c.API_PORT)
log.info('Deepdrive API client connecting to %s' % connection_str)
socket.connect(connection_str)
self.socket = socket
return socket
def step(self, action):
if hasattr(action, 'as_gym'):
# Legacy support for original agents written within deepdrive repo
action = action.as_gym()
ret = self._send(m.STEP, args=[action])
obz, reward, done, info = ret
if info.get('closed', False):
self.handle_closed()
if not obz:
obz = None
self.last_obz = obz
if self.should_render:
self.render()
return obz, reward, done, info
def reset(self):
if self.is_open:
return self._send(m.RESET)
else:
log.warning('Env closed, not resetting')
return None
def render(self):
"""
We pass the obz through an instance variable to comply with
the gym api where render() takes 0 arguments
"""
if self.last_obz is not None:
self.renderer.render(self.last_obz)
def change_cameras(self, cameras):
return self._send(m.CHANGE_CAMERAS, args=[cameras])
def close(self):
self._send(m.CLOSE)
self.handle_closed()
def handle_closed(self):
self.is_open = False
try:
self.socket.close()
except Exception as e:
log.debug('Caught exception closing socket: %s', e)
@property
def action_space(self):
resp = self._send(m.ACTION_SPACE)
ret = deserialize_space(resp)
return ret
@property
def observation_space(self):
resp = self._send(m.OBSERVATION_SPACE)
ret = deserialize_space(resp)
return ret
@property
def metadata(self):
return self._send(m.METADATA)
@property
def reward_range(self):
return self._send(m.REWARD_RANGE)
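# Minimal usage sketch (added for illustration; not part of the original file).
# It assumes a Deepdrive API server is already listening on c.SIM_HOST:c.API_PORT
# and drives the remote env through the gym-style API proxied by Client.
def _example_client_episode(n_steps=10):
    client = Client(client_render=False)
    obz = client.reset()
    for _ in range(n_steps):
        action = client.action_space.sample()
        obz, reward, done, info = client.step(action)
        if done:
            obz = client.reset()
    client.close()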
def get_action(steering=0, throttle=0, brake=0, handbrake=0, has_control=True):
ret = [np.array([steering]),
np.array([throttle]),
np.array([brake]),
| np.array([handbrake]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Definition of a hierarchy of classes for kernel functions to be used
in convolution, e.g., for data smoothing (low pass filtering) or
firing rate estimation.
Examples of usage:
>>> kernel1 = kernels.GaussianKernel(sigma=100*ms)
>>> kernel2 = kernels.ExponentialKernel(sigma=8*mm, invert=True)
:copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
import quantities as pq
import numpy as np
import scipy.special
def inherit_docstring(fromfunc, sep=""):
"""
Decorator: Copy the docstring of `fromfunc`
based on:
http://stackoverflow.com/questions/13741998/
is-there-a-way-to-let-classes-inherit-the-documentation-of-their-superclass-with
"""
def _decorator(func):
parent_doc = fromfunc.__doc__
if func.__doc__ is None:
func.__doc__ = parent_doc
else:
func.__doc__ = sep.join([parent_doc, func.__doc__])
return func
return _decorator
class Kernel(object):
"""
This is the base class for commonly used kernels.
General definition of kernel:
A function :math:`K(x, y)` is called a kernel function if
:math:`\\int K(x, y) g(x) g(y)\\ \\textrm{d}x\\ \\textrm{d}y
\\ \\geq 0\\ \\ \\ \\forall\\ g \\in L_2`
Currently implemented kernels are:
- rectangular
- triangular
- epanechnikovlike
- gaussian
- laplacian
- exponential (asymmetric)
- alpha function (asymmetric)
In neuroscience a popular application of kernels is in performing smoothing
operations via convolution. In this case, the kernel has the properties of
a probability density, i.e., it is positive and normalized to one. Popular
choices are the rectangular or Gaussian kernels.
Exponential and alpha kernels may also be used to represent the postsynaptic
current / potentials in a linear (current-based) model.
Parameters
----------
sigma : Quantity scalar
Standard deviation of the kernel.
invert: bool, optional
If true, asymmetric kernels (e.g., exponential
or alpha kernels) are inverted along the time axis.
Default: False
"""
def __init__(self, sigma, invert=False):
if not (isinstance(sigma, pq.Quantity)):
raise TypeError("sigma must be a quantity!")
if sigma.magnitude < 0:
raise ValueError("sigma cannot be negative!")
if not isinstance(invert, bool):
raise ValueError("invert must be bool!")
self.sigma = sigma
self.invert = invert
def __call__(self, t):
"""
Evaluates the kernel at all points in the array `t`.
Parameter
---------
t : Quantity 1D
Interval on which the kernel is evaluated, not necessarily
a time interval.
Returns
-------
Quantity 1D
The result of the kernel evaluations.
"""
if not (isinstance(t, pq.Quantity)):
raise TypeError("The argument of the kernel callable must be "
"of type quantity!")
if t.dimensionality.simplified != self.sigma.dimensionality.simplified:
raise TypeError("The dimensionality of sigma and the input array "
"to the callable kernel object must be the same. "
"Otherwise a normalization to 1 of the kernel "
"cannot be performed.")
self._sigma_scaled = self.sigma.rescale(t.units)
# A hidden variable _sigma_scaled is introduced here in order to avoid
# accumulation of floating point errors of sigma upon multiple
# usages of the __call__ - function for the same Kernel instance.
return self._evaluate(t)
def _evaluate(self, t):
"""
Evaluates the kernel.
Parameter
---------
t : Quantity 1D
Interval on which the kernel is evaluated, not necessarily
a time interval.
Returns
-------
Quantity 1D
The result of the kernel evaluation.
"""
raise NotImplementedError("The Kernel class should not be used directly; "
"instead use one of its subclasses implementing a specific kernel.")
def boundary_enclosing_area_fraction(self, fraction):
"""
Calculates the boundary :math:`b` so that the integral from
:math:`-b` to :math:`b` encloses a certain fraction of the
integral over the complete kernel. By definition the returned value
of the method boundary_enclosing_area_fraction is hence non-negative,
even if the whole probability mass of the kernel is concentrated over
negative support for inverted kernels.
Parameter
---------
fraction : float
Fraction of the whole area which has to be enclosed.
Returns
-------
Quantity scalar
Boundary of the kernel containing area `fraction` under the
kernel density.
"""
self._check_fraction(fraction)
sigma_division = 500 # arbitrary choice
interval = self.sigma / sigma_division
self._sigma_scaled = self.sigma
area = 0
counter = 0
while area < fraction:
area += (self._evaluate((counter + 1) * interval) +
self._evaluate(counter * interval)) * interval / 2
area += (self._evaluate(-1 * (counter + 1) * interval) +
self._evaluate(-1 * counter * interval)) * interval / 2
counter += 1
if(counter > 250000):
raise ValueError("fraction was chosen too close to one such "
"that in combination with integral "
"approximation errors the calculation of a "
"boundary was not possible.")
return counter * interval
def _check_fraction(self, fraction):
"""
Checks the input variable of the method boundary_enclosing_area_fraction
for validity of type and value.
Parameter
---------
fraction : float or int
Fraction of the area under the kernel function.
"""
if not isinstance(fraction, (float, int)):
raise TypeError("`fraction` must be float or integer!")
if not 0 <= fraction < 1:
raise ValueError("`fraction` must be in the interval [0, 1)!")
def median_index(self, t):
"""
Estimates the index of the Median of the kernel.
This parameter is not mandatory for symmetrical kernels but it is
required when asymmetrical kernels have to be aligned at their median.
Parameter
---------
t : Quantity 1D
Interval on which the kernel is evaluated,
Returns
-------
int
Index of the estimated value of the kernel median.
Remarks
-------
The formula in this method using retrieval of the sampling interval
from t only works for t with equidistant intervals!
The median is estimated slightly inaccurately because any probability
mass of the distribution below the minimum value of the array t is
ignored.
"""
return np.nonzero(self(t).cumsum() *
(t[len(t) - 1] - t[0]) / (len(t) - 1) >= 0.5)[0].min()
def is_symmetric(self):
"""
This method is overridden in the SymmetricKernel class, where it
returns 'True'; for the asymmetric kernels the value 'False'
returned here applies.
"""
return False
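# Usage sketch (added for illustration; not part of the original module): `t`
# must be a Quantity with the same dimensionality as `sigma`, and the kernel
# values then integrate to approximately one. GaussianKernel is defined further
# below in this module; the numbers are arbitrary example choices.
def _example_kernel_normalization():
    kernel = GaussianKernel(sigma=10 * pq.ms)
    t = np.arange(-100.0, 100.0, 0.1) * pq.ms
    values = kernel(t)                                # units of 1/ms
    area = np.trapz(values.magnitude, t.magnitude)    # dimensionless
    return area                                       # close to 1.0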
class SymmetricKernel(Kernel):
"""
Base class for symmetric kernels.
Derived from:
"""
__doc__ += Kernel.__doc__
def is_symmetric(self):
return True
class RectangularKernel(SymmetricKernel):
"""
Class for rectangular kernels
.. math::
K(t) = \\left\\{\\begin{array}{ll} \\frac{1}{2 \\tau}, & |t| < \\tau \\\\
0, & |t| \\geq \\tau \\end{array} \\right.
with :math:`\\tau = \\sqrt{3} \\sigma` corresponding to the half width
of the kernel.
Besides the standard deviation `sigma`, for consistency of interfaces the
parameter `invert` needed for asymmetric kernels also exists without
having any effect in the case of symmetric kernels.
Derived from:
"""
__doc__ += SymmetricKernel.__doc__
@property
def min_cutoff(self):
min_cutoff = np.sqrt(3.0)
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
return (0.5 / (np.sqrt(3.0) * self._sigma_scaled)) * \
(np.absolute(t) < np.sqrt(3.0) * self._sigma_scaled)
@inherit_docstring(Kernel.boundary_enclosing_area_fraction)
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return np.sqrt(3.0) * self.sigma * fraction
class TriangularKernel(SymmetricKernel):
"""
Class for triangular kernels
.. math::
K(t) = \\left\\{ \\begin{array}{ll} \\frac{1}{\\tau} (1
- \\frac{|t|}{\\tau}), & |t| < \\tau \\\\
0, & |t| \\geq \\tau \\end{array} \\right.
with :math:`\\tau = \\sqrt{6} \\sigma` corresponding to the half width of
the kernel.
Besides the standard deviation `sigma`, for consistency of interfaces the
parameter `invert` needed for asymmetric kernels also exists without
having any effect in the case of symmetric kernels.
Derived from:
"""
__doc__ += SymmetricKernel.__doc__
@property
def min_cutoff(self):
min_cutoff = np.sqrt(6.0)
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
return (1.0 / (np.sqrt(6.0) * self._sigma_scaled)) * np.maximum(
0.0,
(1.0 - (np.absolute(t) /
(np.sqrt(6.0) * self._sigma_scaled)).magnitude))
@inherit_docstring(Kernel.boundary_enclosing_area_fraction)
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return np.sqrt(6.0) * self.sigma * (1 - np.sqrt(1 - fraction))
class EpanechnikovLikeKernel(SymmetricKernel):
"""
Class for epanechnikov-like kernels
.. math::
K(t) = \\left\\{\\begin{array}{ll} (3 /(4 d)) (1 - (t / d)^2),
& |t| < d \\\\
0, & |t| \\geq d \\end{array} \\right.
with :math:`d = \\sqrt{5} \\sigma` being the half width of the kernel.
Strictly speaking, the Epanechnikov kernel has a half width of
:math:`\\sqrt{5}`. If one of its defining axioms is relaxed, the kernel
with a half width of 1 may also be called an Epanechnikov kernel
( https://de.wikipedia.org/wiki/Epanechnikov-Kern ).
Since this class allows an arbitrary width, the kernel is referred to
here as 'Epanechnikov-like'.
Besides the standard deviation `sigma`, for consistency of interfaces the
parameter `invert` needed for asymmetric kernels also exists without
having any effect in the case of symmetric kernels.
Derived from:
"""
__doc__ += SymmetricKernel.__doc__
@property
def min_cutoff(self):
min_cutoff = np.sqrt(5.0)
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
return (3.0 / (4.0 * np.sqrt(5.0) * self._sigma_scaled)) * np.maximum(
0.0,
1 - (t / (np.sqrt(5.0) * self._sigma_scaled)).magnitude ** 2)
@inherit_docstring(Kernel.boundary_enclosing_area_fraction)
def boundary_enclosing_area_fraction(self, fraction):
"""
For Epanechnikov-like kernels, integration of its density within
the boundaries 0 and :math:`b`, and then solving for :math:`b` leads
to the problem of finding the roots of a polynomial of third order.
The implemented formulas are based on the solution of this problem
given in https://en.wikipedia.org/wiki/Cubic_function,
where the following 3 solutions are given:
- :math:`u_1 = 1`: Solution on negative side
- :math:`u_2 = \\frac{-1 + i\\sqrt{3}}{2}`: Solution for larger
values than zero crossing of the density
- :math:`u_3 = \\frac{-1 - i\\sqrt{3}}{2}`: Solution for smaller
values than zero crossing of the density
The solution :math:`u_3` is the relevant one for the problem at hand,
since it involves only positive area contributions.
"""
self._check_fraction(fraction)
# Python's complex-operator cannot handle quantities, hence the
# following construction on quantities is necessary:
Delta_0 = complex(1.0 / (5.0 * self.sigma.magnitude**2), 0) / \
self.sigma.units**2
Delta_1 = complex(2.0 * np.sqrt(5.0) * fraction /
(25.0 * self.sigma.magnitude**3), 0) / \
self.sigma.units**3
C = ((Delta_1 + (Delta_1**2.0 - 4.0 * Delta_0**3.0)**(1.0 / 2.0)) /
2.0)**(1.0 / 3.0)
u_3 = complex(-1.0 / 2.0, -np.sqrt(3.0) / 2.0)
b = -5.0 * self.sigma**2 * (u_3 * C + Delta_0 / (u_3 * C))
return b.real
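# Numerical cross-check (added for illustration): the closed-form cubic
# solution above can be validated by integrating the Epanechnikov-like density
# from -b to b and comparing the enclosed area with the requested fraction.
# The parameter values are arbitrary examples.
def _example_check_epanechnikov_boundary(fraction=0.5):
    kernel = EpanechnikovLikeKernel(sigma=1.0 * pq.s)
    b = kernel.boundary_enclosing_area_fraction(fraction)
    t = np.linspace(-b.magnitude, b.magnitude, 10001) * pq.s
    area = np.trapz(kernel(t).magnitude, t.magnitude)
    return b, area    # area should be close to `fraction`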
class GaussianKernel(SymmetricKernel):
"""
Class for gaussian kernels
.. math::
K(t) = (\\frac{1}{\\sigma \\sqrt{2 \\pi}})
\\exp(-\\frac{t^2}{2 \\sigma^2})
with :math:`\\sigma` being the standard deviation.
Besides the standard deviation `sigma`, for consistency of interfaces the
parameter `invert` needed for asymmetric kernels also exists without
having any effect in the case of symmetric kernels.
Derived from:
"""
__doc__ += SymmetricKernel.__doc__
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
return (1.0 / (np.sqrt(2.0 * np.pi) * self._sigma_scaled)) * np.exp(
-0.5 * (t / self._sigma_scaled).magnitude ** 2)
@inherit_docstring(Kernel.boundary_enclosing_area_fraction)
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return self.sigma * np.sqrt(2.0) * scipy.special.erfinv(fraction)
class LaplacianKernel(SymmetricKernel):
"""
Class for laplacian kernels
.. math::
K(t) = \\frac{1}{2 \\tau} \\exp(-|\\frac{t}{\\tau}|)
with :math:`\\tau = \\sigma / \\sqrt{2}`.
Besides the standard deviation `sigma`, for consistency of interfaces the
parameter `invert` needed for asymmetric kernels also exists without
having any effect in the case of symmetric kernels.
Derived from:
"""
__doc__ += SymmetricKernel.__doc__
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
return (1 / (np.sqrt(2.0) * self._sigma_scaled)) * np.exp(
-(np.absolute(t) * np.sqrt(2.0) / self._sigma_scaled).magnitude)
@inherit_docstring(Kernel.boundary_enclosing_area_fraction)
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return -self.sigma * np.log(1.0 - fraction) / np.sqrt(2.0)
# Potential further symmetric kernels from Wiki Kernels (statistics):
# Quartic (biweight), Triweight, Tricube, Cosine, Logistics, Silverman
class ExponentialKernel(Kernel):
"""
Class for exponential kernels
.. math::
K(t) = \\left\\{\\begin{array}{ll} (1 / \\tau) \\exp{(-t / \\tau)},
& t > 0 \\\\
0, & t \\leq 0 \\end{array} \\right.
with :math:`\\tau = \\sigma`.
Derived from:
"""
__doc__ += Kernel.__doc__
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
if not self.invert:
kernel = (t >= 0) * (1. / self._sigma_scaled.magnitude) *\
np.exp((-t / self._sigma_scaled).magnitude) / t.units
elif self.invert:
kernel = (t <= 0) * (1. / self._sigma_scaled.magnitude) *\
np.exp((t / self._sigma_scaled).magnitude) / t.units
return kernel
@inherit_docstring(Kernel.boundary_enclosing_area_fraction)
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return -self.sigma * np.log(1.0 - fraction)
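# Illustrative sketch (added; not part of the original module): the asymmetric
# exponential kernel puts all of its mass on t > 0 by default and on t < 0
# when constructed with invert=True. The example values are arbitrary.
def _example_exponential_invert():
    t = np.linspace(-50.0, 50.0, 11) * pq.ms
    causal = ExponentialKernel(sigma=10 * pq.ms)(t)
    acausal = ExponentialKernel(sigma=10 * pq.ms, invert=True)(t)
    return causal, acausal   # causal vanishes for t < 0, acausal for t > 0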
class AlphaKernel(Kernel):
"""
Class for alpha kernels
.. math::
K(t) = \\left\\{\\begin{array}{ll} (1 / \\tau^2)
\\ t\\ \\exp{(-t / \\tau)}, & t > 0 \\\\
0, & t \\leq 0 \\end{array} \\right.
with :math:`\\tau = \\sigma / \\sqrt{2}`.
For the alpha kernel an analytical expression for the boundary of the
integral as a function of the area under the alpha kernel function
cannot be given. Hence in this case the value of the boundary is
determined by kernel-approximating numerical integration, inherited
from the Kernel class.
Derived from:
"""
__doc__ += Kernel.__doc__
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
@inherit_docstring(Kernel._evaluate)
def _evaluate(self, t):
if not self.invert:
kernel = (t >= 0) * 2. * (t / self._sigma_scaled**2).magnitude *\
np.exp((
-t *
| np.sqrt(2.) | numpy.sqrt |
import numpy as np
import pytest
from pytest import approx
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import X_BINNED_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import Y_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import X_BITSET_INNER_DTYPE
def _make_training_data(n_bins=256, constant_hessian=True):
rng = np.random.RandomState(42)
n_samples = 10000
# Generate some test data directly binned so as to test the grower code
# independently of the binning logic.
X_binned = rng.randint(0, n_bins - 1, size=(n_samples, 2), dtype=X_BINNED_DTYPE)
X_binned = np.asfortranarray(X_binned)
def true_decision_function(input_features):
"""Ground truth decision function
This is a very simple yet asymmetric decision tree. Therefore the
grower code should have no trouble recovering the decision function
from 10000 training samples.
"""
if input_features[0] <= n_bins // 2:
return -1
else:
return -1 if input_features[1] <= n_bins // 3 else 1
target = np.array([true_decision_function(x) for x in X_binned], dtype=Y_DTYPE)
# Assume a square loss applied to an initial model that always predicts 0
# (hardcoded for this test):
all_gradients = target.astype(G_H_DTYPE)
shape_hessians = 1 if constant_hessian else all_gradients.shape
all_hessians = np.ones(shape=shape_hessians, dtype=G_H_DTYPE)
return X_binned, all_gradients, all_hessians
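# Explanatory sketch (added): the grower fits one Newton step per leaf, i.e.
# each leaf value is -sum(gradients) / sum(hessians) (times shrinkage). Since
# the helper above sets all_gradients equal to the targets and all hessians to
# one, the fitted leaf values come out as -target, which is why the tests
# below compare predictions against -all_gradients.
def _example_leaf_value_convention():
    gradients = np.array([-1.0, -1.0, 1.0])    # per-sample gradients in one leaf
    hessians = np.ones_like(gradients)
    leaf_value = -gradients.sum() / hessians.sum()
    return leaf_value                          # -(-1 - 1 + 1) / 3 == 1 / 3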
def _check_children_consistency(parent, left, right):
# Make sure the samples are correctly dispatched from a parent to its
# children
assert parent.left_child is left
assert parent.right_child is right
# each sample from the parent is propagated to one of the two children
assert len(left.sample_indices) + len(right.sample_indices) == len(
parent.sample_indices
)
assert set(left.sample_indices).union(set(right.sample_indices)) == set(
parent.sample_indices
)
# samples are sent either to the left or the right node, never to both
assert set(left.sample_indices).intersection(set(right.sample_indices)) == set()
@pytest.mark.parametrize(
"n_bins, constant_hessian, stopping_param, shrinkage",
[
(11, True, "min_gain_to_split", 0.5),
(11, False, "min_gain_to_split", 1.0),
(11, True, "max_leaf_nodes", 1.0),
(11, False, "max_leaf_nodes", 0.1),
(42, True, "max_leaf_nodes", 0.01),
(42, False, "max_leaf_nodes", 1.0),
(256, True, "min_gain_to_split", 1.0),
(256, True, "max_leaf_nodes", 0.1),
],
)
def test_grow_tree(n_bins, constant_hessian, stopping_param, shrinkage):
X_binned, all_gradients, all_hessians = _make_training_data(
n_bins=n_bins, constant_hessian=constant_hessian
)
n_samples = X_binned.shape[0]
if stopping_param == "max_leaf_nodes":
stopping_param = {"max_leaf_nodes": 3}
else:
stopping_param = {"min_gain_to_split": 0.01}
grower = TreeGrower(
X_binned,
all_gradients,
all_hessians,
n_bins=n_bins,
shrinkage=shrinkage,
min_samples_leaf=1,
**stopping_param,
)
# The root node has not been split yet, but the best possible split has
# already been evaluated:
assert grower.root.left_child is None
assert grower.root.right_child is None
root_split = grower.root.split_info
assert root_split.feature_idx == 0
assert root_split.bin_idx == n_bins // 2
assert len(grower.splittable_nodes) == 1
# Calling split_next() applies the next split and computes the best split
# for each of the two newly introduced children nodes.
left_node, right_node = grower.split_next()
# All training samples have been split between the two nodes, approximately
# 50%/50%
_check_children_consistency(grower.root, left_node, right_node)
assert len(left_node.sample_indices) > 0.4 * n_samples
assert len(left_node.sample_indices) < 0.6 * n_samples
if grower.min_gain_to_split > 0:
# The left node is too pure: there is no gain to split it further.
assert left_node.split_info.gain < grower.min_gain_to_split
assert left_node in grower.finalized_leaves
# The right node can still be split further, this time on feature #1
split_info = right_node.split_info
assert split_info.gain > 1.0
assert split_info.feature_idx == 1
assert split_info.bin_idx == n_bins // 3
assert right_node.left_child is None
assert right_node.right_child is None
# The right split has not been applied yet. Let's do it now:
assert len(grower.splittable_nodes) == 1
right_left_node, right_right_node = grower.split_next()
_check_children_consistency(right_node, right_left_node, right_right_node)
assert len(right_left_node.sample_indices) > 0.1 * n_samples
assert len(right_left_node.sample_indices) < 0.2 * n_samples
assert len(right_right_node.sample_indices) > 0.2 * n_samples
assert len(right_right_node.sample_indices) < 0.4 * n_samples
# All the leaves are pure; it is not possible to split any further:
assert not grower.splittable_nodes
grower._apply_shrinkage()
# Check the values of the leaves:
assert grower.root.left_child.value == approx(shrinkage)
assert grower.root.right_child.left_child.value == approx(shrinkage)
assert grower.root.right_child.right_child.value == approx(-shrinkage, rel=1e-3)
def test_predictor_from_grower():
# Build a tree on the toy 3-leaf dataset to extract the predictor.
n_bins = 256
X_binned, all_gradients, all_hessians = _make_training_data(n_bins=n_bins)
grower = TreeGrower(
X_binned,
all_gradients,
all_hessians,
n_bins=n_bins,
shrinkage=1.0,
max_leaf_nodes=3,
min_samples_leaf=5,
)
grower.grow()
assert grower.n_nodes == 5 # (2 decision nodes + 3 leaves)
# Check that the node structure can be converted into a predictor
# object to perform predictions at scale
# We pass undefined binning_thresholds because we won't use predict anyway
predictor = grower.make_predictor(
binning_thresholds=np.zeros((X_binned.shape[1], n_bins))
)
assert predictor.nodes.shape[0] == 5
assert predictor.nodes["is_leaf"].sum() == 3
# Probe some predictions for each leaf of the tree
# each group of 3 samples corresponds to a condition in _make_training_data
input_data = np.array(
[
[0, 0],
[42, 99],
[128, 254],
[129, 0],
[129, 85],
[254, 85],
[129, 86],
[129, 254],
[242, 100],
],
dtype=np.uint8,
)
missing_values_bin_idx = n_bins - 1
predictions = predictor.predict_binned(input_data, missing_values_bin_idx)
expected_targets = [1, 1, 1, 1, 1, 1, -1, -1, -1]
assert np.allclose(predictions, expected_targets)
# Check that training set can be recovered exactly:
predictions = predictor.predict_binned(X_binned, missing_values_bin_idx)
assert np.allclose(predictions, -all_gradients)
@pytest.mark.parametrize(
"n_samples, min_samples_leaf, n_bins, constant_hessian, noise",
[
(11, 10, 7, True, 0),
(13, 10, 42, False, 0),
(56, 10, 255, True, 0.1),
(101, 3, 7, True, 0),
(200, 42, 42, False, 0),
(300, 55, 255, True, 0.1),
(300, 301, 255, True, 0.1),
],
)
def test_min_samples_leaf(n_samples, min_samples_leaf, n_bins, constant_hessian, noise):
rng = np.random.RandomState(seed=0)
# data = linear target, 3 features, 1 irrelevant.
X = rng.normal(size=(n_samples, 3))
y = X[:, 0] - X[:, 1]
if noise:
y_scale = y.std()
y += rng.normal(scale=noise, size=n_samples) * y_scale
mapper = _BinMapper(n_bins=n_bins)
X = mapper.fit_transform(X)
all_gradients = y.astype(G_H_DTYPE)
shape_hessian = 1 if constant_hessian else all_gradients.shape
all_hessians = np.ones(shape=shape_hessian, dtype=G_H_DTYPE)
grower = TreeGrower(
X,
all_gradients,
all_hessians,
n_bins=n_bins,
shrinkage=1.0,
min_samples_leaf=min_samples_leaf,
max_leaf_nodes=n_samples,
)
grower.grow()
predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
if n_samples >= min_samples_leaf:
for node in predictor.nodes:
if node["is_leaf"]:
assert node["count"] >= min_samples_leaf
else:
assert predictor.nodes.shape[0] == 1
assert predictor.nodes[0]["is_leaf"]
assert predictor.nodes[0]["count"] == n_samples
@pytest.mark.parametrize("n_samples, min_samples_leaf", [(99, 50), (100, 50)])
def test_min_samples_leaf_root(n_samples, min_samples_leaf):
# Make sure root node isn't split if n_samples is not at least twice
# min_samples_leaf
rng = np.random.RandomState(seed=0)
n_bins = 256
# data = linear target, 3 features, 1 irrelevant.
X = rng.normal(size=(n_samples, 3))
y = X[:, 0] - X[:, 1]
mapper = _BinMapper(n_bins=n_bins)
X = mapper.fit_transform(X)
all_gradients = y.astype(G_H_DTYPE)
all_hessians = np.ones(shape=1, dtype=G_H_DTYPE)
grower = TreeGrower(
X,
all_gradients,
all_hessians,
n_bins=n_bins,
shrinkage=1.0,
min_samples_leaf=min_samples_leaf,
max_leaf_nodes=n_samples,
)
grower.grow()
if n_samples >= min_samples_leaf * 2:
assert len(grower.finalized_leaves) >= 2
else:
assert len(grower.finalized_leaves) == 1
def assert_is_stump(grower):
# To assert that stumps are created when max_depth=1
for leaf in (grower.root.left_child, grower.root.right_child):
assert leaf.left_child is None
assert leaf.right_child is None
@pytest.mark.parametrize("max_depth", [1, 2, 3])
def test_max_depth(max_depth):
# Make sure max_depth parameter works as expected
rng = np.random.RandomState(seed=0)
n_bins = 256
n_samples = 1000
# data = linear target, 3 features, 1 irrelevant.
X = rng.normal(size=(n_samples, 3))
y = X[:, 0] - X[:, 1]
mapper = _BinMapper(n_bins=n_bins)
X = mapper.fit_transform(X)
all_gradients = y.astype(G_H_DTYPE)
all_hessians = np.ones(shape=1, dtype=G_H_DTYPE)
grower = TreeGrower(X, all_gradients, all_hessians, max_depth=max_depth)
grower.grow()
depth = max(leaf.depth for leaf in grower.finalized_leaves)
assert depth == max_depth
if max_depth == 1:
assert_is_stump(grower)
def test_input_validation():
X_binned, all_gradients, all_hessians = _make_training_data()
X_binned_float = X_binned.astype(np.float32)
with pytest.raises(NotImplementedError, match="X_binned must be of type uint8"):
TreeGrower(X_binned_float, all_gradients, all_hessians)
X_binned_C_array = np.ascontiguousarray(X_binned)
with pytest.raises(
ValueError, match="X_binned should be passed as Fortran contiguous array"
):
TreeGrower(X_binned_C_array, all_gradients, all_hessians)
def test_init_parameters_validation():
X_binned, all_gradients, all_hessians = _make_training_data()
with pytest.raises(ValueError, match="min_gain_to_split=-1 must be positive"):
TreeGrower(X_binned, all_gradients, all_hessians, min_gain_to_split=-1)
with pytest.raises(ValueError, match="min_hessian_to_split=-1 must be positive"):
TreeGrower(X_binned, all_gradients, all_hessians, min_hessian_to_split=-1)
def test_missing_value_predict_only():
# Make sure that missing values are supported at predict time even if they
# were not encountered in the training data: the missing values are
# assigned to whichever child has the most samples.
rng = np.random.RandomState(0)
n_samples = 100
X_binned = rng.randint(0, 256, size=(n_samples, 1), dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
hessians = np.ones(shape=1, dtype=G_H_DTYPE)
grower = TreeGrower(
X_binned, gradients, hessians, min_samples_leaf=5, has_missing_values=False
)
grower.grow()
# We pass undefined binning_thresholds because we won't use predict anyway
predictor = grower.make_predictor(
binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
)
# go from root to a leaf, always following node with the most samples.
# That's the path nans are supposed to take
node = predictor.nodes[0]
while not node["is_leaf"]:
left = predictor.nodes[node["left"]]
right = predictor.nodes[node["right"]]
node = left if left["count"] > right["count"] else right
prediction_main_path = node["value"]
# now build X_test with only nans, and make sure all predictions are equal
# to prediction_main_path
all_nans = np.full(shape=(n_samples, 1), fill_value=np.nan)
known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
f_idx_map = np.zeros(0, dtype=np.uint32)
y_pred = predictor.predict(all_nans, known_cat_bitsets, f_idx_map)
assert np.all(y_pred == prediction_main_path)
def test_split_on_nan_with_infinite_values():
# Make sure the split on nan situations are respected even when there are
# samples with +inf values (we set the threshold to +inf when we have a
# split on nan so this test makes sure this does not introduce edge-case
# bugs). We need to use the private API so that we can also test
# predict_binned().
X = np.array([0, 1, np.inf, np.nan, np.nan]).reshape(-1, 1)
# the gradient values will force a split on nan situation
gradients = np.array([0, 0, 0, 100, 100], dtype=G_H_DTYPE)
hessians = np.ones(shape=1, dtype=G_H_DTYPE)
bin_mapper = _BinMapper()
X_binned = bin_mapper.fit_transform(X)
n_bins_non_missing = 3
has_missing_values = True
grower = TreeGrower(
X_binned,
gradients,
hessians,
n_bins_non_missing=n_bins_non_missing,
has_missing_values=has_missing_values,
min_samples_leaf=1,
)
grower.grow()
predictor = grower.make_predictor(binning_thresholds=bin_mapper.bin_thresholds_)
# sanity check: this was a split on nan
assert predictor.nodes[0]["num_threshold"] == np.inf
assert predictor.nodes[0]["bin_threshold"] == n_bins_non_missing - 1
known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
# Make sure in particular that the +inf sample is mapped to the left child
# Note that lightgbm "fails" here and will assign the inf sample to the
# right child, even though it's a "split on nan" situation.
predictions = predictor.predict(X, known_cat_bitsets, f_idx_map)
predictions_binned = predictor.predict_binned(
X_binned, missing_values_bin_idx=bin_mapper.missing_values_bin_idx_
)
np.testing.assert_allclose(predictions, -gradients)
np.testing.assert_allclose(predictions_binned, -gradients)
def test_grow_tree_categories():
# Check that the grower produces the right predictor tree when a split is
# categorical
X_binned = np.array([[0, 1] * 11 + [1]], dtype=X_BINNED_DTYPE).T
X_binned = np.asfortranarray(X_binned)
all_gradients = np.array([10, 1] * 11 + [1], dtype=G_H_DTYPE)
all_hessians =
| np.ones(1, dtype=G_H_DTYPE) | numpy.ones |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 14:33:56 2017
@author: <NAME>
"""
import numpy as np
from copy import deepcopy
from scipy.spatial.distance import pdist,squareform
from ..QuantumChem.interaction import charge_charge
from ..General.Potential import potential_charge, potential_dipole
from ..QuantumChem.calc import GuessBonds
debug=False
#==============================================================================
# Definition of class for polarizable environment
#==============================================================================
class PolarAtom:
''' Class managing dielectric properties of individual atoms
Parameters
----------
polxy : real
Polarizability in the direction of the chemical bond
amp : real
Difference in polarizability between the main directions. Should be
smaller than polxy
per : integer
Periodicity of the system (for Fluorographene it should be 3 or 6)
phase : real
Angle of the bond from the x-axis
polz : real
Polarizability in direction perpendicular to the fluorographene plane
'''
def __init__(self,polxy,amp,per,polz,phase=0.0):
if abs(polxy)>abs(amp):
self.polxy = abs(polxy)
self.amp = amp
else:
self.polxy = abs(amp)
self.amp = polxy
self.per = per
self.phase = phase
self.polz = abs(polz)
def _polarizability4angle(self,angle):
phi = angle - self.phase
n = self.per
polar = self.polxy+self.amp*(np.cos(n*phi)-1)/2
return np.array([polar,polar,self.polz],dtype='f8')
def get_polarizability4elf(self,E):
Phi=np.arctan2(E[1],E[0])
# calculate polarizability for the angle
polar = self._polarizability4angle(Phi)
return polar
def get_induced_dipole(self,E):
polar = self.get_polarizability4elf(E)
dipole = polar*E
return dipole
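# Illustrative example (added; the parameter values are arbitrary): for a field
# applied along the x-axis, the in-plane polarizability follows
# polxy + amp * (cos(n * phi) - 1) / 2 and the induced dipole is the
# component-wise product of polarizability and field.
def _example_polar_atom():
    atom = PolarAtom(polxy=10.0, amp=2.0, per=3, polz=5.0, phase=0.0)
    E = np.array([1.0, 0.0, 0.0])         # unit field along x
    return atom.get_induced_dipole(E)     # -> array([10., 0., 0.])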
class Dielectric:
''' Class managing dielectric properties of the material
Parameters
----------
coor : numpy.array of real (dimension Nx3) where N is number of atoms
coordinates of individual atoms
pol_type : list of strings
Polarization atomic types. So far supported types are: ``CF`` for the
fluorographene carbon, ``FC`` for the fluorographene fluorine and ``C``
for the defect carbon.
charge : numpy.array or list of real (dimension N)
charges on individual atoms (initial charges)
dipole : numpy.array of real (dimension Nx3)
dipole on individual atoms (initial dipole)
polar_param : dictionary
Polarization parameters for every polarization atom type.
'''
def __init__(self,coor,pol_type,charge,dipole,polar_param):
self.coor=
| np.copy(coor) | numpy.copy |
import os
import json
import h5py
import tables
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import loadmat
from collections import Counter
from dataholder import Data
class DataMerge():
def __init__(self,split = 0,path=''):
self.split = split
self.x_train = self.y_train = self.y_domain = self.train_parts = None
self.x_val = self.y_val = self.y_valdom = self.val_parts = None
self.val_wav_name = None
self.seg = ('4_segments' in path)
def merge(self,data,train_test):
if(train_test):
if(self.x_val is None):self.x_val = data.trainX
elif(data.seg):self.x_val = {k:np.concatenate((self.x_val[k],data.trainX[k]),axis = -1) for k in data.segments}
else:self.x_val = np.concatenate((self.x_val,data.trainX),axis = -1)
if(self.y_val is None):self.y_val = data.trainY
else:self.y_val = np.concatenate((self.y_val,data.trainY),axis = 0)
if(self.y_valdom is None):self.y_valdom = data.domainY
else:self.y_valdom = self.y_valdom+data.domainY
if(self.val_parts is None):
if(data.train_parts is not None):self.val_parts = data.train_parts
else:
if(data.train_parts is not None):
self.val_parts = np.concatenate((self.val_parts,data.train_parts),axis = 0)
else:
print("Data train parts unavailable")
if(self.val_wav_name is None):
self.val_wav_name = data.wav_name
else:
self.val_wav_name = self.val_wav_name+data.wav_name
if(self.split>0):
if(self.x_train is None):self.x_train = data.valX
elif(data.seg):self.x_train = {k:np.concatenate((self.x_train[k],data.valX[k]),axis = 1) for k in data.segments}
else:self.x_train = np.concatenate((self.x_train,data.valX),axis = 1)
if(self.y_train is None):self.y_train = data.valY
else:self.y_train = np.concatenate((self.y_train,data.valY),axis = 0)
if(self.y_domain is None):self.y_domain = data.valdomY
else:self.y_domain = self.y_domain+data.valdomY
if(self.train_parts is None):self.train_parts = data.val_parts;
else:
if(data.val_parts is not None):
self.train_parts = np.concatenate((self.train_parts,data.val_parts),axis = 0)
else:
print("Data train parts unavailable")
else:
if(self.x_train is None):self.x_train = data.trainX;
elif(self.seg):self.x_train = {k:np.concatenate((self.x_train[k],data.trainX[k]),axis = 1) for k in self.segments}
else:self.x_train = np.concatenate((self.x_train,data.trainX),axis = 1)
if(self.y_train is None):self.y_train = data.trainY;
else:self.y_train = np.concatenate((self.y_train,data.trainY),axis = 0)
if(self.y_domain is None):self.y_domain = data.domainY;
else:self.y_domain = self.y_domain+data.domainY
if(self.train_parts is None):
self.train_parts = data.train_parts;
else:
if(data.train_parts is not None):
self.train_parts = np.concatenate((self.train_parts,data.train_parts),axis = 0)
else:
print("Data train parts nai Train mergee ")
def showDistribution(self, dom = None):
if((dom == "k") | (dom == "l") | (dom == "m") | (dom == "n")):
self.train_normal = Counter(self.y_train[:, 0])[0]
self.train_abnormal = Counter(self.y_train[:, 0])[1]
self.val_normal = Counter(self.y_val[:, 0])[0]
self.val_abnormal = Counter(self.y_val[:, 0])[1]
else:
self.train_normal = Counter(self.y_train)[0]
self.train_abnormal = Counter(self.y_train)[1]
self.val_normal = Counter(self.y_val)[0]
self.val_abnormal = Counter(self.y_val)[1]
self.train_total = self.train_normal+self.train_abnormal
self.val_total = self.val_normal+self.val_abnormal
print("Train normal - ", self.train_normal,"-",self.train_abnormal," Abnormal")
print(" ", int(100*self.train_normal/self.train_total) , " - ", int(100*self.train_abnormal/self.train_total), "%")
print("Test normal - ", self.val_normal,"-",self.val_abnormal," Abnormal")
print(" ",int(100*self.val_normal/self.val_total) , " - ", int(100*self.val_abnormal/self.val_total), "%")
def getData(fold_dir, train_folds, test_folds, split = 0, shuffle = None):
try:
with open('../data/domain_filename.json', 'r') as fp:
foldname = json.load(fp)
except:
raise FileNotFoundError("The JSON file in the repository's data folder that maps domain characters to filenames was not found")
allData = DataMerge(split,fold_dir)
for c in test_folds:
allData.merge(Data(fold_dir,foldname[c],c,severe = False,split=split,shuffle=shuffle),True)
for c in train_folds:
allData.merge(Data(fold_dir,foldname[c],c,shuffle=shuffle),False)
allData.showDistribution(test_folds)
return allData.x_train, allData.y_train, allData.y_domain, allData.train_parts,allData.x_val,allData.y_val,allData.y_valdom,allData.val_parts,allData.val_wav_name
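# Hypothetical usage sketch (added): the fold directory, the domain characters
# and the '../data/domain_filename.json' mapping are assumptions about how this
# loader is meant to be called; none of them are defined in this file.
def _example_get_data():
    data = getData(fold_dir='../data/feature/folds/',   # hypothetical path
                   train_folds='abc',                   # training domain characters
                   test_folds='d',                      # held-out domain character
                   split=0)
    x_train, y_train = data[0], data[1]
    return x_train, y_train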
def reshape_folds(x, y):
x_train = []
for x1 in x:
if(isinstance(x1,dict)):
xD = {}
for k in x1.keys():
xd = np.transpose(x1[k][:, :])
xd = np.reshape(xd, [xd.shape[0], xd.shape[1], 1])
xD[k] = xd
x_train.append(xD)
else:
x1 =
| np.transpose(x1[:, :]) | numpy.transpose |
from torch.nn import Parameter
from torch.nn import functional as F
import torch
import numpy as np
from neuralpredictors.layers.readouts import FullGaussian2d, MultiReadout
from neuralpredictors.utils import get_module_output
class ZIGReadout(FullGaussian2d):
def __init__(self, in_shape, outdims, bias, inferred_params_n=1, **kwargs):
self.inferred_params_n = inferred_params_n
super().__init__(in_shape, outdims, bias, **kwargs)
if bias:
bias = Parameter(torch.Tensor(inferred_params_n, outdims))
self.register_parameter("bias", bias)
else:
self.register_parameter("bias", None)
def initialize_features(self, match_ids=None, shared_features=None):
"""
The internal attribute `_original_features` in this function denotes whether this instance of the FullGaussian2d
learns the original features (True) or if it uses a copy of the features from another instance of FullGaussian2d
via the `shared_features` (False). If it uses a copy, the feature_l1 regularizer for this copy will return 0
"""
c, w, h = self.in_shape
self._original_features = True
if match_ids is not None:
assert self.outdims == len(match_ids)
n_match_ids = len(np.unique(match_ids))
if shared_features is not None:
assert shared_features.shape == (
self.inferred_params_n,
1,
c,
1,
n_match_ids,
), f"shared features need to have shape ({self.inferred_params_n}, 1, {c}, 1, {n_match_ids})"
self._features = shared_features
self._original_features = False
else:
self._features = Parameter(
torch.Tensor(self.inferred_params_n, 1, c, 1, n_match_ids)
) # feature weights for each channel of the core
self.scales = Parameter(
torch.Tensor(self.inferred_params_n, 1, 1, 1, self.outdims)
) # feature weights for each channel of the core
_, sharing_idx = np.unique(match_ids, return_inverse=True)
self.register_buffer("feature_sharing_index", torch.from_numpy(sharing_idx))
self._shared_features = True
else:
self._features = Parameter(
torch.Tensor(self.inferred_params_n, 1, c, 1, self.outdims)
) # feature weights for each channel of the core
self._shared_features = False
def forward(self, x, sample=None, shift=None, out_idx=None):
"""
Propagates the input forwards through the readout
Args:
x: input data
sample (bool/None): sample determines whether we draw a sample from Gaussian distribution, N(mu,sigma), defined per neuron
or use the mean, mu, of the Gaussian distribution without sampling.
if sample is None (default), samples from the N(mu,sigma) during training phase and
fixes to the mean, mu, during evaluation phase.
if sample is True/False, overrides the model_state (i.e training or eval) and does as instructed
shift (Tensor or None): shifts the location of the grid (from eye-tracking data)
out_idx (array of bool or int, or None): index of neurons to be predicted
Returns:
y: neuronal activity
"""
N, c, w, h = x.size()
c_in, w_in, h_in = self.in_shape
if (c_in, w_in, h_in) != (c, w, h):
raise ValueError(
"the specified feature map dimension is not the readout's expected input dimension"
)
feat = self.features.view(self.inferred_params_n, 1, c, self.outdims)
bias = self.bias
outdims = self.outdims
if self.batch_sample:
# sample the grid_locations separately per image per batch
grid = self.sample_grid(
batch_size=N, sample=sample
) # sample determines sampling from Gaussian
else:
# use one sampled grid_locations for all images in the batch
grid = self.sample_grid(batch_size=1, sample=sample).expand(
N, outdims, 1, 2
)
if out_idx is not None:
if isinstance(out_idx, np.ndarray):
if out_idx.dtype == bool:
out_idx =
| np.where(out_idx) | numpy.where |
#!/usr/bin/env python3
"""
New colormap classes and colormap normalization classes.
"""
# NOTE: Avoid colormap/color name conflicts by checking
# set(plot.colors._cmap_database) & set(plot.colors.mcolors._colors_full_map)
# whenever new default colormaps are added. Currently result is
# {'gray', 'marine', 'ocean', 'pink'} which correspond to MATLAB and GNUplot maps.
import json
import os
import re
from numbers import Integral, Number
from xml.etree import ElementTree
import matplotlib.cm as mcm
import matplotlib.colors as mcolors
import numpy as np
import numpy.ma as ma
from matplotlib import rcParams
from .internals import ic # noqa: F401
from .internals import _not_none, docstring, warnings
from .utils import to_rgb, to_rgba, to_xyz, to_xyza
if hasattr(mcm, '_cmap_registry'):
_cmap_database_attr = '_cmap_registry'
else:
_cmap_database_attr = 'cmap_d'
_cmap_database = getattr(mcm, _cmap_database_attr)
__all__ = [
'make_mapping_array',
'ListedColormap',
'LinearSegmentedColormap',
'PerceptuallyUniformColormap',
'DiscreteNorm',
'DivergingNorm',
'LinearSegmentedNorm',
'ColorDatabase',
'ColormapDatabase',
'BinNorm', 'MidpointNorm', 'ColorDict', 'CmapDict', # deprecated
]
HEX_PATTERN = r'#(?:[0-9a-fA-F]{3,4}){2}' # 6-8 digit hex
CMAPS_DIVERGING = tuple(
(key1.lower(), key2.lower()) for key1, key2 in (
('PiYG', 'GYPi'),
('PRGn', 'GnRP'),
('BrBG', 'GBBr'),
('PuOr', 'OrPu'),
('RdGy', 'GyRd'),
('RdBu', 'BuRd'),
('RdYlBu', 'BuYlRd'),
('RdYlGn', 'GnYlRd'),
('BR', 'RB'),
('CoolWarm', 'WarmCool'),
('ColdHot', 'HotCold'),
('NegPos', 'PosNeg'),
('DryWet', 'WetDry')
)
)
docstring.snippets['cmap.init'] = """
name : str
The colormap name.
segmentdata : dict-like
Mapping containing the keys ``'hue'``, ``'saturation'``, and
``'luminance'``. The key values can be callable functions that
return channel values given a colormap index, or 3-column
arrays indicating the coordinates and channel transitions.
See `~matplotlib.colors.LinearSegmentedColormap` for a more
detailed explanation.
N : int, optional
Number of points in the colormap lookup table.
Default is :rc:`image.lut`.
alpha : float, optional
The opacity for the entire colormap. Overrides the input
segment data.
cyclic : bool, optional
Whether the colormap is cyclic. If ``True``, this changes how the
leftmost and rightmost color levels are selected, and `extend` can
only be ``'neither'`` (a warning will be issued otherwise).
"""
docstring.snippets['cmap.gamma'] = """
gamma : float, optional
Sets `gamma1` and `gamma2` to this identical value.
gamma1 : float, optional
If >1, makes low saturation colors more prominent. If <1,
makes high saturation colors more prominent. Similar to the
`HCLWizard <http://hclwizard.org:64230/hclwizard/>`_ option.
See `make_mapping_array` for details.
gamma2 : float, optional
If >1, makes high luminance colors more prominent. If <1,
makes low luminance colors more prominent. Similar to the
`HCLWizard <http://hclwizard.org:64230/hclwizard/>`_ option.
See `make_mapping_array` for details.
"""
def _get_channel(color, channel, space='hcl'):
"""
Get the hue, saturation, or luminance channel value from the input color.
The color name `color` can optionally be a string with the format
``'color+x'`` or ``'color-x'``, where `x` specifies the offset from the
channel value.
Parameters
----------
color : color-spec
The color. Sanitized with `to_rgba`.
channel : {'hue', 'chroma', 'saturation', 'luminance'}
The HCL channel to be retrieved.
space : {'hcl', 'hpl', 'hsl', 'hsv', 'rgb'}, optional
The colorspace for the corresponding channel value.
Returns
-------
value : float
The channel value.
"""
# Interpret channel
if callable(color) or isinstance(color, Number):
return color
if channel == 'hue':
channel = 0
elif channel in ('chroma', 'saturation'):
channel = 1
elif channel == 'luminance':
channel = 2
else:
raise ValueError(f'Unknown channel {channel!r}.')
# Interpret string or RGB tuple
offset = 0
if isinstance(color, str):
match = re.search('([-+][0-9.]+)$', color)
if match:
offset = float(match.group(0))
color = color[:match.start()]
return offset + to_xyz(color, space)[channel]
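# Quick illustration (added): the 'color+x' / 'color-x' syntax offsets a single
# channel of a named color, e.g. "the luminance of 'red' plus 20". The exact
# numbers depend on the colorspace conversion done by to_xyz.
def _example_get_channel():
    base = _get_channel('red', 'luminance', space='hcl')
    shifted = _get_channel('red+20', 'luminance', space='hcl')
    return base, shifted   # shifted == base + 20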
def _clip_colors(colors, clip=True, gray=0.2, warn=False):
"""
Clip impossible colors rendered in an HSL-to-RGB colorspace conversion.
Used by `PerceptuallyUniformColormap`. If `clip` is ``False``, out-of-range
channel values are replaced with the `gray` value.
Parameters
----------
colors : list of length-3 tuples
The RGB colors.
clip : bool, optional
If `clip` is ``True`` (the default), RGB channel values >1 are clipped
to 1. Otherwise, the color is masked out as gray.
gray : float, optional
The gray channel value used to replace out-of-range channel values
when `clip` is ``False``.
warn : bool, optional
Whether to issue warning when colors are clipped.
"""
colors = np.array(colors)
over = colors > 1
under = colors < 0
if clip:
colors[under] = 0
colors[over] = 1
else:
colors[under | over] = gray
if warn:
msg = 'Clipped' if clip else 'Invalid'
for i, name in enumerate('rgb'):
if under[:, i].any():
warnings._warn_proplot(f'{msg} {name!r} channel ( < 0).')
if over[:, i].any():
warnings._warn_proplot(f'{msg} {name!r} channel ( > 1).')
return colors
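# Small example (added): channel values produced by an HSL-to-RGB conversion can
# fall outside [0, 1]. By default they are clipped; with clip=False the
# out-of-range channel values are replaced by the `gray` value instead.
def _example_clip_colors():
    colors = [(1.2, 0.5, -0.1), (0.3, 0.4, 0.5)]
    clipped = _clip_colors(colors, clip=True)    # first color -> (1.0, 0.5, 0.0)
    grayed = _clip_colors(colors, clip=False)    # first color -> (0.2, 0.5, 0.2)
    return clipped, grayed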
def _make_segmentdata_array(values, coords=None, ratios=None):
"""
Return a segmentdata array or callable given the input colors
and coordinates.
Parameters
----------
values : list of float
The channel values.
coords : list of float, optional
The segment coordinates.
ratios : list of float, optional
The relative length of each segment transition.
"""
# Allow callables
if callable(values):
return values
values = np.atleast_1d(values)
if len(values) == 1:
value = values[0]
return [(0, value, value), (1, value, value)]
# Get coordinates
if not np.iterable(values):
raise TypeError(f'Colors must be iterable, got {values!r}.')
if coords is not None:
coords = np.atleast_1d(coords)
if ratios is not None:
warnings._warn_proplot(
f'Segment coordinates were provided, ignoring '
f'ratios={ratios!r}.'
)
if len(coords) != len(values) or coords[0] != 0 or coords[-1] != 1:
raise ValueError(
f'Coordinates must range from 0 to 1, got {coords!r}.'
)
elif ratios is not None:
coords = np.atleast_1d(ratios)
if len(coords) != len(values) - 1:
raise ValueError(
f'Need {len(values)-1} ratios for {len(values)} colors, '
f'but got {len(ratios)} ratios.'
)
coords = np.concatenate(([0], np.cumsum(coords)))
coords = coords / np.max(coords) # normalize to 0-1
else:
coords = np.linspace(0, 1, len(values))
# Build segmentdata array
array = []
for c, value in zip(coords, values):
array.append((c, value, value))
return array
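# Worked example (added): three channel values with ratios (2, 1) produce the
# segment coordinates 0, 2/3 and 1, i.e. the first transition occupies twice as
# much of the colormap as the second.
def _example_make_segmentdata_array():
    return _make_segmentdata_array([20, 60, 90], ratios=(2, 1))
    # -> [(0.0, 20, 20), (0.666..., 60, 60), (1.0, 90, 90)]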
def make_mapping_array(N, data, gamma=1.0, inverse=False):
r"""
Similar to `~matplotlib.colors.makeMappingArray` but permits
*circular* hue gradations along 0-360, disables clipping of
out-of-bounds channel values, and uses fancier "gamma" scaling.
Parameters
----------
N : int
Number of points in the colormap lookup table.
data : 2D array-like
List of :math:`(x, y_0, y_1)` tuples specifying the channel jump (from
:math:`y_0` to :math:`y_1`) and the :math:`x` coordinate of that
transition (ranges between 0 and 1).
See `~matplotlib.colors.LinearSegmentedColormap` for details.
gamma : float or list of float, optional
To obtain channel values between coordinates :math:`x_i` and
:math:`x_{i+1}` in rows :math:`i` and :math:`i+1` of `data`,
we use the formula:
.. math::
y = y_{1,i} + w_i^{\gamma_i}*(y_{0,i+1} - y_{1,i})
where :math:`\gamma_i` corresponds to `gamma` and the weight
:math:`w_i` ranges from 0 to 1 between rows ``i`` and ``i+1``.
If `gamma` is float, it applies to every transition. Otherwise,
its length must equal ``data.shape[0]-1``.
This is like the `gamma` used with matplotlib's
`~matplotlib.colors.makeMappingArray`, except it controls the
weighting for transitions *between* each segment data coordinate rather
than the coordinates themselves. This makes more sense for
`PerceptuallyUniformColormap`\ s because they usually consist of just
one linear transition for *sequential* colormaps and two linear
transitions for *diverging* colormaps -- and in the latter case, it
is often desirable to modify both "halves" of the colormap in the
same way.
inverse : bool, optional
If ``True``, :math:`w_i^{\gamma_i}` is replaced with
:math:`1 - (1 - w_i)^{\gamma_i}` -- that is, when `gamma` is greater
than 1, this weights colors toward *higher* channel values instead
of lower channel values.
This is implemented in case we want to apply *equal* "gamma scaling"
to different HSL channels in different directions. Usually, this
is done to weight low data values with higher luminance *and* lower
saturation, thereby emphasizing "extreme" data values with stronger
colors.
"""
# Allow for *callable* instead of linearly interpolating between segments
gammas = np.atleast_1d(gamma)
if (gammas < 0.01).any() or (gammas > 10).any():
raise ValueError('Gamma can only be in range [0.01,10].')
if callable(data):
if len(gammas) > 1:
raise ValueError(
'Only one gamma allowed for functional segmentdata.')
x = np.linspace(0, 1, N)**gamma
lut = np.array(data(x), dtype=float)
return lut
# Get array
data = np.array(data)
shape = data.shape
if len(shape) != 2 or shape[1] != 3:
raise ValueError('Data must be nx3 format.')
if len(gammas) != 1 and len(gammas) != shape[0] - 1:
raise ValueError(
f'Need {shape[0]-1} gammas for {shape[0]}-level mapping array, '
f'but got {len(gamma)}.'
)
if len(gammas) == 1:
gammas = np.repeat(gammas, shape[:1])
# Get indices
x = data[:, 0]
y0 = data[:, 1]
y1 = data[:, 2]
if x[0] != 0.0 or x[-1] != 1.0:
raise ValueError(
'Data mapping points must start with x=0 and end with x=1.'
)
if (np.diff(x) < 0).any():
raise ValueError(
'Data mapping points must have x in increasing order.'
)
x = x * (N - 1)
# Get distances from the segmentdata entry to the *left* for each requested
# level, excluding ends at (0,1), which must exactly match segmentdata ends
xq = (N - 1) * np.linspace(0, 1, N)
# where xq[i] must be inserted so it is larger than x[ind[i]-1] but
# smaller than x[ind[i]]
ind = np.searchsorted(x, xq)[1:-1]
distance = (xq[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
# Scale distances in each segment by input gamma
# The ui are starting-points, the ci are counts from that point
# over which segment applies (i.e. where to apply the gamma), the relevant
# 'segment' is to the *left* of index returned by searchsorted
_, uind, cind = np.unique(ind, return_index=True, return_counts=True)
for ui, ci in zip(uind, cind): # length should be N-1
# the relevant segment is to *left* of this number
gamma = gammas[ind[ui] - 1]
if gamma == 1:
continue
ireverse = False
if ci > 1: # i.e. more than 1 color in this 'segment'
# by default want to weight toward a *lower* channel value
ireverse = ((y0[ind[ui]] - y1[ind[ui] - 1]) < 0)
if inverse:
ireverse = not ireverse
if ireverse:
distance[ui:ui + ci] = 1 - (1 - distance[ui:ui + ci])**gamma
else:
distance[ui:ui + ci] **= gamma
# Perform successive linear interpolations all rolled up into one equation
lut = np.zeros((N,), float)
lut[1:-1] = distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1]
lut[0] = y1[0]
lut[-1] = y0[-1]
return lut
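# Worked example (added): a single segment from 0 to 100 sampled at 5 points.
# With gamma=1 the lookup table is linear; gamma > 1 pulls the interior values
# toward the lower end of the transition.
def _example_make_mapping_array():
    data = [(0.0, 0.0, 0.0), (1.0, 100.0, 100.0)]
    linear = make_mapping_array(5, data, gamma=1.0)   # -> [0, 25, 50, 75, 100]
    skewed = make_mapping_array(5, data, gamma=2.0)   # -> [0, 6.25, 25, 56.25, 100]
    return linear, skewed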
class _Colormap(object):
"""
Mixin class used to add some helper methods.
"""
def _get_data(self, ext, alpha=True):
"""
Return a string containing the colormap colors for saving.
Parameters
----------
ext : {'hex', 'txt', 'rgb'}
The filename extension.
alpha : bool, optional
Whether to include an opacity column.
"""
# Get lookup table colors and filter out bad ones
if not self._isinit:
self._init()
colors = self._lut[:-3, :]
# Get data string
if ext == 'hex':
data = ', '.join(mcolors.to_hex(color) for color in colors)
elif ext in ('txt', 'rgb'):
rgb = mcolors.to_rgba if alpha else mcolors.to_rgb
data = [rgb(color) for color in colors]
data = '\n'.join(
' '.join(f'{num:0.6f}' for num in line) for line in data
)
else:
raise ValueError(
f'Invalid extension {ext!r}. Options are: '
"'hex', 'txt', 'rgb', 'rgba'."
)
return data
def _parse_path(self, path, dirname='.', ext=''):
"""
Parse the user input path.
Parameters
----------
dirname : str, optional
The default directory.
ext : str, optional
The default extension.
"""
path = os.path.expanduser(path or '')
dirname = os.path.expanduser(dirname or '')
if not path or os.path.isdir(path):
path = os.path.join(path or dirname, self.name) # default name
dirname, basename = os.path.split(path) # default to current directory
path = os.path.join(dirname or '.', basename)
if not os.path.splitext(path)[-1]:
path = path + '.' + ext # default file extension
return path
@classmethod
def _from_file(cls, filename, warn_on_failure=False):
"""
Read generalized colormap and color cycle files.
"""
filename = os.path.expanduser(filename)
name, ext = os.path.splitext(os.path.basename(filename))
listed = issubclass(cls, mcolors.ListedColormap)
reversed = name[-2:] == '_r'
# Warn if loading failed during `register_cmaps` or `register_cycles`
# but raise error if user tries to load a file.
def _warn_or_raise(msg, error=RuntimeError):
if warn_on_failure:
warnings._warn_proplot(msg)
else:
raise error(msg)
if not os.path.exists(filename):
return _warn_or_raise(f'File {filename!r} not found.', FileNotFoundError)
# Directly read segmentdata json file
# NOTE: This is special case! Immediately return name and cmap
ext = ext[1:]
if ext == 'json':
if listed:
raise TypeError(
f'Cannot load listed colormaps from json files ({filename!r}).'
)
try:
with open(filename, 'r') as fh:
data = json.load(fh)
except json.JSONDecodeError:
return _warn_or_raise(
f'Failed to load {filename!r}.', json.JSONDecodeError
)
kw = {}
for key in ('cyclic', 'gamma', 'gamma1', 'gamma2', 'space'):
if key in data:
kw[key] = data.pop(key, None)
if 'red' in data:
cmap = LinearSegmentedColormap(name, data)
else:
cmap = PerceptuallyUniformColormap(name, data, **kw)
if reversed:
cmap = cmap.reversed(name[:-2])
return cmap
# Read .rgb and .rgba files
if ext in ('txt', 'rgb'):
# Load
# NOTE: This appears to be biggest import time bottleneck! Increases
# time from 0.05s to 0.2s, with numpy loadtxt or with this regex thing.
delim = re.compile(r'[,\s]+')
data = [
delim.split(line.strip())
for line in open(filename)
if line.strip() and line.strip()[0] != '#'
]
try:
data = [[float(num) for num in line] for line in data]
except ValueError:
return _warn_or_raise(
f'Failed to load {filename!r}. Expected a table of comma '
'or space-separated values.'
)
# Build x-coordinates and standardize shape
data = np.array(data)
if data.shape[1] not in (3, 4):
return _warn_or_raise(
f'Failed to load {filename!r}. Got {data.shape[1]} columns, '
f'but expected 3 or 4.'
)
if ext[0] != 'x': # i.e. no x-coordinates specified explicitly
x = np.linspace(0, 1, data.shape[0])
else:
x, data = data[:, 0], data[:, 1:]
# Load XML files created with scivizcolor
# Adapted from script found here:
# https://sciviscolor.org/matlab-matplotlib-pv44/
elif ext == 'xml':
try:
doc = ElementTree.parse(filename)
except ElementTree.ParseError:
return _warn_or_raise(
f'Failed to load {filename!r}. Parsing error.',
ElementTree.ParseError
)
x, data = [], []
for s in doc.getroot().findall('.//Point'):
# Verify keys
if any(key not in s.attrib for key in 'xrgb'):
return _warn_or_raise(
f'Failed to load {filename!r}. Missing an x, r, g, or b '
'specification inside one or more <Point> tags.'
)
# Get data
color = []
for key in 'rgbao': # o for opacity
if key not in s.attrib:
continue
color.append(float(s.attrib[key]))
x.append(float(s.attrib['x']))
data.append(color)
# Convert to array
if not all(
len(data[0]) == len(color) and len(color) in (3, 4)
for color in data
):
return _warn_or_raise(
f'Failed to load {filename!r}. Unexpected number of channels '
'or mixed channels across <Point> tags.'
)
# Read hex strings
elif ext == 'hex':
# Read arbitrary format
string = open(filename).read() # into single string
data = re.findall(HEX_PATTERN, string)
if len(data) < 2:
return _warn_or_raise(
f'Failed to load {filename!r}. Hex strings not found.'
)
# Convert to array
x = np.linspace(0, 1, len(data))
data = [to_rgb(color) for color in data]
# Invalid extension
else:
return _warn_or_raise(
f'Colormap or cycle file {filename!r} has unknown extension.'
)
# Standardize and reverse if necessary to cmap
# TODO: Document the fact that filenames ending in _r return a reversed
# version of the colormap stored in that file.
x, data = np.array(x), np.array(data)
x = (x - x.min()) / (x.max() - x.min()) # ensure they span 0-1
if np.any(data > 2): # from 0-255 to 0-1
data = data / 255
if reversed:
name = name[:-2]
data = data[::-1, :]
x = 1 - x[::-1]
if listed:
return ListedColormap(data, name)
else:
data = [(x, color) for x, color in zip(x, data)]
return LinearSegmentedColormap.from_list(name, data)
class LinearSegmentedColormap(mcolors.LinearSegmentedColormap, _Colormap):
r"""
New base class for all `~matplotlib.colors.LinearSegmentedColormap`\ s.
"""
def __str__(self):
return type(self).__name__ + f'(name={self.name!r})'
def __repr__(self):
string = f" 'name': {self.name!r},\n"
if hasattr(self, '_space'):
string += f" 'space': {self._space!r},\n"
if hasattr(self, '_cyclic'):
string += f" 'cyclic': {self._cyclic!r},\n"
for key, data in self._segmentdata.items():
if callable(data):
string += f' {key!r}: <function>,\n'
else:
string += (
f' {key!r}: [{data[0][2]:.3f}, ..., {data[-1][1]:.3f}],\n'
)
return type(self).__name__ + '({\n' + string + '})'
@docstring.add_snippets
def __init__(
self, name, segmentdata, N=None, gamma=1,
cyclic=False, alpha=None,
):
"""
Parameters
----------
%(cmap.init)s
gamma : float, optional
Gamma scaling used for the *x* coordinates.
"""
N = _not_none(N, rcParams['image.lut'])
super().__init__(name, segmentdata, N=N, gamma=gamma)
self._cyclic = cyclic
if alpha is not None:
self.set_alpha(alpha)
def append(self, *args, ratios=None, name=None, N=None, **kwargs):
"""
Return the concatenation of this colormap with the
input colormaps.
Parameters
----------
*args
Instances of `LinearSegmentedColormap`.
ratios : list of float, optional
Relative extent of each component colormap in the merged colormap.
Length must equal ``len(args) + 1``.
For example, ``cmap1.append(cmap2, ratios=(2, 1))`` generates
            a colormap with the left two-thirds containing colors from
``cmap1`` and the right one-third containing colors from ``cmap2``.
name : str, optional
The colormap name. Default is
``'_'.join(cmap.name for cmap in args)``.
N : int, optional
The number of points in the colormap lookup table.
Default is :rc:`image.lut` times ``len(args)``.
Other parameters
----------------
**kwargs
Passed to `LinearSegmentedColormap.copy`
or `PerceptuallyUniformColormap.copy`.
Returns
-------
`LinearSegmentedColormap`
The colormap.
"""
# Parse input args
if not args:
return self
if not all(
isinstance(cmap, mcolors.LinearSegmentedColormap) for cmap in args
):
raise TypeError(
f'Input arguments {args!r} must be instances of '
'LinearSegmentedColormap.'
)
# PerceptuallyUniformColormap --> LinearSegmentedColormap conversions
cmaps = [self, *args]
spaces = {getattr(cmap, '_space', None) for cmap in cmaps}
to_linear_segmented = len(spaces) > 1 # mixed colorspaces *or* mixed types
if to_linear_segmented:
for i, cmap in enumerate(cmaps):
if isinstance(cmap, PerceptuallyUniformColormap):
cmaps[i] = cmap.to_linear_segmented()
# Combine the segmentdata, and use the y1/y2 slots at merge points so
# we never interpolate between end colors of different colormaps
segmentdata = {}
if name is None:
name = '_'.join(cmap.name for cmap in cmaps)
if not np.iterable(ratios):
ratios = [1] * len(cmaps)
ratios = np.asarray(ratios) / np.sum(ratios)
x0 = np.append(0, np.cumsum(ratios)) # coordinates for edges
xw = x0[1:] - x0[:-1] # widths between edges
for key in cmaps[0]._segmentdata.keys(): # not self._segmentdata
# Callable segments
# WARNING: If just reference a global 'funcs' list from inside the
# 'data' function it can get overwritten in this loop. Must
# embed 'funcs' into the definition using a keyword argument.
callable_ = [callable(cmap._segmentdata[key]) for cmap in cmaps]
if all(callable_): # expand range from x-to-w to 0-1
funcs = [cmap._segmentdata[key] for cmap in cmaps]
def xyy(ix, funcs=funcs):
ix = np.atleast_1d(ix)
kx = np.empty(ix.shape)
for j, jx in enumerate(ix.flat):
idx = max(np.searchsorted(x0, jx) - 1, 0)
kx.flat[j] = funcs[idx]((jx - x0[idx]) / xw[idx])
return kx
# Concatenate segment arrays and make the transition at the
# seam instant so we *never interpolate* between end colors
# of different maps.
elif not any(callable_):
datas = []
for x, w, cmap in zip(x0[:-1], xw, cmaps):
xyy =
|
np.array(cmap._segmentdata[key])
|
numpy.array
|
"""
Functions for coordinate transformations.
Contains transformations from/to the following coordinate systems:
GSE, GSM, SM, GEI, GEO, MAG, J2000
Times are in Unix seconds for consistency.
Notes
-----
These functions are in cotrans_lib.pro of IDL SPEDAS.
For a comparison to IDL, see: http://spedas.org/wiki/index.php?title=Cotrans
"""
import numpy as np
from datetime import datetime
from pyspedas.utilities.igrf import set_igrf_params
from pyspedas.utilities.j2000 import set_j2000_params
def get_time_parts(time_in):
"""
Split time into year, doy, hours, minutes, seconds.fsec.
Parameters
----------
time_in: list of float
Time array.
Returns
-------
iyear: array of int
Year.
idoy: array of int
Day of year.
ih: array of int
Hours.
im: array of int
Minutes.
isec: array of float
Seconds and milliseconds.
"""
tnp = np.vectorize(datetime.utcfromtimestamp)(time_in[:])
iyear = np.array([tt.year for tt in tnp])
idoy = np.array([tt.timetuple().tm_yday for tt in tnp])
ih = np.array([tt.hour for tt in tnp])
im = np.array([tt.minute for tt in tnp])
isec = np.array([tt.second + tt.microsecond/1000000.0 for tt in tnp])
return iyear, idoy, ih, im, isec
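# Illustrative usage (the Unix epoch, 1970-01-01 00:00:00 UTC):
# >>> iyear, idoy, ih, im, isec = get_time_parts([0.0])
# >>> int(iyear[0]), int(idoy[0]), int(ih[0]), int(im[0]), float(isec[0])
# (1970, 1, 0, 0, 0.0)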
def csundir_vect(time_in):
"""
Calculate the direction of the sun.
Parameters
----------
time_in: list of float
Time array.
Returns
-------
gst: list of float
        Greenwich mean sidereal time (radians).
slong: list of float
Longitude along ecliptic (radians).
sra: list of float
Right ascension (radians).
sdec: list of float
Declination of the sun (radians).
obliq: list of float
Inclination of Earth's axis (radians).
"""
iyear, idoy, ih, im, isec = get_time_parts(time_in)
    # Julian day and greenwich mean sidereal time
pisd = np.pi / 180.0
fday = (ih * 3600.0 + im * 60.0 + isec)/86400.0
jj = 365 * (iyear-1900) + np.fix((iyear-1901)/4) + idoy
dj = jj - 0.5 + fday
gst = np.mod(279.690983 + 0.9856473354 * dj + 360.0 * fday + 180.0,
360.0) * pisd
# longitude along ecliptic
vl = np.mod(279.696678 + 0.9856473354 * dj, 360.0)
t = dj / 36525.0
g = np.mod(358.475845 + 0.985600267 * dj, 360.0) * pisd
slong = (vl + (1.91946 - 0.004789 * t) * np.sin(g) + 0.020094 *
np.sin(2.0 * g)) * pisd
# inclination of Earth's axis
obliq = (23.45229 - 0.0130125 * t) * pisd
sob = np.sin(obliq)
cob = np.cos(obliq)
# Aberration due to Earth's motion around the sun (about 0.0056 deg)
pre = (0.005686 - 0.025e-4 * t) * pisd
# declination of the sun
slp = slong - pre
sind = sob * np.sin(slp)
cosd = np.sqrt(1.0 - sind**2)
sc = sind / cosd
sdec = np.arctan(sc)
# right ascension of the sun
sra = np.pi - np.arctan2((cob/sob) * sc, -np.cos(slp)/cosd)
return gst, slong, sra, sdec, obliq
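# Quick sanity check (any epoch works; the Unix epoch is used here): the returned
# Greenwich mean sidereal time is an angle in radians, so it falls in [0, 2*pi).
# >>> gst, slong, sra, sdec, obliq = csundir_vect([0.0])
# >>> bool(0.0 <= gst[0] < 2 * np.pi)
# True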
def cdipdir(time_in=None, iyear=None, idoy=None):
"""
Compute dipole direction in GEO coordinates.
Parameters
----------
time_in: float
iyear: int
idoy: int
Returns
-------
list of float
Notes
-----
Compute geodipole axis direction from International Geomagnetic Reference
Field (IGRF-13) model for time interval 1970 to 2020.
For time out of interval, computation is made for nearest boundary.
Same as SPEDAS cdipdir.
"""
if (time_in is None) and (iyear is None) and (idoy is None):
print("Error: No time was provided.")
return
if (iyear is None) or (idoy is None):
iyear, idoy, ih, im, isec = get_time_parts(time_in)
# IGRF-13 parameters, 1965-2020.
minyear, maxyear, ga, ha, dg, dh = set_igrf_params()
y = iyear - (iyear % 5)
if y < minyear:
y = minyear
elif y > maxyear:
y = maxyear
year0 = y
year1 = y + 5
g0 = ga[year0]
h0 = ha[year0]
maxind = max(ga.keys())
g = g0
h = h0
# Interpolate for dates.
f2 = (iyear + (idoy-1)/365.25 - year0)/5.
f1 = 1.0 - f2
f3 = iyear + (idoy-1)/365.25 - maxind
nloop = len(g0)
if year1 <= maxind:
# years 1970-2020
g1 = ga[year1]
h1 = ha[year1]
for i in range(nloop):
g[i] = g0[i]*f1 + g1[i]*f2
h[i] = h0[i]*f1 + h1[i]*f2
else:
# years 2020-2025
for i in range(nloop):
g[i] = g0[i] + dg[i]*f3
h[i] = h0[i] + dh[i]*f3
s = 1.0
for i in range(2, 15):
mn = int(i*(i-1.0)/2.0 + 1.0)
s = int(s*(2.0*i-3.0)/(i-1.0))
g[mn] *= s
h[mn] *= s
g[mn-1] *= s
h[mn-1] *= s
p = s
for j in range(2, i):
aa = 1.0
if j == 2:
aa = 2.0
p = p * np.sqrt(aa*(i-j+1)/(i+j-2))
mnn = int(mn + j - 1)
g[mnn] *= p
h[mnn] *= p
g[mnn-1] *= p
h[mnn-1] *= p
g10 = -g[1]
g11 = g[2]
h11 = h[2]
sq = g11**2 + h11**2
sqq = np.sqrt(sq)
sqr = np.sqrt(g10**2 + sq)
s10 = -h11/sqq
c10 = -g11/sqq
st0 = sqq/sqr
ct0 = g10/sqr
stc1 = st0*c10
sts1 = st0*s10
d1 = stc1
d2 = sts1
d3 = ct0
return d1, d2, d3
def cdipdir_vect(time_in=None, iyear=None, idoy=None):
"""
Compute dipole direction in GEO coordinates.
Similar to cdipdir but for arrays.
Parameters
----------
time_in: list of floats
iyear: list of int
idoy: list of int
Returns
-------
list of float
Notes
-----
Same as SPEDAS cdipdir_vec.
"""
if ((time_in is None or not isinstance(time_in, list))
and (iyear is None or not isinstance(iyear, list))
and (idoy is None or not isinstance(idoy, list))):
return cdipdir(time_in, iyear, idoy)
if (iyear is None) or (idoy is None):
iyear, idoy, ih, im, isec = get_time_parts(time_in)
d1 = []
d2 = []
d3 = []
    cdipdir_cache = {}
    for i in range(len(idoy)):
        # Check the cache before re-calculating the dipole direction.
        # Key on the (year, doy) pair so that different dates cannot collide.
        cached = cdipdir_cache.get((iyear[i], idoy[i]))
        if cached is not None:
            d1.append(cached[0])
            d2.append(cached[1])
            d3.append(cached[2])
            continue
        _d1, _d2, _d3 = cdipdir(None, iyear[i], idoy[i])
        d1.append(_d1)
        d2.append(_d2)
        d3.append(_d3)
        cdipdir_cache[(iyear[i], idoy[i])] = [_d1, _d2, _d3]
return np.array(d1), np.array(d2), np.array(d3)
def tgeigse_vect(time_in, data_in):
"""
GEI to GSE transformation.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
xgei, ygei, zgei cartesian GEI coordinates.
Returns
-------
xgse: list of float
Cartesian GSE coordinates.
ygse: list of float
Cartesian GSE coordinates.
zgse: list of float
Cartesian GSE coordinates.
"""
xgse, ygse, zgse = 0, 0, 0
d = np.array(data_in)
xgei, ygei, zgei = d[:, 0], d[:, 1], d[:, 2]
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 = np.sin(sdec)
ge1 = 0.0
ge2 = -np.sin(obliq)
ge3 = np.cos(obliq)
gegs1 = ge2 * gs3 - ge3 * gs2
gegs2 = ge3 * gs1 - ge1 * gs3
gegs3 = ge1 * gs2 - ge2 * gs1
xgse = gs1 * xgei + gs2 * ygei + gs3 * zgei
ygse = gegs1 * xgei + gegs2 * ygei + gegs3 * zgei
zgse = ge1 * xgei + ge2 * ygei + ge3 * zgei
return xgse, ygse, zgse
def subgei2gse(time_in, data_in):
"""
Transform data from GEI to GSE.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
Coordinates in GEI.
Returns
-------
Array of float
Coordinates in GSE.
"""
xgse, ygse, zgse = tgeigse_vect(time_in, data_in)
print("Running transformation: subgei2gse")
return np.column_stack([xgse, ygse, zgse])
def tgsegei_vect(time_in, data_in):
"""
GSE to GEI transformation.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
        xgse, ygse, zgse cartesian GSE coordinates.
Returns
-------
xgei: list of float
Cartesian GEI coordinates.
ygei: list of float
Cartesian GEI coordinates.
zgei: list of float
Cartesian GEI coordinates.
"""
xgei, ygei, zgei = 0, 0, 0
d = np.array(data_in)
xgse, ygse, zgse = d[:, 0], d[:, 1], d[:, 2]
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 = np.sin(sdec)
ge1 = 0.0
ge2 = -np.sin(obliq)
ge3 = np.cos(obliq)
gegs1 = ge2 * gs3 - ge3 * gs2
gegs2 = ge3 * gs1 - ge1 * gs3
gegs3 = ge1 * gs2 - ge2 * gs1
xgei = gs1 * xgse + gegs1 * ygse + ge1 * zgse
ygei = gs2 * xgse + gegs2 * ygse + ge2 * zgse
zgei = gs3 * xgse + gegs3 * ygse + ge3 * zgse
return xgei, ygei, zgei
def subgse2gei(time_in, data_in):
"""
Transform data from GSE to GEI.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
Coordinates in GSE.
Returns
-------
Array of float
Coordinates in GEI.
"""
xgei, ygei, zgei = tgsegei_vect(time_in, data_in)
print("Running transformation: subgse2gei")
return np.column_stack([xgei, ygei, zgei])
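# Illustrative round-trip check (hypothetical time and vector): because the
# GEI<->GSE rotation is orthogonal, transforming there and back should recover
# the input to floating-point precision.
# >>> t = [0.0]
# >>> gei = [[1.0, 2.0, 3.0]]
# >>> gse = subgei2gse(t, gei)
# >>> np.allclose(subgse2gei(t, gse.tolist()), gei)
# True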
def tgsegsm_vect(time_in, data_in):
"""
Transform data from GSE to GSM.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
xgse, ygse, zgse cartesian GSE coordinates.
Returns
-------
xgsm: list of float
Cartesian GSM coordinates.
ygsm: list of float
Cartesian GSM coordinates.
zgsm: list of float
Cartesian GSM coordinates.
"""
xgsm, ygsm, zgsm = 0, 0, 0
d = np.array(data_in)
xgse, ygse, zgse = d[:, 0], d[:, 1], d[:, 2]
gd1, gd2, gd3 = cdipdir_vect(time_in)
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 = np.sin(sdec)
sgst = np.sin(gst)
cgst = np.cos(gst)
ge1 = 0.0
ge2 = -np.sin(obliq)
ge3 = np.cos(obliq)
gm1 = gd1 * cgst - gd2 * sgst
gm2 = gd1 * sgst + gd2 * cgst
gm3 = gd3
gmgs1 = gm2 * gs3 - gm3 * gs2
gmgs2 = gm3 * gs1 - gm1 * gs3
gmgs3 = gm1 * gs2 - gm2 * gs1
rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2)
cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs
sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs
xgsm = xgse
ygsm = cdze * ygse + sdze * zgse
zgsm = -sdze * ygse + cdze * zgse
return xgsm, ygsm, zgsm
def subgse2gsm(time_in, data_in):
"""
Transform data from GSE to GSM.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
Coordinates in GSE.
Returns
-------
Array of float
Coordinates in GSM.
"""
xgsm, ygsm, zgsm = tgsegsm_vect(time_in, data_in)
print("Running transformation: subgse2gsm")
return np.column_stack([xgsm, ygsm, zgsm])
def tgsmgse_vect(time_in, data_in):
"""
Transform data from GSM to GSE.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
xgsm, ygsm, zgsm GSM coordinates.
Returns
-------
xgse: list of float
Cartesian GSE coordinates.
ygse: list of float
Cartesian GSE coordinates.
zgse: list of float
Cartesian GSE coordinates.
"""
xgse, ygse, zgse = 0, 0, 0
d = np.array(data_in)
xgsm, ygsm, zgsm = d[:, 0], d[:, 1], d[:, 2]
gd1, gd2, gd3 = cdipdir_vect(time_in)
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 = np.sin(sdec)
sgst = np.sin(gst)
cgst = np.cos(gst)
ge1 = 0.0
ge2 = -np.sin(obliq)
ge3 = np.cos(obliq)
# Dipole direction in GEI system
gm1 = gd1 * cgst - gd2 * sgst
gm2 = gd1 * sgst + gd2 * cgst
gm3 = gd3
gmgs1 = gm2 * gs3 - gm3 * gs2
gmgs2 = gm3 * gs1 - gm1 * gs3
gmgs3 = gm1 * gs2 - gm2 * gs1
rgmgs = np.sqrt(gmgs1**2 + gmgs2**2 + gmgs3**2)
cdze = (ge1 * gm1 + ge2 * gm2 + ge3 * gm3)/rgmgs
sdze = (ge1 * gmgs1 + ge2 * gmgs2 + ge3 * gmgs3)/rgmgs
xgse = xgsm
ygse = cdze * ygsm - sdze * zgsm
zgse = sdze * ygsm + cdze * zgsm
return xgse, ygse, zgse
def subgsm2gse(time_in, data_in):
"""
Transform data from GSM to GSE.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
        Coordinates in GSM.
Returns
-------
Array of float
Coordinates in GSE.
"""
xgse, ygse, zgse = tgsmgse_vect(time_in, data_in)
print("Running transformation: subgsm2gse")
return np.column_stack([xgse, ygse, zgse])
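# Analogous round-trip sketch for GSE<->GSM (hypothetical inputs; requires the
# IGRF parameters used by cdipdir_vect):
# >>> t = [0.0]
# >>> gse = [[1.0, 2.0, 3.0]]
# >>> gsm = subgse2gsm(t, gse)
# >>> np.allclose(subgsm2gse(t, gsm.tolist()), gse)
# True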
def tgsmsm_vect(time_in, data_in):
"""
Transform data from GSM to SM.
Parameters
----------
time_in: list of float
Time array.
data_in: list of float
xgsm, ygsm, zgsm GSM coordinates.
Returns
-------
xsm: list of float
Cartesian SM coordinates.
ysm: list of float
Cartesian SM coordinates.
zsm: list of float
Cartesian SM coordinates.
"""
xsm, ysm, zsm = 0, 0, 0
d = np.array(data_in)
xgsm, ygsm, zgsm = d[:, 0], d[:, 1], d[:, 2]
gd1, gd2, gd3 = cdipdir_vect(time_in)
gst, slong, sra, sdec, obliq = csundir_vect(time_in)
gs1 = np.cos(sra) * np.cos(sdec)
gs2 = np.sin(sra) * np.cos(sdec)
gs3 =
|
np.sin(sdec)
|
numpy.sin
|
import numpy as np
from SimPEG.NSEM.Utils import plotDataTypes as pDt
import matplotlib.pyplot as plt
# Define the area of interest
bw, be = 557100, 557580
bs, bn = 7133340, 7133960
bb, bt = 0,480
# Visualisation of convergence curves
def convergeCurves(resList,ax1,ax2,color1,color2,fontsize):
its = np.array([res['iter'] for res in resList]).T
ind = np.argsort(its)
phid = np.array([res['phi_d'] for res in resList]).T
try:
phim = np.array([res['phi_m'] for res in resList]).T
    except KeyError:
phim = np.array([res['phi_ms'] for res in resList]).T + np.array([res['phi_mx'] for res in resList]).T + np.array([res['phi_my'] for res in resList]).T +
|
np.array([res['phi_mz'] for res in resList])
|
numpy.array
|
"""
csalt_models.py
Usage:
- import modules
Outputs:
- various
"""
import os, sys, time
import numpy as np
from vis_sample import vis_sample
from scipy.ndimage import convolve1d
from scipy.interpolate import interp1d
import scipy.constants as sc
import matplotlib.pyplot as plt
from vis_sample.classes import *
from parametric_disk import *
from astropy.io import fits, ascii
_pc = 3.09e18
def cube_to_fits(sky_image, fitsout, RA=0., DEC=0.):
# revert to proper formatting
cube = np.fliplr(np.rollaxis(sky_image.data, -1))
# extract coordinate information
im_nfreq, im_ny, im_nx = cube.shape
pixsize_x = np.abs(np.diff(sky_image.ra)[0])
pixsize_y = np.abs(np.diff(sky_image.dec)[0])
CRVAL3, CDELT3 = sky_image.freqs[0], np.diff(sky_image.freqs)[0]
# generate the primary HDU
hdu = fits.PrimaryHDU(
|
np.float32(cube)
|
numpy.float32
|
import numpy as np
def norm2(x, axis=0, length=1.0):
if axis == 1:
# batch normalization
return length * np.reshape(1 / np.linalg.norm(x, axis=axis), (x.shape[0], 1)) * x
else:
return length * x / np.linalg.norm(x)
def norm_matrix(x, P):
return np.sqrt(x @ P @ x)
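# Quick usage sketch for the two helpers above (illustrative values):
# >>> norm2(np.array([3.0, 4.0]))                        # single vector -> unit length
# array([0.6, 0.8])
# >>> norm2(np.array([[3.0, 4.0], [0.0, 2.0]]), axis=1)  # row-wise normalization
# array([[0.6, 0.8],
#        [0. , 1. ]])
# >>> norm_matrix(np.array([3.0, 4.0]), np.eye(2))       # P = identity -> Euclidean norm
# 5.0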
def get_eigen_vec(A, option='max', return_eigen_value=False):
    # np.linalg.eigh returns eigenvalues and the eigenvectors as the *columns*
    # of the second element, so select columns (not rows) when indexing.
    q = np.linalg.eigh(A)
    if option == 'max':
        i = q[0].argmax()
    elif option == 'min':
        i = q[0].argmin()
    elif option == 'max2':
        i = [q[0].argsort()[-1], q[0].argsort()[-2]]
    else:
        return None
    if return_eigen_value:
        return q[0][i], q[1][:, i]
    else:
        return q[1][:, i]
class User:
def __init__(self, K, rho, alpha, determ_pro):
self.K = K
self.rho0 = rho
self.alpha = alpha
self.determ_pro = determ_pro
self.num_acceptance_total = 0.0
self.empirical_mean_per_arm = np.zeros(self.K)
self.num_acceptance_per_arm =
|
np.zeros(self.K)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 19:25:00 2018
Modified on 11/27/2018 to Clean up comments
Modified on 12/01/2018 to resolve Save/Import Issue #1
Modified 0n 12/04/2018 to resolve Import Load Error - Issue #11
Modified on 02/25/2019 for version 0.1.0
Modified on 3/4/2019 for Issue #18
modified 1/8/2021 to clean up code as part of upgrade for pvlib 0.8
Modified 01/20/2021 to fix issue with inverter & chgcontrlr functions
@author: <NAME>
-------------------------------------------------------------------------------
Name: SPVSim.py
Purpose: Implement the GUI Features & logic required to implement the
SPVSim Application.
Copyright: (c) <NAME> 2018
License: GNU General Public License, version 3 (GPL-3.0)
This program is distributed WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.
-------------------------------------------------------------------------------
"""
from tkinter import Tk, GROOVE
from tkinter import ttk
from tkinter.filedialog import askopenfilename, asksaveasfilename
from datetime import datetime
import os.path
import pickle
import numpy as np
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from PVSite import PVSite
from PVBattery import PVBattery
from PVBatBank import PVBatBank
from PVPanel import PVPanel
from PVArray import PVArray
from PVInverter import PVInverter
from PVChgControl import PVChgControl
from SiteLoad import SiteLoad
import guiFrames as tbf
from PVUtilities import (read_resource, hourly_load, create_time_indices,
build_monthly_performance, build_overview_report,
computOutputResults)
from SPVSwbrd import spvSwitchboard
# from NasaData import *
# from Parameters import panel_types
# import dateutil.parser
class SPVSIM:
def __init__(self):
register_matplotlib_converters()
self.debug = False
self.perf_rept = False
self.errflg = False
self.wdir = os.getcwd()
self.mdldir = os.path.join(self.wdir, 'Models')
self.rscdir = os.path.join(self.wdir, 'Resources')
self.rptdir = os.path.join(self.wdir, 'Reports')
self.countries = read_resource('Countries.csv', self.rscdir)
self.modules = read_resource('CEC Modules.csv', self.rscdir)
self.inverters = read_resource('CEC Inverters.csv', self.rscdir)
self.sdw = None # System Description Window
self.rdw = None # Results Display Window
self.stw = None # Status Reporting Window
self.array_list = list()
self.filename = None # Complete path to Current File
self.site = PVSite(self)
self.bat = PVBattery(self)
self.pnl = PVPanel(self)
self.ary = self.create_solar_array(self)
self.array_list.append(self.ary)
self.sec_ary = self.create_solar_array(self)
self.array_list.append(self.sec_ary)
self.bnk = PVBatBank(self)
self.bnk.uses(self.bat)
self.inv = PVInverter(self)
self.load = SiteLoad(self)
self.chgc = PVChgControl(self)
self.array_out = None # The Solar Array Output by hour
self.times = None
self.array_out = None
self.power_flow = None
self.outrec = None
self.outfile = None
self.bringUpDisplay()
def bringUpDisplay(self):
""" Create the Display GUI """
self.root = Tk()
self.root.title("SolarPV System Simulator")
self.root.protocol('WM_DELETE_WINDOW', self.on_app_delete)
self.buildMasterDisplay()
self.root.mainloop()
def define_menuBar(self):
""" Format the Menu bar at top of display """
self.menuoptions = {'File': [('Save', self.save_file),
('Save as', self.save_file),
('Load File', self.import_file),
('Exit', self.on_app_delete)],
'Display': [('Daily Load', self.show_load_profile),
{'Array Performance': [
('Overview', self.show_array_performance),
('Best Day', self.show_array_best_day),
('Worst Day', self.show_array_worst_day)]},
{'Power Delivery': [
('Performance', self.show_pwr_performance),
('Best Day', self.show_pwr_best_day ),
('Worst Day', self.show_pwr_worst_day )]},
{'Battery Performance': [
('Overview', self.bnk.show_bank_overview),
('Bank Drain', self.bnk.show_bank_drain),
('Bank SOC', self.bnk.show_bank_soc)]}
],
'Report': [('System Description',
self.create_overview_report),
('Site Load', self.print_load)]}
def buildMasterDisplay(self):
""" Build the Display Window """
self.define_menuBar()
tbf.build_menubar(self.root, self.menuoptions)
self.sdw = ttk.LabelFrame(self.root, text="System Description", borderwidth= 5,
width= 500, height= 500, padding= 15, relief= GROOVE)
self.sdw.grid(row= 1, column= 1)
self.rdw = ttk.Labelframe(self.root, text='Results', borderwidth= 5,
width= 500, height= 500, padding= 15, relief= GROOVE)
self.rdw.grid(row=1, column=3)
self.stw = tbf.status_window(self.root, 'Status Reporting', [2, 1], 3 )
self.buildSwitchboard()
def buildSwitchboard(self):
""" Method to Build Switchboard Display & Switching Logic """
self.swb = spvSwitchboard(self, location= [0,0], parent= self.sdw,
menuTitle= 'Project Details')
def on_app_delete(self):
""" User has selected Window Abort """
if tbf.ask_question('Window Abort', 'Save Existing File?'):
self.save_file()
if tbf.ask_question('Exit Application', 'Exit?'):
self.root.destroy()
def write_file(self, fn):
""" Write DataDict to specified file """
dd = {'fn': self.filename,
'atoms': self.site.atmospherics,
'site': self.site.args,
'bat': self.bat.args,
'pnl': self.pnl.args,
'ary': self.ary.args,
'ary_2':self.sec_ary.args,
'bnk': self.bnk.args,
'inv': self.inv.args,
'load': self.load.export_frame(),
'chgr': self.chgc.args
}
fo = open(fn, 'wb')
pickle.dump(dd, fo)
fo.close()
def read_file(self, fn):
""" Read specified file into DataDict """
fo = open(fn, 'rb')
dd = pickle.load(fo)
fo.close()
self.filename = dd.pop('fn', None)
self.site.atmospherics = dd.pop('atoms', None)
load_in = dd.pop('load', None)
if load_in is not None:
self.load.purge_frame()
if type(load_in) is dict:
self.load.import_frame(load_in)
            # This test is for backwards compatibility
else:
ldi = load_in.df.to_dict('Index')
self.load.import_frame(ldi)
if self.load.master is None:
self.load.master = self
self.site.write_parameters(dd.pop('site', None))
self.bat.write_parameters(dd.pop('bat', None))
self.pnl.write_parameters(dd.pop('pnl', None))
self.ary.write_parameters(dd.pop('ary', None))
self.sec_ary.write_parameters(dd.pop('ary_2', None))
self.bnk.write_parameters(dd.pop('bnk', None))
self.inv.write_parameters(dd.pop('inv', None))
self.chgc.write_parameters(dd.pop('chgr', None))
def import_file(self):
""" Import Project Data File """
fn = None
while fn is None:
fn = askopenfilename(parent= self.root, title= 'Load Project',
defaultextension= '.spv',
initialdir= self.mdldir)
if fn != '' and type(fn) is not tuple:
# print (fn)
self.filename = fn
self.read_file(fn)
def save_file(self):
""" Method to Create New Project File """
fn = None
while fn is None:
fn = asksaveasfilename(parent= self.root, title= 'New File',
defaultextension= '.spv',
initialfile = self.filename,
initialdir= self.mdldir)
if fn != ''and type(fn) is not tuple:
self.write_file(fn)
self.filename = fn
def create_solar_array(self, src):
sa = PVArray(src)
sa.uses(self.pnl)
return sa
#TODO Should combine_arrays move to PVUtilities
def combine_arrays(self):
""" Combine primary & secondary array outputs to from a unified output
using individual array outputs to include the following:
Array Voltage (AV) = mim voltage for all arrays
Array Current (AI) = sum (ac(i)*AV/av(i))
Array Power (AP) = AV * AC
"""
if len(self.array_list)> 0:
frst_array = self.array_list[0].define_array_performance(self.times.index,
self.site, self.inv, self.stw)
rslt = pd.DataFrame({'ArrayVolts':frst_array['v_mp'],
'ArrayCurrent':frst_array['i_mp'],
'ArrayPower':frst_array['p_mp']},
index = self.times.index)
for ar in range(1, len(self.array_list)):
sarf = self.array_list[ar].is_defined()
if sarf:
sec_array = self.array_list[ar].define_array_performance(self.times.index,
self.site, self.inv, self.stw)
for rw in range(len(rslt)):
if rslt['ArrayPower'].iloc[rw] > 0 and sec_array['p_mp'].iloc[rw] >0:
v_out = min(rslt['ArrayVolts'].iloc[rw], sec_array['v_mp'].iloc[rw])
i_out = (rslt['ArrayCurrent'].iloc[rw] *
(v_out/rslt['ArrayVolts'].iloc[rw]) +
sec_array['i_mp'].iloc[rw] *
(v_out/sec_array['v_mp'].iloc[rw]))
rslt['ArrayVolts'].iloc[rw] = v_out
rslt['ArrayCurrent'].iloc[rw] = i_out
rslt['ArrayPower'].iloc[rw] = v_out * i_out
elif sec_array['p_mp'].iloc[rw] > 0:
rslt['ArrayVolts'].iloc[rw] = sec_array['v_mp'].iloc[rw]
rslt['ArrayCurrent'].iloc[rw] = sec_array['i_mp'].iloc[rw]
rslt['ArrayPower'].iloc[rw] = sec_array['p_mp'].iloc[rw]
rslt = rslt.assign(Month= self.times['Month'],
DayofMonth= self.times['DayofMonth'],
DayofYear= self.times['DayofYear'])
rslt = rslt.join(hourly_load(self.times.index,
self.load.get_load_profile()))
return rslt
return None
#TODO Should compute_powerFlows move to PVUtilities
def compute_powerFlows(self):
""" Computes the distribution of Array power to loads and
a battery bank if it exists. Returns a DataFrame containing
performance data
"""
self.array_out = self.combine_arrays()
PO = np.zeros(len(self.array_out)) # amount of total load satisfied
PS = np.zeros(len(self.array_out)) # fraction of load satisfied Power_out/TotLoad
DE = np.zeros(len(self.array_out)) # amount of Array Power used to provide load
BS = np.zeros(len(self.array_out)) # battery soc
BD = np.zeros(len(self.array_out)) # power drawn from battery
BP = np.zeros(len(self.array_out)) # remaining amount of usable Battery Power
SL = np.zeros(len(self.array_out)) # load imposed by system chgCntlr * inverter
EM = np.empty(len(self.array_out), dtype=object) # recorded error messages
hdr = ' Indx \t ArP \t ArI \t ArV \t dcLd \t acLd \t ttLd '
hdr += '\t PO \t PS \t DE \t SL \t BP \t BD \t BS \t EM\n'
outln = '{0:06}\t{1:6.2f}\t{2:6.2f}\t{3:6.2f}\t{4:6.2f}\t'
outln += '{5:6.2f}\t{6:6.2f}\t{7:6.2f}\t{8:6.2f}\t{9:6.2f}\t'
outln += '{10:6.2f}\t{11:6.2f}\t{12:6.2f}\t{13:6.2f}\t{14}\n'
bflg = self.bnk.is_defined()
self.out_rec = hdr
for tindx in range(len(self.array_out)):
wkDict = dict()
ArP = self.array_out['ArrayPower'].iloc[tindx]
ArV = self.array_out['ArrayVolts'].iloc[tindx]
ArI = self.array_out['ArrayCurrent'].iloc[tindx]
# Correct for possible power backflow into array
if ArP <= 0 or ArV <= 0 or ArI <= 0:
ArP = 0.0
ArV = 0.0
ArI = 0.0
dcLd = self.array_out['DC_Load'].iloc[tindx]
acLd = self.array_out['AC_Load'].iloc[tindx]
sysAttribs = {'Inv': self.inv, 'Chg': self.chgc, 'Bnk': self.bnk}
computOutputResults(sysAttribs, ArP, ArV, ArI, acLd, dcLd, wkDict)
# update arrays for tindx
PO[tindx] = wkDict.pop('PO', 0.0)
PS[tindx] = wkDict.pop('PS', 0.0)
DE[tindx] = wkDict.pop('DE', 0.0)
SL[tindx] = wkDict.pop('SL', 0.0)
if bflg:
BS[tindx] = wkDict.pop('BS', self.bnk.get_soc())*100
BD[tindx] = wkDict.pop('BD', 0.0)
BP[tindx] = wkDict.pop('BP', self.bnk.current_power())
msg = ''
errfrm = None
if 'Error' in wkDict.keys():
days: int = 1 + tindx//24
errfrm = wkDict['Error']
msg = 'After {0} days '.format(days)
EM[tindx] = msg + errfrm[0].replace('\n', ' ')
else:
EM[tindx]= ""
self.out_rec += outln.format(tindx, ArP, ArV, ArI,
dcLd, acLd, dcLd+acLd,
PO[tindx], PS[tindx], DE[tindx],
SL[tindx], BP[tindx], BD[tindx],
BS[tindx], EM[tindx])
            if self.debug and errfrm is not None:
if self.errflg == False and errfrm[1] != 'Fatal':
self.errflg = True
self.stw.show_message(msg + errfrm[0], errfrm[1])
            if errfrm is not None and errfrm[1] == 'Fatal':
msg = 'After {0} days '.format(days)
self.errflg = True
self.stw.show_message(msg + errfrm[0], errfrm[1])
break
# Create the DataFrame
rslt = pd.DataFrame({'PowerOut': PO,
'ArrayPower': self.array_out['ArrayPower'],
'Service': PS,
'DelvrEff': DE,
'BatSoc': BS,
'BatDrain': BD,
'BatPwr': BP
}, index = self.times.index)
rslt = rslt.assign(Month= self.times['Month'],
DayofMonth= self.times['DayofMonth'],
DayofYear= self.times['DayofYear'])
rslt = rslt.join(hourly_load(self.times.index,
self.load.get_load_profile()))
return rslt
def execute_simulation(self):
""" Perform System Analysis """
if self.rdw.children is not None:
kys = list(self.rdw.children.keys())
while len(kys) > 0:
self.rdw.children[kys.pop()].destroy()
if self.perform_base_error_check():
self.errflg = False
rt = datetime.now()
ft = 'run_{0}_{1:02}_{2}_{3:02}{4:02}{5:02}.txt'
self.outfile = ft.format(rt.year, rt.month, rt.day,
rt.hour, rt.minute, rt.second)
self.outrec = None
bnkflg = self.bnk.is_defined()
if self.stw is not None:
self.stw.show_message('Starting System Analysis')
self.loc = self.site.get_location()
self.times = create_time_indices(self.site.read_attrb('tz'))
self.site.get_atmospherics(self.times.index, self.stw)
if bnkflg:
self.bnk.initialize_bank()
self.array_out = self.combine_arrays()
self.mnthly_array_perfm = build_monthly_performance(self.array_out,
'ArrayPower')
dl = np.array([self.load.get_daily_load()]*12)
dlf = pd.DataFrame({'Daily Load':dl},
index=self.mnthly_array_perfm[0].index.values)
self.mnthly_array_perfm[0] = self.mnthly_array_perfm[0].join(dlf)
if self.stw is not None and self.errflg == False:
self.stw.show_message('Panel Analysis Completed')
if self.stw is not None:
self.stw.show_message('Starting Power Analysis')
self.power_flow = self.compute_powerFlows()
self.mnthly_pwr_perfm = build_monthly_performance(self.power_flow,
'PowerOut')
self.mnthly_pwr_perfm[0] = self.mnthly_pwr_perfm[0].join(dlf)
if self.stw is not None and self.errflg == False:
self.stw.show_message('Power Analysis Completed')
if self.stw is not None:
if self.errflg == False:
srvchrs = self.power_flow['Service'].sum()
dmndhrs = self.load.get_demand_hours()*365
if dmndhrs > 0:
k = srvchrs/dmndhrs
ms = 'System Design provides Power to Load {0:.2f}% of the time'.format(k*100)
                        if k < 1:  # k is the fraction of demand hours actually served
ms += '\n\tDesign delivers required load {0:.2f} hours out of {1} demand hours per year'.format(k*dmndhrs, dmndhrs)
if self.bnk.check_definition():
ms += '\n\tAnnual Battery Charging Cycles = {0:.2f} out of {1} specified lifetime cycles'.format(self.bnk.tot_cycles,
self.bnk.max_dischg_cycles)
self.stw.show_message(ms)
else:
self.stw.show_message('Analysis complete')
if self.debug:
self.debug_next()
def debug_next(self):
""" Handy function for debugging """
# print(self.times)
# calndr = create_calendar_indices(self.site.read_attrb('tz'))
# print(calndr.head())
# print(calndr.index)
if self.perf_rept:
fo = open(self.outfile, 'w')
fo.write(self.out_rec)
fo.close()
def perform_base_error_check(self):
""" method to conduct basic error checks
returns True if and only if no errors are found """
        # Tests for Site Definition
bflg = False
invflg = False
if not self.site.check_definition():
return False
#Tests for panel & Array definition
if not self.ary.check_definition():
return False
        # Tests for proper inverter definition
if sum(self.load.get_load_profile()['AC']) > 0:
if not self.inv.check_definition():
return False
else:
invflg = True
if self.bnk.check_definition():
bflg = True
"""Tests for Charge Controller definition
(only read if an inverter or battery is defined) """
if bflg and not invflg and not self.chgc.check_definition():
return False
return True
#TODO in Print Load improve formatting control for better tabular results
def print_load(self):
""" Method to build a print the load profile """
s = str(self.load)
rpt_ttl = 'Load Report'
self.output_report(rpt_ttl, s)
def create_overview_report(self):
""" Create a formated overview of Project Design data """
s = build_overview_report(self)
rpt_ttl = 'Overview Report'
self.output_report(rpt_ttl, s)
def output_report(self, rpt_ttl, s):
""" Method to ask wheteher to print or create an output file for contents
of s """
if tbf.ask_question(rpt_ttl, 'Save Report to File?'):
fn = None
while fn is None:
fn = asksaveasfilename(parent= self.root, title= 'Report Save',
defaultextension= '.txt',
initialfile = '',
initialdir= self.rptdir)
if fn != '':
self.filename = fn
fo = open(fn, 'w')
fo.write(s)
fo.close()
else:
print(s)
def show_load_profile(self):
""" Method to build & display the load profile graphic """
self.load.show_load_profile(self.rdw)
#TODO can all of these show methods be combined and controlled by variables?
def show_pwr_performance(self):
""" Create graphic of Annual Power Delivery vice Load """
if self.array_out is not None:
mpi = self.mnthly_pwr_perfm[0]
xaxis = np.arange(1,13)
pltslist = [{'label':'Avg Power', 'data':mpi.loc[:,'Avg PowerOut'],
'type': 'Bar', 'color': 'b', 'width': 0.2, 'xaxis': xaxis},
{'label':'Best Power', 'data':mpi.loc[:,'Best PowerOut'],
'type': 'Bar', 'color': 'g', 'width': 0.2,
'xaxis':np.arange(1,13) - 0.2},
{'label':'Worst Power', 'data':mpi.loc[:,'Worst PowerOut'],
'type': 'Bar', 'color': 'y', 'width': 0.3,
'offset': 0.3, 'xaxis':np.arange(1,13) + 0.2},
{'label':'Daily Load', 'data':mpi.loc[:,'Daily Load'],
'type': 'Line', 'color': 'r', 'xaxis': xaxis,
'width': 4.0, 'linestyle': 'solid' }
]
tbf.plot_graphic(self.rdw, 'Month of Year', 'Watts Relative to Load',
np.arange(1,13),
pltslist, 'Power Output Performance',
(6,4))
def show_pwr_best_day(self):
""" Create graphic of Solar Array Best Day Performance """
if self.array_out is not None:
best_day_perform = self.power_flow.loc[self.power_flow['DayofYear'] == self.mnthly_pwr_perfm[1]]
xlabels = np.arange(24)
pltslist = [{'label': 'Power Output',
'data': best_day_perform['PowerOut'],
'type': 'Line', 'xaxis': xlabels,
'width': 2.0, 'color': 'b'},
{'label': 'Hourly Load',
'data': best_day_perform['Total_Load'],
'type': 'Line', 'xaxis': xlabels ,
'width': 2.0, 'color': 'r'}]
tbf.plot_graphic(self.rdw, 'Time of Day', 'Watts', xlabels,
pltslist,
'Best Day Power Output', (6,4))
def show_pwr_worst_day(self):
""" Create graphic of Solar Array Best Day Performance """
if self.array_out is not None:
worst_day_perform = self.power_flow.loc[self.power_flow['DayofYear'] == self.mnthly_pwr_perfm[2]]
xlabels = np.arange(24)
pltslist = [{'label': 'Power Output',
'data': worst_day_perform['PowerOut'],
'type': 'Line', 'xaxis': xlabels,
'width': 2.0, 'color': 'b'},
{'label': 'Hourly Load',
'data': worst_day_perform['Total_Load'],
'type': 'Line', 'xaxis': xlabels ,
'width': 2.0, 'color': 'r'}]
tbf.plot_graphic(self.rdw, 'Time of Day', 'Watts', xlabels,
pltslist,
'Worst Day Power Output', (6,4))
def show_array_performance(self):
""" Create graphic of Annual Solar Array Performance """
if self.array_out is not None:
mpi = self.mnthly_array_perfm[0]
xaxis = np.arange(1,13)
pltslist = [{'label':'Avg Power', 'data':mpi.loc[:,'Avg ArrayPower'],
'type': 'Bar', 'color': 'b', 'width': 0.2, 'xaxis': xaxis},
{'label':'Best Power', 'data':mpi.loc[:,'Best ArrayPower'],
'type': 'Bar', 'color': 'g', 'width': 0.2,
'xaxis':np.arange(1,13) - 0.2},
{'label':'Worst Power', 'data':mpi.loc[:,'Worst ArrayPower'],
'type': 'Bar', 'color': 'y', 'width': 0.3,
'offset': 0.3, 'xaxis':np.arange(1,13) + 0.2},
{'label':'Daily Load', 'data':mpi.loc[:,'Daily Load'],
'type': 'Line', 'color': 'r', 'xaxis': xaxis,
'width': 4.0, 'linestyle': 'solid' }
]
tbf.plot_graphic(self.rdw, 'Month of Year', 'Watts',
np.arange(1,13),
pltslist, 'Annual Array Performance',
(6,4))
def show_array_best_day(self):
""" Create graphic of Solar Array Best Day Performance """
if self.array_out is not None:
best_day_perform = self.array_out.loc[self.array_out['DayofYear'] == self.mnthly_array_perfm[1]]
xlabels =
|
np.arange(24)
|
numpy.arange
|
import sys
sys.path.append('util')
import numpy as np
import myparser as myparser
import simulation as simulation
import data as data
import parallel as par
import time
import monitorTiming as monitorTiming
import postProc as postProc
from plotsUtil import *
from myProgressBar import printProgressBar
def autocorr(x):
nTime = min(int(x.shape[0]/2),250)
nDim = x.shape[1]
nReal = x.shape[2]
meanx = np.mean(x,axis=(0,2),keepdims=True)
x = x-meanx
corr = np.zeros(nTime)
printProgressBar(0, nTime-1, prefix = 'Autocorrelation ' + str(0) + ' / ' +str(nTime-1),suffix = 'Complete', length = 50)
for itime in range(nTime):
xroll = np.roll(x,-itime,axis=0)
corr[itime] = np.mean(x*xroll)
printProgressBar(itime, nTime-1, prefix = 'Autocorrelation ' + str(itime) + ' / ' +str(nTime-1),suffix = 'Complete', length = 50)
corr=corr/corr[0]
for itime in range(nTime):
if corr[itime]<0.1:
break
return corr, itime
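# Illustrative sanity check (white noise should decorrelate within a lag or two):
# >>> noise = np.random.randn(1000, 3, 8)
# >>> corr, lag = autocorr(noise)
# >>> float(corr[0])
# 1.0
# >>> lag <= 2
# True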
# ~~~~ Init
# Parse input
inpt = myparser.parseInputFile()
# Initialize MPI
# Not for now
# Initialize random seed
np.random.seed(seed=42+par.irank)
# Initialize Simulation details
Sim = data.simSetUp(inpt)
# ~~~~ Main
# Run Simulation
Result = simulation.simRun(Sim)
# ~~~~ Monitor
# Timing
monitorTiming.printTiming(Result)
# ~~~~ Post process
# Plot, build CDF
postProc.postProc(Result,Sim)
par.finalize()
# SAVE DATA
if Sim['Simulation name'] == 'KS':
indexLim =
|
np.argwhere(Result['tt']>50)
|
numpy.argwhere
|
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
# vim: set foldmethod=marker commentstring=\ \ #\ %s :
#
# Author: <NAME>
# Created: 2018-09-08
#
# Copyright (C) 2018 <NAME>
#
import io
import os
import glob
import dash
import json
import base64
import shutil
import zipfile
import datetime
import PIL.Image
import dash_table
import numpy as np
import pandas as pd
import urllib.parse
import scipy.signal
import my_threshold
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
GROUP_COLORS = ['#ff0000', '#ff7f00', '#e6b422', '#38b48b', '#008000',
'#89c3eb', '#84a2d4', '#3e62ad', '#0000ff', '#7f00ff',
'#56256e', '#000000']
ALPHABETS = [chr(i) for i in range(65, 65 + 26)]
DATA_ROOT = './data_root_for_demo'
THETA = 50
THRESH_FUNC = my_threshold.minmax
app = dash.Dash('Sapphire')
app.css.append_css(
{'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})
# ================================
# Definition of the viewer page
# ================================
app.layout = html.Div([
html.Header([
html.H1(
'Sapphire',
style={'display': 'inline-block', 'margin': '10px'},
),
html.Div(
os.path.basename(os.path.dirname(DATA_ROOT)),
style={'display': 'inline-block', 'margin': '10px'},
),
]),
dcc.Tabs(id='tabs', value='tab-1', children=[
dcc.Tab(id='tab-1', label='Main', value='tab-1', children=[
html.Div([
'Dataset:',
html.Br(),
html.Div([
dcc.Dropdown(
id='env-dropdown',
placeholder='Select a dataset...',
clearable=False,
),
],
style={
'display': 'inline-block',
'width': '400px',
'vertical-align': 'middle',
# 'white-space': 'nowrap',
},
),
], style={
'display': 'table',
'table-layout': 'auto',
'border-collapse': 'separate',
'border-spacing': '5px 5px',
}),
html.Div([
html.Div([
dcc.RadioItems(
id='detect-target',
options=[
{
'label': 'Pupariation',
'value': 'pupariation',
'disabled': True,
},
{
'label': 'Eclosion',
'value': 'eclosion',
'disabled': True,
},
{
'label': 'Pupariation & eclosion',
'value': 'pupa-and-eclo',
'disabled': True,
},
{
'label': 'Death',
'value': 'death',
'disabled': True,
},
],
),
'Detection Method:',
html.Br(),
dcc.RadioItems(
id='detection-method',
options=[
{
'label': 'Maximum',
'value': 'max',
'disabled': False,
},
{
'label': 'Relative maxima',
'value': 'relmax',
'disabled': False,
},
{
'label': 'Thresholding',
'value': 'thresholding',
'disabled': False,
},
],
value='max',
),
'Inference Data:',
html.Br(),
html.Div([
dcc.Dropdown(
id='larva-dropdown',
placeholder='Select a larva data...',
clearable=False,
),
dcc.Dropdown(
id='adult-dropdown',
placeholder='Select a adult data...',
clearable=False,
),
],
style={
'display': 'inline-block',
'width': '200px',
'vertical-align': 'middle',
},
),
html.Br(),
'Well Index:',
html.Br(),
html.Div([
dcc.Input(
id='well-selector',
type='number',
value=0,
min=0,
size='5',
),
],
style={
'display': 'inline-block',
},
),
html.Br(),
html.Div([
dcc.Slider(
id='well-slider',
value=0,
min=0,
step=1,
),
],
style={
'display': 'inline-block',
'width': '200px',
},
),
html.Br(),
'Frame:',
html.Br(),
html.Div([
dcc.Input(
id='time-selector',
type='number',
value=0,
min=0,
size='5',
),
],
style={
'display': 'inline-block',
},
),
html.Br(),
html.Div([
dcc.Slider(
id='time-slider',
value=0,
min=0,
step=1,
),
],
style={
'display': 'inline-block',
'width': '200px',
},
),
html.Br(),
], style={
'display': 'table-cell',
'vertical-align': 'top',
}),
html.Div([
html.Div(id='org-image'),
html.Div(id='label-and-prob'),
dcc.Checklist(
id='blacklist-check',
options=[{
'label': 'Black List',
'value': 'checked',
'disabled': False,
}],
values=[],
),
html.Div(id='blacklist-link'),
], style={
'display': 'table-cell',
'vertical-align': 'top',
}),
html.Div([
html.Div('Original Image at "t"'),
dcc.Graph(
id='current-well',
config={'displayModeBar': False},
),
], style={
'display': 'table-cell',
'vertical-align': 'top',
}),
html.Div([
'Data root:',
html.Div(DATA_ROOT, id='data-root'),
],
style={
'display': 'none',
'vertical-align': 'top',
},
),
html.Div([
html.Div(id='larva-signal-div', children=[
html.Div([
html.Div([
html.Div([
dcc.Slider(
id='larva-thresh',
value=1,
min=0,
max=2,
step=.1,
updatemode='mouseup',
vertical=True,
),
], style={
'height': '170px',
'width': '10px',
'margin': '0px 25px 10px',
}),
dcc.Input(
id='larva-thresh-selector',
type='number',
value=2,
min=0,
max=2,
step=0.1,
style={
'width': '70px',
},
),
], style={
'display': 'table-cell',
'vertical-align': 'top',
}),
dcc.Graph(
id='larva-signal',
figure={'data': []},
style={
'display': 'table-cell',
'vertical-align': 'top',
'height': '240px',
'width': '550px',
},
),
html.Div([
html.Div('Signal Type:',
style={'margin-left': '10px'},
),
html.Div([
dcc.Dropdown(
id='larva-signal-type',
placeholder='Select a signal...',
clearable=False,
),
],
style={
'margin-left': '10px',
},
),
dcc.Checklist(
id='larva-smoothing-check',
options=[{
'label': 'Smoothing',
'value': 'checked',
}],
values=[],
style={
'margin-left': '10px',
},
),
html.Div([
'Size:',
dcc.Input(
id='larva-window-size',
type='number',
value=10,
min=0,
size='5',
style={
'width': '70px',
'margin-left': '25px',
},
),
], style={'margin': '0px 0px 0px 20px'}),
html.Div([
'Sigma:',
dcc.Input(
id='larva-window-sigma',
type='number',
value=5,
min=0,
size='5',
step=0.1,
style={
'width': '70px',
'margin-left': '10px',
},
),
], style={'margin': '0px 0px 0px 20px'}),
dcc.Checklist(
id='larva-weight-check',
options=[{
'label': 'Weight',
'value': 'checked',
}],
values=[],
style={
'margin-left': '10px',
},
),
dcc.RadioItems(
id='larva-weight-style',
options=[
{
'label': 'Step',
'value': 'step',
'disabled': True,
},
{
'label': 'Ramp',
'value': 'ramp',
'disabled': True,
},
],
value='step',
labelStyle={'display': 'inline-block'},
style={'margin': '0px 0px 0px 20px'},
),
], style={
'display': 'table-cell',
'vertical-align': 'middle',
}),
], style={
'display': 'table',
'table-layout': 'auto',
}),
], style={'width': '810px', 'margin-top': '10px'}),
html.Div(id='adult-signal-div', children=[
html.Div([
html.Div([
html.Div([
dcc.Slider(
id='adult-thresh',
value=1,
min=0,
max=2,
step=.1,
updatemode='mouseup',
vertical=True,
),
], style={
'height': '170px',
'width': '10px',
'margin': '0px 25px 10px',
}),
dcc.Input(
id='adult-thresh-selector',
type='number',
value=2,
min=0,
max=2,
step=0.1,
style={
'width': '70px',
},
),
], style={
'display': 'table-cell',
'vertical-align': 'top',
}),
dcc.Graph(
id='adult-signal',
figure={'data': []},
style={
'display': 'table-cell',
'vertical-align': 'top',
'height': '240px',
'width': '550px',
},
),
html.Div([
html.Div('Signal Type:',
style={'margin-left': '10px'},
),
html.Div([
dcc.Dropdown(
id='adult-signal-type',
placeholder='Select a signal...',
clearable=False,
),
],
style={
'margin-left': '10px',
},
),
dcc.Checklist(
id='adult-smoothing-check',
options=[{
'label': 'Smoothing',
'value': 'checked',
}],
values=[],
style={
'margin-left': '10px',
},
),
html.Div([
'Size:',
dcc.Input(
id='adult-window-size',
type='number',
value=10,
min=0,
size='5',
style={
'width': '70px',
'margin-left': '25px',
},
),
], style={'margin': '0px 0px 0px 20px'}),
html.Div([
'Sigma:',
dcc.Input(
id='adult-window-sigma',
type='number',
value=5,
min=0,
size='5',
step=0.1,
style={
'width': '70px',
'margin-left': '10px',
},
),
], style={'margin': '0px 0px 0px 20px'}),
dcc.Checklist(
id='adult-weight-check',
options=[{
'label': 'Weight',
'value': 'checked',
}],
values=[],
style={
'display': 'inline-block',
'margin': '0px 10px',
},
),
dcc.RadioItems(
id='adult-weight-style',
options=[
{
'label': 'Step',
'value': 'step',
'disabled': True,
},
{
'label': 'Ramp',
'value': 'ramp',
'disabled': True,
},
],
value='step',
labelStyle={'display': 'inline-block'},
style={'margin': '0px 0px 0px 20px'},
),
], style={
'display': 'table-cell',
'vertical-align': 'middle',
}),
], style={
'display': 'table',
'table-layout': 'auto',
}),
], style={'width': '810px', 'margin-top': '10px'}),
html.Div([
dcc.Input(
id='midpoint-selector',
type='number',
min=0,
style={
'width': '70px',
'display': 'inline-block',
'vertical-align': 'middle',
'margin': '0px 10px 0px 120px',
},
),
html.Div([
dcc.Slider(
id='midpoint-slider',
min=0,
step=1,
),
], style={
'width': '450px',
'display': 'inline-block',
}),
]),
], style={
'display': 'table-cell',
'vertical-align': 'middle',
}),
], style={
'display': 'table',
'table-layout': 'auto',
'border-collapse': 'separate',
'border-spacing': '5px 0px',
}),
html.Div([
dcc.Graph(
id='larva-summary',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
dcc.Graph(
id='larva-hist',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
dcc.Graph(
id='larva-boxplot',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
]),
html.Br(),
html.Div([
dcc.Graph(
id='adult-summary',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
dcc.Graph(
id='adult-hist',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
dcc.Graph(
id='adult-boxplot',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
dcc.Graph(
id='pupa-vs-eclo',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
dcc.Graph(
id='survival-curve',
style={
'display': 'inline-block',
'height': '300px',
'width': '20%',
},
),
]),
html.Div(id='dummy-div'),
]),
dcc.Tab(id='tab-2', label='Data table', value='tab-2', children=[
html.Div(id='data-tables', children=[]),
], style={'width': '100%'}),
dcc.Tab(id='tab-3', label='Mask maker', value='tab-3', children=[
html.Div([
html.Div([
html.Div([
'# of rows',
html.Br(),
dcc.Input(id='n-rows', placeholder='# of rows',
debounce=True, type='number', value=8,
max=100, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'# of columns',
html.Br(),
dcc.Input(id='n-clms', placeholder='# of columns',
debounce=True, type='number', value=12,
max=100, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'# of plates',
html.Br(),
dcc.Input(id='n-plates', placeholder='# of plates',
debounce=True, type='number', value=1,
max=10, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'gap between rows',
html.Br(),
dcc.Input(id='row-gap',
placeholder='gap between rows', debounce=True,
type='number', value=1,
max=10, min=0, size='5', step=0.1),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'gap between columns',
html.Br(),
dcc.Input(id='clm-gap',
placeholder='gap between columns',
debounce=True, type='number', value=1,
max=10, min=0, size='5', step=0.1),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'gap between plates',
html.Br(),
dcc.Input(id='plate-gap',
placeholder='gap between plates',
debounce=True, type='number', value=71,
max=800, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'x-coord of the lower left corner',
html.Br(),
dcc.Input(id='x',
placeholder='x-coord of the lower left corner',
debounce=True, type='number', value=0,
max=1500, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'y-coord of the lower left corner',
html.Br(),
dcc.Input(id='y',
placeholder='y-coord of the lower left corner',
debounce=True, type='number', value=0,
max=1500, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'width of a well',
html.Br(),
dcc.Input(id='well_w',
placeholder='width of a well',
debounce=True, type='number', value=0,
max=1500, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'height of a well',
html.Br(),
dcc.Input(id='well_h',
placeholder='height of a well', debounce=True,
type='number', value=0,
max=1500, min=0, size='5'),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
'rotation correction (degree)',
html.Br(),
dcc.Input(id='angle',
placeholder='rotation correction (degree)',
debounce=True, type='number', value=0,
max=90, min=0, size='5', step=0.1),
], style={'display': 'inline-block', 'width': '110px'}),
html.Div([
dcc.ConfirmDialogProvider(
id='mask-save-confirm-dialog',
children=html.Button('Save',
id='mask-save-button'),
message='Are you sure you want to overwrite the mask file?',
),
dcc.ConfirmDialog(id='mask-save-notification-dialog',
message=''),
], style={'display': 'inline-block'}),
]),
html.Div([
dcc.Loading([
dcc.Graph(
id='org-img',
figure={
'data': [],
'layout':{},
},
style={
'display': 'inline-block',
'width': '33%',
},
),
dcc.Graph(
id='masked-img',
figure={
'data': [],
'layout':{},
},
style={
'display': 'inline-block',
'width': '33%',
},
),
dcc.Graph(
id='mask-img',
figure={
'data': [],
'layout':{},
},
style={
'display': 'inline-block',
'width': '33%',
},
),
]),
]),
]),
], style={'width': '100%'}),
], style={'width': '100%'}),
dcc.Store(id='hidden-timestamp'),
dcc.Store(id='hidden-midpoint'),
dcc.Store(id='hidden-blacklist'),
html.Div('{"changed": "nobody"}',
id='changed-well', style={'display': 'none'}),
html.Div(id='well-buff', style={'display': 'none'}, children=json.dumps(
{
'nobody': 0,
'current-well': 0,
'larva-summary': 0,
'adult-summary': 0,
'pupa-vs-eclo': 0,
'larva-boxplot': 0,
'adult-boxplot': 0,
}
)
),
html.Div('{"changed": "nobody"}',
id='changed-time', style={'display': 'none'}),
html.Div(id='time-buff', style={'display': 'none'}, children=json.dumps(
{
'nobody': 0,
'larva-signal': 0,
'adult-signal': 0,
}
)
),
], style={'width': '1400px',},)
# =========================================
# Smoothing signals with gaussian window
# =========================================
def my_filter(signals, size=10, sigma=5):
window = scipy.signal.gaussian(size, sigma)
signals = np.array(
[np.convolve(signal, window, mode='same')
for signal in signals])
return signals
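# Minimal sketch of the smoothing helper above (illustrative shapes; note the
# Gaussian window is not normalized, so absolute amplitudes are rescaled):
# >>> noisy = np.random.randn(4, 100)
# >>> my_filter(noisy, size=10, sigma=5).shape
# (4, 100)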
# =================================================
# Initialize env-dropdown when opening the page.
# =================================================
@app.callback(
Output('env-dropdown', 'options'),
[Input('data-root', 'children')])
def callback(data_root):
imaging_envs = [os.path.basename(i)
for i in sorted(glob.glob(os.path.join(data_root, '*')))]
return [{'label': i, 'value': i} for i in imaging_envs]
# =================================================================
# Initialize detect-target.
# =================================================================
@app.callback(
Output('detect-target', 'value'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
# Guard
if env is None:
return
if not os.path.exists(os.path.join(data_root, env, 'config.json')):
return
with open(os.path.join(data_root, env, 'config.json')) as f:
config = json.load(f)
if config['detect'] == 'pupariation':
return 'pupariation'
elif config['detect'] == 'eclosion':
return 'eclosion'
elif config['detect'] == 'pupa&eclo':
return 'pupa-and-eclo'
elif config['detect'] == 'death':
return 'death'
else:
return
# ======================================================
# Initialize larva-dropdown.
# ======================================================
@app.callback(
Output('larva-dropdown', 'disabled'),
[Input('detect-target', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(detect, data_root, env):
if env is None:
return
if detect == 'pupariation':
return False
elif detect == 'eclosion':
return True
elif detect == 'pupa-and-eclo':
return False
elif detect == 'death':
return True
@app.callback(
Output('larva-dropdown', 'options'),
[Input('detect-target', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(detect, data_root, env):
if env is None:
return []
if detect == 'eclosion' or detect == 'death':
return []
results = [os.path.basename(i)
for i in sorted(glob.glob(os.path.join(
data_root, glob.escape(env), 'inference', 'larva', '*')))
if os.path.isdir(i)]
return [{'label': i, 'value': i} for i in results]
@app.callback(
Output('larva-dropdown', 'value'),
[Input('larva-dropdown', 'options')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(_, data_root, env):
return None
# ======================================================
# Initialize adult-dropdown.
# ======================================================
@app.callback(
Output('adult-dropdown', 'disabled'),
[Input('detect-target', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(detect, data_root, env):
if env is None:
return
if detect == 'pupariation':
return True
elif detect == 'eclosion':
return False
elif detect == 'pupa-and-eclo':
return False
elif detect == 'death':
return False
@app.callback(
Output('adult-dropdown', 'options'),
[Input('detect-target', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(detect, data_root, env):
if env is None:
return []
if detect == 'pupariation':
return []
results = [os.path.basename(i)
for i in sorted(glob.glob(os.path.join(
data_root, glob.escape(env), 'inference', 'adult', '*')))
if os.path.isdir(i)]
return [{'label': i, 'value': i} for i in results]
@app.callback(
Output('adult-dropdown', 'value'),
[Input('adult-dropdown', 'options')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(_, data_root, env):
return None
# =====================================================
# Callbacks for selecting a well
# =====================================================
@app.callback(
Output('well-selector', 'max'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
if env is None or data_root is None:
return
with open(os.path.join(data_root, env, 'mask_params.json')) as f:
params = json.load(f)
return params['n-rows'] * params['n-plates'] * params['n-clms'] - 1
@app.callback(
Output('well-selector', 'value'),
[Input('well-slider', 'value')])
def callback(well_idx):
return well_idx
@app.callback(
Output('well-slider', 'max'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
if env is None or data_root is None:
return
with open(os.path.join(data_root, env, 'mask_params.json')) as f:
params = json.load(f)
return params['n-rows'] * params['n-plates'] * params['n-clms'] - 1
@app.callback(
Output('well-slider', 'value'),
[Input('well-buff', 'children')],
[State('changed-well', 'children')])
def callback(buff, changed_data):
buff = json.loads(buff)
changed_data = json.loads(changed_data)['changed']
return buff[changed_data]
# =====================================================
# Callbacks to buffer the ID of a clicked well
# =====================================================
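# The buffer is split across two callbacks: the first compares the clickData
# of every source graph with the buffered well IDs and records which graph
# changed; the second writes that graph's new well ID back into the buffer,
# which then drives the well slider.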
@app.callback(
Output('changed-well', 'children'),
[Input('current-well', 'clickData'),
Input('larva-summary', 'clickData'),
Input('adult-summary', 'clickData'),
Input('pupa-vs-eclo', 'clickData'),
Input('larva-boxplot', 'clickData'),
Input('adult-boxplot', 'clickData')],
[State('well-buff', 'children')])
def callback(current_well, larva_summary, adult_summary,
pupa_vs_eclo, larva_boxplot, adult_boxplot, buff):
# Guard
if current_well is None and \
larva_summary is None and \
adult_summary is None and \
pupa_vs_eclo is None and \
larva_boxplot is None and \
adult_boxplot is None:
return '{"changed": "nobody"}'
if current_well is None:
current_well = 0
else:
current_well = int(current_well['points'][0]['text'])
if larva_summary is None:
larva_summary = 0
else:
larva_summary = int(larva_summary['points'][0]['text'])
if adult_summary is None:
adult_summary = 0
else:
adult_summary = int(adult_summary['points'][0]['text'])
if pupa_vs_eclo is None:
pupa_vs_eclo = 0
else:
pupa_vs_eclo = int(pupa_vs_eclo['points'][0]['text'])
if larva_boxplot is None:
larva_boxplot = 0
else:
larva_boxplot = int(larva_boxplot['points'][0]['text'])
if adult_boxplot is None:
adult_boxplot = 0
else:
adult_boxplot = int(adult_boxplot['points'][0]['text'])
buff = json.loads(buff)
if current_well != buff['current-well']:
return '{"changed": "current-well"}'
if larva_summary != buff['larva-summary']:
return '{"changed": "larva-summary"}'
if adult_summary != buff['adult-summary']:
return '{"changed": "adult-summary"}'
if pupa_vs_eclo != buff['pupa-vs-eclo']:
return '{"changed": "pupa-vs-eclo"}'
if larva_boxplot != buff['larva-boxplot']:
return '{"changed": "larva-boxplot"}'
if adult_boxplot != buff['adult-boxplot']:
return '{"changed": "adult-boxplot"}'
return '{"changed": "nobody"}'
@app.callback(
Output('well-buff', 'children'),
[Input('changed-well', 'children')],
[State('current-well', 'clickData'),
State('larva-summary', 'clickData'),
State('adult-summary', 'clickData'),
State('pupa-vs-eclo', 'clickData'),
State('larva-boxplot', 'clickData'),
State('adult-boxplot', 'clickData'),
State('well-buff', 'children')])
def callback(changed_data, current_well, larva_summary, adult_summary,
pupa_vs_eclo, larva_boxplot, adult_boxplot, buff):
buff = json.loads(buff)
changed_data = json.loads(changed_data)['changed']
print('Previous Value')
print(buff)
if changed_data == 'nobody':
pass
elif changed_data == 'current-well':
buff['current-well'] = int(current_well['points'][0]['text'])
elif changed_data == 'larva-summary':
buff['larva-summary'] = int(larva_summary['points'][0]['text'])
elif changed_data == 'adult-summary':
buff['adult-summary'] = int(adult_summary['points'][0]['text'])
elif changed_data == 'pupa-vs-eclo':
buff['pupa-vs-eclo'] = int(pupa_vs_eclo['points'][0]['text'])
elif changed_data == 'larva-boxplot':
buff['larva-boxplot'] = int(larva_boxplot['points'][0]['text'])
elif changed_data == 'adult-boxplot':
buff['adult-boxplot'] = int(adult_boxplot['points'][0]['text'])
else:
# Never evaluated
pass
print('Current Value')
print(buff)
return json.dumps(buff)
# =====================================================
# Callbacks for selecting time step (frame)
# =====================================================
@app.callback(
Output('time-selector', 'max'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
if env is None:
return
return len(glob.glob(os.path.join(
data_root, glob.escape(env), 'original', '*.jpg'))) - 2
@app.callback(
Output('time-selector', 'value'),
[Input('time-slider', 'value')])
def callback(timestep):
return timestep
@app.callback(
Output('time-slider', 'max'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
if env is None:
return 100
return len(glob.glob(os.path.join(
data_root, glob.escape(env), 'original', '*.jpg'))) - 2
@app.callback(
Output('time-slider', 'value'),
[Input('env-dropdown', 'value'),
Input('time-buff', 'children')],
[State('changed-time', 'children')])
def callback(_, buff, changed_data):
buff = json.loads(buff)
changed_data = json.loads(changed_data)['changed']
return buff[changed_data]
# =====================================================
# Callbacks to buffer a clicked frame number
# =====================================================
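# Same two-step buffering pattern as for wells: detect which signal plot was
# clicked, then store its frame number so the time slider can follow it.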
@app.callback(
Output('changed-time', 'children'),
[Input('larva-signal', 'clickData'),
Input('adult-signal', 'clickData')],
[State('time-buff', 'children')])
def callback(larva_signal, adult_signal, buff):
# Guard
if larva_signal is None and adult_signal is None:
return '{"changed": "nobody"}'
if larva_signal is None:
larva_signal = 0
else:
larva_signal = larva_signal['points'][0]['x']
if adult_signal is None:
adult_signal = 0
else:
adult_signal = adult_signal['points'][0]['x']
buff = json.loads(buff)
if larva_signal != buff['larva-signal']:
return '{"changed": "larva-signal"}'
if adult_signal != buff['adult-signal']:
return '{"changed": "adult-signal"}'
return '{"changed": "nobody"}'
@app.callback(
Output('time-buff', 'children'),
[Input('changed-time', 'children')],
[State('larva-signal', 'clickData'),
State('adult-signal', 'clickData'),
State('time-buff', 'children')])
def callback(changed_data, larva_signal, adult_signal, buff):
buff = json.loads(buff)
changed_data = json.loads(changed_data)['changed']
print('Previous Value')
print(buff)
if changed_data == 'nobody':
return json.dumps(buff)
if changed_data == 'larva-signal':
buff['larva-signal'] = larva_signal['points'][0]['x']
print('Current Value')
print(buff)
return json.dumps(buff)
if changed_data == 'adult-signal':
buff['adult-signal'] = adult_signal['points'][0]['x']
print('Current Value')
print(buff)
return json.dumps(buff)
# =====================================================
# Select signal type
# =====================================================
@app.callback(
Output('larva-signal-type', 'options'),
[Input('larva-dropdown', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(larva, data_root, dataset_name):
# Guard
if larva is None or dataset_name is None:
return []
signal_files = sorted(glob.glob(os.path.join(
data_root, glob.escape(dataset_name), 'inference',
'larva', larva, '*signals.npy')))
signal_files = [os.path.basename(file_path) for file_path in signal_files]
return [{'label': i, 'value': i} for i in signal_files]
@app.callback(
Output('adult-signal-type', 'options'),
[Input('adult-dropdown', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(adult, data_root, dataset_name):
# Guard
if adult is None or dataset_name is None:
return []
signal_files = sorted(glob.glob(os.path.join(
data_root, glob.escape(dataset_name), 'inference',
'adult', adult, '*signals.npy')))
signal_files = [os.path.basename(file_path) for file_path in signal_files]
return [{'label': i, 'value': i} for i in signal_files]
@app.callback(
Output('larva-signal-type', 'value'),
[Input('larva-signal-type', 'options')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(options, data_root, dataset_name):
# Guard
if options == [] or dataset_name is None:
return None
return options[0]['value']
@app.callback(
Output('adult-signal-type', 'value'),
[Input('adult-signal-type', 'options')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(options, data_root, dataset_name):
# Guard
if options == [] or dataset_name is None:
return None
return options[0]['value']
# =====================================================
# Toggle valid/invalid of window size
# =====================================================
@app.callback(
Output('larva-window-size', 'disabled'),
[Input('larva-smoothing-check', 'values')])
def callback(checks):
    return len(checks) == 0
@app.callback(
Output('adult-window-size', 'disabled'),
[Input('adult-smoothing-check', 'values')])
def callback(checks):
    return len(checks) == 0
# ======================================================
# Toggle valid/invalid of window sigma
# ======================================================
@app.callback(
Output('larva-window-sigma', 'disabled'),
[Input('larva-smoothing-check', 'values')])
def callback(checks):
    return len(checks) == 0
@app.callback(
Output('adult-window-sigma', 'disabled'),
[Input('adult-smoothing-check', 'values')])
def callback(checks):
    return len(checks) == 0
# =========================================================
# Toggle valid/invalid of the weight style radio buttons
# =========================================================
@app.callback(
Output('larva-weight-style', 'options'),
[Input('larva-weight-check', 'values')])
def callback(checks):
if len(checks) == 0:
return [
{'label': 'Step', 'value': 'step', 'disabled': True},
{'label': 'Ramp', 'value': 'ramp', 'disabled': True},
]
else:
return [
{'label': 'Step', 'value': 'step', 'disabled': False},
{'label': 'Ramp', 'value': 'ramp', 'disabled': False},
]
@app.callback(
Output('adult-weight-style', 'options'),
[Input('adult-weight-check', 'values')])
def callback(checks):
if len(checks) == 0:
return [
{'label': 'Step', 'value': 'step', 'disabled': True},
{'label': 'Ramp', 'value': 'ramp', 'disabled': True},
]
else:
return [
{'label': 'Step', 'value': 'step', 'disabled': False},
{'label': 'Ramp', 'value': 'ramp', 'disabled': False},
]
# ======================================
# Callbacks for blacklist
# ======================================
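# The blacklist is a per-well list of booleans kept in the hidden dcc.Store
# ('hidden-blacklist'); the checkbox toggles the entry of the selected well
# and the whole table can be downloaded as a CSV file.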
@app.callback(
Output('blacklist-check', 'values'),
[Input('well-selector', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('hidden-blacklist', 'data')])
def callback(well_idx, data_root, env, blacklist):
if well_idx is None or env is None or blacklist is None:
raise dash.exceptions.PreventUpdate
if blacklist['value'][well_idx]:
return ['checked']
else:
return []
def get_trigger_input(callback_context):
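    """Return the id of the component that triggered the current callback,
    or 'No inputs' if nothing has fired yet."""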
if callback_context.triggered:
return callback_context.triggered[0]['prop_id'].split('.')[0]
else:
return 'No inputs'
@app.callback(
Output('hidden-blacklist', 'data'),
[Input('env-dropdown', 'value'),
Input('blacklist-check', 'values')],
[State('hidden-blacklist', 'data'),
State('well-selector', 'value'),
State('data-root', 'children')])
def callback(dataset_name, check, blacklist, well_idx, data_root):
# Guard
if well_idx is None or dataset_name is None:
raise dash.exceptions.PreventUpdate
trigger_input = get_trigger_input(dash.callback_context)
# If triggered by the env-dropdown, initialize the blacklist buffer
if trigger_input == 'env-dropdown':
blacklist, exist = load_blacklist(data_root, dataset_name)
return {'value': list(blacklist)}
# If triggered by the checkbox, put the T/F value in the blacklist buffer
elif trigger_input == 'blacklist-check':
if check:
blacklist['value'][well_idx] = True
else:
blacklist['value'][well_idx] = False
return blacklist
else:
raise Exception
@app.callback(
Output('blacklist-link', 'children'),
[Input('hidden-blacklist', 'data')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(blacklist, data_root, dataset_name):
# Guard
if blacklist is None:
return 'Now loading...'
# Load a mask params
with open(os.path.join(data_root, dataset_name, 'mask_params.json')) as f:
params = json.load(f)
n_wells = params['n-rows'] * params['n-plates'] * params['n-clms']
blacklist_table = np.array(blacklist['value'], dtype=int).reshape(
params['n-rows'] * params['n-plates'], params['n-clms'])
df = pd.DataFrame(blacklist_table)
return [
html.A(
'Download the Blacklist',
download='Blacklist({}).csv'.format(dataset_name[0:20]),
href='data:text/csv;charset=utf-8,' + df.to_csv(
index=False, header=False),
target='_blank',
),
]
# ========================
# Update the org-image.
# ========================
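# Shows the selected well cropped out of two consecutive original frames
# (t and t+1) as small inline images.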
@app.callback(
Output('org-image', 'children'),
[Input('time-selector', 'value'),
Input('well-selector', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(time, well_idx, data_root, env):
# Exception handling
if env is None:
return
# Load the mask
mask = np.load(os.path.join(data_root, env, 'mask.npy'))
# Load an original image
orgimg_paths = sorted(glob.glob(
os.path.join(data_root, glob.escape(env), 'original', '*.jpg')))
orgimg1 = np.array(
PIL.Image.open(orgimg_paths[time]).convert('L'), dtype=np.uint8)
orgimg2 = np.array(
PIL.Image.open(orgimg_paths[time+1]).convert('L'), dtype=np.uint8)
    # Cut out a well image from the original image
r, c = np.where(mask == well_idx)
orgimg1 = orgimg1[r.min():r.max(), c.min():c.max()]
orgimg2 = orgimg2[r.min():r.max(), c.min():c.max()]
orgimg1 = PIL.Image.fromarray(orgimg1)
orgimg2 = PIL.Image.fromarray(orgimg2)
# Buffer the well image as byte stream
buf1 = io.BytesIO()
buf2 = io.BytesIO()
orgimg1.save(buf1, format='JPEG')
orgimg2.save(buf2, format='JPEG')
return [
html.Div('Image at "t"',
style={'display': 'inline-block', 'margin-right': '25px'}),
html.Div('"t+1"', style={'display': 'inline-block'}),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]
# =============================
# Update the label-and-prob.
# =============================
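# Shows, for the selected well at frames t and t+1, the thresholded label
# images (probability > THETA) and the raw probability maps of the larva
# and/or adult model, depending on the detection target.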
@app.callback(
Output('label-and-prob', 'children'),
[Input('time-selector', 'value'),
Input('well-selector', 'value'),
Input('larva-dropdown', 'value'),
Input('adult-dropdown', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value')])
def callback(time, well_idx, larva, adult, data_root, env, detect):
# Guard
if env is None:
return
if detect == 'pupariation':
# Guard
if larva is None:
return
# Load a npz file storing prob images
# and get a prob image
larva_probs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, 'probs',
'{:03d}.npz'.format(well_idx)))['arr_0'].astype(np.int32)
larva_prob_img1 = PIL.Image.fromarray(
larva_probs[time] / 100 * 255).convert('L')
larva_prob_img2 = PIL.Image.fromarray(
larva_probs[time+1] / 100 * 255).convert('L')
larva_label_img1 = PIL.Image.fromarray(
((larva_probs[time] > THETA) * 255).astype(np.uint8)
).convert('L')
larva_label_img2 = PIL.Image.fromarray(
((larva_probs[time+1] > THETA) * 255).astype(np.uint8)
).convert('L')
# Buffer the well image as byte stream
larva_prob_buf1 = io.BytesIO()
larva_prob_buf2 = io.BytesIO()
larva_label_buf1 = io.BytesIO()
larva_label_buf2 = io.BytesIO()
larva_prob_img1.save(larva_prob_buf1, format='JPEG')
larva_prob_img2.save(larva_prob_buf2, format='JPEG')
larva_label_img1.save(larva_label_buf1, format='JPEG')
larva_label_img2.save(larva_label_buf2, format='JPEG')
data = [
html.Div('Larva'),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_label_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_label_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_prob_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_prob_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
]
elif detect == 'pupa-and-eclo':
data = []
if larva is None:
pass
else:
# Load a npz file storing prob images
# and get a prob image
larva_probs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, 'probs',
'{:03d}.npz'.format(well_idx)))['arr_0'].astype(np.int32)
larva_prob_img1 = PIL.Image.fromarray(
larva_probs[time] / 100 * 255).convert('L')
larva_prob_img2 = PIL.Image.fromarray(
larva_probs[time+1] / 100 * 255).convert('L')
larva_label_img1 = PIL.Image.fromarray(
((larva_probs[time] > THETA) * 255).astype(np.uint8)
).convert('L')
larva_label_img2 = PIL.Image.fromarray(
((larva_probs[time+1] > THETA) * 255).astype(np.uint8)
).convert('L')
# Buffer the well image as byte stream
larva_prob_buf1 = io.BytesIO()
larva_prob_buf2 = io.BytesIO()
larva_label_buf1 = io.BytesIO()
larva_label_buf2 = io.BytesIO()
larva_prob_img1.save(larva_prob_buf1, format='JPEG')
larva_prob_img2.save(larva_prob_buf2, format='JPEG')
larva_label_img1.save(larva_label_buf1, format='JPEG')
larva_label_img2.save(larva_label_buf2, format='JPEG')
data = data + [
html.Div('Larva'),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_label_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_label_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_prob_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
larva_prob_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
]
if adult is None:
pass
else:
adult_probs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, 'probs',
'{:03d}.npz'.format(well_idx)))['arr_0'].astype(np.int32)
adult_prob_img1 = PIL.Image.fromarray(
adult_probs[time] / 100 * 255).convert('L')
adult_prob_img2 = PIL.Image.fromarray(
adult_probs[time+1] / 100 * 255).convert('L')
adult_label_img1 = PIL.Image.fromarray(
((adult_probs[time] > THETA) * 255).astype(np.uint8)
).convert('L')
adult_label_img2 = PIL.Image.fromarray(
((adult_probs[time+1] > THETA) * 255).astype(np.uint8)
).convert('L')
adult_prob_buf1 = io.BytesIO()
adult_prob_buf2 = io.BytesIO()
adult_label_buf1 = io.BytesIO()
adult_label_buf2 = io.BytesIO()
adult_prob_img1.save(adult_prob_buf1, format='JPEG')
adult_prob_img2.save(adult_prob_buf2, format='JPEG')
adult_label_img1.save(adult_label_buf1, format='JPEG')
adult_label_img2.save(adult_label_buf2, format='JPEG')
data = data + [
html.Div('Adult'),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_label_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_label_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_prob_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_prob_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
]
elif detect in ('eclosion', 'death'):
# Guard
if adult is None:
return
# Load a npz file storing prob images
# and get a prob image
adult_probs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, 'probs',
'{:03d}.npz'.format(well_idx)))['arr_0'].astype(np.int32)
adult_prob_img1 = PIL.Image.fromarray(
adult_probs[time] / 100 * 255).convert('L')
adult_prob_img2 = PIL.Image.fromarray(
adult_probs[time+1] / 100 * 255).convert('L')
adult_label_img1 = PIL.Image.fromarray(
((adult_probs[time] > THETA) * 255).astype(np.uint8)
).convert('L')
adult_label_img2 = PIL.Image.fromarray(
((adult_probs[time+1] > THETA) * 255).astype(np.uint8)
).convert('L')
# Buffer the well image as byte stream
adult_prob_buf1 = io.BytesIO()
adult_prob_buf2 = io.BytesIO()
adult_label_buf1 = io.BytesIO()
adult_label_buf2 = io.BytesIO()
adult_prob_img1.save(adult_prob_buf1, format='JPEG')
adult_prob_img2.save(adult_prob_buf2, format='JPEG')
adult_label_img1.save(adult_label_buf1, format='JPEG')
adult_label_img2.save(adult_label_buf2, format='JPEG')
data = [
html.Div('Adult'),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_label_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_label_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
html.Div([
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_prob_buf1.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
html.Img(
src='data:image/jpeg;base64,{}'.format(
base64.b64encode(
adult_prob_buf2.getvalue()).decode('utf-8')),
style={
'background': '#555555',
'height': '65px',
'width': '65px',
'padding': '5px',
'display': 'inline-block',
},
),
]),
]
return data
# ===========================
# Update the current-well.
# ===========================
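# Draws the whole-plate image with a marker on the selected well and, when a
# grouping.csv exists, bounding boxes around each well group.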
@app.callback(
Output('current-well', 'figure'),
[Input('time-selector', 'value'),
Input('well-selector', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(time, well_idx, data_root, env):
# Guard
if env is None:
return {'data': []}
# Load a mask params
with open(os.path.join(data_root, env, 'mask_params.json')) as f:
params = json.load(f)
n_wells = params['n-rows'] * params['n-plates'] * params['n-clms']
xs, ys = well_coordinates(params)
# Load an original image
orgimg_paths = sorted(glob.glob(
os.path.join(data_root, glob.escape(env), 'original', '*.jpg')))
org_img = PIL.Image.open(orgimg_paths[time]).convert('L')
# Buffer the well image as byte stream
buf = io.BytesIO()
org_img.save(buf, format='JPEG')
data_uri = 'data:image/jpeg;base64,{}'.format(
base64.b64encode(buf.getvalue()).decode('utf-8'))
height, width = np.array(org_img).shape
# A coordinate of selected well
selected_x = xs[well_idx:well_idx+1]
selected_y = ys[well_idx:well_idx+1]
# Bounding boxes of groups
if os.path.exists(os.path.join(data_root, env, 'grouping.csv')):
mask = np.load(os.path.join(data_root, env, 'mask.npy'))
mask = np.flipud(mask)
groups = np.loadtxt(
os.path.join(data_root, env, 'grouping.csv'),
dtype=np.int32, delimiter=',').flatten()
        # Remap well indices in the mask to group IDs.  Use a copy and a
        # separate loop variable so the remapping cannot collide with
        # not-yet-remapped well indices and `well_idx` keeps pointing at the
        # selected well.
        group_mask = np.full_like(mask, -1)
        for idx, group_id in enumerate(groups):
            group_mask[mask == idx] = group_id
        mask = group_mask
bounding_boxes = [
{
'x': [
np.where(mask == group_id)[1].min(),
np.where(mask == group_id)[1].max(),
np.where(mask == group_id)[1].max(),
np.where(mask == group_id)[1].min(),
np.where(mask == group_id)[1].min(),
],
'y': [
np.where(mask == group_id)[0].min(),
np.where(mask == group_id)[0].min(),
np.where(mask == group_id)[0].max(),
np.where(mask == group_id)[0].max(),
np.where(mask == group_id)[0].min(),
],
'name': 'Group{}'.format(group_id),
'mode': 'lines',
'line': {'width': 3, 'color': GROUP_COLORS[group_id - 1]},
}
for group_id in np.unique(groups)
]
well_points = [
{
'x': xs[groups == group_id],
'y': ys[groups == group_id],
'text': np.where(groups == group_id)[0].astype(str),
'mode': 'markers',
'marker': {
'size': 4,
'color': GROUP_COLORS[group_id - 1],
'opacity': 0.0,
},
'name': 'Group{}'.format(group_id),
}
for group_id in np.unique(groups)
]
else:
bounding_boxes = []
well_points = [
{
'x': xs,
'y': ys,
'text': [str(i) for i in range(n_wells)],
'mode': 'markers',
'marker': {
'size': 4,
'color': '#ffffff',
'opacity': 0.0,
},
'name': '',
},
]
return {
'data': bounding_boxes + well_points + [
{
'x': selected_x,
'y': selected_y,
'text': str(well_idx),
'mode': 'markers',
'marker': {'size': 10, 'color': '#ff0000', 'opacity': 0.5},
'name': 'Selected well',
},
],
'layout': {
'width': 200,
'height': 400,
'margin': go.layout.Margin(l=0, b=0, t=0, r=0),
'xaxis': {
'range': (0, width),
'scaleanchor': 'y',
'scaleratio': 1,
'showgrid': False,
},
'yaxis': {
'range': (0, height),
'showgrid': False,
},
'images': [{
'xref': 'x',
'yref': 'y',
'x': 0,
'y': 0,
'yanchor': 'bottom',
'sizing': 'stretch',
'sizex': width,
'sizey': height,
'layer': 'below',
'source': data_uri,
}],
'dragmode': 'zoom',
'hovermode': 'closest',
'showlegend': False,
}
}
def well_coordinates(params):
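    """Compute the center (x, y) coordinates of all wells from the mask
    parameters, applying the rotation correction, and return the x/y arrays
    re-ordered through the vertically flipped index grid `well_idxs`."""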
n_rows = params['n-rows']
n_clms = params['n-clms']
n_plates = params['n-plates']
row_gap = params['row-gap']
clm_gap = params['clm-gap']
plate_gap = params['plate-gap']
x = params['x']
y = params['y']
well_w = params['well-w']
well_h = params['well-h']
angle = np.deg2rad(params['angle'])
well_idxs = np.flipud(
np.arange(n_rows * n_clms * n_plates, dtype=int).reshape(
n_rows*n_plates, n_clms)).reshape(n_rows * n_clms * n_plates)
xs = []
ys = []
for n in range(n_plates):
for idx_r in range(n_rows):
for idx_c in range(n_clms):
c1 = x + round(idx_c*(well_w + clm_gap))
r1 = y + round(idx_r*(well_h + row_gap)) \
+ n*(n_rows*well_h + plate_gap) \
+ round(row_gap*(n - 1))
c1, r1 = np.dot(
np.array(
[[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]]),
np.array([c1-x, r1-y])) + np.array([x, y])
c1, r1 = np.round([c1, r1]).astype(int)
xs.append(c1 + well_w / 2)
ys.append(r1 + well_h / 2)
return np.array(xs)[well_idxs], np.array(ys)[well_idxs]
# ======================================================
# Callbacks for threshold
# ======================================================
@app.callback(
Output('larva-thresh-selector', 'value'),
[Input('larva-thresh', 'value')])
def callback(threshold):
return threshold
@app.callback(
Output('adult-thresh-selector', 'value'),
[Input('adult-thresh', 'value')])
def callback(threshold):
return threshold
# ======================================================
# Callbacks for midpoint
# ======================================================
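# The per-well midpoints are buffered in the hidden dcc.Store
# ('hidden-midpoint').  Changing the midpoint selector updates the value of
# the selected well; if a grouping.csv exists the new value is shared with
# every well in the same group, otherwise it is applied to all wells.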
@app.callback(
Output('midpoint-slider', 'max'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
if env is None:
return 100
return len(glob.glob(os.path.join(
data_root, glob.escape(env), 'original', '*.jpg'))) - 2
@app.callback(
Output('midpoint-slider', 'value'),
[Input('well-selector', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('hidden-midpoint', 'data')])
def callback(well_idx, data_root, dataset_name, midpoints):
if well_idx is None or dataset_name is None or midpoints is None:
return 50
return midpoints['midpoint'][well_idx]
@app.callback(
Output('midpoint-selector', 'max'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
if env is None:
return 100
return len(glob.glob(os.path.join(
data_root, glob.escape(env), 'original', '*.jpg'))) - 2
@app.callback(
Output('midpoint-selector', 'value'),
[Input('midpoint-slider', 'value')])
def callback(midpoint):
return midpoint
@app.callback(
Output('hidden-midpoint', 'data'),
[Input('midpoint-selector', 'value')],
[State('hidden-midpoint', 'data'),
State('well-selector', 'value'),
State('data-root', 'children'),
State('env-dropdown', 'value')])
def callback(midpoint, midpoints, well_idx, data_root, dataset_name):
# Guard
if well_idx is None or dataset_name is None:
return
# Load a mask params
with open(os.path.join(data_root, dataset_name, 'mask_params.json')) as f:
params = json.load(f)
n_wells = params['n-rows'] * params['n-plates'] * params['n-clms']
# Initialize the buffer
if midpoints is None or len(midpoints['midpoint']) != n_wells:
midpoints = {'midpoint': [50] * n_wells}
return midpoints
if os.path.exists(os.path.join(data_root, dataset_name, 'grouping.csv')):
group_ids = np.loadtxt(
os.path.join(data_root, dataset_name, 'grouping.csv'),
dtype=np.int32, delimiter=',').flatten()
midpoints['midpoint'] = np.array(midpoints['midpoint'])
group_id = group_ids[well_idx]
midpoints['midpoint'][group_ids == group_id] = midpoint
return midpoints
else:
return {'midpoint': [midpoint] * n_wells}
# =========================================
# Update the figure in the larva-signal
# =========================================
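# Plots the per-well larva signal together with its threshold, the
# automatically detected event frame and, when pupariation.csv exists, the
# manually annotated frame.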
@app.callback(
Output('larva-signal', 'figure'),
[Input('well-selector', 'value'),
Input('larva-thresh-selector', 'value'),
Input('time-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('larva-signal-type', 'value'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('hidden-timestamp', 'data')])
def callback(well_idx, coef, time, midpoints, weight, style,
checks, size, sigma, signal_name, method,
data_root, env, detect, larva, timestamps):
# Guard
if env is None:
return {'data': []}
if larva is None:
return {'data': []}
larva_data = []
manual_data = []
common_data = []
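    # Pipeline: load the raw per-well signal, apply the configured seasoning
    # (smoothing and/or weighting), compute per-well thresholds with
    # THRESH_FUNC, then detect the event frame with detect_event.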
# Load the data
larva_diffs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, size, sigma,
smooth=len(checks) != 0,
weight=len(weight) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=style)
thresholds = THRESH_FUNC(larva_diffs, coef=coef)
auto_evals = detect_event(larva_diffs, thresholds, 'larva', detect, method)
if os.path.exists(
os.path.join(data_root, env, 'original', 'pupariation.csv')):
manual_evals = np.loadtxt(
os.path.join(
data_root, env, 'original', 'pupariation.csv'),
dtype=np.int32, delimiter=',').flatten()
manual_data = [
{
# Manual evaluation time (vertical line)
'x': [manual_evals[well_idx], manual_evals[well_idx]],
'y': [0, larva_diffs.max()],
'mode': 'lines',
'name': 'Manual',
'line': {'width': 2, 'color': '#ffa500'},
},
]
larva_data = [
{
# Signal
'x': list(range(len(larva_diffs[0, :]))),
'y': list(larva_diffs[well_idx]),
'mode': 'lines',
'marker': {'color': '#4169e1'},
'name': 'Signal',
'opacity':1.0,
'line': {'width': 2, 'color': '#4169e1'},
},
{
# Threshold (horizontal line)
'x': [0, len(larva_diffs[0, :])],
'y': [thresholds[well_idx, 0], thresholds[well_idx, 0]],
'mode': 'lines',
'name': 'Threshold',
'line': {'width': 2, 'color': '#4169e1'},
},
{
# Auto evaluation time (vertical line)
'x': [auto_evals[well_idx], auto_evals[well_idx]],
'y': [0, larva_diffs.max()],
'name': 'Auto',
'mode':'lines',
'line': {'width': 2, 'color': '#4169e1', 'dash': 'dot'},
},
]
common_data = [
{
# Selected data point
'x': [time],
'y': [larva_diffs[well_idx, time]],
'mode': 'markers',
'marker': {'size': 10, 'color': '#ff0000'},
'name': '',
},
]
return {
'data': larva_data + manual_data + common_data,
'layout': {
'annotations': [
{
'x': 0.01 * len(larva_diffs.T),
'y': 1.0 * larva_diffs.max(),
'text':
'Threshold: {:.1f}, Max: {:.1f}, Min: {:.1f}' \
.format(thresholds[well_idx, 0],
larva_diffs[well_idx].max(),
larva_diffs[well_idx].min()),
'showarrow': False,
'xanchor': 'left',
'yanchor': 'top',
},
],
'font': {'size': 15},
'xaxis': {
'title': 'Frame',
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Activity',
'tickfont': {'size': 15},
'overlaying': 'y',
'range': [-0.1*larva_diffs.max(), larva_diffs.max()],
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=70, r=0, b=50, t=10, pad=0),
'shapes': day_and_night(timestamps),
},
}
@app.callback(
Output('larva-signal-div', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {
'width': '810px',
'margin-top': '10px',
}
elif detect == 'eclosion':
return {'display': 'none'}
elif detect == 'pupa-and-eclo':
return {
'width': '810px',
'margin-top': '10px',
}
elif detect == 'death':
return {'display': 'none'}
else:
return {}
# =========================================
# Update the figure in the adult-signal.
# =========================================
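# Same plot as larva-signal but for the adult signal; when a larva model is
# selected, pupariation times are detected first and handed to the adult
# seasoning step via pupar_times.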
@app.callback(
Output('adult-signal', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('adult-thresh-selector', 'value'),
Input('time-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('adult-weight-check', 'values'),
Input('adult-weight-style', 'value'),
Input('adult-smoothing-check', 'values'),
Input('adult-window-size', 'value'),
Input('adult-window-sigma', 'value'),
Input('adult-signal-type', 'value'),
Input('detection-method', 'value')],
[State('well-selector', 'value'),
State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('adult-dropdown', 'value'),
State('hidden-timestamp', 'data'),
State('larva-signal-type', 'value')])
def callback(larva_coef, adult_coef, time, midpoints,
larva_weighting, larva_w_style, larva_smoothing, larva_w_size,
larva_w_sigma, adult_weighting, adult_w_style, adult_smoothing,
adult_w_size, adult_w_sigma, adult_signal_name, method, well_idx,
data_root, env, detect, larva, adult, timestamps,
larva_signal_name):
# Guard
if env is None:
return {'data': []}
if adult is None:
return {'data': []}
adult_data = []
manual_data = []
common_data = []
# ----------------------------------------------------------
# Detect pupariation timing for detecting eclosion timing
# ----------------------------------------------------------
if larva is None:
pupar_times = None
else:
# Load the data
larva_diffs = np.load(os.path.join(data_root,
env, 'inference', 'larva', larva, larva_signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, larva_w_size, larva_w_sigma,
smooth=len(larva_smoothing) != 0,
weight=len(larva_weighting) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=larva_w_style)
larva_thresh = THRESH_FUNC(larva_diffs, coef=larva_coef)
pupar_times = detect_event(larva_diffs, larva_thresh, 'larva', detect, method)
# ----------------------------------------
# Detection of eclosion or death timing
# ----------------------------------------
# Load the data
adult_diffs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)).T
adult_diffs = seasoning(
adult_diffs, 'adult', detect, adult_w_size, adult_w_sigma,
smooth=len(adult_smoothing) != 0,
weight=len(adult_weighting) != 0,
pupar_times=pupar_times,
midpoints=midpoints,
weight_style=adult_w_style)
adult_thresh = THRESH_FUNC(adult_diffs, coef=adult_coef)
auto_evals = detect_event(adult_diffs, adult_thresh, 'adult', detect, method)
# Load a manual evaluation of event timing
if detect in ('eclosion', 'pupa-and-eclo') and os.path.exists(
os.path.join(data_root, env, 'original', 'eclosion.csv')):
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'eclosion.csv'),
dtype=np.int32, delimiter=',').flatten()
manual_data = [
{
# Manual evaluation time (vertical line)
'x': [manual_evals[well_idx], manual_evals[well_idx]],
'y': [0, adult_diffs.max()],
'mode': 'lines',
'name': 'Manual',
'line': {'width': 2, 'color': '#ffa500'},
},
]
elif detect == 'death' and os.path.exists(
os.path.join(data_root, env, 'original', 'death.csv')):
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'death.csv'),
dtype=np.int32, delimiter=',').flatten()
manual_data = [
{
# Manual evaluation time (vertical line)
'x': [manual_evals[well_idx], manual_evals[well_idx]],
'y': [0, adult_diffs.max()],
'mode': 'lines',
'name': 'Manual',
'line': {'width': 2, 'color': '#ffa500'},
},
]
adult_data = [
{
# Signal
'x': list(range(len(adult_diffs[0, :]))),
'y': list(adult_diffs[well_idx]),
'mode': 'lines',
'marker': {'color': '#4169e1'},
'name': 'Signal',
'opacity':1.0,
'line': {'width': 2, 'color': '#4169e1'},
},
{
# Threshold (horizontal line)
'x': [0, len(adult_diffs[0, :])],
'y': [adult_thresh[well_idx, 0], adult_thresh[well_idx, 0]],
'mode': 'lines',
'name': 'Threshold',
'line': {'width': 2, 'color': '#4169e1'},
},
{
# Auto evaluation time (vertical line)
'x': [auto_evals[well_idx], auto_evals[well_idx]],
'y': [0, adult_diffs.max()],
'name': 'Auto',
'mode':'lines',
'line': {'width': 2, 'color': '#4169e1', 'dash': 'dot'},
},
]
common_data = [
{
# Selected data point
'x': [time],
'y': [adult_diffs[well_idx, time]],
'mode': 'markers',
'marker': {'size': 10, 'color': '#ff0000'},
'name': '',
},
]
return {
'data': adult_data + manual_data + common_data,
'layout': {
'annotations': [
{
'x': 0.01 * len(adult_diffs.T),
'y': 1.0 * adult_diffs.max(),
'text':
'Threshold: {:.1f}, Max: {:.1f}, Min: {:.1f}' \
.format(adult_thresh[well_idx, 0],
adult_diffs[well_idx].max(),
adult_diffs[well_idx].min()),
'showarrow': False,
'xanchor': 'left',
'yanchor': 'top',
},
],
'font': {'size': 15},
'xaxis': {
'title': 'Frame',
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Activity',
'tickfont': {'size': 15},
'side': 'left',
'range': [-0.1*adult_diffs.max(), adult_diffs.max()],
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=70, r=0, b=50, t=10, pad=0),
'shapes': day_and_night(timestamps),
},
}
@app.callback(
Output('adult-signal-div', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {'display': 'none'}
elif detect == 'eclosion':
return {
'width': '810px',
'margin-top': '10px',
}
elif detect == 'pupa-and-eclo':
return {
'width': '810px',
'margin-top': '10px',
}
elif detect == 'death':
return {
'width': '810px',
'margin-top': '10px',
}
else:
return {}
# =========================================
# Update the figure in the larva-summary
# =========================================
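# Scatter plot of automatically detected frames against the manual
# annotations for every target (non-blacklisted) well, annotated with the
# RMS error.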
@app.callback(
Output('larva-summary', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('larva-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value')])
def callback(coef, well_idx, midpoints, weight, style, checks, size, sigma,
signal_name, blacklist, method, data_root, env, detect, larva):
# Guard
if env is None:
return {'data': []}
if larva is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)):
return {'data': []}
    # Load a manual evaluation of event timing;
    # show a placeholder figure when no manual data are available
    if not os.path.exists(os.path.join(
            data_root, env, 'original', 'pupariation.csv')):
        non_manualdata = {
            'layout': {
                'annotations': [
                    {
                        'x': 5.0,
                        'y': 2.0,
                        'text': 'Not Available',
                        'showarrow': False,
                        'xanchor': 'right',
                    },
                ]
            }
        }
        return non_manualdata
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'pupariation.csv'),
dtype=np.int32, delimiter=',').flatten()
# Target wells will be evaluated
exceptions = np.logical_or(blacklist['value'], manual_evals == 0)
targets = np.logical_not(exceptions)
# Load a group table
group_tables = load_grouping_csv(data_root, env)
# Load a manual data
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'pupariation.csv'),
dtype=np.int32, delimiter=',').flatten()
# Load the data
larva_diffs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, size, sigma,
smooth=len(checks) != 0,
weight=len(weight) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=style)
thresholds = THRESH_FUNC(larva_diffs, coef=coef)
auto_evals = detect_event(larva_diffs, thresholds, 'larva', detect, method)
    # Calculate how many frames the auto evaluation deviates from the manual one
errors = auto_evals[targets] - manual_evals[targets]
# Calculate the root mean square
rms = np.sqrt((errors**2).sum() / len(errors))
# Create data points
if group_tables == []:
data_list = [
{
'x': list(auto_evals[exceptions]),
'y': list(manual_evals[exceptions]),
'text': [str(i) for i in np.where(exceptions)[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#000000'},
'name': 'Exceptions',
},
{
'x': list(auto_evals[targets]),
'y': list(manual_evals[targets]),
'text': [str(i) for i in np.where(targets)[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#1f77b4'},
},
]
else:
data_list = []
for group_idx, group_table in enumerate(group_tables):
data_list.append(
{
'x': list(
auto_evals[np.logical_and(targets, group_table)]),
'y': list(
manual_evals[np.logical_and(targets, group_table)]),
'text': [str(i)
for i in np.where(
np.logical_and(targets, group_table))[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': GROUP_COLORS[group_idx]},
'name': 'Group{}'.format(group_idx + 1),
})
data_list.append(
{
'x': list(
auto_evals[np.logical_and(exceptions, group_table)]),
'y': list(
manual_evals[np.logical_and(exceptions, group_table)]),
'text': [str(i)
for i in np.where(
np.logical_and(exceptions, group_table))[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#000000'},
'name': 'Group{}<br>Exceptions'.format(group_idx + 1),
})
return {
'data': [
{
'x': [
round(0.05 * len(larva_diffs[0, :])),
len(larva_diffs[0, :])
],
'y': [
0,
len(larva_diffs[0, :]) - \
round(0.05 * len(larva_diffs[0, :]))
],
'mode': 'lines',
'fill': None,
'line': {'width': .1, 'color': '#43d86b'},
'name': 'Lower bound',
},
{
'x': [
-round(0.05 * len(larva_diffs[0, :])),
len(larva_diffs[0, :])
],
'y': [
0,
len(larva_diffs[0, :]) + \
round(0.05 * len(larva_diffs[0, :]))
],
'mode': 'lines',
'fill': 'tonexty',
'line': {'width': .1, 'color': '#c0c0c0'},
'name': 'Upper bound',
},
{
'x': [0, len(larva_diffs[0, :])],
'y': [0, len(larva_diffs[0, :])],
'mode': 'lines',
'line': {'width': .5, 'color': '#000000'},
'name': 'Auto = Manual',
},
] + data_list + [
{
'x': [auto_evals[well_idx]],
'y': [manual_evals[well_idx]],
'text': str(well_idx),
'mode': 'markers',
'marker': {'size': 10, 'color': '#ff0000'},
'name': 'Selected well',
},
],
'layout': {
'annotations': [
{
'x': 0.01 * len(larva_diffs.T),
'y': 1.0 * len(larva_diffs.T),
'text': 'RMS: {:.1f}'.format(rms),
'showarrow': False,
'xanchor': 'left',
},
],
'font': {'size': 15},
'xaxis': {
'title': 'Auto',
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Manual',
'tickfont': {'size': 15},
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=50, r=0, b=40, t=70, pad=0),
},
}
@app.callback(
Output('larva-summary', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect == 'eclosion':
return {'display': 'none'}
elif detect == 'pupa-and-eclo':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect == 'death':
return {'display': 'none'}
else:
return {}
# ==========================================
# Update the figure in the adult-summary.
# ==========================================
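# Same auto-vs-manual scatter plot as larva-summary, but for the adult
# signal, using eclosion.csv or death.csv as the manual reference.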
@app.callback(
Output('adult-summary', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('adult-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('adult-weight-check', 'values'),
Input('adult-weight-style', 'value'),
Input('adult-smoothing-check', 'values'),
Input('adult-window-size', 'value'),
Input('adult-window-sigma', 'value'),
Input('adult-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('adult-dropdown', 'value'),
State('larva-signal-type', 'value')])
def callback(larva_coef, adult_coef, well_idx, midpoints,
larva_weighting, larva_w_style, larva_smoothing, larva_w_size,
larva_w_sigma, adult_weighting, adult_w_style, adult_smoothing,
adult_w_size, adult_w_sigma, adult_signal_name, blacklist, method,
data_root, env, detect, larva, adult,
larva_signal_name):
# Guard
if env is None:
return {'data': []}
if adult is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)):
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'original', 'eclosion.csv')) \
and detect == 'pupa-and-eclo':
return {'data': []}
# Load a manual evaluation of event timing
if detect in ('eclosion', 'pupa-and-eclo'):
if not os.path.exists(os.path.join(
data_root, env, 'original', 'eclosion.csv')):
non_manualdata = {
'layout': {
'annotations': [
{
'x': 5.0,
'y': 2.0,
'text': 'Not Available',
'showarrow': False,
'xanchor': 'right',
},
]
}
}
return non_manualdata
else:
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'eclosion.csv'),
dtype=np.int32, delimiter=',').flatten()
elif detect == 'death':
if not os.path.exists(os.path.join(
data_root, env, 'original', 'death.csv')):
non_manualdata = {
'layout': {
'annotations': [
{
'x': 5.0,
'y': 2.0,
'text': 'Not Available',
'showarrow': False,
'xanchor': 'right',
},
]
}
}
return non_manualdata
else:
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'death.csv'),
dtype=np.int32, delimiter=',').flatten()
# Target wells will be evaluated
exceptions = np.logical_or(blacklist['value'], manual_evals == 0)
targets = np.logical_not(exceptions)
# Load a group table
group_tables = load_grouping_csv(data_root, env)
# ----------------------------------------------------------
# Detect pupariation timing for detecting eclosion timing
# ----------------------------------------------------------
if larva is None:
pupar_times = None
else:
# Load the data
larva_diffs = np.load(os.path.join(data_root,
env, 'inference', 'larva', larva, larva_signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, larva_w_size, larva_w_sigma,
smooth=len(larva_smoothing) != 0,
weight=len(larva_weighting) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=larva_w_style)
larva_thresh = THRESH_FUNC(larva_diffs, coef=larva_coef)
pupar_times = detect_event(larva_diffs, larva_thresh, 'larva', detect, method)
# ----------------------------------------
# Detection of eclosion or death timing
# ----------------------------------------
# Load the data
adult_diffs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)).T
adult_diffs = seasoning(
adult_diffs, 'adult', detect, adult_w_size, adult_w_sigma,
smooth=len(adult_smoothing) != 0,
weight=len(adult_weighting) != 0,
pupar_times=pupar_times,
midpoints=midpoints,
weight_style=adult_w_style)
adult_thresh = THRESH_FUNC(adult_diffs, coef=adult_coef)
auto_evals = detect_event(adult_diffs, adult_thresh, 'adult', detect, method)
    # Calculate how many frames the auto evaluation deviates from the manual one
errors = auto_evals[targets] - manual_evals[targets]
# Calculate the root mean square
rms = np.sqrt((errors**2).sum() / len(errors))
# Create data points
if group_tables == []:
data_list = [
{
'x': list(auto_evals[exceptions]),
'y': list(manual_evals[exceptions]),
'text': [str(i) for i in np.where(exceptions)[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#000000'},
'name': 'Exception',
},
{
'x': list(auto_evals[targets]),
'y': list(manual_evals[targets]),
'text': [str(i) for i in np.where(targets)[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#1f77b4'},
},
]
else:
data_list = []
for group_idx, group_table in enumerate(group_tables):
data_list.append(
{
'x': list(
auto_evals[np.logical_and(targets, group_table)]),
'y': list(
manual_evals[np.logical_and(targets, group_table)]),
'text': [str(i)
for i in np.where(
np.logical_and(targets, group_table))[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': GROUP_COLORS[group_idx]},
'name': 'Group{}'.format(group_idx + 1),
})
data_list.append(
{
'x': list(
auto_evals[np.logical_and(exceptions, group_table)]),
'y': list(
manual_evals[np.logical_and(exceptions, group_table)]),
'text': [str(i)
for i in np.where(
np.logical_and(exceptions, group_table))[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#000000'},
'name': 'Group{}<br>Exceptions'.format(group_idx + 1),
})
return {
'data': [
{
'x': [
round(0.05 * len(adult_diffs[0, :])),
len(adult_diffs[0, :])
],
'y': [
0,
len(adult_diffs[0, :]) - \
round(0.05 * len(adult_diffs[0, :]))
],
'mode': 'lines',
'fill': None,
'line': {'width': .1, 'color': '#43d86b'},
'name': 'Lower bound',
},
{
'x': [
-round(0.05 * len(adult_diffs[0, :])),
len(adult_diffs[0, :])
],
'y': [
0,
len(adult_diffs[0, :]) + \
round(0.05 * len(adult_diffs[0, :]))
],
'mode': 'lines',
'fill': 'tonexty',
'line': {'width': .1, 'color': '#c0c0c0'},
'name': 'Upper bound',
},
{
'x': [0, len(adult_diffs[0, :])],
'y': [0, len(adult_diffs[0, :])],
'mode': 'lines',
'line': {'width': .5, 'color': '#000000'},
'name': 'Auto = Manual',
},
] + data_list + [
{
'x': [auto_evals[well_idx]],
'y': [manual_evals[well_idx]],
'text': str(well_idx),
'mode': 'markers',
'marker': {'size': 10, 'color': '#ff0000'},
'name': 'Selected well',
},
],
'layout': {
'annotations': [
{
'x': 0.01 * len(adult_diffs.T),
'y': 1.0 * len(adult_diffs.T),
'text': 'RMS: {:.1f}'.format(rms),
'showarrow': False,
'xanchor': 'left',
},
],
'font': {'size': 15},
'xaxis': {
'title': 'Auto',
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Manual',
'tickfont': {'size': 15},
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=50, r=0, b=40, t=70, pad=0),
},
}
@app.callback(
Output('adult-summary', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {'display': 'none'}
elif detect == 'eclosion':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect == 'pupa-and-eclo':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect == 'death':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
else:
return {}
# =======================================
# Update the figure in the larva-hist.
# =======================================
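# Histogram of (auto - manual) frame differences for the larva signal,
# annotated with the fraction of wells whose error stays within 5% or 1% of
# the total frame count, and within 10 frames.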
@app.callback(
Output('larva-hist', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('larva-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value')])
def callback(coef, well_idx, midpoints, weight, style, checks, size, sigma,
signal_name, blacklist, method, data_root, env, detect, larva):
# Guard
if env is None:
return {'data': []}
if larva is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)):
return {'data': []}
    # Load a manual evaluation of event timing;
    # show a placeholder figure when no manual data are available
    if not os.path.exists(os.path.join(
            data_root, env, 'original', 'pupariation.csv')):
        non_manualdata = {
            'layout': {
                'annotations': [
                    {
                        'x': 5.0,
                        'y': 2.0,
                        'text': 'Not Available',
                        'showarrow': False,
                        'xanchor': 'right',
                    },
                ]
            }
        }
        return non_manualdata
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'pupariation.csv'),
dtype=np.int32, delimiter=',').flatten()
# Target wells will be evaluated
exceptions = np.logical_or(blacklist['value'], manual_evals == 0)
targets = np.logical_not(exceptions)
# Load the data
larva_diffs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, size, sigma,
smooth=len(checks) != 0,
weight=len(weight) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=style)
thresholds = THRESH_FUNC(larva_diffs, coef=coef)
auto_evals = detect_event(larva_diffs, thresholds, 'larva', detect, method)
# Calculate how many frames auto-evaluation is far from manual's one
errors = auto_evals - manual_evals
errors = errors[targets]
ns, bins = np.histogram(errors, 1000)
# Calculate the number of inconsistent wells
tmp = np.bincount(abs(errors))
n_consist_5percent = tmp[:round(0.05 * larva_diffs.shape[1])].sum()
n_consist_1percent = tmp[:round(0.01 * larva_diffs.shape[1])].sum()
n_consist_10frames = tmp[:11].sum()
return {
'data': [
{
'x': [
-round(0.05 * larva_diffs.shape[1]),
round(0.05 * larva_diffs.shape[1])
],
'y': [ns.max(), ns.max()],
'mode': 'lines',
'fill': 'tozeroy',
'line': {'width': 0, 'color': '#c0c0c0'},
},
{
'x': list(bins[1:]),
'y': list(ns),
'mode': 'markers',
'type': 'bar',
'marker': {'size': 5, 'color': '#1f77b4'},
},
],
'layout': {
'annotations': [
{
'x': 0.9 * larva_diffs.shape[1],
'y': 1.0 * ns.max(),
'text': '#frames: consistency',
'showarrow': False,
'xanchor': 'right',
},
{
'x': 0.9 * larva_diffs.shape[1],
'y': 0.9 * ns.max(),
'text': '{} (5%): {:.1f}% ({}/{})'.format(
round(0.05 * larva_diffs.shape[1]),
100 * n_consist_5percent / targets.sum(),
n_consist_5percent,
targets.sum()),
'showarrow': False,
'xanchor': 'right',
},
{
'x': 0.9 * larva_diffs.shape[1],
'y': 0.8 * ns.max(),
'text': '{} (1%): {:.1f}% ({}/{})'.format(
round(0.01 * larva_diffs.shape[1]),
100 * n_consist_1percent / targets.sum(),
n_consist_1percent,
targets.sum()),
'showarrow': False,
'xanchor': 'right',
},
{
'x': 0.9 * larva_diffs.shape[1],
'y': 0.7 * ns.max(),
'text': '10: {:.1f}% ({}/{})'.format(
100 * n_consist_10frames / targets.sum(),
n_consist_10frames,
targets.sum()),
'showarrow': False,
'xanchor': 'right',
},
],
'font': {'size': 15},
'xaxis': {
'title': 'Auto - manual',
'range': [-len(larva_diffs.T), len(larva_diffs.T)],
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Count',
'tickfont': {'size': 15},
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=50, r=0, b=50, t=70, pad=0),
},
}
@app.callback(
Output('larva-hist', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect == 'eclosion':
return {'display': 'none'}
elif detect == 'pupa-and-eclo':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect == 'death':
return {'display': 'none'}
else:
return {}
# =======================================
# Update the figure in the adult-hist.
# =======================================
@app.callback(
Output('adult-hist', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('adult-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('adult-weight-check', 'values'),
Input('adult-weight-style', 'value'),
Input('adult-smoothing-check', 'values'),
Input('adult-window-size', 'value'),
Input('adult-window-sigma', 'value'),
Input('adult-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('adult-dropdown', 'value'),
State('larva-signal-type', 'value')])
def callback(larva_coef, adult_coef, well_idx, midpoints,
larva_weighting, larva_w_style, larva_smoothing, larva_w_size,
larva_w_sigma, adult_weighting, adult_w_style, adult_smoothing,
adult_w_size, adult_w_sigma, adult_signal_name, blacklist, method,
data_root, env, detect, larva, adult,
larva_signal_name):
# Guard
if env is None:
return {'data': []}
if adult is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)):
return {'data': []}
# Load a manual evaluation of event timing
if detect in ('eclosion', 'pupa-and-eclo'):
if not os.path.exists(os.path.join(
data_root, env, 'original', 'eclosion.csv')):
non_manualdata = {
'layout': {
'annotations': [
{
'x': 5.0,
'y': 2.0,
'text': 'Not Available',
'showarrow': False,
'xanchor': 'right',
},
]
}
}
return non_manualdata
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'eclosion.csv'),
dtype=np.int32, delimiter=',').flatten()
elif detect == 'death':
if not os.path.exists(os.path.join(
data_root, env, 'original', 'death.csv')):
non_manualdata = {
'layout': {
'annotations': [
{
'x': 5.0,
'y': 2.0,
'text': 'Not Available',
'showarrow': False,
'xanchor': 'right',
},
]
}
}
return non_manualdata
manual_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'death.csv'),
dtype=np.int32, delimiter=',').flatten()
# Target wells will be evaluated
exceptions = np.logical_or(blacklist['value'], manual_evals == 0)
targets = np.logical_not(exceptions)
# ----------------------------------------------------------
# Detect pupariation timing for detecting eclosion timing
# ----------------------------------------------------------
if larva is None:
pupar_times = None
else:
# Load the data
larva_diffs = np.load(os.path.join(data_root,
env, 'inference', 'larva', larva, larva_signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, larva_w_size, larva_w_sigma,
smooth=len(larva_smoothing) != 0,
weight=len(larva_weighting) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=larva_w_style)
larva_thresh = THRESH_FUNC(larva_diffs, coef=larva_coef)
pupar_times = detect_event(larva_diffs, larva_thresh, 'larva', detect, method)
# ----------------------------------------
# Detection of eclosion or death timing
# ----------------------------------------
# Load the data
adult_diffs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)).T
adult_diffs = seasoning(
adult_diffs, 'adult', detect, adult_w_size, adult_w_sigma,
smooth=len(adult_smoothing) != 0,
weight=len(adult_weighting) != 0,
pupar_times=pupar_times,
midpoints=midpoints,
weight_style=adult_w_style)
adult_thresh = THRESH_FUNC(adult_diffs, coef=adult_coef)
auto_evals = detect_event(adult_diffs, adult_thresh, 'adult', detect, method)
# Calculate how many frames auto-evaluation is far from manual's one
errors = auto_evals - manual_evals
errors = errors[targets]
ns, bins = np.histogram(errors, 1000)
# Calculate the number of inconsistent wells
tmp = np.bincount(abs(errors))
n_consist_5percent = tmp[:round(0.05 * adult_diffs.shape[1])].sum()
n_consist_1percent = tmp[:round(0.01 * adult_diffs.shape[1])].sum()
n_consist_10frames = tmp[:11].sum()
return {
'data': [
{
'x': [
-round(0.05 * adult_diffs.shape[1]),
round(0.05 * adult_diffs.shape[1])
],
'y': [ns.max(), ns.max()],
'mode': 'lines',
'fill': 'tozeroy',
'line': {'width': 0, 'color': '#c0c0c0'},
},
{
'x': list(bins[1:]),
'y': list(ns),
'mode': 'markers',
'type': 'bar',
'marker': {'size': 5, 'color': '#1f77b4'},
},
],
'layout': {
'annotations': [
{
'x': 0.9 * adult_diffs.shape[1],
'y': 1.0 * ns.max(),
'text': '#frames: consistency',
'showarrow': False,
'xanchor': 'right',
},
{
'x': 0.9 * adult_diffs.shape[1],
'y': 0.9 * ns.max(),
'text': '{} (5%): {:.1f}% ({}/{})'.format(
round(0.05 * adult_diffs.shape[1]),
100 * n_consist_5percent / targets.sum(),
n_consist_5percent,
targets.sum()),
'showarrow': False,
'xanchor': 'right',
},
{
'x': 0.9 * adult_diffs.shape[1],
'y': 0.8 * ns.max(),
'text': '{} (1%): {:.1f}% ({}/{})'.format(
round(0.01 * adult_diffs.shape[1]),
100 * n_consist_1percent / targets.sum(),
n_consist_1percent,
targets.sum()),
'showarrow': False,
'xanchor': 'right',
},
{
'x': 0.9 * adult_diffs.shape[1],
'y': 0.7 * ns.max(),
'text': '10: {:.1f}% ({}/{})'.format(
100 * n_consist_10frames / targets.sum(),
n_consist_10frames,
targets.sum()),
'showarrow': False,
'xanchor': 'right',
},
],
'font': {'size': 15},
'xaxis': {
'title': 'Auto - manual',
'range': [-len(adult_diffs.T), len(adult_diffs.T)],
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Count',
'tickfont': {'size': 15},
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=50, r=0, b=50, t=70, pad=0),
},
}
@app.callback(
Output('adult-hist', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {'display': 'none'}
elif detect in ('eclosion', 'pupa-and-eclo', 'death'):
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
else:
return {}
# ==============================================
# Update the figure in the pupa-vs-eclo plot.
# ==============================================
@app.callback(
Output('pupa-vs-eclo', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('adult-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('larva-signal-type', 'value'),
Input('adult-weight-check', 'values'),
Input('adult-weight-style', 'value'),
Input('adult-smoothing-check', 'values'),
Input('adult-window-size', 'value'),
Input('adult-window-sigma', 'value'),
Input('adult-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('adult-dropdown', 'value')])
def callback(larva_coef, adult_coef, well_idx, midpoints, larva_weighting,
larva_w_style, larva_smoothing, larva_w_size, larva_w_sigma,
larva_signal_name, adult_weighting, adult_w_style, adult_smoothing,
adult_w_size, adult_w_sigma, adult_signal_name,
blacklist, method, data_root, env, detect, larva, adult):
# Guard
if env is None:
return {'data': []}
if larva is None:
return {'data': []}
if adult is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'larva', larva, larva_signal_name)):
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)):
return {'data': []}
if detect == 'death':
return {'data': []}
# Load a blacklist and whitelist
blacklist = np.array(blacklist['value'])
whitelist = np.logical_not(blacklist)
# Evaluation of pupariation timings
larva_diffs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, larva_signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, larva_w_size, larva_w_sigma,
smooth=len(larva_smoothing) != 0,
weight=len(larva_weighting) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=larva_w_style)
larva_thresh = THRESH_FUNC(larva_diffs, coef=larva_coef)
pupar_times = detect_event(larva_diffs, larva_thresh, 'larva', detect, method)
# Evaluation of eclosion timings
adult_diffs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)).T
adult_diffs = seasoning(
adult_diffs, 'adult', detect, adult_w_size, adult_w_sigma,
smooth=len(adult_smoothing) != 0,
weight=len(adult_weighting) != 0,
pupar_times=pupar_times,
midpoints=midpoints,
weight_style=adult_w_style)
adult_thresh = THRESH_FUNC(adult_diffs, coef=adult_coef)
eclo_times = detect_event(adult_diffs, adult_thresh, 'adult', detect, method)
return {
'data': [
{
'x': list(pupar_times[blacklist]),
'y': list(eclo_times[blacklist]),
'text': [str(i) for i in np.where(blacklist)[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#000000'},
'name': 'Well in Blacklist',
},
{
'x': list(pupar_times[whitelist]),
'y': list(eclo_times[whitelist]),
'text': [str(i) for i in np.where(whitelist)[0]],
'mode': 'markers',
'marker': {'size': 4, 'color': '#1f77b4'},
'name': 'Well in Whitelist',
},
{
'x': [pupar_times[well_idx]],
'y': [eclo_times[well_idx]],
'text': str(well_idx),
'mode': 'markers',
'marker': {'size': 10, 'color': '#ff0000'},
'name': 'Selected well',
},
],
'layout': {
'font': {'size': 15},
'xaxis': {
'title': 'Pupariation',
'tickfont': {'size': 15},
'range': [
-0.1 * len(larva_diffs.T),
1.1 * len(larva_diffs.T)],
},
'yaxis': {
'title': 'Eclosion',
'tickfont': {'size': 15},
'range': [
-0.1 * len(adult_diffs.T),
1.1 * len(adult_diffs.T)],
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=50, r=0, b=50, t=70, pad=0),
},
}
@app.callback(
Output('pupa-vs-eclo', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect in ('pupariation', 'eclosion', 'death'):
return {'display': 'none'}
elif detect == 'pupa-and-eclo':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
else:
return {}
# ===========================================
# Update the figure in the survival-curve.
# ===========================================
@app.callback(
Output('survival-curve', 'figure'),
[Input('adult-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('adult-weight-check', 'values'),
Input('adult-weight-style', 'value'),
Input('adult-smoothing-check', 'values'),
Input('adult-window-size', 'value'),
Input('adult-window-sigma', 'value'),
Input('adult-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('adult-dropdown', 'value')])
def callback(coef, well_idx, midpoints, weight, style, checks, size, sigma,
signal_name, blacklist, method, data_root, env, detect, adult):
# Guard
if env is None:
return {'data': []}
if adult is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'adult', adult, signal_name)):
return {'data': []}
if detect in ('pupariation', 'eclosion', 'pupa-and-eclo'):
return {'data': []}
# Load a blacklist and whitelist
whitelist = np.logical_not(np.array(blacklist['value']))
# Load a group table
group_tables = load_grouping_csv(data_root, env)
# Load the data
adult_diffs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, signal_name)).T
adult_diffs = seasoning(
adult_diffs, 'adult', detect, size, sigma,
smooth=len(checks) != 0,
weight=len(weight) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=style)
thresholds = THRESH_FUNC(adult_diffs, coef=coef)
auto_evals = detect_event(adult_diffs, thresholds, 'adult', detect, method)
if group_tables == []:
# Compute survival ratio of all the animals
survival_ratio = np.zeros_like(adult_diffs)
for well_idx, event_time in enumerate(auto_evals):
survival_ratio[well_idx, :event_time] = 1
survival_ratio = \
100 * survival_ratio[whitelist].sum(axis=0) / \
len(survival_ratio[whitelist])
data_list = [
{
'x': list(range(len(survival_ratio))),
'y': list(survival_ratio),
'mode': 'lines',
                'line': {'width': 2, 'color': '#ff4500'},
'name': 'Group1'
}]
else:
survival_ratios = []
for group_idx, group_table in enumerate(group_tables):
survival_ratio = np.zeros_like(adult_diffs)
for well_idx, (event_time, in_group) \
in enumerate(zip(auto_evals, group_table)):
survival_ratio[well_idx, :event_time] = in_group
survival_ratio = \
100 * survival_ratio[whitelist].sum(axis=0) / \
group_table[whitelist].sum()
survival_ratios.append(survival_ratio)
        data_list = []
for group_idx, (group_table, survival_ratio) \
in enumerate(zip(group_tables, survival_ratios)):
data_list.append(
{
'x': list(range(len(survival_ratio))),
'y': list(survival_ratio),
'mode': 'lines',
'marker': {'size': 2, 'color': GROUP_COLORS[group_idx]},
'name': 'Group{}'.format(group_idx + 1),
})
return {
'data': data_list + [
{
'x': [0, len(survival_ratio)],
'y': [100, 100],
'mode': 'lines',
'line': {'width': 1, 'color': '#000000'},
},
],
'layout': {
'font': {'size': 15},
'xaxis': {
'title': 'Frame',
'range': [0, 1.1 * len(survival_ratio)],
'tickfont': {'size': 15},
},
'yaxis': {
'title': 'Survival Ratio [%]',
'tickfont': {'size': 15},
'range': [0, 110],
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=50, r=0, b=50, t=70, pad=0),
},
}
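# A minimal sketch (not part of the app, illustrative values only) of how the
# survival ratio above is computed: each whitelisted well contributes 1 to
# every frame before its detected event time, and the per-frame sum is
# normalized to a percentage.
#
#   import numpy as np
#   auto_evals = np.array([2, 4, 4])           # hypothetical event frame per well
#   n_frames = 5
#   alive = np.zeros((len(auto_evals), n_frames))
#   for well_idx, event_time in enumerate(auto_evals):
#       alive[well_idx, :event_time] = 1
#   survival_ratio = 100 * alive.sum(axis=0) / len(alive)
#   # -> [100., 100., 66.7, 66.7, 0.] (approximately)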
@app.callback(
Output('survival-curve', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect in ('pupariation', 'eclosion', 'pupa-and-eclo'):
return {'display': 'none'}
elif detect == 'death':
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
else:
return {}
# ===========================================
# Update the boxplot for larva
# ===========================================
@app.callback(
Output('larva-boxplot', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('larva-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value')])
def callback(coef, well_idx, midpoints, weight, style, checks, size, sigma,
signal_name, blacklist, method, data_root, env, detect, larva):
# Guard
if env is None:
return {'data': []}
if larva is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)):
return {'data': []}
if detect == 'death':
return {'data': []}
# Load a blacklist and whitelist
whitelist = np.logical_not(np.array(blacklist['value']))
# Load a group table
group_tables = load_grouping_csv(data_root, env)
# Load the data
larva_diffs = np.load(os.path.join(
data_root, env, 'inference', 'larva', larva, signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, size, sigma,
smooth=len(checks) != 0,
weight=len(weight) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=style)
# Compute thresholds
thresholds = THRESH_FUNC(larva_diffs, coef=coef)
auto_evals = detect_event(larva_diffs, thresholds, 'larva', detect, method)
# Make data to be drawn
if group_tables == []:
data = []
data.append(
go.Box(
x=list(auto_evals[whitelist]),
name='Group1',
boxpoints='all',
pointpos=1.8,
marker={'size': 2},
line={'width': 2},
text=[str(i) for i in np.where(whitelist)[0]],
boxmean='sd',
)
)
    else:
        data = []
for group_idx, group_table in enumerate(group_tables):
data.append(
go.Box(
x=list(auto_evals[np.logical_and(whitelist, group_table)]),
                    name='Group{}'.format(group_idx + 1),
boxpoints='all',
pointpos=1.8,
marker={'size': 2, 'color': GROUP_COLORS[group_idx]},
line={'width': 2, 'color': GROUP_COLORS[group_idx]},
text=[str(i)
for i in np.where(
np.logical_and(whitelist, group_table))[0]],
boxmean='sd',
)
)
return {
'data': data,
'layout': {
'font': {'size': 15},
'xaxis': {
'title': 'Frame',
'tickfont': {'size': 15},
'range': [0, 1.1 * len(larva_diffs.T)],
},
'yaxis': {
'tickfont': {'size': 15},
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=70, r=0, b=50, t=70, pad=0),
},
}
@app.callback(
Output('larva-boxplot', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect in ('pupariation', 'pupa-and-eclo'):
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
elif detect in ('eclosion', 'death'):
return {'display': 'none'}
else:
return {}
# ===========================================
# Update the boxplot for adult
# ===========================================
@app.callback(
Output('adult-boxplot', 'figure'),
[Input('larva-thresh-selector', 'value'),
Input('adult-thresh-selector', 'value'),
Input('well-selector', 'value'),
Input('hidden-midpoint', 'data'),
Input('larva-weight-check', 'values'),
Input('larva-weight-style', 'value'),
Input('larva-smoothing-check', 'values'),
Input('larva-window-size', 'value'),
Input('larva-window-sigma', 'value'),
Input('larva-signal-type', 'value'),
Input('adult-weight-check', 'values'),
Input('adult-weight-style', 'value'),
Input('adult-smoothing-check', 'values'),
Input('adult-window-size', 'value'),
Input('adult-window-sigma', 'value'),
Input('adult-signal-type', 'value'),
Input('hidden-blacklist', 'data'),
Input('detection-method', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('adult-dropdown', 'value')])
def callback(larva_coef, adult_coef, well_idx, midpoints,
larva_weighting, larva_w_style, larva_smoothing, larva_w_size,
larva_w_sigma, larva_signal_name, adult_weighting, adult_w_style,
adult_smoothing, adult_w_size, adult_w_sigma, adult_signal_name,
blacklist, method, data_root, env, detect, larva, adult):
# Guard
if env is None:
return {'data': []}
if adult is None:
return {'data': []}
if not os.path.exists(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)):
return {'data': []}
if detect == 'pupariation':
return {'data': []}
# Load a blacklist and whitelist
whitelist = np.logical_not(np.array(blacklist['value']))
# Load a group table
group_tables = load_grouping_csv(data_root, env)
# ----------------------------------------------------------
# Detect pupariation timing for detecting eclosion timing
# ----------------------------------------------------------
if larva is None:
pupar_times = None
else:
# Load the data
larva_diffs = np.load(os.path.join(data_root,
env, 'inference', 'larva', larva, larva_signal_name)).T
larva_diffs = seasoning(
larva_diffs, 'larva', detect, larva_w_size, larva_w_sigma,
smooth=len(larva_smoothing) != 0,
weight=len(larva_weighting) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=larva_w_style)
larva_thresh = THRESH_FUNC(larva_diffs, coef=larva_coef)
pupar_times = detect_event(larva_diffs, larva_thresh, 'larva', detect, method)
# ----------------------------------------
# Detection of eclosion or death timing
# ----------------------------------------
# Load the data
adult_diffs = np.load(os.path.join(
data_root, env, 'inference', 'adult', adult, adult_signal_name)).T
adult_diffs = seasoning(
adult_diffs, 'adult', detect, adult_w_size, adult_w_sigma,
smooth=len(adult_smoothing) != 0,
weight=len(adult_weighting) != 0,
pupar_times=pupar_times,
midpoints=midpoints,
weight_style=adult_w_style)
adult_thresh = THRESH_FUNC(adult_diffs, coef=adult_coef)
auto_evals = detect_event(adult_diffs, adult_thresh, 'adult', detect, method)
# Make data to be drawn
if group_tables == []:
data = []
data.append(
go.Box(
x=list(auto_evals[whitelist]),
name='Group1',
boxpoints='all',
pointpos=1.8,
marker={'size': 2},
line={'width': 2},
text=[str(i) for i in np.where(whitelist)[0]],
boxmean='sd',
)
)
    else:
        data = []
for group_idx, group_table in enumerate(group_tables):
data.append(
go.Box(
x=list(auto_evals[np.logical_and(whitelist, group_table)]),
                    name='Group{}'.format(group_idx + 1),
boxpoints='all',
pointpos=1.8,
marker={'size': 2, 'color': GROUP_COLORS[group_idx]},
line={'width': 2, 'color': GROUP_COLORS[group_idx]},
text=[str(i)
for i in np.where(
np.logical_and(whitelist, group_table))[0]],
boxmean='sd',
)
)
return {
'data': data,
'layout': {
'font': {'size': 15},
'xaxis': {
'title': 'Frame',
'tickfont': {'size': 15},
'range': [0, 1.1 * len(adult_diffs.T)],
},
'yaxis': {
'tickfont': {'size': 15},
},
'showlegend': False,
'hovermode': 'closest',
'margin': go.layout.Margin(l=70, r=0, b=50, t=70, pad=0),
},
}
@app.callback(
Output('adult-boxplot', 'style'),
[Input('detect-target', 'value')])
def callback(detect):
if detect == 'pupariation':
return {'display': 'none'}
elif detect in ('eclosion', 'pupa-and-eclo', 'death'):
return {
'display': 'inline-block',
'height': '300px',
'width': '20%',
}
else:
return {}
# =======================================================================
# Store image file names and their timestamps as json in a hidden div.
# =======================================================================
@app.callback(
Output('hidden-timestamp', 'data'),
[Input('env-dropdown', 'value')],
[State('data-root', 'children')])
def callback(env, data_root):
# Guard
if env is None:
return
# Load an original image
orgimg_paths = sorted(glob.glob(os.path.join(
data_root, glob.escape(env), 'original', '*.jpg')))
return {
'Image name': [os.path.basename(path) for path in orgimg_paths],
'Create time': [get_create_time(path) for path in orgimg_paths],
}
def get_create_time(path):
DateTimeDigitized = PIL.Image.open(path)._getexif()[36868]
# '2016:02:05 17:20:53' -> '2016-02-05 17:20:53'
DateTimeDigitized = DateTimeDigitized[:4] + '-' + DateTimeDigitized[5:]
DateTimeDigitized = DateTimeDigitized[:7] + '-' + DateTimeDigitized[8:]
return DateTimeDigitized
# ===================
# Make data tables
# ===================
@app.callback(
Output('data-tables', 'children'),
[Input('tabs', 'value')],
[State('data-root', 'children'),
State('env-dropdown', 'value'),
State('detect-target', 'value'),
State('larva-dropdown', 'value'),
State('adult-dropdown', 'value'),
State('larva-thresh-selector', 'value'),
State('adult-thresh-selector', 'value'),
State('hidden-midpoint', 'data'),
State('larva-weight-check', 'values'),
State('larva-weight-style', 'value'),
State('larva-window-size', 'value'),
State('larva-window-sigma', 'value'),
State('larva-smoothing-check', 'values'),
State('adult-weight-check', 'values'),
State('adult-weight-style', 'value'),
State('adult-window-size', 'value'),
State('adult-window-sigma', 'value'),
State('adult-smoothing-check', 'values'),
State('larva-signal-type', 'value'),
State('adult-signal-type', 'value'),
State('detection-method', 'value'),
State('hidden-timestamp', 'data')])
def callback(tab_name, data_root, env, detect, larva, adult, larva_coef,
adult_coef, midpoints, larva_weighting, larva_w_style, larva_w_size,
larva_w_sigma, larva_smoothing, adult_weighting, adult_w_style,
adult_w_size, adult_w_sigma, adult_smoothing, larva_signal_name,
adult_signal_name, method, timestamps):
# Guard
if data_root is None:
raise dash.exceptions.PreventUpdate
if env is None:
return 'Please select a dataset.'
if tab_name != 'tab-2':
raise dash.exceptions.PreventUpdate
with open(os.path.join(data_root, env, 'mask_params.json')) as f:
params = json.load(f)
larva_man_table = make_manual_table(
data_root, env, 'larva', detect, params)
larva_auto_table = make_auto_table(
data_root, env, 'larva', larva, detect, larva_signal_name,
params, larva_w_size, larva_w_sigma, larva_smoothing,
larva_weighting, midpoints, larva_w_style, larva_coef, method)
adult_man_table = make_manual_table(
data_root, env, 'adult', detect, params)
adult_auto_table = make_auto_table(
data_root, env, 'adult', adult, detect, adult_signal_name,
params, adult_w_size, adult_w_sigma, adult_smoothing,
adult_weighting, midpoints, adult_w_style, adult_coef, method)
timestamp_table = make_timestamp_table(env, timestamps)
return [timestamp_table, larva_man_table,
larva_auto_table, adult_man_table, adult_auto_table]
def make_timestamp_table(env, timestamps):
df = pd.DataFrame([timestamps['Image name'], timestamps['Create time']],
index=['Image name', 'Create time']).T
data = [{'Image name': image_name, 'Create time': create_time}
for image_name, create_time in zip(
timestamps['Image name'], timestamps['Create time'])]
return html.Div(id='timestamp-table', children=[
html.H4('Timestamp'),
dash_table.DataTable(
columns=[
{'id': 'Image name', 'name': 'Image name'},
{'id': 'Create time', 'name': 'Create time'}],
data=data,
n_fixed_rows=1,
style_table={'width': '100%'},
pagination_mode=False,
),
html.A(
'Download',
id='download-link',
download='Timestamp({}).csv'.format(env[0:20]),
href='data:text/csv;charset=utf-8,' + df.to_csv(index=False),
target='_blank',
),
],
style={
'display': 'inline-block',
'vertical-align': 'top',
'margin': '10px',
'width': '200px',
},
)
def make_auto_table(data_root, env, morph, target_dir, detect, signal_name,
        params, w_size, w_sigma, smoothing, weighting, midpoints, w_style,
        coef, method):
if target_dir is None:
return html.Div(style={'display': 'inline-block'})
signals = np.load(os.path.join(
data_root, env, 'inference', morph, target_dir, signal_name)).T
signals = seasoning(
signals, morph, detect, w_size, w_sigma,
smooth=len(smoothing) != 0,
weight=len(weighting) != 0,
pupar_times=None,
midpoints=midpoints,
weight_style=w_style)
thresh = THRESH_FUNC(signals, coef=coef)
auto_evals = detect_event(signals, thresh, morph, detect, method)
auto_evals = auto_evals.reshape(
params['n-rows']*params['n-plates'], params['n-clms'])
if method == 'relmax':
detection_method = 'Relative maxima'
elif method == 'thresholding':
detection_method = 'Thresholding'
elif method == 'max':
detection_method = 'Maximum'
else:
detection_method = 'Error'
auto_to_csv = \
'data:text/csv;charset=utf-8,' \
+ 'Dataset,{}\n'.format(urllib.parse.quote(env)) \
+ f'Morphology,{morph}\n' \
+ 'Inference data,{}\n'.format(urllib.parse.quote(target_dir)) \
+ 'Detection method,{}\n'.format(detection_method) \
+ 'Threshold value,' \
+ 'coef * (min + (max - min) / 2) for each individual\n' \
+ 'Coefficient (coef),{}\n'.format(coef) \
+ 'Smoothing window size,{}\n'.format(w_size) \
+ 'Smoothing sigma,{}\nEvent timing\n'.format(w_sigma) \
+ pd.DataFrame(auto_evals).to_csv(
index=False, encoding='utf-8', header=False)
return html.Div(
id=f'{morph}-auto-table',
children = [
html.H4(f'Event timings of {morph} (auto)'),
dash_table.DataTable(
columns=[{'name': f'{clm}', 'id': f'{clm}'}
for clm in ALPHABETS[:params['n-clms']]],
data=pd.DataFrame(auto_evals,
columns=ALPHABETS[:params['n-clms']]).to_dict('rows'),
style_data_conditional=get_cell_style(params, auto_evals),
style_table={'width': '100%'}
),
html.A(
'Download',
download=f'auto_detection_{morph}.csv',
href=auto_to_csv,
target='_blank',
),
],
style={
'display': 'inline-block',
'vertical-align': 'top',
'margin': '10px',
'width': '550px',
},
)
def make_manual_table(data_root, env, morph, detect, params):
if morph == 'larva' and detect == 'pupariation':
try:
auto_evals = np.loadtxt(os.path.join(data_root, env, 'original',
'pupariation.csv'), dtype=np.int32, delimiter=',').flatten()
except:
return make_null_table(morph, 'manual')
elif morph == 'larva' and detect == 'eclosion':
return make_null_table(morph, 'manual')
elif morph == 'larva' and detect == 'death':
return make_null_table(morph, 'manual')
elif morph == 'larva' and detect == 'pupa-and-eclo':
try:
auto_evals = np.loadtxt(os.path.join(data_root, env, 'original',
'pupariation.csv'), dtype=np.int32, delimiter=',').flatten()
except:
return make_null_table(morph, 'manual')
elif morph == 'adult' and detect == 'pupariation':
return make_null_table(morph, 'manual')
elif morph == 'adult' and detect == 'eclosion':
try:
auto_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'eclosion.csv'),
dtype=np.int32, delimiter=',').flatten()
except:
return make_null_table(morph, 'manual')
elif morph == 'adult' and detect == 'death':
try:
auto_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'death.csv'),
dtype=np.int32, delimiter=',').flatten()
except:
return make_null_table(morph, 'manual')
elif morph == 'adult' and detect == 'pupa-and-eclo':
try:
auto_evals = np.loadtxt(
os.path.join(data_root, env, 'original', 'eclosion.csv'),
dtype=np.int32, delimiter=',').flatten()
except:
return make_null_table(morph, 'manual')
else:
        raise ValueError(
            'Unsupported combination of morph and detect: {} / {}'.format(
                morph, detect))
auto_evals = auto_evals.reshape(
params['n-rows']*params['n-plates'], params['n-clms'])
auto_csv = \
'data:text/csv;charset=utf-8,' \
+ 'Dataset,{}\n'.format(urllib.parse.quote(env)) \
+ f'Morphology,{morph}\n' \
+ 'Event timing\n' \
+ pd.DataFrame(auto_evals).to_csv(
index=False, encoding='utf-8', header=False)
return html.Div(
id=f'{morph}-manual-table',
children = [
html.H4(f'Event timings of {morph} (manual)'),
dash_table.DataTable(
columns=[{'name': f'{clm}', 'id': f'{clm}'}
for clm in ALPHABETS[:params['n-clms']]],
data=pd.DataFrame(auto_evals,
columns=ALPHABETS[:params['n-clms']]).to_dict('rows'),
style_data_conditional=get_cell_style(params, auto_evals),
style_table={'width': '100%'},
),
html.A(
'Download',
                download=f'manual_detection_{morph}.csv',
href=auto_csv,
target='_blank',
),
],
style={
'display': 'inline-block',
'vertical-align': 'top',
'margin': '10px',
'width': '550px',
},
)
def make_null_table(morph, string):
return html.Div(style={'display': 'inline-block'})
# ====================
# Utility functions
# ====================
def get_cell_style(params, auto_evals):
return [
{
'if': {
'column_id': f'{clm}',
'filter': f'{{{clm}}} < {int(t2)} && {{{clm}}} >= {int(t1)}',
},
            'backgroundColor': '#44{:02X}44'.format(int(c)),
'color': 'black',
}
for clm in ALPHABETS[:params['n-clms']]
for t1, t2, c in zip(
np.linspace(0, auto_evals.max() + 1, 11)[:10],
np.linspace(0, auto_evals.max() + 1, 11)[1:],
np.linspace(50, 255, 10))
] + [
{
'if': {
'column_id': f'{clm}',
'filter': f'{{{clm}}} = 0',
},
'backgroundColor': '#000000',
'color': 'white',
}
for clm in ALPHABETS[:params['n-clms']]
]
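# A rough sketch of what get_cell_style produces (values below are
# illustrative only): for each column the event range [0, max + 1] is split
# into 10 bins, and cells in later bins get a brighter green background
# ('#44{:02X}44' with the green channel ramping from 50 to 255), while cells
# equal to 0 are rendered black.
#
#   # e.g. with auto_evals.max() == 99, a cell value of 95 falls into the last
#   # bin and gets backgroundColor '#44FF44', while a value of 5 gets '#443244'.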
def seasoning(signals, signal_type, detect, size, sigma, smooth, weight,
pupar_times, midpoints=None, weight_style=None):
# Smooth the signals
if smooth:
signals = my_filter(signals, size=size, sigma=sigma)
else:
pass
# Apply weight to the signals
if weight:
# Detection of pupariation
if detect == 'pupariation' and signal_type == 'larva':
# Step or ramp weight
if weight_style == 'step':
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx, midpoint:] = 0
elif weight_style == 'ramp':
# Ramp filter
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx] = signals[well_idx] * \
(-1 / midpoint * np.arange(len(signals.T)) + 1)
else:
pass
# Not defined
elif detect == 'pupariation' and signal_type == 'adult':
pass
# Not defined
elif detect == 'eclosion' and signal_type == 'larva':
pass
# Detection of eclosion
elif detect == 'eclosion' and signal_type == 'adult':
# Step or ramp weight
if weight_style == 'step':
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx, :midpoint] = 0
elif weight_style == 'ramp':
# Ramp filter
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx] = signals[well_idx] * (
1 / (len(signals.T) - midpoint)
* np.arange(len(signals.T))
- midpoint / (len(signals.T) - midpoint))
else:
pass
# Detection of pupariation
elif detect == 'pupa-and-eclo' and signal_type == 'larva':
# Step or ramp weight
if weight_style == 'step':
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx, midpoint:] = 0
elif weight_style == 'ramp':
# Ramp filter
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx] = signals[well_idx] * \
(-1 / midpoint * np.arange(len(signals.T)) + 1)
else:
pass
# Detection of eclosion
elif detect == 'pupa-and-eclo' and signal_type == 'adult':
"""
if pupar_times is not None:
mask = -np.ones_like(signals, dtype=float)
for i, event_timing in enumerate(pupar_times):
'''
# Step filter
mask[i, event_timing:] = 1
'''
# Ramp filter
lin_weight = np.linspace(
0, 1, len(signals.T) - event_timing)
mask[i, event_timing:] = lin_weight
signals = signals * mask
else:
# Ramp filter
signals = signals * \
(np.arange(len(signals.T)) / len(signals.T))
"""
# Step or ramp weight
if weight_style == 'step':
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx, :midpoint] = 0
elif weight_style == 'ramp':
# Ramp filter
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx] = signals[well_idx] * (
1 / (len(signals.T) - midpoint)
* np.arange(len(signals.T))
- midpoint / (len(signals.T) - midpoint))
else:
pass
# Not defined
elif detect == 'death' and signal_type == 'larva':
pass
# Detection of death
elif detect == 'death' and signal_type == 'adult':
# Step or ramp weight
if weight_style == 'step':
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx, midpoint:] = 0
elif weight_style == 'ramp':
# Ramp filter
for well_idx, midpoint in enumerate(midpoints['midpoint']):
signals[well_idx] = signals[well_idx] * \
(-1 / midpoint * np.arange(len(signals.T)) + 1)
else:
pass
else:
pass
return signals
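# A minimal sketch (illustration only) of the ramp weight used above for the
# 'pupariation'/'larva' case: the signal is multiplied by a line that starts
# at 1 at frame 0, reaches 0 at the well's midpoint and goes negative
# afterwards, which suppresses activity after the expected pupariation window.
#
#   import numpy as np
#   n_frames, midpoint = 10, 5        # hypothetical values
#   ramp = -1 / midpoint * np.arange(n_frames) + 1
#   # -> [1.0, 0.8, 0.6, 0.4, 0.2, 0.0, -0.2, -0.4, -0.6, -0.8]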
def max_amplitude(signals):
amplitudes = []
for signal in signals:
if signal.sum() == 0:
amplitudes.append(0)
else:
amplitudes.append(signal.max() - signal[signal > 0].min())
return np.argmax(amplitudes), np.max(amplitudes)
def calc_threshold(signal, coef=0.5):
max = signal.max()
min = signal.min()
return min + coef * (max - min)
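# Quick sanity check (illustrative values): with coef=0.5 the threshold sits
# halfway between the signal's minimum and maximum.
#
#   calc_threshold(np.array([0., 1., 4., 9., 4., 1., 0.]), coef=0.5)  # -> 4.5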
def find_rising_up_and_falling_down(signal, thresh):
    # Binarize the signal by thresholding
    icebergs = (signal > thresh).astype(int)
    # Find the rising- and falling-edge indices of the binarized signal
    diff = np.diff(icebergs)
    rising_up_idxs = np.where(diff == 1)[0] + 1
    falling_down_idxs = np.where(diff == -1)[0] + 1
    # Edge cases:
    # if the signal is already above the threshold at the start,
    # insert 0 as a rising-edge index
    if icebergs[0] == 1:
        rising_up_idxs = np.concatenate([np.array([0]), rising_up_idxs])
    # If the signal is still above the threshold at the end,
    # insert the last index of the signal as a falling-edge index
    if icebergs[-1] == 1:
        falling_down_idxs = np.concatenate([falling_down_idxs, np.array([len(icebergs) - 1])])
    # The numbers of rising and falling edges must always match
assert len(rising_up_idxs) == len(falling_down_idxs)
return rising_up_idxs, falling_down_idxs
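# A small worked example (hypothetical signal) of the edge detection above:
# with thresh=0.5 the binarized signal is [0, 1, 1, 0, 1], so the rising
# edges are at indices [1, 4] and, because the signal ends above the
# threshold, the final index is appended as a falling edge, giving [3, 4].
#
#   find_rising_up_and_falling_down(np.array([0., 1., 1., 0., 1.]), 0.5)
#   # -> (array([1, 4]), array([3, 4]))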
def relmax_by_thresh(signal, thresh):
    # Find rising and falling edges of the thresholded signal
rising_up_idxs, falling_down_idxs = find_rising_up_and_falling_down(signal, thresh)
    # Compute the local maxima
relmax_args = scipy.signal.argrelmax(signal, order=3)[0]
relmax_values = signal[relmax_args]
candidate_args = []
for rising_up_idx, falling_down_idx in zip(rising_up_idxs, falling_down_idxs):
args = []
values = []
for relmax_arg, relmax_value in zip(relmax_args, relmax_values):
if relmax_arg in range(rising_up_idx, falling_down_idx):
args.append(relmax_arg)
values.append(relmax_value)
assert len(args) == len(values)
if len(args) == 0:
if rising_up_idx == falling_down_idx:
candidate_args.append(rising_up_idx)
else:
candidate_args.append(rising_up_idx + np.argmax(signal[rising_up_idx:falling_down_idx]))
elif len(args) == 1:
candidate_args.append(args[0])
elif len(args) >= 2:
candidate_args.append(args[np.argmax(values)])
    candidate_args = np.array(candidate_args)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import xarray as xr
import re
import datetime
import os
from pathlib import Path
import hpl2netCDF_client as proc
class hpl_files(object):
name= []
time= []
# The class "constructor" - It's actually an initializer
def __init__(self, name, time):
self.name = name
self.time = time
@staticmethod
def make_file_list(date_chosen, confDict, url):
path = Path(url) / date_chosen.strftime('%Y') / date_chosen.strftime('%Y%m') / date_chosen.strftime('%Y%m%d')
#confDict= config.gen_confDict()
## for halo
if confDict['SYSTEM'] == 'halo':
if (confDict['SCAN_TYPE'] == 'Stare') | (confDict['SCAN_TYPE'] == 'VAD') | (confDict['SCAN_TYPE'] == 'RHI'):
scan_type= confDict['SCAN_TYPE']
else:
scan_type= 'User'
mylist= list(path.glob('**/' + scan_type + '*.hpl'))
if confDict['SCAN_TYPE']=='Stare':
file_time= [ datetime.datetime.strptime(x.stem, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H")
for x in mylist]
# sort files according to time stamp
file_list = []
for ii,idx in enumerate(np.argsort(file_time).astype(int)):
file_list.append(mylist[idx])
file_time = [ datetime.datetime.strptime(x.stem
, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H")
for x in file_list]
elif (confDict['SCAN_TYPE']=='VAD') | (confDict['SCAN_TYPE']=='RHI'):
file_time= [ datetime.datetime.strptime(x.stem, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in mylist]
# sort files according to time stamp
file_list = []
for ii,idx in enumerate(np.argsort(file_time).astype(int)):
file_list.append(mylist[idx])
file_time = [ datetime.datetime.strptime(x.stem
, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in file_list]
else:
file_time= [ datetime.datetime.strptime(x.stem
, scan_type
+ x.name[4]
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in mylist]
# sort files according to time stamp
file_list = []
for ii,idx in enumerate(np.argsort(file_time).astype(int)):
file_list.append(mylist[idx])
file_time = [ datetime.datetime.strptime(x.stem
, scan_type
+ x.name[4]
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in file_list]
return hpl_files(file_list, file_time)
## for windcube
elif confDict['SYSTEM'] == 'windcube':
if (confDict['SCAN_TYPE'] == 'Stare') | (confDict['SCAN_TYPE'] == 'VAD') | (confDict['SCAN_TYPE'] == 'RHI'):
scan_type= 'fixed'
else:
print('unknown scantype!')
if abs((date_chosen - datetime.datetime(date_chosen.year, date_chosen.month, date_chosen.day)).total_seconds()) > 0:
mylist= list(path.glob('**/' + 'WCS*' + date_chosen.strftime('%Y-%m-%d_%H*') + scan_type + '*.nc'))
else:
mylist= list(path.glob('**/' + 'WCS*' + scan_type + '*.nc'))
file_time = [ datetime.datetime.strptime( x.stem[0:29]
, x.stem[0:9] + '_%Y-%m-%d_%H-%M-%S')
for x in mylist
]
file_list = [mylist[idx] for idx in np.argsort(file_time).astype(int)]
file_time = [ datetime.datetime.strptime( x.stem[0:29]
, x.stem[0:9] + '_%Y-%m-%d_%H-%M-%S')
for x in file_list
]
return hpl_files(file_list, file_time)
# function used for calculation of range bounds
@staticmethod
def range_calc(rg_vec, confDict):
'''Calculate range bounds, also accounting for overlapping gates. If your hpl-files contain overlapping gates please add the "OVERLAPPING_GATES" argument to the configuration file.'''
if 'OVERLAPPING_GATES' in confDict:
r = lambda x,idx: (x + idx) * float(confDict['RANGE_GATE_LENGTH'])/(1,float(confDict['NUMBER_OF_GATE_POINTS']))[int(confDict['OVERLAPPING_GATES'])]
else:
r = lambda x,idx: (x + idx) * float(confDict['RANGE_GATE_LENGTH'])
return r(rg_vec, .5).astype('f4')
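    # A brief illustration (hypothetical config values) of the two branches
    # above: without overlapping gates the centre of gate k sits at
    # (k + 0.5) * RANGE_GATE_LENGTH; with OVERLAPPING_GATES set, the gate
    # spacing is additionally divided by NUMBER_OF_GATE_POINTS.
    #
    #   # e.g. RANGE_GATE_LENGTH = 30 m, NUMBER_OF_GATE_POINTS = 10
    #   # no overlap:  gate 0 -> 15 m,  gate 1 -> 45 m
    #   # overlapping: gate 0 -> 1.5 m, gate 1 -> 4.5 m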
@staticmethod
def split_header(string):
        return [x.strip() for x in re.split(r'[:=\-]', re.sub(r'[\n\t]', '', string), 1)]
@staticmethod
def split_data(string):
        return re.split(r'\s+', re.sub(r'\n', '', string).strip())
#switch_str = {True: split_header(line), False: split_data(line)}
@staticmethod
def split_default(string):
return string
@staticmethod
def switch(case,string):
return {
True: hpl_files.split_header(string),
False: hpl_files.split_data(string)}.get(case, hpl_files.split_default)
@staticmethod
def reader_idx(hpl_list,confDict,chunks=False):
print(hpl_list.time[0:10])
time_file = pd.to_datetime(hpl_list.time)
time_vec= np.arange(pd.to_datetime(hpl_list.time[0].date()),(hpl_list.time[0]+datetime.timedelta(days = 1))
,pd.to_timedelta(int(confDict['AVG_MIN']), unit = 'm'))
if chunks == True:
return [np.where((ii <= time_file)*(time_file < iip1))
for ii,iip1 in zip(time_vec[0:-1],time_vec[1::])]
if chunks == False:
return np.arange(0,len(hpl_list.time))
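    # Rough sketch of what reader_idx returns (values are illustrative): with
    # chunks=True the day starting at the date of the first file is split into
    # AVG_MIN-minute bins, and each bin holds the indices of the files whose
    # timestamps fall inside it; with chunks=False it simply returns
    # np.arange(len(hpl_list.time)), i.e. all file indices in order.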
@staticmethod
def combine_lvl1(hpl_list, confDict, date_chosen):
if confDict['SYSTEM'] == 'halo':
ds = xr.concat((hpl_files.read_hpl(iit,confDict) for iit in hpl_list.name)
,dim='time'#, combine='nested'#,compat='identical'
,data_vars='minimal'
,coords='minimal')
elif confDict['SYSTEM'] == 'windcube':
ds = xr.concat((hpl_files.read_wcsradial(iit,confDict) for iit in hpl_list.name)
, dim='time'#, combine='nested'#,compat='identical'
, data_vars='minimal'
, compat='override'
, coords='minimal')
ds['nqv'].values = ((ds.dv.max() - ds.dv.min()).data/2).astype('f4')
ds['nqf'].values = (2*ds.nqv.data/float(confDict['SYSTEM_WAVELENGTH'])).astype('f4')
ds['resv'].values = (2*ds.nqv.data/float(confDict['FFT_POINTS'])).astype('f4')
## delete 'delv' variable, if all entries are NaN.
if (ds.delv == -999.).all():
ds = ds.drop_vars(['delv'])
# print('dropping "delv" / "spectral width", because all are NaN!')
# if os.name == 'nt':
# ds = ds._drop_vars(['delv'])
#else:
# ds = ds.drop_vars(['delv'])
##!!!NOTE!!!##
# There was an issue under windows, possible due to a version problem,
# so in case an Attribute error occurs change line 126 to following
#ds = ds._drop_vars(['delv'])
## choose only timestamp within a daily range
start_dt = (pd.to_datetime(date_chosen.date()) - pd.Timestamp("1970-01-01")) / pd.Timedelta('1s')
end_dt = (pd.to_datetime(date_chosen + datetime.timedelta(days= +1)) - pd.Timestamp("1970-01-01")) / pd.Timedelta('1s')
ds = ds.isel(time=np.where((ds.time >= start_dt) & (ds.time <= end_dt))[0])
ds.attrs['title']= confDict['NC_TITLE']
ds.attrs['institution']= confDict['NC_INSTITUTION']
ds.attrs['site_location']= confDict['NC_SITE_LOCATION']
ds.attrs['source']= confDict['NC_SOURCE']
ds.attrs['instrument_type']= confDict['NC_INSTRUMENT_TYPE']
ds.attrs['instrument_mode']= confDict['NC_INSTRUMENT_MODE']
if 'NC_INSTRUMENT_FIRMWARE_VERSION' in confDict:
ds.attrs['instrument_firmware_version']= confDict['NC_INSTRUMENT_FIRMWARE_VERSION']
else:
ds.attrs['instrument_firmware_version']= 'N/A'
ds.attrs['instrument_contact']= confDict['NC_INSTRUMENT_CONTACT']
if 'NC_INSTRUMENT_ID' in confDict:
ds.attrs['instrument_id']= confDict['NC_INSTRUMENT_ID']
else:
ds.attrs['instrument_id']= 'N/A'
# ds.attrs['Source']= "HALO Photonics Doppler lidar (system_id: " + confDict['SYSTEM_ID']
ds.attrs['conventions']= confDict['NC_CONVENTIONS']
ds.attrs['processing_date']= str(pd.to_datetime(datetime.datetime.now())) + ' UTC'
# ds.attrs['Author']= confDict['NC_AUTHOR']
ds.attrs['instrument_contact']= confDict['NC_INSTRUMENT_CONTACT']
ds.attrs['data_policy']= confDict['NC_DATA_POLICY']
# attributes for operational use of netCDFs, see E-Profile wind profiler netCDF version 1.7
if 'NC_WIGOS_STATION_ID' in confDict:
ds.attrs['wigos_station_id']= confDict['NC_WIGOS_STATION_ID']
else:
ds.attrs['wigos_station_id']= 'N/A'
if 'NC_WMO_ID' in confDict:
ds.attrs['wmo_id']= confDict['NC_WMO_ID']
else:
ds.attrs['wmo_id']= 'N/A'
if 'NC_PI_ID' in confDict:
ds.attrs['principal_investigator']= confDict['NC_PI_ID']
else:
ds.attrs['principal_investigator']= 'N/A'
if 'NC_INSTRUMENT_SERIAL_NUMBER' in confDict:
ds.attrs['instrument_serial_number']= confDict['NC_INSTRUMENT_SERIAL_NUMBER']
else:
ds.attrs['instrument_serial_number']= ' '
ds.attrs['history']= confDict['NC_HISTORY'] + ' version ' + confDict['VERSION'] + ' on ' + str(pd.to_datetime(datetime.datetime.now())) + ' UTC'
ds.attrs['comments']= confDict['NC_COMMENTS']
## add configuration as attribute used to create the file
configuration = """"""
for dd in confDict:
configuration += dd + '=' + confDict[dd]+'\n'
ds.attrs['File_Configuration']= configuration
# adjust time variable to double (aka float64)
        ds['time'] = ds.time.data.astype(np.float64)
path= Path(confDict['NC_L1_PATH'] + '/'
+ date_chosen.strftime("%Y") + '/'
+ date_chosen.strftime("%Y%m"))
path.mkdir(parents=True, exist_ok=True)
path= path / Path(confDict['NC_L1_BASENAME'] + 'v' + confDict['VERSION'] + '_' + date_chosen.strftime("%Y%m%d")+ '.nc')
if 'UTC_OFFSET' in confDict:
time_offset = np.timedelta64(int(confDict['UTC_OFFSET']), 'h')
time_delta = int(confDict['UTC_OFFSET'])
else:
time_offset = np.timedelta64(0, 'h')
time_delta = 0
# compress variables
comp = dict(zlib=True, complevel=9)
        encoding = {var: comp for var in np.hstack([ds.data_vars, ds.coords])}
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : <NAME>
# Copyright : Copyright 2021, Institute of Chemical Engineering, Prof. Dr. <NAME>, Ulm University'
# License : GNU LGPL
# =============================================================================
"""The module (rm = radiometric measurement) rm_evaluation contains the class RM_Evaluation, which helps evaluation
radiometric measurements performed with a radiometric scanning device developed at the Institute of Chemical
Engineering, University Ulm. By creating an instance of the class, it can lead you through the procedure.
Run your python instance in the Folder "01Scripts" and put th measurement files th the folder "02Data".
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# =============================================================================
# Imports
# =============================================================================
import os
import pprint
import logging
import time
import pandas as pd
from pathlib import Path
import re
import math
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from cycler import cycler
import numpy as np
import scipy
from scipy.optimize import minimize
from supplementary.colour_system import cs_hdtv as cs
from lmfit.lineshapes import skewed_gaussian
import sympy as sp
logging.basicConfig(format="%(asctime)s: %(message)s", level=logging.INFO, datefmt="%H:%M:%S")
class RM_Evaluation:
"""
The class RM_Evaluation can be used for further evaluation of measurements performed by the scanning device.
By initializing an instance of the class the script will scan existing subfolders for exported RM-data. This Data
must be stored in a specific Folder structure: (Normally created by the export function of the measurement script.)
folder: 02Data
|
|-- folder: date + time + reactor name
| |
| |-- folder: z-position 1
| | |-- file: *.feather ....................... contains measured spectra in mW nm**-1
| | |-- file: *.log ........................... contains all set parameters for measurement
| | |-- heatmap pictures and *.csv file ....... not used by this script
| |
| |-- folder: z-position 2
| | |-- ...
| |
| |-- ...
|
|-- ...
-------------------
Attributes constructed from parameters by __init___:
saveram (boolean, optional, default = False):
If True, photon flux spectra are not loaded to use less RAM.
wl_ntsr_borders (tuple of two integers, optional):
            For the calculation of the noise-to-signal ratio the spectrum is separated into two regimes. By default
            the regime used for defining the noise is the spectrometer pixels 0-150, and the signal regime is the pixels 150 to -1
            (end of list). This default is defined by the method import_raw. Other borders should be defined here as a
            tuple of integers if the automatic import routine is used. The tuple defines the pixel separating the
            two regimes (default: 150) and the end of the signal regime.
predef (list of strings, optional):
            Defines predefined answers to the import questions of the class when it is initialized. You give a list
            of the answers; it will be read from right to left (list.pop).
When initializing an instance of the class and predef == False you will be asked
if you want to choose a Directory to evaluate:
Choose a Directory to evaluate?
>?|
Two answers are possible:
• When answering with ’y’ , all export folders in the same directory which have the correct structure and
content will be listed. The program then will ask for the number of the folder
which should be imported.
• If ’n’ is given as answer, all importable .feather files in all subdirectories are listed. The
files can be picked individually by their number.
After choosing the desired data, the program will ask whether it should import these files:
import raw?
>?|
If the answer is ’n’ , you can start the import procedure manually with custom parameters by calling
the function import_raw . Otherwise the program will also ask if colours should be processed:
process colors?
>?|
and for a value of the ntsr_border :
keep ntsr border of 0.550? Or type new
>?|
            After that, all files will be read and processed. Note that this can require a fair amount of RAM. The answers
            to the import questions can also be given in advance via the predef parameter when initializing the class.
In case you want to import the second folder ( answer2 = 2 ) in the directory ( answer1 = ’y’ )
and import( answer3 = ’y’ ) without processing the colours ( answer4 = ’n’ ), while keeping the
            ntsr_border unchanged ( answer5 = 'y' ), the list ['y', 'n', 'y', 2, 'y'] must be passed
as parameter predef . The program reads this list from right to left.
-------------------
Output Attributes:
alldfs (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels, number of spectrometer pixels),
dtype='float32'):
Array containing the measured spectra in mW nm**-1.
all_photon_dfs (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels, number of spectrometer pixels),
dtype='float32'):
Array containing the measured spectra in photons nm**-1 s**-1.
allwaves (numpy array,
shape: (number of z positions, number of spectrometer pixels),
dtype='float32'):
Array containing the respective wavelengths in nm for the spectrometer pixels.
all_vars (numpy array,
shape: (number of z positions),
dtype='float32'):
Array containing a dict with the respective measurement variables:
            'Name': Name of the scan.
            'z_pos': z-position.
            'Size': Size of the scan in pixels.
            'int_time': Integration time.
all_integrals (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels),
dtype='float32'):
Containing the received power per pixel in mW.
all_photon_integrals (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels),
dtype='float32'):
Containing the received number of photons per pixel.
all_rec_power (numpy array,
shape: (number of z positions),
dtype='float32'):
Containing the received power per scan in mW.
all_rec_photons (numpy array,
shape: (number of z positions),
dtype='float32'):
Containing the received number of photons per scan.
all_nev_abs (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels, number of spectrometer
pixels),
dtype='float32'):
Array containing the measured spectra of not absorbable power in mW nm**-1.
all_nev_abs_photons (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels, number of
spectrometer pixels),
dtype='float32'):
Array containing the measured spectra of not absorbable photons.
all_ratios (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels),
dtype='float32'):
Array containing the peak ratio for every measurement pixel.
all_colors (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels, red values, green values,
blue values),
dtype='float32'):
Array containing the RGB values.
all_integrals_nev_abs (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels),
dtype='float32'):
Containing the not absorbable received power per pixel in mW.
all_integrals_remaining (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels),
dtype='float32'):
Containing the not absorbed received power per pixel in mW.
all_photon_integrals_remaining (numpy array,
shape: (number of z positions, number of x-pixels, number of y-pixels),
dtype='float32'):
Containing the not absorbed received photons per pixel.
"""
def __init__(self, saveram=False, wl_ntsr_borders=False, predef=False):
self.script_starting_time = time.time()
self.saveram = saveram
self.dir_path = os.path.dirname(os.path.realpath(__file__))+'\\..\\02Data'
self.foundthefiles = []
self.thefiles = []
self.dirs = []
self.thedir = ''
self.frames_red = False
self.ntsr_border = 0.55
self.waveleghth_borders_fo_ntsr = wl_ntsr_borders
self.cal_int_time = 0.03551
self.planck_constant = 6.626 * 10 ** (-34)
self.speed_of_light = 2.998 * 10 ** 8
self.avogadro_number = 6.022 * 10 ** 23
self.alldfs = np.array([], dtype='float32')
self.all_photon_dfs = np.array([], dtype='float32')
self.allwaves = np.array([], dtype='float32')
self.all_vars = np.array([])
self.all_integrals = np.array([], dtype='float32')
self.all_photon_integrals = np.array([], dtype='float32')
self.all_rec_power = np.array([], dtype='float32')
self.all_rec_photons = np.array([], dtype='float32')
self.all_nev_abs = np.array([], dtype='float32')
self.all_nev_abs_photons = np.array([], dtype='float32')
self.all_ratios = np.array([], dtype='float32')
self.all_colors = np.array([])
self.df_for_int_exp = pd.DataFrame({})
self.color_cycle = cycler('color', ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple',
'pink', 'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender',
'turquoise', 'darkgreen', 'tan', 'salmon', 'gold', 'orchid', 'crimson',
'darkblue'])
self.color_cycle_list = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'orchid', 'crimson', 'darkblue']
# Here we scan all folders for feather data and let the user choose which ones to import
for self.root, dirs, files in os.walk(self.dir_path):
for file in files:
if file.endswith('.feather'):
self.foundthefiles.append(str(self.root+'\\'+str(file)))
if Path(self.root).parent in self.dirs:
pass
else:
self.dirs.append(Path(self.root).parent)
if len(self.foundthefiles) == 1:
pass
self.thefiles = self.foundthefiles
else:
print('Choose a directory to evaluate? (y/n)')
if predef is not False:
bool_dir = predef.pop()
else:
bool_dir = input()
logging.info('%s' % bool_dir)
if bool_dir == 'y':
self.chose_dir = True
print('I found these Folders:\n')
counter = 1
for i in self.dirs:
print(str(counter) + ': ' + i.parts[-1])
counter += 1
print('\nWhich one should be processed? Only choose one.')
if predef is not False:
self.user = predef.pop()
else:
self.user = int(input())
print('you chose the path: %s' % self.dirs[self.user-1])
self.thedir = self.dirs[self.user-1]
for i in self.foundthefiles:
if Path(i).parts[0:-2] == Path(self.dirs[self.user-1]).parts[:]:
print('Adding %s' % Path(i).parts[-1])
self.thefiles.append(i)
else:
self.chose_dir = False
print('I found these Files:\n')
counter = 1
for i in self.foundthefiles:
print(str(counter) + ': ' + i)
counter += 1
print(str(counter) + ': all')
print('\nWhich ones should be processed? Separate with blanks.')
if predef is not False:
self.user = predef.pop()
else:
self.user = input()
self.user = self.user.split()
for i in range(0, len(self.user)):
self.user[i] = int(self.user[i])
if counter in self.user:
self.thefiles = self.foundthefiles
else:
for i in self.user:
self.thefiles.append(self.foundthefiles[i-1])
if predef is not False:
import_raw = predef.pop()
else:
import_raw = input('import raw?')
if import_raw == 'y':
if predef is not False:
process_colors = predef.pop()
else:
process_colors = input('process colors?')
if process_colors == 'y':
input_colors = True
else:
input_colors = False
if predef is not False:
input_ntsr = predef.pop()
else:
input_ntsr = input('keep ntsr border of %.3f? Or type new' % self.ntsr_border)
try:
self.ntsr_border = float(input_ntsr)
except ValueError:
logging.info('ntsr border not changed')
self.import_raw(colors=input_colors)
def normalize(self, arr):
"""Helper method to normalize an array."""
arr_min = np.min(arr)
return (arr-arr_min)/(np.max(arr)-arr_min)
def find_nearest(self, array, value):
"""Helper method, finds the index of a e.g. wavelength"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def pos_to_num(self, pos):
"""Helper method, calculates a position on a heatmap in the respective pixel."""
return int(round(pos/0.39))
def cos_corr_point(self, array, center, distance):
"""
Returns an array containing cosine correction values for a
point light source, e.g. LED
------------------
Parameters:
array (2D numpy array):
The array which should be corrected, here only the correct shape is necessary. Using the actual array
is useful considering the ’auto’ option of the parameter center.
center (tuple or 'auto'):
The center of the light source on the array. If center is ’auto’ , the position of the largest value in
the array array is used. Otherwise, pass a tuple of coordinates in cm.
distance (number):
The distance between the measurement canvas and the light source in cm.
"""
if 'auto' in center:
center = np.reshape(np.array(np.where(array == np.amax(array)))*0.39+(0.39/2), 2)
self.olsh_positions = np.indices(array.shape)*0.39+(0.39/2)
self.positions = np.zeros((array.shape[0], array.shape[1], 2))
self.positions[:, :, 0] = self.olsh_positions[0, :, :]
self.positions[:, :, 1] = self.olsh_positions[1, :, :] # we need to give the positions array a different shape
self.vectors = np.subtract(self.positions, center) # we calculate the vectors pointing from the lightsource to the pixel
print('vectors shape: ' + str(self.vectors.shape))
self.norms = np.linalg.norm(self.vectors, axis=-1)
print('norms shape: ' + str(self.norms.shape))
return 1/np.cos(np.arctan(self.norms/distance))
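# Hypothetical usage sketch (added for illustration, not from the original script): correcting a single
# power map for the cosine falloff of a point source; `ev` stands for an instance of this class after
# import_raw() has run.
#     corr = ev.cos_corr_point(ev.all_integrals[0], center='auto', distance=4.5)
#     corrected_map = ev.all_integrals[0] * corr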
def cos_corr_stick(self, array, x_center=70, y_min=84, y_max=107, distance=4.5):
"""
Returns an array containing cosine correction values for a longish light source. See cos_corr_point.
------------------
Parameters:
array (2D numpy array):
The array which should be corrected, here only the correct shape is necessary. Using the actual array
is useful considering the ’auto’ option of the parameter center.
x_center (integer, default = 70):
Pixel position of a longish light source in x direction.
y_min (integer, default = 84):
Lower pixel position of a longish light source in y direction.
y_max (integer, default = 107):
Upper pixel position of a longish light source in y direction.
distance (number):
The distance between the measurement canvas and the light source in cm.
"""
x_center = x_center*0.39+(0.39/2)
y_min = y_min*0.39+(0.39/2)
y_max = y_max*0.39+(0.39/2)
olsh_positions = np.indices(array.shape)*0.39+(0.39/2)
positions = np.zeros((array.shape[0], array.shape[1], 2))
positions[:, :, 0] = olsh_positions[0, :, :]
positions[:, :, 1] = olsh_positions[1, :, :]
y_max_helper = np.zeros(positions.shape)
y_max_helper[..., 0] = y_max_helper[..., 1] = np.where(positions[:, :, 0] >= y_max, 1, 0)
y_min_helper = np.zeros(positions.shape)
y_min_helper[..., 0] = y_min_helper[..., 1] = np.where(positions[:, :, 0] <= y_min, 1, 0)
y_mid_helper = np.zeros(positions.shape)
y_mid_helper[..., 0] = y_mid_helper[..., 1] = np.where((positions[:, :, 0] > y_min) & (positions[:, :, 0] < y_max), 1, 0)
print(y_mid_helper)
vectors_y_max = np.subtract(positions, [y_max, x_center])*y_max_helper
vectors_y_min = np.subtract(positions, [y_min, x_center])*y_min_helper
mid_positions = np.ones(positions.shape)
mid_positions[:, :, 0] = mid_positions[:, :, 0]*y_max
mid_positions[:, :, 1] = positions[:, :, 1]
vectors_y_mid = np.subtract(mid_positions, [y_max, x_center])*y_mid_helper
vectors = np.where(vectors_y_max == 0, vectors_y_min, vectors_y_max)
vectors = np.where(vectors == 0, vectors_y_mid, vectors)
print('vectors shape: ' + str(vectors.shape))
norms = np.linalg.norm(vectors, axis=-1)
print('norms shape: ' + str(norms.shape))
return 1/np.cos(np.arctan(norms/distance))
def vars_from_log(self, file):
"""
Helper method to extract variables from the logfile.
"""
import re
filepath = file
search_int_time = 'int_time'
matched_int_time = ''
search_Name = 'Name:'
matched_name = ''
search_Size = 'Size:'
matched_size = ''
search_Z = 'z_pos:'
matched_z = ''
with open(filepath, 'r') as file:
for line in file:
if search_int_time in line:
matched_int_time = float(re.sub("\D+[_]\D+", "", line.rstrip()))
with open(filepath, 'r') as file:
for line in file:
if search_Name in line:
matched_name = re.sub("\D+[: ]", "", line.rstrip())
with open(filepath, 'r') as file:
for line in file:
if search_Size in line:
matched_size = int(re.sub("\D", "", line.rstrip()))
with open(filepath, 'r') as file:
for line in file:
if search_Z in line:
matched_z = float(re.sub("\D+[_]\D+", "", line.rstrip()))
out = {'int_time': matched_int_time, 'Name': matched_name, 'Size': matched_size, 'z_pos': matched_z}
pprint.pprint(out)
return out
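# Example of the expected log format (hypothetical values, added for illustration): lines such as
#     "int_time: 0.035", "Name: scan_01", "Size: 120", "z_pos: 5.0"
# would yield {'int_time': 0.035, 'Name': 'scan_01', 'Size': 120, 'z_pos': 5.0}.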
def noisetosignal(self, a, borders, axis=-1):
"""
Helper method for the import_raw method, calculates the noise to signal ratio for an array.
------------
Parameters:
a (array, shape: (no. of scans, no. of x-pixels, no. of y-pixels, spectrometer pixels)):
The array.
borders (tuple, shape: (2)):
The wavelength-index borders for the ntsr calculation, see docstring of this class.
axis (integer, default = -1):
Axis parameter for the numpy std method, no need to change.
"""
c = np.std(a[:, :, :, :borders[0]], axis=axis)
d = np.std(a[:, :, :, borders[0]:borders[1]], axis=axis)
return np.where(d == 0, 1, c/d)
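# Interpretation (added note): the returned value is std(noise band) / std(signal band) per pixel,
# so pixels with a strong signal give small values; in import_raw, pixels whose value is at or above
# the ntsr border are treated as noise and zeroed.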
def show_histogram(self, values):
"""
Shows a histogram of the given array.
"""
n, bins, patches = plt.hist(values.reshape(-1), 50, density=1)
bin_centers = 0.5 * (bins[:-1] + bins[1:])
for c, p in zip(self.normalize(bin_centers), patches):
plt.setp(p, 'facecolor', cm.viridis(c))
plt.show()
def normalize(self, arr):
"""
Helper method for show_histogram.
"""
arr_min = np.min(arr)
return (arr-arr_min)/(np.max(arr)-arr_min)
def SI_plot(self, comment=''):
"""
Plots and saves a figure of supporting information, subdivided into 4 parts. Three subfigures display
histograms of the NTSR, the peak ratios and the received power per pixel of all scans. The fourth subfigure
displays the received power per scan and the integration time over the z-position. The received power over
the z-position is fitted with a linear regression and the fit function is given.
"""
fig, axes = plt.subplots(ncols=2,
nrows=2,
figsize=(8, 6),
sharex=False,
sharey=False)
"""overall Title"""
fig.suptitle(self.all_vars[0]['Name']+' SI plot '+comment)
"""Making the Histogram of all ntsr"""
target = (0, 0)
axes[target].set_prop_cycle(self.color_cycle)
axes[target].tick_params(labelsize=7)
axes[target].set_title('histogram of signal to noise ratios', fontsize=9)
axes[target].set_xlabel("signal to noise / 1", fontsize=9, labelpad=0.1)
axes[target].set_ylabel("frequency", fontsize=9, labelpad=0.1)
if self.separate_borders_mode is True:
axes_twin_ntsr = axes[target].twinx()
axes_twin_ntsr.set_prop_cycle(self.color_cycle)
for i in range(0, len(self.thefiles)):
n, bins, patches = axes[target].hist(self.all_ntsr[i].reshape(-1), 50, density=1, histtype='step',
label=str(self.all_vars[i]['z_pos'])+'$\\,$cm')
if self.separate_borders_mode is True:
axes_twin_ntsr.plot([self.separate_ntsr[i, 0, 0], self.separate_ntsr[i, 0, 0]], [0, n.max()/2], '-.',
label='%.3f' % self.separate_ntsr[i, 0, 0], alpha=0.5)
axes_twin_ntsr.set_yticks([])
if self.separate_borders_mode is False:
axes[target].plot([self.ntsr_border, self.ntsr_border], [0, n.max()], 'k-.' # , label='border'
)
xmin, xmax = axes[target].get_xlim()
ymin, ymax = axes[target].get_ylim()
t_xpos = xmax+0.01*(xmax-xmin)
t_ypos = ymin+0.01*(ymax-ymin)
axes[target].text(t_xpos, t_ypos, 'ntsr_border = %.3f' % self.ntsr_border, rotation='vertical', fontsize=6)
if self.separate_borders_mode is True:
axes_twin_ntsr.legend(fontsize=4.5, ncol=2, labelspacing=0.01, loc='center left', title='ntsr borders', title_fontsize=4.5, columnspacing=1)
"""Making the Histogram of all ratios"""
target = (0, 1)
axes[target].tick_params(labelsize=7)
axes[target].set_prop_cycle(self.color_cycle)
axes[target].set_title('histogram of peak ratios', fontsize=9)
axes[target].set_xlabel("peak ratio/ 1", fontsize=9, labelpad=0.1)
axes[target].set_ylabel("frequency", fontsize=9, labelpad=0.1)
for i in range(0, len(self.thefiles)):
n, bins, patches = axes[target].hist(self.all_ratios[i].reshape(-1), 50, density=1, histtype='step',
label=str(self.all_vars[i]['z_pos'])+'$\\,$cm', log=True)
axes[target].legend(fontsize=4.5, ncol=2, labelspacing=0.01, loc='upper right', columnspacing=1)
"""plotting power over distance"""
target = (1, 0)
axes[target].tick_params(labelsize=7)
axes[target].set_prop_cycle(self.color_cycle)
axes[target].set_title('received power and\nintegration time over z', fontsize=9)
axes[target].set_xlabel("z / cm", fontsize=9, labelpad=0.1)
axes[target].set_ylabel("P / W", fontsize=9, labelpad=0.1)
axes_twin = axes[target].twinx()
axes_twin.tick_params(labelsize=7)
axes_twin.set_ylabel("integration time / ms", fontsize=9, labelpad=0.1)
x = np.zeros(len(self.thefiles))
int_times = np.zeros(len(self.thefiles))
# marker: https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/marker_reference.html
marker_style = dict(linestyle=':', color='0.8', markersize=7, mfc="C0", mec="C0")
marker_style2 = dict(linestyle=':', color='0.8', markersize=7, mfc="C1", mec="C1")
for i in range(0, len(self.thefiles)):
x[i] = self.all_vars[i]['z_pos']
int_times[i] = self.all_vars[i]['int_time']*1000
rec = axes[target].plot(x, self.all_rec_power*1E-6, label='received power', marker='.', **marker_style)
abs = axes[target].plot(x, self.all_nev_abs*1E-6, label='never absorbed power', marker=7, **marker_style)
int = axes_twin.plot(x, int_times, label='integration time', marker='2', **marker_style2)
# legend: https://stackoverflow.com/questions/5484922/secondary-axis-with-twinx-how-to-add-to-legend
all = rec+abs+int
labs = [l.get_label() for l in all]
"""fitting the slope of data"""
coef_z = np.polyfit(x, self.all_rec_power*1E-6, 1)
coef_z_na = np.polyfit(x, self.all_nev_abs*1E-6, 1)
coef_int = np.polyfit(int_times, self.all_rec_power*1E-6, 1)
coef_int_na = np.polyfit(int_times, self.all_nev_abs*1E-6, 1)
poly1d_fn_z = np.poly1d(coef_z)
poly1d_fn_z_na = np.poly1d(coef_z_na)
poly1d_fn_int = np.poly1d(coef_int)
poly1d_fn_int_na = np.poly1d(coef_int_na)
# poly1d_fn is now a function which takes in x and returns an estimate for y
power_at_cal_time = poly1d_fn_int(self.cal_int_time)
power_at_cal_time_na = poly1d_fn_int_na(self.cal_int_time)
axes[target].plot(x, poly1d_fn_z(x), '--k')
axes[target].plot(x, poly1d_fn_z_na(x), '--k')
xmin, xmax = axes[target].get_xlim()
ymin, ymax = axes[target].get_ylim()
t_xpos = xmin+0.5*(xmax-xmin) # the relative position on the xaxis
t_ypos = ymin+0.8*(ymax-ymin) # the relative position on the yaxis
axes[target].text(t_xpos, t_ypos, '$m_{rec}$ = %.2f$\\cdot 10^{-3}\\,$Wcm$^{-1}$\nor %.2f$\\cdot 10^{-3}\\,$Wms$^{-1}$'
'\n$P_{@caltime} = %.3f$' % (poly1d_fn_z[1]*1E3, poly1d_fn_int[1]*1E3,
power_at_cal_time), fontsize=4.5)
t_xpos = xmin+0.5*(xmax-xmin) # the relative position on the xaxis
t_ypos = ymin+0.3*(ymax-ymin) # the relative position on the yaxis
axes[target].text(t_xpos, t_ypos, '$m_{nev abs}$ = %.2f$\\cdot 10^{-3}\\,$Wcm$^{-1}$\nor %.2f$\\cdot 10^{-3}\\,$Wms$^{-1}$'
'\n$P_{@caltime} = %.3f$' % (poly1d_fn_z_na[1]*1E3, poly1d_fn_int_na[1]*1E3,
power_at_cal_time_na), fontsize=4.5)
axes[target].legend(all, labs, fontsize=4.5, ncol=1, labelspacing=1, loc='best',
bbox_to_anchor=(0.05, 0.25, 0.5, 0.5), columnspacing=1)
"""Making the Histogram of all integrals"""
target = (1, 1)
axes[target].tick_params(labelsize=7)
axes[target].set_prop_cycle(self.color_cycle)
axes[target].set_title('histogram of received power\nper pixel', fontsize=9)
axes[target].set_xlabel("P / \u03BCW ", fontsize=9, labelpad=0.1)
axes[target].set_ylabel("frequency", fontsize=9, labelpad=0.1)
for i in range(0, len(self.thefiles)):
n, bins, patches = axes[target].hist(self.all_integrals[i].reshape(-1), 50, density=1, histtype='step',
label=str(self.all_vars[i]['z_pos'])+'$\\,$cm', log=True)
axes[target].legend(fontsize=4.5, ncol=2, labelspacing=0.01, loc='upper right', columnspacing=1)
fig.subplots_adjust(
# left=0.0, # the left side of the subplots of the figure
# right=0.9, # the right side of the subplots of the figure
bottom=0.1, # the bottom of the subplots of the figure
top=0.9, # the top of the subplots of the figure
wspace=0.35, # the amount of width reserved for space between subplots,
# expressed as a fraction of the average axis width
hspace=0.4)
if self.thedir == '':
self.thedir = Path(self.dir_path)
fig.savefig(self.thedir/Path(self.all_vars[0]['Name'] + 'SI_plot' + comment + '.png'),
dpi=300,
bbox_inches='tight'
)
def read_frames(self):
"""
Helper method for import_raw.
"""
for counter, thefile in np.ndenumerate(self.thefiles):
self.vars = self.vars_from_log(thefile.replace('feather', 'log'))
if counter[0] == 0:
self.all_vars = np.array([self.vars]*(len(self.thefiles)))
self.all_vars[counter] = self.vars
self.all_z = np.zeros(len(self.all_vars))
for num, i in np.ndenumerate(self.all_vars):
self.all_z[num] = self.all_vars[num]['z_pos']
self.all_vars = np.take_along_axis(self.all_vars, np.argsort(self.all_z), axis=0)
self.thefiles = np.take_along_axis(np.array(self.thefiles), np.argsort(self.all_z), axis=0)
for counter, thefile in np.ndenumerate(self.thefiles):
print(thefile)
logging.info('started read')
imported_df = pd.read_feather(thefile)
logging.info('ended read')
if counter[0] == 0:
self.all_integrals = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size']), dtype='float32')  # pre-allocate full-size arrays; the reshaped data of each scan should be written into them
self.all_ratios = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size']), dtype='float32')
self.all_ntsr = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size']), dtype='float32')
self.all_ntsr_helper = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size']), dtype='float32')
self.all_colors = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size'], 3), dtype='float32')
self.alldfs = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size'], len(imported_df['wavelength'])), dtype='float32')
self.all_photon_dfs = np.zeros((len(self.thefiles), self.vars['Size'], self.vars['Size'], len(imported_df['wavelength'])), dtype='float32')
self.allwaves = np.zeros((len(self.thefiles), len(imported_df['wavelength'])), dtype='float32')
try:
new_trans = np.loadtxt(Path(".\supplementary\TransferCurves\Transfercurve.csv"), delimiter=';')*(self.cal_int_time / self.all_vars[counter]['int_time'])
except:
raise Exception("Can\'t find the transfer curve")
if np.array_equal(new_trans, imported_df['transfer_curve']) is True:
new_data_df = imported_df.iloc[:, 3:]
else:
new_data_df = imported_df.iloc[:, 3:].multiply(new_trans/imported_df['transfer_curve'], axis=0)
self.alldfs[counter] = np.reshape(np.transpose(new_data_df.values), (self.all_vars[counter]['Size'], self.all_vars[counter]['Size'], len(imported_df['wavelength'])))
self.alldfs[counter][1::2] = np.flip(self.alldfs[counter][1::2], 1)
self.allwaves[counter] = imported_df['wavelength']
del imported_df
del new_data_df
self.alldfs = np.where(np.isnan(self.alldfs), 0.0, self.alldfs)
self.frames_red = True
def import_raw(self,
ntsr_border=None,
colors=False,
waveleghth_borders_fo_ntsr=(150, -1),
ntsr_helper_border=0,
separate_borders_mode=False,
analysis=False):
"""
Starts the import procedure, normally automatically started within the class initialization.
------------------
Parameters:
ntsr_border (number, optional):
Pixels with an NTSR at or above this border will be set to 0 (black). If not set, the value from
the class initialization is used.
colors (boolean, optional, default is False):
If True, colours will be processed.
waveleghth_borders_fo_ntsr (tuple of 2 integers, default is (150, -1)):
See parameter docstring for wl_ntsr_borders of this class.
separate_borders_mode (boolean, optional, default is False):
If True, a new NTSR border is found for every slice, namely the largest NTSR that belongs to
an integral value smaller than ntsr_helper_border.
ntsr_helper_border (number, optional, default is 0):
Needed for separate_borders_mode.
analysis (boolean, optional, default is False):
If True, a log file is written showing the total received power over the z-positions.
"""
if ntsr_border is None:
ntsr_border = self.ntsr_border
self.ntsr_border = ntsr_border
self.separate_borders_mode = separate_borders_mode
self.ntsr_helper_border = ntsr_helper_border
if self.frames_red is False:
self.read_frames()
if self.waveleghth_borders_fo_ntsr is False:
self.waveleghth_borders_fo_ntsr = waveleghth_borders_fo_ntsr
logging.info('start ntsr')
self.all_ntsr = self.noisetosignal(self.alldfs, borders=self.waveleghth_borders_fo_ntsr, axis=-1)
logging.info('end ntsr')
logging.info('start photons')
"""making the photons in nanomol per second and nm"""
print('ntsr_border = %.3f'%ntsr_border)
self.conv_to_photons_array = self.allwaves[0]*1e-9*1e6/self.planck_constant/self.speed_of_light/self.avogadro_number
self.all_photon_dfs = self.alldfs*self.conv_to_photons_array
logging.info('end photons')
logging.info('start integrating')
self.all_integrals = np.trapz(self.alldfs, self.allwaves[0], axis=-1) * 0.1521
logging.info('power done')
self.all_photon_integrals = np.trapz(self.all_photon_dfs, self.allwaves[0], axis=-1) * 0.1521
logging.info('photons done')
if self.saveram is True:
del self.all_photon_dfs
self.all_ntsr_helper = np.where(self.all_integrals < ntsr_helper_border, self.all_ntsr, 0)
"""separate borders mode means, that for every slice a new ntsr border is found. Namely the biggest ntsr which
belongs to a integral value smaller than ntsr_helper_border"""
if separate_borders_mode is True:
"""sparate_ntsr is an array with the length of the numper of slices. Containing the biggest ntsr
which belongs to an integral value smaller than ntsr_helper_border"""
self.ntsr_border = 'separate'
self.separate_ntsr = np.max(self.all_ntsr_helper, (1, 2))
logging.info('separate ntsr borders mode, found this:' + str(self.separate_ntsr))
self.separate_ntsr = np.ones(self.all_ntsr.shape)*np.reshape(self.separate_ntsr, (len(self.thefiles), 1, 1))
self.all_integrals = np.where(self.all_ntsr < self.separate_ntsr, self.all_integrals, 0)
self.all_photon_integrals = np.where(self.all_ntsr < self.separate_ntsr, self.all_photon_integrals, 0)
if separate_borders_mode is False:
self.all_integrals = np.where(self.all_ntsr < ntsr_border, self.all_integrals, 0)
self.all_photon_integrals = np.where(self.all_ntsr < ntsr_border, self.all_photon_integrals, 0)
if self.saveram is False:
self.alldfs_nev_abs = self.alldfs/(10**(7*skewed_gaussian(self.allwaves[0], 16386626.6, 567.358141, 37193531.6, -2830490.97)))
self.all_photon_dfs_nev_abs = self.alldfs_nev_abs*self.conv_to_photons_array
self.all_photon_integrals_nev_abs = np.trapz(self.all_photon_dfs_nev_abs, self.allwaves[0], axis=-1) * 0.1521
if separate_borders_mode is False:
self.all_photon_integrals_nev_abs = np.where(self.all_ntsr < ntsr_border, self.all_photon_integrals_nev_abs, 0)
if separate_borders_mode is True:
self.all_photon_integrals_nev_abs = np.where(self.all_ntsr < self.separate_ntsr, self.all_photon_integrals_nev_abs, 0)
self.all_integrals_nev_abs = np.trapz(self.alldfs_nev_abs, self.allwaves[0], axis=-1) * 0.1521
if separate_borders_mode is False:
self.all_integrals_nev_abs = np.where(self.all_ntsr < ntsr_border, self.all_integrals_nev_abs, 0)
if separate_borders_mode is True:
self.all_integrals_nev_abs = np.where(self.all_ntsr < self.separate_ntsr, self.all_integrals_nev_abs, 0)
self.all_integrals_remaining = self.all_integrals-self.all_integrals_nev_abs
self.all_photon_integrals_remaining = self.all_photon_integrals-self.all_photon_integrals_nev_abs
logging.info('end integrating')
logging.info('start making ratios')
if separate_borders_mode is True:
self.all_ratios = self.make_peak_difference(self.alldfs, ntsr_border=self.separate_ntsr, ntsr=self.all_ntsr)
if separate_borders_mode is False:
self.all_ratios = self.make_peak_difference(self.alldfs, ntsr_border=ntsr_border, ntsr=self.all_ntsr)
logging.info('end making ratios')
if colors is True:
logging.info('start making colors')
lam = np.arange(380., 781., 5) # lambda table for spec_to_xyz
for index, i in np.ndenumerate(np.zeros((len(self.thefiles), self.all_vars[0]['Size'], self.all_vars[0]['Size']))):
self.all_colors[index] = cs.spec_to_rgb(np.interp(lam, self.allwaves[0], self.alldfs[index]))
if separate_borders_mode is True:
self.all_colors = self.all_colors * np.reshape(np.where(self.all_ntsr<self.separate_ntsr, 1, 0),(len(self.thefiles), self.all_vars[0]['Size'], self.all_vars[0]['Size'],1))
if separate_borders_mode is False:
self.all_colors = self.all_colors * np.reshape(np.where(self.all_ntsr<ntsr_border, 1, 0),(len(self.thefiles), self.all_vars[0]['Size'], self.all_vars[0]['Size'],1))
logging.info('end making colors')
if self.chose_dir is True:
logging.info('chose_dir is True')
filename = str(self.thedir/Path(time.strftime("%y%m%d_%H%M%S")+'_'+self.vars['Name'] +'_analysis.log'))
outtxt = 'Pos / cm; Received Power / W; Not Absorbable Power / W\n'
self.all_rec_power = np.zeros(len(self.thefiles))
self.all_rec_photons = np.zeros(len(self.thefiles))
self.all_nev_abs = np.zeros(len(self.thefiles))
rec_sum = 0
nev_abs_sum = 0
for i in range(0, len(self.thefiles)):
logging.info('writing %.3f to %i'%(np.sum(self.all_integrals[i]),i))
self.all_rec_power[i] = np.sum(self.all_integrals[i])
self.all_rec_photons[i] = np.sum(self.all_photon_integrals[i])
if self.saveram is False:
self.all_nev_abs[i] = np.sum(self.all_integrals_nev_abs[i])
outtxt += (str(self.all_vars[i]['z_pos'])+ '; ' + str(np.round(self.all_rec_power[i]*1E-6, 3)) + '; ' + str(np.round(self.all_nev_abs[i]*1E-6, 3))+'\n')
outtxt += ('mean; '+str(np.round(np.sum(self.all_rec_power)/len(self.thefiles)*1E-6, 3))+ '; ' +str(np.round(np.sum(self.all_nev_abs)/len(self.thefiles)*1E-6, 3)))
if analysis is True:
with open(filename, 'w') as f:
f.write(outtxt)
def mak_peak_difference_from_cache(self, dataframe, peak1, peak2, ntsr_border=False, total_width=2):
"""
Can be used if you want to calculate a new peak difference after import.
-------------------
Parameters:
dataframe (array of scans): The array containing the scans, e.g. alldfs.
peak1 (integer): Index of the first peak.
peak2 (integer): Index of the second peak.
ntsr_border (number, default is ntsr_border of class): The border to determine no signal.
total_width (number, default = 2): Half width used for the peak height determination.
"""
if ntsr_border is False:
ntsr_border = self.ntsr_border
self.all_ratios = self.make_peak_difference(dataframe, self.all_ntsr, ntsr_border, total_width, [peak1, peak2])
def make_peak_difference(self, dataframe, ntsr, ntsr_border, width=2, peakindices=[274, 526]):
"""Helper method fpr import_raw, return an array of the ratio of the two peaks, 0 when no signal"""
ratio = (np.mean(dataframe[:, :, :, peakindices[0]-width:peakindices[0]+width], axis=-1)/np.mean(dataframe[:, :, :, peakindices[1]-width:peakindices[1]+width], axis=-1))*np.where(ntsr < ntsr_border, 1, 0)
ratio = np.clip(ratio, -10, 10)
real_clips = np.array([-2, 8])
ratio = np.clip(ratio, real_clips[0], real_clips[1])
return ratio
def plot(self, mode='integrals', data=False, comment = '', cbar_title='', bar_to_max=False):
"""
Plots and saves heatmaps of all loaded scans.
-------------------
Parameters:
mode (string, default is 'integrals'):
Defines the plotted data and display mode (matplotlib), combinations of data and method are
automatically recognized. Possible data is:
’integrals’
Heatmap of the received power per pixel.
Can be combined with ’nev_abs’ and ’remaining’.
’ratios’
Heatmap of the peak ratios per pixel.
’custom’
Heatmap of a custom 2D array, given by the parameter data.
’photons’
Heatmap of the received photons per pixel.
Can be combined with ’nev_abs’ and ’remaining’.
’color’
Image in heatmap style, showing the calculated color of the pixels.
!!! Can’t be combined with other output options!!!!
’integrals’ and ’photons’ can be combined with:
’nev_abs’
Not absorbable photons or power.
’remaining’
Not absorbed photons or power.
Except for ’color’, all other options can also be combined with:
’contour_clabel’
Contour heatmap with labels. See the Matplotlib documentation.
’contourf’
Contourf heatmap. See the Matplotlib documentation.
’contour’
Contour heatmap. See the Matplotlib documentation.
data (array, default is False):
Numpy array for mode == 'custom'.
comment (string, default = ''):
Additional string to be displayed in the figure.
cbar_title (string, default = ''):
Title of the colorbar for mode == 'custom'.
bar_to_max (boolean, default is False):
If True, all colorbars are scaled to the absolute maximum of the figure set.
"""
boundsstring = ''
"""Here we set the size of the figure, when smaller than 6 we make two cols. when bogger, we make 3"""
if len(self.thefiles) <= 6:
cols = 2
else:
cols = 3
fig, axes = plt.subplots(ncols=cols,
nrows=math.ceil(len(self.thefiles)/cols),
figsize=(cols*3.4, math.ceil(len(self.thefiles)/cols)*3),
sharex=False,
sharey=False)
if cols < len(self.thefiles):
tar = [0,0]
else:
tar = [0]
rest = 0
"""The title of the whole figure gets the name of the Mesurement series"""
if self.separate_borders_mode is True:
fig.suptitle(self.all_vars[0]['Name'] + ' mode: ' + mode + ' separate_ntsr_int_border: %.3f' % self.ntsr_helper_border + comment, fontsize=9)
else:
fig.suptitle(self.all_vars[0]['Name'] + ' mode: ' + mode + ' ntsr_border: %.3f' % self.ntsr_border + comment, fontsize=9)
for i in range(0, len(self.thefiles)):
# print('tar ist '+ str(tar))
x, y = np.meshgrid(np.arange(0, (self.all_vars[i]['Size']+1) * 0.39, 0.39),
np.arange(0, (self.all_vars[i]['Size']+1) * 0.39, 0.39))
# z = np.reshape(np.array(all_integrals[i]), (all_vars[i]['Size'], all_vars[i]['Size']))
if 'integrals' in mode:
if 'nev_abs' in mode:
z = self.all_integrals_nev_abs[i]
bounds = [self.all_integrals_nev_abs.min(), self.all_integrals_nev_abs.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm'+'\nnever absorbed power: ' + str(np.round(np.sum(z)*1E-6, 3)) + ' W', fontsize=8, style='italic')
# print('z pos: '+str(self.all_vars[i]['z_pos'])+'\nnever absorbed power: ' + str(np.round(np.sum(z)*1E-6, 3)) + ' W')
elif 'remaining' in mode:
z = self.all_integrals_remaining[i]
bounds = [self.all_integrals_remaining.min(), self.all_integrals_remaining.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm'+'\nremaining power: ' + str(np.round(np.sum(z)*1E-6, 3)) + ' W', fontsize=8, style='italic')
# print('z pos: '+str(self.all_vars[i]['z_pos'])+'\nremaining power: ' + str(np.round(np.sum(z)*1E-6, 3)) + ' W')
else:
z = self.all_integrals[i]
bounds = [self.all_integrals.min(), self.all_integrals.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm'+'\nreceived power: ' + str(np.round(np.sum(self.all_integrals[i])*1E-6, 3)) + ' W', fontsize=8, style='italic')
# print('z pos: '+str(self.all_vars[i]['z_pos'])+'\nreceived power: ' + str(np.round(np.sum(self.all_integrals[i])*1E-6, 3)) + ' W')
elif 'ratios' in mode:
z = self.all_ratios[i]
bounds = [self.all_ratios.min(), self.all_ratios.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm', fontsize=8, style='italic')
elif 'custom' in mode:
z = data[i]
bounds = [data.min(), data.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm', fontsize=8, style='italic')
elif 'photons' in mode:
if 'nev_abs' in mode:
z = self.all_photon_integrals_nev_abs[i]
bounds = [self.all_photon_integrals_nev_abs.min(), self.all_photon_integrals_nev_abs.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm'+'\nnever absorbed photons: ' + str(np.round(np.sum(z)*1e-6, 3)) + ' \u03BCmol s$^{-1}$', fontsize=8, style='italic')
elif 'remaining' in mode:
z = self.all_photon_integrals_remaining[i]
bounds = [self.all_photon_integrals_remaining.min(), self.all_photon_integrals_remaining.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm'+'\nremaining photons: ' + str(np.round(np.sum(z)*1e-6, 3)) + ' \u03BCmol s$^{-1}$', fontsize=8, style='italic')
else:
z = self.all_photon_integrals[i]
bounds = [self.all_photon_integrals.min(), self.all_photon_integrals.max()]
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm'+'\nreceived photons: ' + str(np.round(np.sum(z)*1e-6, 3)) + ' \u03BCmol s$^{-1}$', fontsize=8, style='italic')
if 'color' not in mode:
if bar_to_max is False:
bounds = [z.min(), z.max()]
boundsstring = ''
else:
boundsstring = '_boundstomax_'
if 'contour_clabel' in mode:
if i == 0:
logging.info('contour_clabel mode detected')
# https://jakevdp.github.io/PythonDataScienceHandbook/04.04-density-and-contour-plots.html
x_c, y_c = np.meshgrid(np.arange(0, (self.all_vars[i]['Size']) * 0.39, 0.39),
np.arange(0, (self.all_vars[i]['Size']) * 0.39, 0.39))
contour = axes[tuple(tar)].contour(x_c, y_c, z, 5, colors='black')
plt.clabel(contour, inline=True, fontsize=4, fmt='%i')
pcm = axes[tuple(tar)].pcolormesh(x, y, z, antialiased=False, shading='flat'
# , ec='face', alpha=0.5
)
elif 'contourf' in mode:
if i == 0:
logging.info('contourf mode detected')
# https://jakevdp.github.io/PythonDataScienceHandbook/04.04-density-and-contour-plots.html
x_c, y_c = np.meshgrid(np.arange(0, (self.all_vars[i]['Size']) * 0.39, 0.39),
np.arange(0, (self.all_vars[i]['Size']) * 0.39, 0.39))
pcm = axes[tuple(tar)].contourf(x_c, y_c, z, 20, cmap='viridis')
elif 'contour' in mode:
if i == 0:
logging.info('contour mode detected')
# https://jakevdp.github.io/PythonDataScienceHandbook/04.04-density-and-contour-plots.html
x_c, y_c = np.meshgrid(np.arange(0, (self.all_vars[i]['Size']) * 0.39, 0.39),
np.arange(0, (self.all_vars[i]['Size']) * 0.39, 0.39))
pcm = axes[tuple(tar)].contour(x_c, y_c, z, 20, cmap='viridis')
else:
x_c, y_c = np.meshgrid(np.arange(0, (self.all_vars[i]['Size']) * 0.39 + 0.39, 0.39),
np.arange(0, (self.all_vars[i]['Size']) * 0.39 + 0.39, 0.39))
pcm = axes[tuple(tar)].pcolormesh(x_c, y_c, z, antialiased=False, shading='flat',vmin=bounds[0], vmax=bounds[1])
if 'color' in mode:
axes[tuple(tar)].set_title('z pos: '+str(self.all_vars[i]['z_pos'])+' cm', fontsize=8, style='italic')
z = self.all_colors[i]
pcm = axes[tuple(tar)].imshow(z, origin='lower',extent=[x.min(), x.max(), y.min(), y.max()])
yticks = np.linspace(0., 0.39 * (self.all_vars[i]['Size']), 7)
yticks[1:-1] = np.round(np.linspace(0., 0.39 * (self.all_vars[i]['Size']), 7)[1:-1])
xticks = np.linspace(0, 0.39 * (self.all_vars[i]['Size']), 7)
xticks[1:-1] = np.round(np.linspace(0, 0.39 * (self.all_vars[i]['Size']), 7)[1:-1])
axes[tuple(tar)].set_yticks(yticks)
axes[tuple(tar)].set_xticks(xticks)
divider = make_axes_locatable(axes[tuple(tar)])
if 'color' not in mode:
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(pcm, cax=cax)
cbar.ax.tick_params(labelsize=7, pad=0.1)
if 'integrals' in mode:
cbar.ax.set_ylabel("P / \u03BCW", fontsize=8, labelpad=0.2)
elif 'ratios' in mode:
cbar.ax.set_ylabel("peak ratio / 1", fontsize=8, labelpad=0.2)
elif 'custom' in mode:
cbar.ax.set_ylabel(cbar_title, fontsize=8, labelpad=0.2)
elif 'photons' in mode:
cbar.ax.set_ylabel('$q_\mathrm{n,p}$ / nmol s$^{-1}$', fontsize=8, labelpad=0.2)
axes[tuple(tar)].tick_params(axis='x', labelsize=7, pad=2)
axes[tuple(tar)].tick_params(axis='x', labelsize=7, pad=2)
axes[tuple(tar)].tick_params(axis='y',labelsize=7, pad=0.1)
axes[tuple(tar)].set_xlabel("x / cm", fontsize=8, labelpad=0.1)
axes[tuple(tar)].set_ylabel("y / cm", fontsize=8, labelpad=0.1)
axes[tuple(tar)].set_aspect('equal', adjustable='box')
if self.separate_borders_mode is True:
axes[tuple(tar)].text(0.1, y.max()+0.2, 'ntsr border: %.3f'%self.separate_ntsr[i, 0, 0], fontsize=4.5)
if cols < len(self.thefiles):
if tar[1] < cols-1:
tar[1] += 1
elif tar[1] == cols-1:
tar[1] = 0
tar[0] +=1
if i == len(self.thefiles)-1:
if len(self.thefiles) % cols == 0:
rest = 0
else:
rest = (len(self.thefiles)//cols+1)*cols-len(self.thefiles)
if rest != 0:
axes[tuple(tar)].remove()
rest -= 1
while rest != 0:
if tar[1] < cols-1:
tar[1] += 1
elif tar[1] == cols-1:
tar[1] = 0
tar[0] += 1
axes[tuple(tar)].remove()
rest -= 1
else:
tar[0] += 1
if len(self.thefiles) < cols and i == len(self.thefiles)-1:
axes[tuple(tar)].remove()
fig.subplots_adjust(
left=0.125, # the left side of the subplots of the figure
# right = 0.9, # the right side of the subplots of the figure
#bottom = 0.1, # the bottom of the subplots of the figure
top=0.95, # the top of the subplots of the figure
wspace=0.4, # the amount of width reserved for space between subplots,
# expressed as a fraction of the average axis width
hspace=0.1)
if self.thedir == '':
self.thedir = Path(self.dir_path)
fig.savefig(self.thedir/Path(self.all_vars[0]['Name'] +'Colormaps_%s'%mode+ comment+ boundsstring + str(len(self.thefiles)) + '.png')
, dpi=300
, bbox_inches='tight'
)
plt.show()
def plot_single(self, mode='integrals', data=False, info=False, comment = '', cbar_title='', bar_to_max=False):
"""
Plots and saves one specific heatmap.
-------------------
Parameters:
mode (string, default = 'integrals'):
See docstring of method plot.
data (2D numpy array):
The array which should be plotted; should match parameter mode.
comment (string, default = ''):
Additional string to be displayed in the figure.
info (dictionary):
The dictionary containing the corresponding information to the given array. E.g. a subarray of all_vars.
cbar_title (string, default = ''):
Title of the colorbar for mode == 'custom'.
bar_to_max (boolean, default is False):
If True, all colorbars are scaled to the absolute maximum of the figure set.
"""
fig, axes = plt.subplots(figsize=(3.4, 3))
"""The title of the whole figure becomes the name of the Mesurement series"""
fig.suptitle(self.all_vars[0]['Name']+' mode: '+mode + 'ntsr_border: '+str(self.ntsr_border)+comment, fontsize=9)
x, y = np.meshgrid(np.arange(0, (info['Size']+1) * 0.39, 0.39),
np.arange(0, (info['Size']+1) * 0.39, 0.39))
if mode == 'integrals':
z = data
axes.set_title('z pos: '+str(info['z_pos'])+' cm'+'\nreceived power: ' + str(np.round(np.sum(z)*1E-6, 3)) + ' W', fontsize=8, style='italic')
elif mode == 'ratios':
z = data
axes.set_title('z pos: '+str(info['z_pos'])+' cm', fontsize=8, style='italic')
elif mode == 'custom':
z = data
axes.set_title('z pos: '+str(info['z_pos'])+' cm', fontsize=8, style='italic')
if mode != 'color':
pcm = axes.pcolormesh(x, y, z, antialiased=False, shading='flat')
if mode == 'color':
z = data
pcm = axes.imshow(z, origin='lower',extent=[x.min(), x.max(), y.min(), y.max()])
yticks = np.linspace(0., 0.39 * (info['Size']), 7)
yticks[1:-1] = np.round(np.linspace(0., 0.39 * (info['Size']), 7)[1:-1])
xticks = np.linspace(0, 0.39 * (info['Size']), 7)
xticks[1:-1] = np.round(np.linspace(0, 0.39 * (info['Size']), 7)[1:-1])
axes.set_yticks(yticks)
axes.set_xticks(xticks)
divider = make_axes_locatable(axes)
if mode != 'color':
bounds = [z.min(), z.max()]
cax = divider.append_axes('right', size='5%', pad=0.05)
if bar_to_max is True:
barticks = np.arange(bounds[0], bounds[1], round((bounds[1]-bounds[0])/6))
cbar = fig.colorbar(pcm, cax=cax, boundaries=bounds, ticks=barticks)
else:
cbar = fig.colorbar(pcm, cax=cax)
cbar.ax.tick_params(labelsize=7, pad=0.1)
if mode == 'integrals':
cbar.ax.set_ylabel("P / \u03BCW", fontsize=8, labelpad=0.2)
elif mode == 'ratios':
cbar.ax.set_ylabel("peak ratio / 1", fontsize=8, labelpad=0.2)
elif mode == 'custom':
cbar.ax.set_ylabel(cbar_title, fontsize=8, labelpad=0.2)
axes.tick_params(axis='x', labelsize=7, pad=2)
axes.tick_params(axis='y',labelsize=7, pad=0.1)
axes.set_xlabel("x / cm", fontsize=8, labelpad=0.1)
axes.set_ylabel("y / cm", fontsize=8, labelpad=0.1)
axes.set_aspect('equal', adjustable='box')
if self.thedir == '':
self.thedir = Path(self.dir_path)
fig.savefig(self.thedir/Path(info['Name'] +'Colormap_single_%sat%05.02f'%(mode,info['z_pos'])+ comment + str(len(self.thefiles)) + '.png')
, dpi=300
, bbox_inches='tight'
)
plt.show()
def to_minimize(self, value, percent, array):
"""The function to be minimized by the cutoff at percent function"""
return abs(np.sum(np.where(array>=value, array, 0)) - np.sum(array)*percent/100)
def cutoff_at_percent(self, percent, array):
"""
Returns an array in which the lowest-valued pixels are set to 0 so that each scan keeps the given percentage
of its integral. The minimize results are stored in self.cutoff_results.
-------------
Parameters:
percent (number):
The percentage of the array to keep.
array (array):
The array to be treated.
"""
self.cutoff_results = np.array([scipy.optimize.OptimizeResult]*len(array))
cutoff_array = np.zeros(array.shape)
for i in range(0, len(array)):
self.cutoff_results[i] = minimize(self.to_minimize, np.max(array[i])*0.1, tol=1, args=(percent, array[i]), method= 'Powell')
cutoff_array[i] = np.where(array[i] >= self.cutoff_results[i]['x'], array[i], 0)
return cutoff_array
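# Usage sketch (hypothetical, added for illustration): keep the pixels that make up 90 % of each scan's integral.
#     trimmed = ev.cutoff_at_percent(90, ev.all_integrals)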
def determine_angle(self, array, center = 'auto', comment=''):
"""
Determines the emission angle from the input array. Returns a plot and the calculated arrays, and also
calculates the theoretical origin of the light source.
---------------
Parameters:
array (array):
The array to be treated.
center (tuple of two integers or 'auto', default is 'auto'):
The center of the light source on the array. If center is ’auto’ , the position of the largest value in
the array is used. Otherwise, pass a tuple of coordinates in pixels.
---------------
Returns:
angles (1D numpy array of 4 numbers):
The opening angles in x and y direction.
"""
if 'auto' in center:
center = np.reshape(np.array(np.where(array[-1] == np.amax(array[-1]))), (2))
logging.info('center found at ' + str(center) + ' resp. ' + str(center*0.39))
x1 = np.zeros(len(array))
x2 = np.zeros(len(array))
y1 = np.zeros(len(array))
y2 = np.zeros(len(array))
try:
self.all_z[0] = self.all_z[0]
except:
self.all_z = np.zeros(len(self.all_vars))
for num, i in np.ndenumerate(self.all_vars):
self.all_z[num] = self.all_vars[num]['z_pos']
for i in range(0, len(array)):
y1[i] = np.min(np.where(array[i, center[0], :] != 0))
y2[i] = self.all_vars[-1]['Size']-np.min(np.where(np.flip(array[i, center[0], :]) != 0))
x1[i] = np.min(np.where(array[i, :, center[1]] != 0))
x2[i] = self.all_vars[-1]['Size']-np.min(np.where(np.flip(array[i, :, center[1]]) != 0))
self.y1_model = np.polyfit(self.all_z, y1, 1)
"""
==========================
Author: <NAME>
Year: 2019
==========================
This module contains a competition class to handle a competition between two bots.
"""
import numpy as np
from botbowl.core.table import CasualtyType
from botbowl.core import Game, InvalidActionError
from botbowl.core import load_arena, load_rule_set
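# Minimal usage sketch (hypothetical, not part of the original module): wrapping a finished botbowl
# game and a series of games with the result classes defined below.
#     result = GameResult(game)          # `game` is a completed botbowl Game
#     result.print()
#     comp = CompetitionResults("bot_a", "bot_b", [result])
#     comp.print()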
class TeamResult:
def __init__(self, game, name, team, winner, crashed):
self.name = name
self.win = winner is not None and winner.name == name
self.draw = winner is None
self.loss = not (self.win or self.draw)
self.tds = team.state.score
self.cas = len(game.get_casualties(team))
self.cas_inflicted = len(game.get_casualties(game.get_opp_team(team)))
self.killed = len([player for player in game.get_casualties(team) if CasualtyType.DEAD in player.state.injuries_gained])
self.kills_inflicted = len([player for player in game.get_casualties(game.get_opp_team(team)) if CasualtyType.DEAD in player.state.injuries_gained])
# Count inflicted casualties and kills from reports
self.crashed_win = crashed and self.win
self.crashed_loss = crashed and not self.win and not self.draw
def print(self):
print("-- {}".format(self.name))
print("Result: {}".format("Win" if self.win else ("Draw" if self.draw else "Loss")))
if self.crashed_win or self.crashed_loss:
print("Game crashed")
print("TDs: {}".format(self.tds))
print("Cas: {}".format(self.cas))
print("Cas inflicted: {}".format(self.cas_inflicted))
print("Killed: {}".format(self.killed))
print("Kills: {}".format(self.kills_inflicted))
class GameResult:
def __init__(self, game, crashed=False):
self.home_agent_name = game.home_agent.name
self.away_agent_name = game.away_agent.name
self.crashed = crashed
if crashed:
self.winner = None
else:
self.winner = game.get_winner()
self.home_result = TeamResult(game, game.home_agent.name, game.state.home_team, self.winner, self.crashed)
self.away_result = TeamResult(game, game.away_agent.name, game.state.away_team, self.winner, self.crashed)
self.draw = self.winner is None
self.tds = self.home_result.tds + self.away_result.tds
self.cas_inflicted = self.home_result.cas_inflicted + self.away_result.cas_inflicted
self.kills = self.home_result.kills_inflicted + self.away_result.kills_inflicted
def print(self):
print("############ GAME RESULTS ###########")
print("Final score:")
print("- {} {} - {} {}".format(self.away_agent_name, self.away_result.tds, self.home_result.tds, self.home_agent_name))
print("Casualties inflicted:")
print("- {} {} - {} {}".format(self.away_agent_name, self.away_result.cas_inflicted, self.home_result.cas_inflicted, self.home_agent_name))
print("Kills inflicted:")
print("- {} {} - {} {}".format(self.away_agent_name, self.away_result.kills_inflicted, self.home_result.kills_inflicted, self.home_agent_name))
print("Result:")
if self.winner is not None:
print(f"- Winner: {self.winner.name}")
elif self.crashed:
print("- Game crashed - no winner")
else:
print("- Draw")
print("#####################################")
class CompetitionResults:
def __init__(self, competitor_a_name, competitor_b_name, game_results):
self.game_results = game_results
self.competitor_a_name = competitor_a_name
self.competitor_b_name = competitor_b_name
self.wins = {
competitor_a_name: np.sum([1 if result.winner is not None and result.winner.name.lower() == competitor_a_name.lower() else 0 for result in game_results]),
competitor_b_name: np.sum([1 if result.winner is not None and result.winner.name.lower() == competitor_b_name.lower() else 0 for result in game_results])
}
self.decided = self.wins[competitor_a_name] + self.wins[competitor_b_name]
self.undecided = len(game_results) - self.decided
self.crashes = len([result for result in game_results if result.crashed])
self.a_crashes = len([result for result in game_results if result.crashed and result.winner is not None and result.winner.name.lower() != self.competitor_a_name.lower()])
self.b_crashes = len([result for result in game_results if result.crashed and result.winner is not None and result.winner.name.lower() != self.competitor_b_name.lower()])
self.tds = {
competitor_a_name: [result.home_result.tds if result.home_agent_name.lower() == competitor_a_name.lower() else result.away_result.tds for result in game_results],
competitor_b_name: [result.home_result.tds if result.home_agent_name.lower() == competitor_b_name.lower() else result.away_result.tds for result in game_results]
}
self.cas_inflicted = {
competitor_a_name: [result.home_result.cas_inflicted if result.home_agent_name.lower() == competitor_a_name.lower() else result.away_result.cas_inflicted for result in game_results],
competitor_b_name: [result.home_result.cas_inflicted if result.home_agent_name.lower() == competitor_b_name.lower() else result.away_result.cas_inflicted for result in game_results]
}
self.kills_inflicted = {
competitor_a_name: [result.home_result.kills_inflicted if result.home_agent_name.lower() == competitor_a_name.lower() else result.away_result.kills_inflicted for result in game_results],
competitor_b_name: [result.home_result.kills_inflicted if result.home_agent_name.lower() == competitor_b_name.lower() else result.away_result.kills_inflicted for result in game_results]
}
def print(self):
print("%%%%%%%%% COMPETITION RESULTS %%%%%%%%%")
if len(self.game_results) == 0:
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
return
print("Wins:")
print("- {}: {}".format(self.competitor_a_name, self.wins[self.competitor_a_name]))
print("- {}: {}".format(self.competitor_b_name, self.wins[self.competitor_b_name]))
print(f"Draws: {self.undecided}")
print(f"Crashes: {self.crashes}")
if self.crashes > 0:
print(f"- {self.competitor_a_name}: {self.a_crashes}")
print(f"- {self.competitor_b_name}: {self.b_crashes}")
print("TDs:")
print("- {}: {} (avg. {})".format(self.competitor_a_name, np.sum(self.tds[self.competitor_a_name]), np.mean(self.tds[self.competitor_a_name])))
print("- {}: {} (avg. {})".format(self.competitor_b_name,
|
np.sum(self.tds[self.competitor_b_name])
|
numpy.sum
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import openvino.opset8 as ov
from openvino.impl import Shape, Type
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, range_start, range_end",
[
(ov.absolute, np.abs, -1, 1),
(ov.abs, np.abs, -1, 1),
(ov.acos, np.arccos, -1, 1),
(ov.acosh, np.arccosh, 1, 2),
(ov.asin, np.arcsin, -1, 1),
(ov.asinh, np.arcsinh, -1, 1),
(ov.atan, np.arctan, -100.0, 100.0),
(ov.atanh, np.arctanh, 0.0, 1.0),
(ov.ceiling, np.ceil, -100.0, 100.0),
(ov.ceil, np.ceil, -100.0, 100.0),
(ov.cos, np.cos, -100.0, 100.0),
(ov.cosh, np.cosh, -100.0, 100.0),
(ov.exp, np.exp, -100.0, 100.0),
(ov.floor, np.floor, -100.0, 100.0),
(ov.log, np.log, 0, 100.0),
(ov.relu, lambda x: np.maximum(0, x), -100.0, 100.0),
(ov.sign, np.sign, -100.0, 100.0),
(ov.sin, np.sin, -100.0, 100.0),
(ov.sinh, np.sinh, -100.0, 100.0),
(ov.sqrt, np.sqrt, 0.0, 100.0),
(ov.tan, np.tan, -1.0, 1.0),
(ov.tanh, np.tanh, -100.0, 100.0),
],
)
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
np.random.seed(133391)
input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32)
expected = numpy_fn(input_data)
result = run_op_node([input_data], ng_api_fn)
assert np.allclose(result, expected, rtol=0.001)
@pytest.mark.parametrize(
"ng_api_fn, numpy_fn, input_data",
[
pytest.param(ov.absolute, np.abs, np.float32(-3)),
pytest.param(ov.abs, np.abs, np.float32(-3)),
pytest.param(ov.acos, np.arccos, np.float32(-0.5)),
pytest.param(ov.asin, np.arcsin, np.float32(-0.5)),
pytest.param(ov.atan, np.arctan, np.float32(-0.5)),
pytest.param(ov.ceiling, np.ceil, np.float32(1.5)),
pytest.param(ov.ceil, np.ceil, np.float32(1.5)),
pytest.param(ov.cos, np.cos, np.float32(np.pi / 4.0)),
pytest.param(ov.cosh, np.cosh, np.float32(np.pi / 4.0)),
pytest.param(ov.exp, np.exp, np.float32(1.5)),
pytest.param(ov.floor, np.floor, np.float32(1.5)),
pytest.param(ov.log, np.log, np.float32(1.5)),
pytest.param(ov.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),
pytest.param(ov.sign, np.sign, np.float32(0.0)),
pytest.param(ov.sin, np.sin, np.float32(np.pi / 4.0)),
"""Code for sampling the household and age structure of a population of n
agents.
"""
import numpy as np
import csv
from pkg_resources import resource_filename
def get_age_distribution(country):
age_distribution=[]
with open(resource_filename(__package__, 'ages/World_Age_2019.csv')) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if row[0]==country:
for i in range(101):
age_distribution.append(float(row[i+1]))
break
return np.array(age_distribution)
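# Hypothetical usage (added for illustration): returns a 101-entry array (ages 0-100) for the given country.
#     age_dist = get_age_distribution('India')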
"""
Code to draw plot for the documentation.
This plots an example of non-convergence in asymmetric replicator dynamics.
The code should match the reference code in the documentation.
"""
import matplotlib.pyplot as plt
import numpy as np
import nashpy as nash
A = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]])
B = A.transpose()
game = nash.Game(A, B)
x0 = np.array([0.3, 0.35, 0.35])
y0 = np.array([0.3, 0.35, 0.35])
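# Likely continuation (hedged sketch, not verified against the documentation's reference code):
# assuming nashpy's Game.asymmetric_replicator_dynamics, the trajectories would be computed and
# plotted roughly as follows; the output filename is hypothetical.
#     xs, ys = game.asymmetric_replicator_dynamics(x0=x0, y0=y0)
#     plt.plot(xs)
#     plt.plot(ys)
#     plt.savefig("asymmetric_replicator_dynamics.svg", transparent=True)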
"""
File: test_beams.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/<NAME>
Description:
"""
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from skimage.restoration import unwrap_phase
from skimage.exposure import rescale_intensity
from mpl_toolkits.axes_grid1 import ImageGrid
from common.constants import j
from beams.GaussianLaguerreBeams import GaussLaguerreModeSet as GLM
from utils.filters import circ_mask
plt.style.use('mint')
wavelength = 624 * 1e-9
k = 2 * np.pi / wavelength
w0 = 20 * 1e-6
N = 70
L = 100 * 1e-6
x0 = np.linspace(-L/2, L/2, N)
# --- VERSION 0.1.3 updated 20211101 by NTA ---
import pandas as pd
import numpy as np
import os
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
import time
# ---- INITIALIZE VARIABLES ----
lil_del_dict_eth3 = []
lil_del_dict_eth3_UID = []
rmv_msg = []
rmv_meta_list = []
output_sep = '--------------'
# ---- VARIABLES for read_Nu_data function (faster to define constants outside of functions to avoid looping)----
# Get indices reference and sample side measurements
# ref_b1_idx = np.linspace(0, 40, 21, dtype = int)
# ref_b2_idx = np.linspace(41, 81, 21, dtype = int)
# ref_b3_idx = np.linspace(82, 122, 21, dtype = int)
# ref_idx = np.concatenate([ref_b1_idx, ref_b2_idx, ref_b3_idx])
# ---- ASSIGN PLOT DEFAULTS ----
sns.set_palette("colorblind")
pal = sns.color_palette()
medium_font = 10
plt.rc('axes', labelsize=medium_font, labelweight = 'bold')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
# ---- READ IN PARAMETERS ('params.xlsx') ----
xls = pd.ExcelFile(Path.cwd().parents[0] / 'params.xlsx')
df_anc = pd.read_excel(xls, 'Anchors', index_col = 'Anchor')
df_const = pd.read_excel(xls, 'Constants', index_col = 'Name')
df_threshold = pd.read_excel(xls, 'Thresholds', index_col = 'Type')
df_rnm = pd.read_excel(xls, 'Rename_by_UID')
long_term_d47_SD = df_threshold['Value'].loc['long_term_SD']
num_SD = df_threshold['Value'].loc['num_SD']
SD_thresh = long_term_d47_SD*num_SD # pulled from parameters file
bad_count_thresh = df_threshold['Value'].loc['bad_count_thresh']
transducer_pressure_thresh = df_threshold['Value'].loc['transducer_pressure_thresh']
balance_high = df_threshold['Value'].loc['balance_high']
balance_low = df_threshold['Value'].loc['balance_low']
calc_a18O = df_const['Value'].loc['calc_a18O']
arag_a18O = df_const['Value'].loc['arag_a18O']
dolo_a18O = df_const['Value'].loc['dolo_a18O']
Nominal_D47 = df_anc.to_dict()['D47'] # Sets anchor values for D47crunch as dictionary {Anchor: value}
# ---- DEFINE FUNCTIONS USED TO CALCULATE D47/T/D18Ow ----
def calc_bern_temp(D47_value):
''' Calculates D47 temp using calibration from Bernasconi et al. (2018) 25C '''
return (((0.0449 * 1000000) / (D47_value - 0.167))**0.5) - 273.15
def calc_MIT_temp(D47_value):
''' Calculates D47 temp using preliminary calibration from Anderson et al. (2020) 90C'''
if D47_value > 0.153: #(prevents complex returns)
return (((0.039 * 1000000) / (D47_value - 0.153))**0.5) - 273.15
else:
return np.nan
def calc_Petersen_temp(D47_value):
'''Calculates D47 temperature (C) using calibration from Petersen et al. (2019) 90C'''
return (((0.0383 * 1000000) / (D47_value - 0.170))**0.5) - 273.15
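# Worked example (added note; value purely illustrative): a D47 value of 0.600 permil gives
#     calc_MIT_temp(0.600) = sqrt(0.039e6 / (0.600 - 0.153)) - 273.15 ≈ 22.2 °C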
def make_water_KON97(D47_T):
'''Calculates fluid d18O based on D47 temperature from Kim and O'Neil (1997)'''
thousandlna_KON97 = 18.03 * (1e3 * (1/(D47_T + 273.15))) - 32.42
a_KON97 = np.exp((thousandlna_KON97/1000))
eps_KON97 = (a_KON97-1) * 1e3
return eps_KON97
def make_water_A21(D47_T):
'''Calculates fluid d18O based on D47 temperature from Anderson et al. (2021)'''
thousandlna_A21 = 17.5 * (1e3 * (1/(D47_T + 273.15))) - 29.1
a_A21 = np.exp((thousandlna_A21/1000))
eps_A21 = (a_A21-1) * 1e3
return eps_A21
def thousandlna(mineral):
'''Calculates 18O acid fractination factor to convert CO2 d18O to mineral d18O'''
if mineral == 'calcite' or mineral == 'Calcite':
#a = 1.00871 # Kim (2007)
a = calc_a18O
elif mineral == 'dolomite' or mineral == 'Dolomite':
#a = 1.009926 #Rosenbaum and Sheppard (1986) from Easotope
a = dolo_a18O
elif mineral == 'aragonite' or mineral == 'Aragonite':
#a = 1.0090901 # Kim (2007)
a = arag_a18O
else:
a = calc_a18O
return 1000*np.log(a)
# ---- Define helper functions ----
# import fnmatch
# def find_file(pattern, path): # DOESNT WORK YET -- MIGHT BE MORE EFFICIENT
# result = []
# for files in os.walk(path):
# print(files)
# if isinstance(files, str):
# print(files)
# if fnmatch.fnmatch(files, pattern):
# print('found')
# result.append(os.path.join(root, files))
# return result
# ---- READ AND CORRECT DATA ----
def read_Nu_data(data_file, file_number, current_sample, folder_name, run_type):
'''
PURPOSE: Read in raw voltages from Nu data file (e.g., Data_13553 ETH-1.txt), zero correct, calculate R values, and calculate little deltas
INPUTS: Path to Nu data file (.txt); analysis UID (e.g., 10460); sample name (e.g., ETH-1); and run type ('standard' or 'clumped')
OUTPUT: List of mean d45 to d49 (i.e. little delta) values as Pandas dataframe
'''
bad_count = 0 # Keeps track of bad cycles (cycles > 5 SD from sample mean)
bad_rep_count = 0 # Keeps track of bad replicates
# -- Read in file --
# Deals with different .txt file formats starting at UID 1899, 9628 (Nu software updates)
if file_number > 9628: n_skip = 31
elif file_number < 1899: n_skip = 29
else: n_skip = 30
try:
df = pd.read_fwf(data_file, skiprows = n_skip, header = None) # Read in file, skip n_skip rows, no header
except NameError:
print('Data file not found for UID', file_number)
# -- Clean up data --
df = df.drop(columns = [0]) # removes first column (full of zeros)
df = df.dropna(how = 'any')
df = df.astype('float64') # make sure data is read as floats
df = df[(df.T != 0).any()] # remove all zeroes; https://stackoverflow.com/questions/22649693/drop-rows-with-all-zeros-in-pandas-data-frame
df = df.reset_index(drop = 'True')
# -- Read in blank i.e. zero measurement --
df_zero = df.head(6).astype('float64') # first 6 rows are the "Blank" i.e. zero measurement; used to zero-correct entire replicate
df_zero_mean = (df_zero.apply(np.mean, axis = 1)).round(21) # calculates mean of zero for each mass
df_mean = df.mean(axis = 1) # calculates the mean of each row (i.e., averages each individual measurement to calculate a cycle mean)
# Every 6th entry is a particular mass, starting with mass 49. Starts at 6 to avoid zero measurements.
mass_49_index = np.arange(6, len(df), 6)
mass_48_index = np.arange(7, len(df), 6)
mass_47_index = np.arange(8, len(df), 6)
mass_46_index = np.arange(9, len(df), 6)
mass_45_index = np.arange(10, len(df), 6)
mass_44_index = np.arange(11, len(df), 6)
# -- Calculate R values --
# subtract mass_44 zero measurement from each mass_44 meas
m44 = df_mean[mass_44_index] - df_zero_mean[5]
m44 = m44.dropna()
m44 = m44.reset_index(drop = True)
# For all masses, subtract zero measurement from actual measurement, and then calc 4X/49 ratio.
m49 = df_mean[mass_49_index] - df_zero_mean[0]
m49 = m49.dropna()
m49 = m49.reset_index(drop = True)
m49_44 = m49/m44 # calculate raw 49/44 ratio
m48 = df_mean[mass_48_index] - df_zero_mean[1]
m48 = m48.dropna()
m48 = m48.reset_index(drop = True)
m48_44 = m48/m44
m47 = df_mean[mass_47_index] - df_zero_mean[2]
m47 = m47.dropna()
m47 = m47.reset_index(drop = True)
m47_44 = m47/m44
m46 = df_mean[mass_46_index] - df_zero_mean[3]
m46 = m46.dropna()
m46 = m46.reset_index(drop = True)
m46_44 = m46/m44
m45 = df_mean[mass_45_index] - df_zero_mean[4]
m45 = m45.dropna()
m45 = m45.reset_index(drop = True)
m45_44 = m45/m44
# Create a zero-corrected dataframe of R values
df_zero_corr = pd.DataFrame({'m44':m44, 'm45_44':m45_44,'m46_44':m46_44, 'm47_44':m47_44, 'm48_44':m48_44, 'm49_44':m49_44})
# Calculate little deltas (d4X) by correcting each sample side measurement to bracketing ref side measurements
lil_del = []
# For a clumped run, define the index locations of all sample side cycles (three blocks of 20 cycles each)
sam_b1_idx = np.linspace(1, 39, 20, dtype = int)
sam_b2_idx = np.linspace(42, 80, 20, dtype = int)
sam_b3_idx = np.linspace(83, 121, 20, dtype = int)
sam_idx = np.concatenate([sam_b1_idx, sam_b2_idx, sam_b3_idx])
# if standard run, index locations of sample side cycles are different
if run_type == 'standard':
sam_idx = np.linspace(1, 11, 6, dtype = int)
# compare sample measurement to bracketing ref gas measurement
for i in df_zero_corr.columns:
for j in sam_idx: # 'sam_idx' defined above (clumped layout by default, overridden for standard runs)
# df_zero_corr[i][j] is the sample side
# df_zero_corr[i][j-1] is the previous ref side
# df_zero_corr[i][j+1] is the following ref side
lil_del.append(((((df_zero_corr[i][j]/df_zero_corr[i][j-1]) + (df_zero_corr[i][j]/df_zero_corr[i][j+1]))/2.)-1)*1000)
# Define each little delta value by index position
if run_type == 'clumped':
d45 = lil_del[60:120]
d46 = lil_del[120:180]
d47 = lil_del[180:240]
d48 = lil_del[240:300]
d49 = lil_del[300:360]
elif run_type == 'standard':
d45 = lil_del[6:12]
d46 = lil_del[12:18]
d47 = lil_del[18:24]
d48 = lil_del[24:30]
d49 = lil_del[30:36]
lil_del_dict = {'d45':d45, 'd46':d46,'d47':d47, 'd48':d48, 'd49':d49}
df_lil_del = pd.DataFrame(lil_del_dict) # export to dataframe -- makes it easier for next function to handle
if 'ETH' in current_sample and '3' in current_sample: # this bit is to provide raw data for joyplots/etc.
lil_del_dict_eth3.extend(d47)
# for i in range(len(lil_del_dict_eth3)):
lil_del_dict_eth3_UID.append(file_number)
batch_data_list = [file_number, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
# Calculate median of all cycles.
median_47 = df_lil_del['d47'].median()
d47_pre_SE = df_lil_del['d47'].sem()
# -- FIND BAD CYCLES --
# Removes any cycles with d47 values more than 'SD_thresh' away from the sample median. If more than 'bad_count_thresh' cycles violate this criterion, the entire replicate is removed.
if run_type == 'clumped':
for i in range(len(df_lil_del['d47'])):
# If d47 is outside threshold, remove little deltas of ALL masses for that cycle (implemented 20210819)
if (df_lil_del['d47'].iloc[i]) > ((median_47) + (SD_thresh)) or ((df_lil_del['d47'].iloc[i]) < ((median_47) - (SD_thresh))):
df_lil_del['d45'].iloc[i] = np.nan # 'Disables' cycle; sets value to nan
df_lil_del['d46'].iloc[i] = np.nan
df_lil_del['d47'].iloc[i] = np.nan
df_lil_del['d48'].iloc[i] = np.nan
df_lil_del['d49'].iloc[i] = np.nan
bad_count += 1
session = str(folder_name[:8]) # creates name of session; first 8 characters of folder name are date of run start per our naming convention (e.g., 20211008 clumped apatite NTA = 20211008)
d47_post_SE = df_lil_del['d47'].sem()
rmv_analyses = [] # analysis to be removed
this_path = Path.cwd() / 'raw_data' / folder_name
# -- Find bad replicates --
# This goes through batch summary data and checks values against thresholds from params.xlsx
for i in os.listdir(this_path):
if 'Batch Results.csv' in i and 'fail' not in os.listdir(this_path): # checks for and reads results summary file
summ_file = Path.cwd() / 'raw_data' / folder_name / i # i = e.g., 20210505 clumped dolomite apatite calibration 5 NTA Batch Results.csv
df_results_summ = pd.read_csv(summ_file, encoding = 'latin1', skiprows = 3, header = [0,1])
df_results_summ.columns = df_results_summ.columns.map('_'.join).str.strip() # fixes weird headers of Nu Summary files
#Get the index location of the row that corresponds to the given file number (i.e. replicate)
curr_row = df_results_summ.loc[df_results_summ['Data_File'].str.contains(str(file_number))].index
batch_data_list = [file_number, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, d47_pre_SE, d47_post_SE, bad_count]
if len(curr_row) == 1 and run_type == 'clumped': # curr_row is an Int64Index, which acts like a list. If the previous line finds zero or more than one matching row, this section is skipped.
transduc_press = float(df_results_summ['Transducer_Pressure'][curr_row])
samp_weight = float(df_results_summ['Sample_Weight'][curr_row])
NuCarb_temp = float(df_results_summ['Ave_Temperature'][curr_row])
pumpover = float(df_results_summ['MaxPumpOverPressure_'][curr_row])
init_beam = float(df_results_summ['Initial_Sam Beam'][curr_row])
balance = float(df_results_summ['Balance_%'][curr_row])
vial_loc = float(df_results_summ['Vial_Location'][curr_row])
d13C_SE = float(df_results_summ['Std_Err.5'][curr_row])
d18O_SE = float(df_results_summ['Std_Err.6'][curr_row])
D47_SE = float(df_results_summ['Std_Err.7'][curr_row])
batch_data_list = [file_number, transduc_press, samp_weight, NuCarb_temp, pumpover, init_beam, balance, vial_loc, d13C_SE, d18O_SE, D47_SE, d47_pre_SE, d47_post_SE, bad_count]
# Remove any replicates that fail thresholds, compile a message that will be written to the terminal
if transduc_press < transducer_pressure_thresh:
rmv_analyses.append(file_number)
rmv_msg.append((str(rmv_analyses[0]) + ' failed transducer pressure requirements (transducer_pressure = ' + str(round(transduc_press,1)) + ')' ))
if balance > balance_high or balance < balance_low:
rmv_analyses.append(file_number)
rmv_msg.append((str(rmv_analyses[0]) + ' failed balance requirements (balance = ' + str(round(balance,1)) + ')'))
if bad_count > bad_count_thresh:
rmv_analyses.append(file_number)
rmv_msg.append((str(rmv_analyses[0]) + ' failed cycle-level reproducibility requirements (bad cycles = ' + str(bad_count) + ')'))
break # Found a matching file; there should only be one, so stop here.
else: # Couldn't find matching UID, or got confused. No batch summary data included.
batch_data_list = [file_number, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, d47_pre_SE, d47_post_SE, bad_count]
# If replicate doesn't fail any thresholds, calculate the mean lil delta and return as a list
if bad_count < bad_count_thresh and file_number not in rmv_analyses:
d45_avg = format(df_lil_del['d45'].mean(), 'f')
d46_avg = format(df_lil_del['d46'].mean(), 'f')
d47_avg = format(df_lil_del['d47'].mean(), 'f')
d48_avg = format(df_lil_del['d48'].mean(), 'f')
d49_avg = format(df_lil_del['d49'].mean(), 'f')
data_list = [file_number, session, current_sample, d45_avg, d46_avg, d47_avg, d48_avg, d49_avg]
return data_list, batch_data_list
# If replicate fails any threshold, return list with nans for little deltas and add in metadata
else:
data_list = [file_number, session, current_sample, np.nan, np.nan, np.nan, np.nan, np.nan]
batch_data_list.append(current_sample)
rmv_meta_list.append(batch_data_list)
return None, None
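# --- Hypothetical call sketch for read_Nu_data (path, UID, and folder name below are illustrative only) ---
# data_list, batch_data_list = read_Nu_data(
#     Path.cwd() / 'raw_data' / '20211008 clumped apatite NTA' / 'Data_13553 ETH-1.txt',
#     13553, 'ETH-1', '20211008 clumped apatite NTA', 'clumped')
# # data_list holds [UID, session, sample, d45..d49 means]; both returns are None if the replicate fails screening.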
def fix_names(df):
'''
PURPOSE: Changes names of standards and samples to uniform entries based on conversion spreadsheet
INPUT: Pandas DataFrame of little deltas; 'Names_to_change' tab in params.xlsx
OUTPUT: Fully corrected, accurately named little deltas (raw_deltas.csv)'''
df['Sample'] = df['Sample'].str.strip() # strip whitespace
df_new = pd.read_excel(xls, 'Names_to_change')
# rename based on name (names_to_change; i.e. EHT-1 --> ETH-1)
for i in range(len(df_new)):
df['Sample']=df['Sample'].str.replace(df_new['old_name'][i], df_new['new_name'][i])
# rename samples based on UID (Rename_by_UID; i.e. whatever the name of 10155 is, change to 'ETH-1')
if len(df_rnm) > 0: # check if there's anything to rename
for i in range(len(df_rnm)):
rnm_loc =
|
np.where(df['UID'] == df_rnm['UID'][i])
|
numpy.where
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""image"""
import numbers
import numpy as np
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.primitive import constexpr
from mindspore._checkparam import Rel, Validator as validator
from .conv import Conv2d
from .container import CellList
from .pooling import AvgPool2d
from .activation import ReLU
from ..cell import Cell
__all__ = ['ImageGradients', 'SSIM', 'MSSSIM', 'PSNR', 'CentralCrop']
class ImageGradients(Cell):
r"""
Returns two tensors, the first is along the height dimension and the second is along the width dimension.
Assume an image shape is :math:`h*w`. The gradients along the height and the width are :math:`dy` and :math:`dx`,
respectively.
.. math::
dy[i] = \begin{cases} image[i+1, :]-image[i, :], &if\ 0<=i<h-1 \cr
0, &if\ i==h-1\end{cases}
dx[i] = \begin{cases} image[:, i+1]-image[:, i], &if\ 0<=i<w-1 \cr
0, &if\ i==w-1\end{cases}
Inputs:
- **images** (Tensor) - The input image data, with format 'NCHW'.
Outputs:
- **dy** (Tensor) - vertical image gradients, the same type and shape as input.
- **dx** (Tensor) - horizontal image gradients, the same type and shape as input.
Examples:
>>> net = nn.ImageGradients()
>>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
>>> net(image)
[[[[2,2]
[0,0]]]]
[[[[1,0]
[1,0]]]]
"""
def __init__(self):
super(ImageGradients, self).__init__()
def construct(self, images):
check = _check_input_4d(F.shape(images), "images", self.cls_name)
images = F.depend(images, check)
batch_size, depth, height, width = P.Shape()(images)
if height == 1:
dy = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0)
else:
dy = images[:, :, 1:, :] - images[:, :, :height - 1, :]
dy_last = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0)
dy = P.Concat(2)((dy, dy_last))
if width == 1:
dx = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0)
else:
dx = images[:, :, :, 1:] - images[:, :, :, :width - 1]
dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0)
dx = P.Concat(3)((dx, dx_last))
return dy, dx
def _convert_img_dtype_to_float32(img, max_val):
"""convert img dtype to float32"""
# Usually max_val is 1.0 or 255. Scale the pixel values if max_val > 1, otherwise just cast to float32.
ret = F.cast(img, mstype.float32)
max_val = F.scalar_cast(max_val, mstype.float32)
if max_val > 1.:
scale = 1. / max_val
ret = ret * scale
return ret
@constexpr
def _get_dtype_max(dtype):
"""get max of the dtype"""
np_type = mstype.dtype_to_nptype(dtype)
if issubclass(np_type, numbers.Integral):
dtype_max = np.float64(np.iinfo(np_type).max)
else:
dtype_max = 1.0
return dtype_max
@constexpr
def _check_input_4d(input_shape, param_name, func_name):
if len(input_shape) != 4:
raise ValueError(f"{func_name} {param_name} should be 4d, but got shape {input_shape}")
return True
@constexpr
def _check_input_filter_size(input_shape, param_name, filter_size, func_name):
_check_input_4d(input_shape, param_name, func_name)
validator.check(param_name + " shape[2]", input_shape[2], "filter_size", filter_size, Rel.GE, func_name)
validator.check(param_name + " shape[3]", input_shape[3], "filter_size", filter_size, Rel.GE, func_name)
@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
def _conv2d(in_channels, out_channels, kernel_size, weight, stride=1, padding=0):
return Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
weight_init=weight, padding=padding, pad_mode="valid")
def _create_window(size, sigma):
x_data, y_data = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
x_data = np.expand_dims(x_data, axis=-1).astype(np.float32)
x_data = np.expand_dims(x_data, axis=-1) ** 2
y_data = np.expand_dims(y_data, axis=-1).astype(np.float32)
y_data =
|
np.expand_dims(y_data, axis=-1)
|
numpy.expand_dims
|
from __future__ import division
import os
import numpy as np
import cv2
from libs import utils
from torch.utils.data import Dataset
import json
from PIL import Image
class DAVIS2017(Dataset):
"""DAVIS 2017 dataset constructed using the PyTorch built-in functionalities"""
def __init__(self,
split='val',
root='',
num_frames=None,
custom_frames=None,
transform=None,
retname=False,
seq_name=None,
obj_id=None,
gt_only_first_frame=False,
no_gt=False,
batch_gt=False,
rgb=False,
effective_batch=None,
prev_round_masks = None,#f,h,w
):
"""Loads image to label pairs for tool pose estimation
split: Split or list of splits of the dataset
root: dataset directory with subfolders "JPEGImages" and "Annotations"
num_frames: Select number of frames of the sequence (None for all frames)
custom_frames: List or Tuple with the number of the frames to include
transform: Data transformations
retname: Retrieve meta data in the sample key 'meta'
seq_name: Use a specific sequence
obj_id: Use a specific object of a sequence (If None and sequence is specified, the batch_gt is True)
gt_only_first_frame: Provide the GT only in the first frame
no_gt: No GT is provided
batch_gt: For every frame sequence batch all the different objects gt
rgb: Use RGB channel order in the image
"""
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.db_root_dir = root
self.transform = transform
self.seq_name = seq_name
self.obj_id = obj_id
self.num_frames = num_frames
self.custom_frames = custom_frames
self.retname = retname
self.rgb = rgb
if seq_name is not None and obj_id is None:
batch_gt = True
self.batch_gt = batch_gt
self.all_seqs_list = []
self.seqs = []
for splt in self.split:
with open(os.path.join(self.db_root_dir, 'ImageSets', '2017', splt + '.txt')) as f:
seqs_tmp = f.readlines()
seqs_tmp = list(map(lambda elem: elem.strip(), seqs_tmp))
self.seqs.extend(seqs_tmp)
self.seq_list_file = os.path.join(self.db_root_dir, 'ImageSets', '2017',
'_'.join(self.split) + '_instances.txt')
# Precompute the dictionary with the objects per sequence
if not self._check_preprocess():
self._preprocess()
if self.seq_name is None:
img_list = []
labels = []
prevmask_list= []
for seq in self.seqs:
images = np.sort(os.listdir(os.path.join(self.db_root_dir, 'JPEGImages/480p/', seq.strip())))
images_path = list(map(lambda x: os.path.join('JPEGImages/480p/', seq.strip(), x), images))
lab = np.sort(os.listdir(os.path.join(self.db_root_dir, 'Annotations/480p/', seq.strip())))
lab_path = list(map(lambda x: os.path.join('Annotations/480p/', seq.strip(), x), lab))
if num_frames is not None:
seq_len = len(images_path)
num_frames = min(num_frames, seq_len)
frame_vector = np.arange(num_frames)
frames_ids = list(np.round(frame_vector*seq_len/float(num_frames)).astype(int))
frames_ids[-1] = min(frames_ids[-1], seq_len)
images_path = [images_path[x] for x in frames_ids]
if no_gt:
lab_path = [None] * len(images_path)
else:
lab_path = [lab_path[x] for x in frames_ids]
elif isinstance(custom_frames, tuple) or isinstance(custom_frames, list):
assert min(custom_frames) >= 0 and max(custom_frames) <= len(images_path)
images_path = [images_path[x] for x in custom_frames]
prevmask_list = [prev_round_masks[x] for x in custom_frames]
if no_gt:
lab_path = [None] * len(images_path)
else:
lab_path = [lab_path[x] for x in custom_frames]
if gt_only_first_frame:
lab_path = [lab_path[0]]
lab_path.extend([None] * (len(images_path) - 1))
elif no_gt:
lab_path = [None] * len(images_path)
if self.batch_gt:
obj = self.seq_dict[seq]
if -1 in obj:
obj.remove(-1)
for ii in range(len(img_list), len(images_path)+len(img_list)):
self.all_seqs_list.append([obj, ii])
else:
for obj in self.seq_dict[seq]:
if obj != -1:
for ii in range(len(img_list), len(images_path)+len(img_list)):
self.all_seqs_list.append([obj, ii])
img_list.extend(images_path)
labels.extend(lab_path)
else:
# Initialize the per sequence images for online training
assert self.seq_name in self.seq_dict.keys(), '{} not in {} set.'.format(self.seq_name, '_'.join(self.split))
names_img = np.sort(os.listdir(os.path.join(self.db_root_dir, 'JPEGImages/480p/', str(seq_name))))
img_list = list(map(lambda x: os.path.join('JPEGImages/480p/', str(seq_name), x), names_img))
name_label = np.sort(os.listdir(os.path.join(self.db_root_dir, 'Annotations/480p/', str(seq_name))))
labels = list(map(lambda x: os.path.join('Annotations/480p/', str(seq_name), x), name_label))
prevmask_list = []
if num_frames is not None:
seq_len = len(img_list)
num_frames = min(num_frames, seq_len)
frame_vector = np.arange(num_frames)
frames_ids = list(np.round(frame_vector * seq_len / float(num_frames)).astype(int))
frames_ids[-1] = min(frames_ids[-1], seq_len)
img_list = [img_list[x] for x in frames_ids]
if no_gt:
labels = [None] * len(img_list)
else:
labels = [labels[x] for x in frames_ids]
elif isinstance(custom_frames, tuple) or isinstance(custom_frames, list):
assert min(custom_frames) >= 0 and max(custom_frames) <= len(img_list)
img_list = [img_list[x] for x in custom_frames]
prevmask_list = [prev_round_masks[x] for x in custom_frames]
if no_gt:
labels = [None] * len(img_list)
else:
labels = [labels[x] for x in custom_frames]
if gt_only_first_frame:
labels = [labels[0]]
labels.extend([None]*(len(img_list)-1))
elif no_gt:
labels = [None] * len(img_list)
if obj_id is not None:
assert obj_id in self.seq_dict[self.seq_name], \
"{} doesn't have this object id {}.".format(self.seq_name, str(obj_id))
if self.batch_gt:
self.obj_id = self.seq_dict[self.seq_name]
if -1 in self.obj_id:
self.obj_id.remove(-1)
self.obj_id = [0]+self.obj_id
assert (len(labels) == len(img_list))
if effective_batch:
self.img_list = img_list * effective_batch
self.labels = labels * effective_batch
else:
self.img_list = img_list
self.labels = labels
self.prevmasks_list = prevmask_list
# print('Done initializing DAVIS2017 '+'_'.join(self.split)+' Dataset')
# print('Number of images: {}'.format(len(self.img_list)))
# if self.seq_name is None:
# print('Number of elements {}'.format(len(self.all_seqs_list)))
def _check_preprocess(self):
_seq_list_file = self.seq_list_file
if not os.path.isfile(_seq_list_file):
return False
else:
self.seq_dict = json.load(open(self.seq_list_file, 'r'))
return True
def _preprocess(self):
self.seq_dict = {}
for seq in self.seqs:
# Read object masks and get number of objects
name_label = np.sort(os.listdir(os.path.join(self.db_root_dir, 'Annotations/480p/', seq)))
label_path = os.path.join(self.db_root_dir, 'Annotations/480p/', seq, name_label[0])
_mask = np.array(Image.open(label_path))
_mask_ids = np.unique(_mask)
n_obj = _mask_ids[-1]
self.seq_dict[seq] = list(range(1, n_obj+1))
with open(self.seq_list_file, 'w') as outfile:
outfile.write('{{\n\t"{:s}": {:s}'.format(self.seqs[0], json.dumps(self.seq_dict[self.seqs[0]])))
for ii in range(1, len(self.seqs)):
outfile.write(',\n\t"{:s}": {:s}'.format(self.seqs[ii], json.dumps(self.seq_dict[self.seqs[ii]])))
outfile.write('\n}\n')
print('Preprocessing finished')
def __len__(self):
if self.seq_name is None:
return len(self.all_seqs_list)
else:
return len(self.img_list)
def __getitem__(self, idx):
# print(idx)
img, gt, prev_round_mask = self.make_img_gt_mask_pair(idx)
pad_img, pad_info = utils.apply_pad(img)
pad_gt= utils.apply_pad(gt, padinfo = pad_info)#h,w,n
sample = {'image': pad_img, 'gt': pad_gt}
if self.retname:
if self.seq_name is None:
obj_id = self.all_seqs_list[idx][0]
img_path = self.img_list[self.all_seqs_list[idx][1]]
else:
obj_id = self.obj_id
img_path = self.img_list[idx]
seq_name = img_path.split('/')[-2]
frame_id = img_path.split('/')[-1].split('.')[-2]
sample['meta'] = {'seq_name': seq_name,
'frame_id': frame_id,
'obj_id': obj_id,
'im_size': (img.shape[0], img.shape[1]),
'pad_size': (pad_img.shape[0], pad_img.shape[1]),
'pad_info': pad_info}
if self.transform is not None:
sample = self.transform(sample)
return sample
def make_img_gt_mask_pair(self, idx):
"""
Make the image-ground-truth pair
"""
prev_round_mask_tmp = self.prevmasks_list[idx]
if self.seq_name is None:
obj_id = self.all_seqs_list[idx][0]
img_path = self.img_list[self.all_seqs_list[idx][1]]
label_path = self.labels[self.all_seqs_list[idx][1]]
else:
obj_id = self.obj_id
img_path = self.img_list[idx]
label_path = self.labels[idx]
seq_name = img_path.split('/')[-2]
n_obj = 1 if isinstance(obj_id, int) else len(obj_id)
img = cv2.imread(os.path.join(self.db_root_dir, img_path))
img = np.array(img, dtype=np.float32)
if self.rgb:
img = img[:, :, [2, 1, 0]]
if label_path is not None:
label = Image.open(os.path.join(self.db_root_dir, label_path))
else:
if self.batch_gt:
gt = np.zeros(
|
np.append(img.shape[:-1], n_obj)
|
numpy.append
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
import emg3d
from emg3d import maps
from . import alternatives
# Import soft dependencies.
try:
import discretize
except ImportError:
discretize = None
# MAPS
class TestMaps:
mesh = emg3d.TensorMesh([[1, 1], [1, 1], [1]], (0, 0, 0))
values = np.array([0.01, 10, 3, 4])
def test_new(self):
class MapNew(maps.BaseMap):
def __init__(self):
super().__init__(description='my new map')
testmap = MapNew()
assert "MapNew: my new map" in testmap.__repr__()
with pytest.raises(NotImplementedError, match='Forward map not imple'):
testmap.forward(1)
with pytest.raises(NotImplementedError, match='Backward map not impl'):
testmap.backward(1)
with pytest.raises(NotImplementedError, match='Derivative chain not '):
testmap.derivative_chain(1, 1)
def test_conductivity(self):
model = emg3d.Model(self.mesh, self.values, mapping='Conductivity')
# Forward
forward = model.map.forward(self.values)
assert_allclose(forward, self.values)
# Backward
backward = model.map.backward(forward)
assert_allclose(backward, self.values)
# Derivative
gradient = 2*np.ones(model.property_x.shape)
derivative = gradient.copy()
model.map.derivative_chain(gradient, model.property_x)
assert_allclose(gradient, derivative)
def test_lgconductivity(self):
model = emg3d.Model(self.mesh, np.log10(self.values),
mapping='LgConductivity')
# Forward
forward = model.map.forward(self.values)
assert_allclose(forward, np.log10(self.values))
# Backward
backward = model.map.backward(forward)
assert_allclose(backward, self.values)
# Derivative
gradient = 2*np.ones(model.property_x.shape)
derivative = gradient.copy()
model.map.derivative_chain(gradient, model.property_x)
assert_allclose(gradient, derivative*10**model.property_x*np.log(10))
def test_lnconductivity(self):
model = emg3d.Model(self.mesh, np.log(self.values),
mapping='LnConductivity')
# Forward
forward = model.map.forward(self.values)
assert_allclose(forward, np.log(self.values))
# Backward
backward = model.map.backward(forward)
assert_allclose(backward, self.values)
# Derivative
gradient = 2*np.ones(model.property_x.shape)
derivative = gradient.copy()
model.map.derivative_chain(gradient, model.property_x)
assert_allclose(gradient, derivative*np.exp(model.property_x))
def test_resistivity(self):
model = emg3d.Model(self.mesh, 1/self.values, mapping='Resistivity')
# Forward
forward = model.map.forward(self.values)
assert_allclose(forward, 1/self.values)
# Backward
backward = model.map.backward(forward)
assert_allclose(backward, self.values)
# Derivative
gradient = 2*np.ones(model.property_x.shape)
derivative = gradient.copy()
model.map.derivative_chain(gradient, model.property_x)
assert_allclose(gradient, -derivative*(1/model.property_x)**2)
def test_lgresistivity(self):
model = emg3d.Model(self.mesh, np.log10(1/self.values),
mapping='LgResistivity')
# Forward
forward = model.map.forward(self.values)
assert_allclose(forward, np.log10(1/self.values))
# Backward
backward = model.map.backward(forward)
assert_allclose(backward, self.values)
# Derivative
gradient = 2*np.ones(model.property_x.shape)
derivative = gradient.copy()
model.map.derivative_chain(gradient, model.property_x)
assert_allclose(gradient, -derivative*10**-model.property_x*np.log(10))
def test_lnresistivity(self):
model = emg3d.Model(self.mesh, np.log(self.values),
mapping='LnResistivity')
# Forward
forward = model.map.forward(self.values)
assert_allclose(forward, np.log(1/self.values))
# Backward
backward = model.map.backward(forward)
assert_allclose(backward, self.values)
# Derivative
gradient = 2*np.ones(model.property_x.shape)
derivative = gradient.copy()
model.map.derivative_chain(gradient, model.property_x)
assert_allclose(gradient, -derivative*np.exp(-model.property_x))
# INTERPOLATIONS
class TestInterpolate:
# emg3d.interpolate is only a dispatcher function, calling other
# interpolation routines; there are lots of small dummy tests here, but in
# the end it is not up to emg3d.interpolate to check the accuracy of the
# actual interpolation; the only relevance is to check if it calls the
# right function.
def test_linear(self):
igrid = emg3d.TensorMesh(
[np.array([1, 1]), np.array([1, 1, 1]), np.array([1, 1, 1])],
[0, -1, -1])
ogrid = emg3d.TensorMesh(
[np.array([1]), np.array([1]), np.array([1])],
[0.5, 0, 0])
values = np.r_[9*[1.0, ], 9*[2.0, ]].reshape(igrid.shape_cells)
# Simple, linear example.
out = maps.interpolate(
grid=igrid, values=values, xi=ogrid, method='linear')
assert_allclose(out[0, 0, 0], 1.5)
# Provide ogrid.gridCC.
ogrid._gridCC = np.array([[0.5, 0.5, 0.5]])
out2 = maps.interpolate(igrid, values, ogrid, 'linear')
assert_allclose(out2[0, 0, 0], 1.5)
def test_linear_cubic(self):
# Check 'linear' and 'cubic' yield almost the same result for a well
# determined, very smoothly changing example.
# Fine grid.
fgrid = emg3d.TensorMesh(
[np.ones(2**6)*10, np.ones(2**5)*100, np.ones(2**4)*1000],
origin=np.array([-320., -1600, -8000]))
# Smoothly changing model for fine grid.
cmodel = np.arange(1, fgrid.n_cells+1).reshape(
fgrid.shape_cells, order='F')
# Coarser grid.
cgrid = emg3d.TensorMesh(
[np.ones(2**5)*15, np.ones(2**4)*150, np.ones(2**3)*1500],
origin=np.array([-240., -1200, -6000]))
# Interpolate linearly and cubic spline.
lin_model = maps.interpolate(fgrid, cmodel, cgrid, 'linear')
cub_model = maps.interpolate(fgrid, cmodel, cgrid, 'cubic')
# Compare
assert np.max(np.abs((lin_model-cub_model)/lin_model*100)) < 1.0
def test_nearest(self):
# Assert it is 'nearest' or extrapolate if points are outside.
tgrid = emg3d.TensorMesh(
[np.array([1, 1, 1, 1]), np.array([1, 1, 1, 1]),
np.array([1, 1, 1, 1])], origin=np.array([0., 0, 0]))
tmodel = np.ones(tgrid.n_cells).reshape(tgrid.shape_cells, order='F')
tmodel[:, 0, :] = 2
t2grid = emg3d.TensorMesh(
[np.array([1]), np.array([1]), np.array([1])],
origin=np.array([2, -1, 2]))
# Nearest with cubic.
out = maps.interpolate(tgrid, tmodel, t2grid, 'cubic')
assert_allclose(out, 2.)
# Same, but with log.
vlog = maps.interpolate(tgrid, tmodel, t2grid, 'cubic', log=True)
vlin = maps.interpolate(tgrid, np.log10(tmodel), t2grid, 'cubic')
assert_allclose(vlog, 10**vlin)
# Extrapolate with linear.
out = maps.interpolate(tgrid, tmodel, t2grid, 'linear')
assert_allclose(out, 3.)
# Same, but with log.
vlog = maps.interpolate(tgrid, tmodel, t2grid, 'linear', log=True)
vlin = maps.interpolate(tgrid, np.log10(tmodel), t2grid, 'linear')
assert_allclose(vlog, 10**vlin)
# Assert it is 0 if points are outside.
out = maps.interpolate(tgrid, tmodel, t2grid, 'cubic', False)
assert_allclose(out, 0.)
out = maps.interpolate(tgrid, tmodel, t2grid, 'linear', False)
assert_allclose(out, 0.)
def test_volume(self):
# == X == Simple 1D model
grid_in = emg3d.TensorMesh(
[np.ones(5)*10, np.array([1, ]), np.array([1, ])],
origin=np.array([0, 0, 0]))
grid_out = emg3d.TensorMesh(
[[10, 25, 10, 5, 2], [1, ], [1, ]], origin=(-5, 0, 0))
values_in = np.array([1., 5., 3, 7, 2])[:, None, None]
values_out = maps.interpolate(grid_in, values_in, grid_out, 'volume')
# Result 2nd cell: (5*1+10*5+10*3)/25=3.4
assert_allclose(values_out[:, 0, 0], np.array([1, 3.4, 7, 2, 2]))
# Check log:
vlogparam = maps.interpolate(
grid_in, values_in, grid_out, 'volume', log=True)
vlinloginp = maps.interpolate(
grid_in, np.log10(values_in), grid_out, 'volume')
assert_allclose(vlogparam, 10**vlinloginp)
# == Y == Reverse it
grid_out = emg3d.TensorMesh(
[np.array([1, ]), np.ones(5)*10, np.array([1, ])],
origin=np.array([0, 0, 0]))
grid_in = emg3d.TensorMesh(
[[1, ], [10, 25, 10, 5, 2], [1, ]], origin=[0, -5, 0])
values_in = np.array([1, 3.4, 7, 2, 2])[None, :, None]
values_out = maps.interpolate(grid_in, values_in, grid_out, 'volume')
# Result 1st cell: (5*1+5*3.4)/10=2.2
assert_allclose(values_out[0, :, 0], np.array([2.2, 3.4, 3.4, 7, 2]))
# == Z == Another 1D test
grid_in = emg3d.TensorMesh(
[np.array([1, ]), np.array([1, ]), np.ones(9)*10],
origin=np.array([0, 0, 0]))
grid_out = emg3d.TensorMesh(
[np.array([1, ]), np.array([1, ]), np.array([20, 41, 9, 30])],
origin=np.array([0, 0, 0]))
values_in = np.arange(1., 10)[None, None, :]
values_out = maps.interpolate(grid_in, values_in, grid_out, 'volume')
assert_allclose(values_out[0, 0, :],
np.array([1.5, 187/41, 7, 260/30]))
# == 3D ==
grid_in = emg3d.TensorMesh(
[[1, 1, 1], [10, 10, 10, 10, 10], [10, 2, 10]],
origin=(0, 0, 0))
grid_out = emg3d.TensorMesh(
[[1, 2, ], [10, 25, 10, 5, 2], [4, 4, 4]], origin=[0, -5, 6])
create = np.array([[1, 2., 1]])
create = np.array([create, 2*create, create])
values_in = create*np.array([1., 5., 3, 7, 2])[None, :, None]
values_out = maps.interpolate(grid_in, values_in, grid_out, 'volume')
check = np.array([[1, 1.5, 1], [1.5, 2.25, 1.5]])[:, None, :]
check = check*np.array([1, 3.4, 7, 2, 2])[None, :, None]
assert_allclose(values_out, check)
# == If the extent is the same, volume*values must remain constant. ==
grid_in = emg3d.TensorMesh(
[np.array([1, 1, 1]), np.array([10, 10, 10, 10, 10]),
np.array([10, 2, 10])], origin=np.array([0, 0, 0]))
grid_out = emg3d.TensorMesh(
[np.array([1, 2, ]), np.array([5, 25, 10, 5, 5]),
np.array([9, 4, 9])], origin=np.array([0, 0, 0]))
create = np.array([[1, 2., 1]])
create = np.array([create, 2*create, create])
values_in = create*np.array([1., 5., 3, 7, 2])[None, :, None]
vol_in = np.outer(np.outer(
grid_in.h[0], grid_in.h[1]).ravel('F'), grid_in.h[2])
vol_in = vol_in.ravel('F').reshape(grid_in.shape_cells, order='F')
values_out = maps.interpolate(grid_in, values_in, grid_out, 'volume')
vol_out = np.outer(np.outer(grid_out.h[0], grid_out.h[1]).ravel('F'),
grid_out.h[2])
vol_out = vol_out.ravel('F').reshape(grid_out.shape_cells, order='F')
assert_allclose(np.sum(values_out*vol_out), np.sum(values_in*vol_in))
def test_all_run(self):
hx = [1, 1, 1, 2, 4, 8]
grid = emg3d.TensorMesh([hx, hx, hx], (0, 0, 0))
grid2 = emg3d.TensorMesh([[2, 4, 5], [1, 1], [4, 5]], (0, 1, 0))
field = emg3d.Field(grid)
field.fx = np.arange(1, field.fx.size+1).reshape(
field.fx.shape, order='F')
model = emg3d.Model(grid, 1, 2, 3)
model.property_x[1, :, :] = 2
model.property_x[2, :, :] = 3
model.property_x[3, :, :] = 4
model.property_x[4, :, :] = np.arange(1, 37).reshape((6, 6), order='F')
model.property_x[5, :, :] = 200
xi = (1, [8, 7, 6, 8, 9], [1])
# == NEAREST ==
# property - grid
_ = maps.interpolate(grid, model.property_x, grid2, method='nearest')
# field - grid
_ = maps.interpolate(grid, field.fx, grid2, method='nearest')
# property - points
_ = maps.interpolate(grid, model.property_x, xi, method='nearest')
# field - points
_ = maps.interpolate(grid, field.fx, xi, method='nearest')
# == LINEAR ==
# property - grid
_ = maps.interpolate(grid, model.property_x, grid2, method='linear')
# field - grid
_ = maps.interpolate(grid, field.fx, grid2, method='linear')
# property - points
_ = maps.interpolate(grid, model.property_x, xi, method='linear')
# field - points
_ = maps.interpolate(grid, field.fx, xi, method='linear')
# == CUBIC ==
# property - grid
_ = maps.interpolate(grid, model.property_x, grid2, method='cubic')
# field - grid
_ = maps.interpolate(grid, field.fx, grid2, method='cubic')
# property - points
_ = maps.interpolate(grid, model.property_x, xi, method='cubic')
# field - points
_ = maps.interpolate(grid, field.fx, xi, method='cubic')
# == VOLUME ==
# property - grid
_ = maps.interpolate(grid, model.property_x, grid2, method='volume')
# field - grid
with pytest.raises(ValueError, match="for cell-centered properties"):
maps.interpolate(grid, field.fx, grid2, method='volume')
# property - points
with pytest.raises(ValueError, match="only implemented for TensorM"):
maps.interpolate(grid, model.property_x, xi, method='volume')
# field - points
with pytest.raises(ValueError, match="only implemented for TensorM"):
maps.interpolate(grid, field.fx, xi, method='volume')
def test_2d_arrays(self):
hx = [1, 1, 1, 2, 4, 8]
grid = emg3d.TensorMesh([hx, hx, hx], (0, 0, 0))
field = emg3d.Field(grid)
field.fx = np.arange(1, field.fx.size+1).reshape(
field.fx.shape, order='F')
model = emg3d.Model(grid, 1, 2, 3)
model.property_x[1, :, :] = 2
model.property_x[2, :, :] = 3
model.property_x[3, :, :] = 4
model.property_x[4, :, :] = np.arange(1, 37).reshape((6, 6), order='F')
model.property_x[5, :, :] = 200
xi = (np.ones((3, 2)), 5, np.ones((3, 2)))
# == NEAREST ==
# property - points
_ = maps.interpolate(grid, model.property_x, xi, method='nearest')
# field - points
_ = maps.interpolate(grid, field.fx, xi, method='nearest')
# == LINEAR ==
# property - points
_ = maps.interpolate(grid, model.property_x, xi, method='linear')
# field - points
_ = maps.interpolate(grid, field.fx, xi, method='linear')
# == CUBIC ==
# property - points
_ = maps.interpolate(grid, model.property_x, xi, method='cubic')
# field - points
_ = maps.interpolate(grid, field.fx, xi, method='cubic')
def test_points_from_grids():
hx = [1, 1, 1, 2, 4, 8]
grid = emg3d.TensorMesh([hx, hx, hx], (0, 0, 0))
grid2 = emg3d.TensorMesh([[2, 4, 5], [1, 1], [4, 5]], (0, 1, 0))
xi_tuple = (1, [8, 7, 6, 8, 9], [1])
xi_array = np.arange(18).reshape(-1, 3)
v_prop = np.ones(grid.shape_cells)
v_field_e = np.ones(grid.shape_edges_x)
v_field_f = np.ones(grid.shape_faces_x)
# linear - values = prop - xi = grid
out = maps._points_from_grids(grid, v_prop, grid2, 'linear')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][0], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1][0, :], [1., 1.5, 2])
assert out[2] == (3, 2, 2)
# nearest - values = field - xi = grid
out = maps._points_from_grids(grid, v_field_e, grid2, 'nearest')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][1], [0., 1., 2., 3., 5., 9., 17.])
assert_allclose(out[1][1, :], [4., 1., 0.])
assert out[2] == (3, 3, 3)
# nearest - values = field - xi = grid
out = maps._points_from_grids(grid, v_field_f, grid2, 'nearest')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][1], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1][1, :], [2., 1.5, 2.])
assert out[2] == (4, 2, 2)
# cubic - values = prop - xi = tuple
out = maps._points_from_grids(grid, v_prop, xi_tuple, 'cubic')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][2], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1][2, :], [1., 6., 1.])
assert out[2] == (5, )
# linear - values = field - xi = tuple
out = maps._points_from_grids(grid, v_field_e, xi_tuple, 'linear')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][0], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1][-1, :], [1., 9., 1.])
assert out[2] == (5, )
# nearest - values = prop - xi = ndarray
out = maps._points_from_grids(grid, v_prop, xi_array, 'nearest')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][0], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1], xi_array)
assert out[2] == (6, )
# cubic - values = field - xi = ndarray
out = maps._points_from_grids(grid, v_field_e, xi_array, 'cubic')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][0], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1], xi_array)
assert out[2] == (6, )
# cubic - values = 1Darr - xi = grid - FAILS
with pytest.raises(ValueError, match='must be a 3D ndarray'):
maps._points_from_grids(grid, v_field_e.ravel(), grid2, 'cubic')
# volume - values = prop - xi = grid
out = maps._points_from_grids(grid, v_prop, grid2, 'volume')
assert isinstance(out[1], tuple)
assert_allclose(out[0][0], [0., 1, 2, 3, 5, 9, 17])
assert_allclose(out[1][0], [0., 2, 6, 11])
assert out[2] == (3, 2, 2)
# volume - values = field - xi = grid - FAILS
with pytest.raises(ValueError, match='only implemented for cell-centered'):
maps._points_from_grids(grid, v_field_e, grid2, 'volume')
# volume - values = prop - xi = tuple - FAILS
with pytest.raises(ValueError, match='only implemented for TensorMesh'):
maps._points_from_grids(grid, v_prop, xi_tuple, 'volume')
# tuple can contain any dimension; it will work (undocumented).
shape = (3, 2, 4, 5)
coords = np.arange(np.prod(shape)).reshape(shape, order='F')
xi_tuple2 = (1, coords, 10)
out = maps._points_from_grids(grid, v_field_e, xi_tuple2, 'nearest')
assert isinstance(out[1], np.ndarray)
assert_allclose(out[0][0], [0.5, 1.5, 2.5, 4., 7., 13.])
assert_allclose(out[1][-1, :], [1., 119, 10])
assert out[2] == shape
def test_interp_spline_3d():
x = np.array([1., 2, 4, 5])
pts = (x, x, x)
p1 = np.array([[3, 1, 3], [3, 3, 3], [3, 4, 3]])
v1 = np.ones((4, 4, 4), order='F')
v1[1, :, :] = 4.0
v1[2, :, :] = 16.0
v1[3, :, :] = 25.0
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1, order=0)
assert_allclose(out, 16)
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1, order=1)
assert_allclose(out, 10)
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1, order=2)
assert_allclose(out, 9.4)
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1)
assert_allclose(out, 9.25)
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1, order=4)
assert_allclose(out, 9.117647)
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1, order=5)
assert_allclose(out, 9.0625)
p2 = np.array([[1, 3, 3], [3, 3, 3], [4, 3, 3]])
v2 = np.rollaxis(v1, 1)
out = maps.interp_spline_3d(points=pts, values=v2, xi=p2)
assert_allclose(out, 9.25)
p3 = np.array([[3, 3, 1], [3, 3, 3], [3, 3, 4]])
v3 = np.rollaxis(v2, 1)
v3 = v3 + 1j*v3
out = maps.interp_spline_3d(points=pts, values=v3, xi=p3)
assert_allclose(out, 9.25 + 9.25j)
p1 = np.array([[3, 100, 3]])
v1 = np.ones((4, 4, 4), order='F')
v1[1, :, :] = 4.0
v1[2, :, :] = 16.0
v1[3, :, :] = 25.0
out = maps.interp_spline_3d(points=pts, values=v1, xi=p1, cval=999)
assert_allclose(out, 999)
@pytest.mark.parametrize("njit", [True, False])
def test_interp_volume_average(njit):
if njit:
interp_volume_average = maps.interp_volume_average
else:
interp_volume_average = maps.interp_volume_average.py_func
# Comparison to alt_version.
grid_in = emg3d.TensorMesh(
[np.ones(30), np.ones(20)*5, np.ones(10)*10],
origin=np.array([0, 0, 0]))
grid_out = emg3d.TensorMesh(
[np.arange(7)+1, np.arange(13)+1, np.arange(13)+1],
origin=np.array([0.5, 3.33, 5]))
values = np.arange(grid_in.n_cells, dtype=np.float64).reshape(
grid_in.shape_cells, order='F')
points = (grid_in.nodes_x, grid_in.nodes_y, grid_in.nodes_z)
new_points = (grid_out.nodes_x, grid_out.nodes_y, grid_out.nodes_z)
# Compute volume.
vol = np.outer(np.outer(
grid_out.h[0], grid_out.h[1]).ravel('F'), grid_out.h[2])
vol = vol.ravel('F').reshape(grid_out.shape_cells, order='F')
# New solution.
new_values = np.zeros(grid_out.shape_cells, dtype=values.dtype)
interp_volume_average(*points, values, *new_points, new_values, vol)
# Old solution.
new_values_alt = np.zeros(grid_out.shape_cells, dtype=values.dtype)
alternatives.alt_volume_average(
*points, values, *new_points, new_values_alt)
assert_allclose(new_values, new_values_alt)
@pytest.mark.parametrize("njit", [True, False])
def test_volume_average_weights(njit):
if njit:
volume_avg_weights = maps._volume_average_weights
else:
volume_avg_weights = maps._volume_average_weights.py_func
grid_in = emg3d.TensorMesh(
[np.ones(11), np.ones(10)*2, np.ones(3)*10],
origin=np.array([0, 0, 0]))
grid_out = emg3d.TensorMesh(
[np.arange(4)+1, np.arange(5)+1, np.arange(6)+1],
origin=np.array([0.5, 3.33, 5]))
wx, ix_in, ix_out = volume_avg_weights(grid_in.nodes_x, grid_out.nodes_x)
assert_allclose(wx,
[0.5, 0.5, 0.5, 1, 0.5, 0.5, 1, 1, 0.5, 0.5, 1, 1, 1, 0.5])
assert_allclose(ix_in, [0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 10])
assert_allclose(ix_out, [0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3])
wy, iy_in, iy_out = volume_avg_weights(grid_in.nodes_y, grid_out.nodes_y)
assert_allclose(wy, [0.67, 0.33, 1.67, 0.33, 1.67, 1.33, 0.67, 2.,
1.33, 0.67, 2, 2, 0.33])
assert_allclose(iy_in, [1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9])
assert_allclose(iy_out, [0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4])
wz, iz_in, iz_out = volume_avg_weights(grid_in.nodes_z, grid_out.nodes_z)
assert_allclose(wz, [1, 2, 2, 1, 4, 5, 6])
assert_allclose(iz_in, [0, 0, 0, 1, 1, 1, 2])
assert_allclose(iz_out, [0, 1, 2, 2, 3, 4, 5])
w, inp, out = volume_avg_weights(x_i=np.array([0., 5, 7, 10]),
x_o=np.array([-1., 1, 4, 6, 7, 11]))
assert_allclose(w, [1, 1, 3, 1, 1, 1, 3, 1])
assert_allclose(inp, [0, 0, 0, 0, 1, 1, 2, 2])
assert_allclose(out, [0, 0, 1, 2, 2, 3, 4, 4])
@pytest.mark.parametrize("njit", [True, False])
def test_interp_edges_to_vol_averages(njit):
if njit:
edges_to_vol_averages = maps.interp_edges_to_vol_averages
else:
edges_to_vol_averages = maps.interp_edges_to_vol_averages.py_func
# To test it, we create a mesh 2x2x2 cells,
# where all hx/hy/hz have distinct lengths.
x0, x1 = 2, 3
y0, y1 = 4, 5
z0, z1 = 6, 7
grid = emg3d.TensorMesh([[x0, x1], [y0, y1], [z0, z1]], [0, 0, 0])
field = emg3d.Field(grid)
# Only three edges have a value, one in each direction.
fx = 1.23+9.87j
fy = 2.68-5.48j
fz = 1.57+7.63j
field.fx[0, 1, 1] = fx
field.fy[1, 1, 1] = fy
field.fz[1, 1, 0] = fz
# Initiate gradient.
grad_x = np.zeros(grid.shape_cells, order='F', dtype=complex)
grad_y = np.zeros(grid.shape_cells, order='F', dtype=complex)
grad_z = np.zeros(grid.shape_cells, order='F', dtype=complex)
cell_volumes = grid.cell_volumes.reshape(grid.shape_cells, order='F')
# Call function.
edges_to_vol_averages(ex=field.fx, ey=field.fy, ez=field.fz,
volumes=cell_volumes,
ox=grad_x, oy=grad_y, oz=grad_z)
grad = grad_x + grad_y + grad_z
# Check all eight cells explicitly by
# - computing the volume of the cell;
# - multiplying with the present fields in that cell.
assert_allclose(x0*y0*z0*(fx+fz)/4, grad[0, 0, 0])
assert_allclose(x1*y0*z0*fz/4, grad[1, 0, 0])
assert_allclose(x0*y1*z0*(fx+fy+fz)/4, grad[0, 1, 0])
assert_allclose(x1*y1*z0*(fy+fz)/4, grad[1, 1, 0])
assert_allclose(x0*y0*z1*fx/4, grad[0, 0, 1])
assert_allclose(0j, grad[1, 0, 1])
|
assert_allclose(x0*y1*z1*(fx+fy)/4, grad[0, 1, 1])
|
numpy.testing.assert_allclose
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 10:13:13 2018
@author: thieunv
Link : http://sci-hub.tw/10.1109/iccat.2013.6521977
https://en.wikipedia.org/wiki/Laguerre_polynomials
"""
import numpy as np
def itself(x):
return x
def elu(x, alpha=1):
return np.where(x < 0, alpha * (np.exp(x) - 1), x)
def relu(x):
return np.maximum(0, x)
def tanh(x):
return np.tanh(x)
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def derivative_self(x):
return 1
def derivative_elu(x, alpha=1):
return np.where(x < 0, x + alpha, 1)
def derivative_relu(x):
return np.where(x < 0, 0, 1)
def derivative_tanh(x):
return 1 - np.power(x, 2)
def derivative_sigmoid(x):
return np.multiply(x, 1-x)
def expand_chebyshev(x):
x1 = x
x2 = 2 * np.power(x, 2) - 1
x3 = 4 * np.power(x, 3) - 3 * x
x4 = 8 *
|
np.power(x, 4)
|
numpy.power
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import re
from .linalg import dot, matmul, transpose
from .manipulation import squeeze, unsqueeze, reshape
from .math import multiply
from .math import sum as paddle_sum
from ..fluid.framework import _in_legacy_dygraph
from paddle import _C_ops
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
import collections
import string
import opt_einsum
from paddle.common_ops_import import dygraph_only
__all__ = []
def parse_op_labels(labelstr, operand):
'''
Parse labels for an input operand.
Parameters
----------
labelstr:
the input label string
operand:
the input operand
Returns
-------
the input operand's full label string, in which all anonymous dimensions are
labeled with dots.
'''
# Sanity checks
for c in labelstr.replace('.', ''):
assert c.isalpha(), (
f"Invalid equation: {c} is not a valid label, which should be letters."
)
assert labelstr.replace('...', '', 1).find('.') == -1, (
f"Invalid equation: `.` is found outside of an ellipsis.")
# Check shape. Note, in Paddle a tensor rank is always nonzero
ndims = len(operand.shape)
assert ndims > 0
full_labelstr = labelstr.replace('...', '.' * (ndims - len(labelstr) + 3))
assert len(full_labelstr) == ndims, (
f"Invalid equation: the label string '{labelstr}' misses dimensions.")
return full_labelstr
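# Example (illustrative, not from the original docstring): for an operand with ndim == 4 and
# labelstr == 'ij...', parse_op_labels returns 'ij..' -- the ellipsis expands into one '.' per anonymous dimension.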
def parse_labels(labelstr, operands):
'''
Parse label strings for all input operands.
Parameters
----------
labelstr:
The equation's label string
operands:
The input operands
Returns
-------
list of full label strings for all input operands
'''
nop_labels = labelstr.split(',')
assert len(nop_labels) == len(operands), (
f"Invalid equation: the number of operands is {len(operands)}, "
f"but found {len(nop_labels)} segments in the label equation.")
return list(map(parse_op_labels, nop_labels, operands))
def validate_rhs(rhs, input_labels, n_bcast_dims):
'''
Check whether the equation's right hand side is valid
'''
# Sanity check.
if n_bcast_dims > 0:
assert '...' in rhs, (
f"Invalid equation: missing ellipsis in output labels.")
rhs = rhs.replace('...', '')
rhs_set = set(rhs)
# Hidden assumption: available labels don't include '.'
assert '.' not in input_labels
# Verify that output labels all come from the set of input labels
non_input_labels = rhs_set.difference(input_labels)
assert not non_input_labels, (
f"Invalid equation: "
f"output label {sorted(non_input_labels)} not used by any input.")
# Verify that output labels are not duplicate
assert len(rhs) == len(rhs_set), (
f"Invalid equation: duplicate output labels are found.")
def build_view(in_labels, out_labels):
'''
Build an inverse map of dimension indices. Three conditions must hold for
the result to be meaningful.
First, no duplicate letter labels in each label string.
Second, the number of dots in out_labels >= that in in_labels.
Third, dots are contiguous in each label string.
Parameters
----------
in_labels:
The dimension labels to map to
out_labels:
The dimension labels to map from
Returns
-------
The inverse map from out_labels to in_labels. The length of the inverse map equals that of
out_labels. -1 is filled if there's no matching input dimension for a specific label.
Examples
--------
in_labels = 'ij..', out_labels = '..ji'
inv_map = [2, 3, 1, 0]
in_labels = 'ij..', out_labels = '..kji'
inv_map = [2, 3, -1, 1, 0]
'''
inv_map = [-1] * len(out_labels)
# First build the broadcast dimension mapping
# Find the broadcast index range in out_labels
r = re.search(r'\.+', out_labels)
if r:
start, end = r.start(), r.end()
s = re.search(r'\.+', in_labels)
# fill the broadcast dimension indices from right to left.
if s:
for ax, dim in zip(
range(start, end)[::-1], range(s.start(), s.end())[::-1]):
inv_map[ax] = dim
# Now work on non-broadcast dimensions
if r:
it = itertools.chain(range(start), range(end, len(out_labels)))
else:
it = iter(range(len(out_labels)))
for i in it:
inv_map[i] = in_labels.find(out_labels[i])
return inv_map
def build_global_view(nop_labels, rhs, n_bcast_dims):
'''
Build the global view, which is a layout of all dimension labels
plus an index table that maps from the layout to the dimensions
in each operand. In the global view, the dimensions are arranged
such that output ones are put on the left and contraction ones
are put on the right.
Parameters
----------
nop_labels:
The input full label strings of all input operands
rhs:
The equation right hand side
n_bcast_dims:
The maximum number of broadcast dimensions
Returns
-------
A tuple of g_labels, g_view, g_nout, g_count
g_labels:
the layout of all labels in a string
g_view:
the index table
g_nout:
the number of output dimensions
g_count:
the counter array for dimension contractions
'''
# Put all labels in alphabetical order
concat = sorted(''.join(nop_labels).replace('.', ''))
labels, count = [], []
for a, b in zip(['.'] + concat, concat):
if a != b:
labels.append(b)
count.append(1)
else:
count[-1] += 1
if rhs is not None:
validate_rhs(rhs, labels, n_bcast_dims)
g_labels_out = rhs.replace('...', '.' * n_bcast_dims)
else:
g_labels_out = '.' * n_bcast_dims + ''.join(
l for l, c in zip(labels, count) if c == 1)
for i in range(len(count))[::-1]:
if labels[i] in g_labels_out:
labels.pop(i)
count.pop(i)
g_labels_sum = ''.join(labels)
g_labels = g_labels_out + g_labels_sum
g_view = list(map(lambda i: build_view(i, g_labels), nop_labels))
g_nout = len(g_labels_out)
g_count = count
return g_labels, g_view, g_nout, g_count
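# Worked example (illustrative trace, assuming the equation 'ij,jk->ik' with no broadcast dims):
#   build_global_view(['ij', 'jk'], 'ik', 0) yields
#   g_labels = 'ikj', g_view = [[0, -1, 1], [-1, 1, 0]], g_nout = 2, g_count = [2]
# i.e. 'j' is the single contraction label and it appears twice across the operands.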
def build_global_shape(g_view, g_labels, op_shapes):
'''
The global shape is the shape of all dimensions rearranged and broadcast
to the global view. It's a reference data structure for einsum planning.
Parameters
----------
g_view:
the global view
op_shapes:
the shapes of the all operands
Returns
-------
g_shape:
the global shape vector
g_masks:
list of shape masks for each operand. A dimension's shape mask is a boolean
indicating whether its size > 1, i.e., whether it is not squeezable
'''
view_shapes = []
g_masks = []
for view, op_shape in zip(g_view, op_shapes):
view_shapes.append([op_shape[dim] if dim > -1 else 1 for dim in view])
g_shape = [set(sizes_per_ax) - {1} for sizes_per_ax in zip(*view_shapes)]
non_bcastable = [ax for ax, sizes in enumerate(g_shape) if len(sizes) > 1]
assert not non_bcastable, (
f"Invalid operands: label {g_labels[non_bcastable[0]]} "
f"corresponds to non-broadcastable dimensions.")
g_shape = [sizes.pop() if len(sizes) > 0 else 1 for sizes in g_shape]
g_masks = [[s > 1 or s == -1 for s in view_shape]
for view_shape in view_shapes]
return g_shape, g_masks
def has_duplicated_labels(labels):
'''
Returns True if there is any duplicate label.
'''
labels = labels.replace('.', '')
return len(labels) > len(set(labels))
def diagonalize(labels, operand):
'''
Merges dimensions with duplicate labels.
For those dimensions with duplicate labels, merge them into one dimension
which represents the diagonal elements. This requires the dimensions with
duplicate labels are equal sized.
Examples
--------
'ijj...i' would be merged into 'ij...'
'''
assert not has_duplicated_labels(labels), (
f'Duplicate labels are not supported.')
return labels, operand
def plan_reduce(plan, op, reduce_dims, keepdim):
'''
Add reduce to the plan
'''
varname = f'op{op}'
f = lambda var, dims: paddle_sum(var, dims, keepdim=keepdim)
step = f, [varname], varname, reduce_dims
plan.add_step(step)
def plan_scalar_prod(plan, op1, op2):
varnames = [f'op{op1}', f'op{op2}']
f = lambda var1, var2: paddle_sum(var1) * var2
# f = lambda var1, var2: var1 * var2
step = f, varnames, varnames[1]
plan.add_step(step)
def plan_matmul(plan, g_view, op1, op2, g_supports, g_shape, I, J1, J2, K):
'''
plan matmul
'''
# Transpose and re-shape op1 and op2 in I, J1, K and I, J2, K
# Then apply matmul(x, y, transpose_x=False, transpose_y=True)
var1, var2 = f'op{op1}', f'op{op2}'
op1_view, op2_view = [g_view[op] for op in (op1, op2)]
I1 = [idx for idx in I if op1_view[idx] >= 0]
I2 = [idx for idx in I if op2_view[idx] >= 0]
op1_view =
|
np.array(op1_view)
|
numpy.array
|
# Copyright 2021 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for classic control environments."""
from typing import Any, Tuple
import gym
import numpy as np
from absl.testing import absltest
from envpool.classic_control import (
AcrobotEnvSpec,
AcrobotGymEnvPool,
CartPoleEnvSpec,
CartPoleGymEnvPool,
MountainCarContinuousEnvSpec,
MountainCarContinuousGymEnvPool,
MountainCarEnvSpec,
MountainCarGymEnvPool,
PendulumEnvSpec,
PendulumGymEnvPool,
)
class _ClassicControlEnvPoolTest(absltest.TestCase):
def run_space_check(self, spec_cls: Any) -> None:
"""Check if envpool.observation_space == gym.make().observation_space."""
# TODO(jiayi): wait for #27
def run_deterministic_check(
self,
spec_cls: Any,
envpool_cls: Any,
obs_range: Tuple[np.ndarray, np.ndarray],
**kwargs: Any,
) -> None:
num_envs = 4
env0 = envpool_cls(
spec_cls(spec_cls.gen_config(num_envs=num_envs, seed=0, **kwargs))
)
env1 = envpool_cls(
spec_cls(spec_cls.gen_config(num_envs=num_envs, seed=0, **kwargs))
)
env2 = envpool_cls(
spec_cls(spec_cls.gen_config(num_envs=num_envs, seed=1, **kwargs))
)
act_space = env0.action_space
eps = np.finfo(np.float32).eps
obs_min, obs_max = obs_range[0] - eps, obs_range[1] + eps
for _ in range(5000):
action = np.array([act_space.sample() for _ in range(num_envs)])
obs0 = env0.step(action)[0]
obs1 = env1.step(action)[0]
obs2 = env2.step(action)[0]
np.testing.assert_allclose(obs0, obs1)
self.assertFalse(np.allclose(obs0, obs2))
self.assertTrue(np.all(obs_min <= obs0), obs0)
self.assertTrue(np.all(obs_min <= obs2), obs2)
self.assertTrue(np.all(obs0 <= obs_max), obs0)
self.assertTrue(np.all(obs2 <= obs_max), obs2)
def run_align_check(self, env0: gym.Env, env1: Any, reset_fn: Any) -> None:
for _ in range(10):
reset_fn(env0, env1)
d0 = False
while not d0:
a = env0.action_space.sample()
o0, r0, d0, _ = env0.step(a)
o1, r1, d1, _ = env1.step(np.array([a]), np.array([0]))
np.testing.assert_allclose(o0, o1[0], atol=1e-6)
np.testing.assert_allclose(r0, r1[0])
np.testing.assert_allclose(d0, d1[0])
def test_cartpole(self) -> None:
fmax = np.finfo(np.float32).max
obs_max =
|
np.array([4.8, fmax, np.pi / 7.5, fmax])
|
numpy.array
|
from collections import defaultdict, Counter
from itertools import product, permutations
from glob import glob
import json
import os
from pathlib import Path
import pickle
import sqlite3
import sys
import time
import matplotlib as mpl
from matplotlib import colors
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from multiprocessing import Pool
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
from palettable.colorbrewer.diverging import PuOr_5, RdYlGn_6, PuOr_10, RdBu_10
from palettable.scientific.diverging import Cork_10
from scipy.stats import linregress, pearsonr
import seaborn as sns
import svgutils.compose as sc
from asym_io import PATH_BASE, PATH_ASYM
import asym_utils as utils
PATH_FIG = PATH_ASYM.joinpath("Figures")
PATH_FIG_DATA = PATH_FIG.joinpath("Data")
####################################################################
### FIG 1
def fig1(pdb):
fig = plt.figure(figsize=(11, 10))
gs = GridSpec(5,48, wspace=1.0, hspace=0.0, height_ratios=[1,0.3,1,0.8,1.8])
ax = [fig.add_subplot(gs[i*2,:14]) for i in [0,1]] + \
[fig.add_subplot(gs[i*2,17:31]) for i in [0,1]] + \
[fig.add_subplot(gs[i*2,34:]) for i in [0,1]] + \
[fig.add_subplot(gs[3:,i:i+j]) for i,j in zip([3,27],[15,15])]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
ttls = ['Full sample', 'Eukaryotes', 'Prokaryotes']
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
cat = 'HS.D'
ss_stats = [pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_ss_dist_boot{s}.pickle"), 'rb')) for s in ['', '_euk', '_prok']]
X = np.arange(50)
for i, data in enumerate(ss_stats):
for j, s in enumerate(cat):
ax[i*2].plot(X, data[0][s]['mean']/2, '-', c=col[j], label=f"{s} N")
ax[i*2].fill_between(X, data[0][s]['hi']/2, data[0][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.5)
ax[i*2].plot(X, data[1][s]['mean']/2, '--', c=col[j], label=f"{s} N")
ax[i*2].fill_between(X, data[1][s]['hi']/2, data[1][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.2)
print(i, s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][:20]), 2), round(np.mean(data[2][s]['mean'][20:40]), 2))
ax[i*2+1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
ax[i*2+1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
# Y = [[y*100-100 if y >= 1 else -((1/y)*100-100) for y in data[2][s][p]] for p in ['mean', 'lo', 'hi']]
# ax[i*2+1].plot(X, Y[0], '-', c=col[j], label=lbls[j])
# ax[i*2+1].fill_between(X, Y[1], Y[2], color="grey", label=f"{s} N", alpha=0.2)
for i in range(3):
ax[i*2].set_title(ttls[i])
ax[i*2].set_ylim(0, 0.6)
# ax[i*2+1].set_ylim(-100, 130)
ax[i*2+1].set_ylim(-1, 1.3)
ax[i*2+1].plot([0]*50, '-', c='k')
# ax[i*2+1].set_yticks(np.arange(-1,1.5,0.5))
# ax[0].plot(X, (X+1)**-0.78, '-k')
# ax[2].plot(X, (X+1)**-0.5, '-k')
# ax[2].plot(X, (X+1)**-0.6, '-k')
# ax[4].plot(X, (X+1)**-1.0, '-k')
ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
[Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
ax[4].legend(handles=handles, bbox_to_anchor=(0.20, 1.43), frameon=False,
ncol=6, columnspacing=1.5, handlelength=2.0)
ax[0].set_ylabel('Secondary structure\nprobability')
for i in range(6):
ax[i].set_xticks(range(0, 60, 10))
ax[i].set_xlabel('Sequence distance from ends')
fig1b(pdb, ax=ax[6:], fig=fig)
fs = 14
for i, b in zip([0,1,6,7], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.10, b, transform=ax[i].transAxes, fontsize=fs)
for a in ax:
a.tick_params(axis='both', which='major', direction='in')
fig.savefig(PATH_FIG.joinpath("fig1.pdf"), bbox_inches='tight')
def ss_by_seq(pdb, cat='SH.D'):
countN, countC = utils.pdb_end_stats_disorder_N_C(pdb, N=50, s1='SEQ_PDB2', s2='SS_PDB2')
base = np.zeros(len(countN['S']), dtype=float)
Yt = np.array([[sum(p.values()) for p in countN[s]] for s in cat]).sum(axis=0)
X = np.arange(base.size)
out_dict = {}
for i, s in enumerate(cat):
YN = np.array([sum(p.values()) for p in countN[s]])
YC = np.array([sum(p.values()) for p in countC[s]])
out_dict[s] = {'N':YN/Yt, 'C':YC/Yt, 'asym':YN/YC}
return out_dict
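# Example usage (hypothetical names, assuming the same dataframe passed to fig1):
#   dist = ss_by_seq(pdb, cat='HS.D')
#   helix_log_asym = np.log2(dist['H']['asym'])  # N/C log-ratio over the first 50 positions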
def fig1b(df, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
if isinstance(ax, str):
fig, ax = plt.subplots(1,3, figsize=(15,4))
fig.subplots_adjust(wspace=0.5)
q = np.arange(w,1+w,w)
quant1 = [df[X].min()] + list(df[X].quantile(q).values)
quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
lbls = ['Helix', 'Sheet']
cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
vmax = 0.03
vmin = -vmax
count = []
for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
mean = []
for l1, h1 in zip(quant1[:-1], quant1[1:]):
for l2, h2 in zip(quant2[:-1], quant2[1:]):
samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
mean.append(samp.mean())
# left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
# right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
# tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
# mean.append((right - left)/tot)
if not i:
count.append(len(samp))
# print(len(samp))
mean = np.array(mean).reshape(q.size, q.size)
count = np.array(count).reshape(q.size, q.size)
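        # Cells supported by fewer than 20 samples are set to zero, i.e. the neutral
        # midpoint of the diverging colormap, to suppress noisy averages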
mean[count<20] = 0
cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
bounds = np.linspace(vmin, vmax, 3)
im = ax[i].imshow(mean.T, cmap=cmap, vmin=vmin, vmax=vmax)
cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[i], fraction=0.046, pad=0.04)
cbar.set_label(cb_lbl[i], labelpad=-5)
ax[i].set_title(lbls[i])
ax[i].set_xticks(
|
np.arange(q.size+1)
|
numpy.arange
|
import cv2
import numpy as np
from copy import copy
from ...util import get_logger
from ..object_detection.object_detector import Detector
from ..object_detection.body_detector import BodyDetector
from ..instance_segmentation.instance_segmentor import InstanceSegmentor
logger = get_logger(__name__)
class HumanPoseDetector(Detector):
"""
https://github.com/opencv/opencv/blob/master/samples/dnn/openpose.py
"""
def __init__(self, device=None,
model_fp=None, model_dir=None,
cpu_extension=None, path_config=None):
self.task = 'estimate_humanpose'
super().__init__(self.task, device,
model_fp, model_dir,
cpu_extension, path_config)
self.thr_point = 0.1
self.detector_body = BodyDetector()
self.segmentor = InstanceSegmentor()
self.BODY_PARTS = {"Nose": 0, "Neck": 1, "RShoulder": 2, "RElbow": 3, "RWrist": 4,
"LShoulder": 5, "LElbow": 6, "LWrist": 7, "RHip": 8, "RKnee": 9,
"RAnkle": 10, "LHip": 11, "LKnee": 12, "LAnkle": 13, "REye": 14,
"LEye": 15, "REar": 16, "LEar": 17}
self.POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"], ["RShoulder", "RElbow"],
["RElbow", "RWrist"], ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"], ["Neck", "LHip"],
["LHip", "LKnee"], ["LKnee", "LAnkle"], ["Neck", "Nose"], ["Nose", "REye"],
["REye", "REar"], ["Nose", "LEye"], ["LEye", "LEar"]]
self.POSE_PARTS_FLATTEN = ['Nose_x', 'Nose_y', 'Neck_x', 'Neck_y', 'RShoulder_x', 'RShoulder_y',
'RElbow_x', 'RElbow_y', 'RWrist_x', 'RWrist_y', 'LShoulder_x',
'LShoulder_y', 'LElbow_x', 'LElbow_y', 'LWrist_x', 'LWrist_y', 'RHip_x',
'RHip_y', 'RKnee_x', 'RKnee_y', 'RAnkle_x', 'RAnkle_y', 'LHip_x',
'LHip_y', 'LKnee_x', 'LKnee_y', 'LAnkle_x', 'LAnkle_y', 'REye_x',
'REye_y', 'LEye_x', 'LEye_y', 'REar_x', 'REar_y', 'LEar_x', 'LEar_y']
def _get_heatmaps(self, frame):
result = self.get_result(frame)
# pairwise_relations = result['Mconv7_stage2_L1']
heatmaps = result['Mconv7_stage2_L2']
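        # 'Mconv7_stage2_L2' holds the OpenPose per-keypoint confidence maps with shape
        # (1, num_parts, H, W); the commented-out L1 output would hold the part-affinity fields.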
return heatmaps
def get_points(self, frame):
"""get one person's pose points.
Args:
            frame (np.ndarray): image containing only one person; other parts should be masked.
        Returns (np.ndarray): human joint points
"""
assert isinstance(frame, np.ndarray)
heatmaps = self._get_heatmaps(frame)
points = np.zeros((len(self.BODY_PARTS), 2))
for num_parts in range(len(self.BODY_PARTS)):
            # Slice heatmap of the corresponding body part.
heatMap = heatmaps[0, num_parts, :, :]
_, conf, _, point = cv2.minMaxLoc(heatMap)
frame_width = frame.shape[1]
frame_height = frame.shape[0]
x, y = np.nan, np.nan
            # Add a point if its confidence is higher than the threshold.
if conf > self.thr_point:
x = int((frame_width * point[0]) / heatmaps.shape[3])
y = int((frame_height * point[1]) / heatmaps.shape[2])
points[num_parts] = x, y
assert isinstance(points, np.ndarray)
return points
def mask_compute(self, bbox_frame):
results_mask = self.segmentor.compute(bbox_frame, pred_flag=True, max_mask_num=1)
if len(results_mask['masks']) > 0:
mask = results_mask['masks'][0]
mask_canvas = np.zeros((3, mask.shape[0], mask.shape[1]))
for num in range(len(mask_canvas)):
mask_canvas[num] = mask
mask_canvas = mask_canvas.transpose(1, 2, 0)
bbox_frame = (bbox_frame * mask_canvas).astype(int)
else:
logger.info('no mask')
return bbox_frame
def draw_pose(self, init_frame, points):
"""draw pose points and line to frame
Args:
init_frame: frame to draw
points: joints position values for all person
Returns:
"""
frame = init_frame.copy()
for pair in self.POSE_PAIRS:
partFrom = pair[0]
partTo = pair[1]
assert (partFrom in self.BODY_PARTS)
assert (partTo in self.BODY_PARTS)
idFrom = self.BODY_PARTS[partFrom]
idTo = self.BODY_PARTS[partTo]
            if not (np.isnan(points[idFrom]).any() or np.isnan(points[idTo]).any()):
points_from = tuple(points[idFrom].astype('int64'))
points_to = tuple(points[idTo].astype('int64'))
cv2.line(frame, points_from, points_to, (0, 255, 0), 3)
cv2.ellipse(frame, points_from, (3, 3), 0, 0, 360, (0, 0, 255), cv2.FILLED)
cv2.ellipse(frame, points_to, (3, 3), 0, 0, 360, (0, 0, 255), cv2.FILLED)
return frame
def _filter_points(self, points, xmin, ymin, xmax, ymax):
x = points.T[0]
y = points.T[1]
x = np.where(x < xmin, np.nan, x)
x = np.where(x > xmax, np.nan, x)
y = np.where(y < ymin, np.nan, y)
y = np.where(y > ymax, np.nan, y)
filtered_points = np.asarray([x, y]).T
filtered_points[(np.isnan(filtered_points.prod(axis=1)))] = np.nan
return filtered_points
def _normalize_points(self, points, xmin, ymin, xmax, ymax):
x = points.T[0]
y = points.T[1]
values_x = (x - xmin) / (xmax - xmin)
values_y = (y - ymin) / (ymax - ymin)
norm_points = np.asarray([values_x, values_y]).T
return norm_points
def generate_canvas(self, xmin, ymin, xmax, ymax, ratio_frame=16/9):
height_bbox = ymax - ymin
width_bbox = xmax - xmin
ratio_bbox = width_bbox / height_bbox
if ratio_bbox < ratio_frame:
width_canvas = int(height_bbox * ratio_frame)
canvas_org = np.zeros((height_bbox, width_canvas, 3), np.uint8)
elif ratio_bbox > ratio_frame:
height_canvas = int(width_bbox / ratio_frame)
canvas_org = np.zeros((height_canvas, width_bbox, 3), np.uint8)
elif ratio_bbox == ratio_frame:
canvas_org =
|
np.zeros((height_bbox, width_bbox, 3), np.uint8)
|
numpy.zeros
|
import numpy as np
import tensorflow as tf
from ampligraph.latent_features import INITIALIZER_REGISTRY
def test_random_normal():
"""Random normal initializer test
"""
tf.reset_default_graph()
tf.random.set_random_seed(0)
rnormal_class = INITIALIZER_REGISTRY['normal']
rnormal_obj = rnormal_class({"mean":0.5, "std":0.1})
tf_init = rnormal_obj.get_entity_initializer(init_type='tf')
var1 = tf.get_variable(shape=(1000, 100), initializer=tf_init, name="var1")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_var = sess.run(var1)
np_var = rnormal_obj.get_entity_initializer(1000, 100, init_type='np')
# print(np.mean(np_var), np.std(np_var))
# print(np.mean(tf_var), np.std(tf_var))
assert(np.round(np.mean(np_var),1)==np.round(np.mean(tf_var),1))
assert(np.round(np.std(np_var),1)==np.round(np.std(tf_var),1))
def test_xavier_normal():
"""Xavier normal initializer test
"""
tf.reset_default_graph()
tf.random.set_random_seed(0)
xnormal_class = INITIALIZER_REGISTRY['xavier']
xnormal_obj = xnormal_class({"uniform":False})
tf_init = xnormal_obj.get_entity_initializer(init_type='tf')
var1 = tf.get_variable(shape=(2000, 100), initializer=tf_init, name="var1")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_var = sess.run(var1)
np_var = xnormal_obj.get_entity_initializer(2000, 100, init_type='np')
# print(np.mean(np_var), np.std(np_var))
# print(np.mean(tf_var), np.std(tf_var))
assert(np.round(np.mean(np_var),2)==np.round(np.mean(tf_var),2))
assert(np.round(np.std(np_var),2)==np.round(np.std(tf_var),2))
def test_xavier_uniform():
"""Xavier uniform initializer test
"""
tf.reset_default_graph()
tf.random.set_random_seed(0)
xuniform_class = INITIALIZER_REGISTRY['xavier']
xuniform_obj = xuniform_class({"uniform":True})
tf_init = xuniform_obj.get_entity_initializer(init_type='tf')
var1 = tf.get_variable(shape=(200, 1000), initializer=tf_init, name="var1")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_var = sess.run(var1)
np_var = xuniform_obj.get_entity_initializer(200, 1000, init_type='np')
# print(np.min(np_var), np.max(np_var))
# print(np.min(tf_var), np.max(tf_var))
assert(np.round(np.min(np_var),2)==np.round(np.min(tf_var),2))
assert(np.round(np.max(np_var),2)==np.round(np.max(tf_var),2))
def test_random_uniform():
"""Random uniform initializer test
"""
tf.reset_default_graph()
tf.random.set_random_seed(0)
runiform_class = INITIALIZER_REGISTRY['uniform']
runiform_obj = runiform_class({"low":0.1, "high":0.4})
tf_init = runiform_obj.get_entity_initializer(init_type='tf')
var1 = tf.get_variable(shape=(1000, 100), initializer=tf_init, name="var1")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_var = sess.run(var1)
np_var = runiform_obj.get_entity_initializer(1000, 100, init_type='np')
# print(np.min(np_var), np.max(np_var))
# print(np.min(tf_var), np.max(tf_var))
assert(np.round(np.min(np_var),2)==np.round(np.min(tf_var),2))
assert(np.round(np.max(np_var),2)==np.round(np.max(tf_var),2))
def test_constant():
"""Constant initializer test
"""
tf.reset_default_graph()
tf.random.set_random_seed(117)
runiform_class = INITIALIZER_REGISTRY['constant']
ent_init = np.random.normal(1, 1, size=(300, 30))
rel_init = np.random.normal(2, 2, size=(10, 30))
runiform_obj = runiform_class({"entity":ent_init, "relation":rel_init})
var1 = tf.get_variable(shape=(300, 30),
initializer=runiform_obj.get_entity_initializer(300, 30, init_type='tf'),
name="ent_var")
var2 = tf.get_variable(shape=(10, 30),
initializer=runiform_obj.get_relation_initializer(10, 30, init_type='tf'),
name="rel_var")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_var1, tf_var2 = sess.run([var1, var2])
np_var1 = runiform_obj.get_entity_initializer(300, 30, init_type='np')
np_var2 = runiform_obj.get_relation_initializer(10, 30, init_type='np')
assert(np.round(
|
np.mean(tf_var1)
|
numpy.mean
|
import numpy as np
from itertools import cycle, islice
from scipy.stats.kde import gaussian_kde
from sklearn.decomposition import PCA
import statsmodels.api as sm
import matplotlib.pyplot as mplot
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
def plot_clusters(clone, data, path, headers=None):
color_list = np.array(['yellowgreen', 'orange', 'crimson', 'mediumpurple', 'deepskyblue', 'Aquamarine', 'DarkGoldenRod',
'Khaki', 'SteelBlue', 'Olive', 'Violet', 'DarkSeaGreen', 'RosyBrown', 'LightPink', 'DodgerBlue',
'lightcoral', 'chocolate', 'burlywood', 'cyan', 'olivedrab', 'palegreen', 'turquoise', 'gold', 'teal',
'hotpink', 'moccasin', 'lawngreen', 'sandybrown', 'blueviolet', 'powderblue', 'plum', 'springgreen',
'mediumaquamarine', 'rebeccapurple', 'peru', 'lightsalmon', 'khaki', 'sienna', 'lightseagreen', 'lightcyan'])
colors = np.array(list(islice(cycle(color_list),len(clone.centers))))
if headers is None:
headers = ["C%i"%x for x in range(data.shape[1])]
if data.shape[1] == 1:
plot_1d_clusters(clone, data, path, headers, colors)
elif data.shape[1] == 2:
plot_2d_clusters(clone, data, path, headers, colors)
else:
plot_nd_clusters(clone, data, path, headers, colors)
mplot.show()
def plot_1d_clusters(clone, data, path, headers, colors):
centers = np.array(clone.centers)
labels = np.array(clone.labels_)
labels_all = np.array(clone.labels_all)
core = clone.core_card
rho = clone.rho
# Mask for plotting
assigned_mask = np.where(labels != -1)
outliers_mask = np.where(labels == -1)
# KDE
    kde = sm.nonparametric.KDEUnivariate(data.astype(float))
kde.fit()
# Sort some values for better visualization after
arcore = np.argsort(core)
s_cores = core[arcore]
s_x = data[arcore]
# Plot core
mplot.figure(figsize=(4, 2))
mplot.scatter(s_x, [0] * len(data), marker='|', linewidth=0.1, s=150, c=s_cores, cmap=cm.nipy_spectral)
mplot.yticks([])
mplot.xlabel(headers[0], fontsize=15)
cbar = mplot.colorbar()
mplot.tick_params(axis='x', which='major', length=8, width=2, labelsize=15)
cbar.ax.tick_params(axis='x', which='major', length=8, width=2, labelsize=15)
cbar.ax.set_xlabel("#core", fontsize=15)
mplot.tight_layout()
mplot.savefig(path + "/cores.png", dpi=300)
# Plot clusters
mplot.figure(figsize=(4, 4))
mplot.scatter(data[assigned_mask, 0], [0] * len(data[assigned_mask]), marker='|', color=colors[labels[assigned_mask]])
mplot.scatter(data[outliers_mask, 0], [0] * len(data[outliers_mask]), marker='x', s=10, color='black')
mplot.scatter(data[centers, 0], [0] * len(data[centers]), marker='*', s=100, color='black')
mplot.plot(kde.support, kde.density, c="grey", marker="None", linestyle="--")
mplot.xlabel(headers[0])
mplot.ylabel("KDE")
mplot.tight_layout()
mplot.savefig(path + "/clusters.png", dpi=300)
def plot_2d_clusters(clone, data, path, headers, colors):
centers = np.array(clone.centers)
labels = np.array(clone.labels_)
core_card = clone.core_card
rho = clone.rho
# Core cardinality mapped to each point
size = (3.5,4)
cluster_fig = mplot.figure(figsize=size)
arcore = np.argsort(core_card)
s_cores = core_card[arcore]
s_data = data[arcore]
mplot.scatter(s_data[:, 0], s_data[:, 1], marker='o', c=s_cores, cmap=cm.nipy_spectral)
mplot.xlabel("#core", labelpad=20, fontsize=15)
cbar = mplot.colorbar(orientation='horizontal')
cbar.ax.tick_params(axis='x', which='major', length=8, width=2, labelsize=15)
cbar.ax.tick_params(axis='y', which='major', length=8, width=2, labelsize=15)
mplot.scatter(data[centers, 0], data[centers, 1], marker='*', s=150, c='black', cmap=cm.nipy_spectral)
mplot.xticks([])
mplot.yticks([])
mplot.tight_layout()
mplot.savefig(path + "/cores.png")
# Cluster plot by labels
size = (4,4)
cluster_fig = mplot.figure(figsize=size)
ax_clus = cluster_fig.add_subplot(111)
assigned_mask = np.where(labels != -1)
outliers_mask =
|
np.where(labels == -1)
|
numpy.where
|
'''
Here we consider a controller trained for the reacher environment in
OpenAI Gym. The controller was taken from OpenAI Baselines and is
based on DDPG.
'''
import gym
import numpy as np
from baselines.ddpg.ddpg import DDPG
from baselines.ddpg.noise import *
from baselines.ddpg.models import Actor, Critic
from baselines.ddpg.memory import Memory
from baselines.common import set_global_seeds
import baselines.common.tf_util as U
from mpi4py import MPI
from collections import deque
def train_return(env, param_noise, actor, critic, memory,nb_epochs=250, nb_epoch_cycles=20, reward_scale=1.,
render=False,normalize_returns=False, normalize_observations=True, critic_l2_reg=1e-2, actor_lr=1e-4,
critic_lr=1e-3,
action_noise=None, popart=False, gamma=0.99, clip_norm=None,nb_train_steps=50, nb_rollout_steps=2048,
batch_size=64,tau=0.01, param_noise_adaption_interval=50):
rank = MPI.COMM_WORLD.Get_rank()
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
max_action = env.action_space.high
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns,
normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
# Set up logging stuff only for a single worker.
episode_rewards_history = deque(maxlen=100)
#with U.single_threaded_session() as sess:
# Prepare everything.
agent.initialize(sess)
sess.graph.finalize()
agent.reset()
obs = env.reset()
episode_reward = 0.
episode_step = 0
episodes = 0
t = 0
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
print('epoch number:', epoch)
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
assert action.shape == env.action_space.shape
# Execute next action.
if rank == 0 and render:
env.render()
assert max_action.shape == action.shape
new_obs, r, done, info = env.step(
max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done)
obs = new_obs
if done:
# Episode done.
epoch_episode_rewards.append(episode_reward)
episode_rewards_history.append(episode_reward)
epoch_episode_steps.append(episode_step)
episode_reward = 0.
episode_step = 0
epoch_episodes += 1
episodes += 1
agent.reset()
obs = env.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t % param_noise_adaption_interval == 0:
distance = agent.adapt_param_noise()
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
return agent
seed = 2146337346
set_global_seeds(seed)
env = gym.make("Reacher-v1")
env.seed(seed)
sess = U.make_session(num_cpu=1).__enter__()
nb_actions = env.action_space.shape[-1]
layer_norm=True
param_noise = AdaptiveParamNoiseSpec(initial_stddev=float(0.2), desired_action_stddev=float(0.2))
memory = Memory(limit=int(1e6), action_shape=env.action_space.shape, observation_shape=env.observation_space.shape)
critic = Critic(layer_norm=layer_norm)
actor = Actor(nb_actions, layer_norm=layer_norm)
agent = train_return(env=env,actor=actor, critic=critic, memory=memory, param_noise=param_noise)
max_action = env.action_space.high
def compute_traj(max_steps,early=False,done_early=False,**kwargs):
env.reset()
# This sets the init_qpos
if 'init_state' in kwargs:
env.env.init_qpos = kwargs['init_state']
# This sets the goal
if 'goal' in kwargs:
env.env.goal = kwargs['goal']
# This is the init_qvel
if 'init_velocity' in kwargs:
env.env.init_qvel = kwargs['init_velocity']
# State perturbation
if 'state_per' in kwargs:
state_per = kwargs['state_per']
# Velocity perturbation
if 'vel_per' in kwargs:
vel_per = kwargs['vel_per']
qpos = state_per+env.env.init_qpos
qvel = vel_per+env.env.init_qvel
qpos[-2:] = env.env.goal
qvel[-2:] = 0.
env.env.set_state(qpos,qvel)
ob = env.env._get_obs()
traj = [ob]
reward = 0
iters = 0
closest = np.inf
total_theta1 = 0.
total_theta2 = 0.
pt1 = np.arccos(ob[0]) if ob[2] > 0 else np.pi + np.arccos(ob[0])
pt2 = np.arccos(ob[1]) if ob[3] > 0 else np.pi +np.arccos(ob[1])
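    # In the standard Reacher observation, ob[0], ob[1] are cos(theta1), cos(theta2)
    # and ob[2], ob[3] are sin(theta1), sin(theta2); pt1/pt2 unwrap approximate joint
    # angles so total_theta1/total_theta2 can accumulate net rotation across steps.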
for _ in range(max_steps):
action, _ = agent.pi(ob, apply_noise=False, compute_Q=True)
ob, r, done, additional_data = env.step(max_action * action)
if early and np.linalg.norm(env.env.get_body_com("fingertip")\
-env.env.get_body_com("target")) < 0.1:
break
nt1 = np.arccos(ob[0]) if ob[2] > 0 else np.pi + np.arccos(ob[0])
nt2 = np.arccos(ob[1]) if ob[3] > 0 else np.pi + np.arccos(ob[1])
total_theta1 += nt1 - pt1
total_theta2 += nt2 - pt2
pt1 = nt1
pt2 = nt2
if -additional_data['reward_dist']< closest:
closest = -additional_data['reward_dist']
if done_early and done:
break
reward += r
traj.append(ob)
iters+=1.
additional_data = {}
additional_data['reward']=reward
additional_data['iters'] = iters
additional_data['closest'] = closest
additional_data['tot_theta1'] = np.abs(total_theta1/(2*np.pi))
additional_data['tot_theta2'] = np.abs(total_theta2/(2*np.pi))
return traj, additional_data
# ------------------------------------------------------------------------------
from active_testing import pred_node, max_node, min_node, test_module
from active_testing.utils import sample_from
rand_nums = [3547645943,
3250606528,
2906727341,
772456798,
2103980956,
2264249721,
1171067901,
3643734338,
854527104,
260127400,
578423204,
3152488971,
261317259,
2798623267,
3165387405]
bounds = [(-0.2, 0.2)] * 2 # Bounds on the goal
bounds.append((-0.1, 0.1)) # Bounds on the state perturbations
bounds.append((-0.1, 0.1)) # Bounds on the state perturbations
bounds.append((-0.005, 0.005)) # Bounds on the velocity perturbations
bounds.append((-0.005, 0.005)) # Bounds on the velocity perturbations
def sut(max_steps,x0,early=False, done_early=False):
goal = np.array(x0[0:2])
state_per = np.zeros(4)
state_per[0:2] += x0[2:4]
vel_per = np.zeros(4)
vel_per[0:2] += x0[4:6]
return compute_traj(max_steps,early, done_early,goal=goal, state_per=state_per,
vel_per=vel_per)
# Requirement 1: Find the initial state, goal state that minimizes the reward
# We need only one node for the reward. The reward is a smooth function
# given that the closed loop system is deterministic
smooth_details_r1 = []
random_details_r1 = []
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f=lambda traj: traj[1]['reward'])
TM = test_module(bounds=bounds, sut=lambda x0: sut(2048,x0, done_early=True),
f_tree = node0, with_random = True, init_sample = 70,
optimize_restarts=5, exp_weight=10, seed=r)
TM.initialize()
TM.run_BO(30)
TM.k = 5
TM.run_BO(50)
TM.k=2
TM.run_BO(50)
smooth_details_r1.append([np.sum(TM.f_acqu.GP.Y < -10.),
TM.smooth_min_x,TM.smooth_min_val])
random_details_r1.append([np.sum(np.array(TM.random_Y) < -10.),
TM.rand_min_x, TM.rand_min_val])
print(r, smooth_details_r1[-1], random_details_r1[-1])
# Requirement 2: Find the initial state, goal state that maximizes the time
# taken to reach near the goal.
# We need only one node for the time. The time taken is a smooth function
# given that the closed loop system is deterministic.
smooth_details_r2 = []
random_details_r2 = []
# This set assumes random sampling and checking
for r in rand_nums:
np.random.seed(r)
node0 = pred_node(f=lambda traj: -traj[1]['iters'])
TM = test_module(bounds=bounds, sut=lambda x0: sut(2048,x0, early=True),
f_tree = node0, with_random = True, init_sample = 70,
optimize_restarts=3, exp_weight=10,normalizer=True)
TM.initialize()
TM.run_BO(30)
TM.k = 5
TM.run_BO(50)
TM.k = 2
TM.run_BO(50)
smooth_details_r2.append([np.sum(TM.f_acqu.GP.Y < -50),
TM.smooth_min_x,TM.smooth_min_val])
random_details_r2.append([np.sum(np.array(TM.random_Y) < -50),
TM.rand_min_x, TM.rand_min_val])
print(r, smooth_details_r2[-1], random_details_r2[-1])
# Requirement 3: Find the initial state, goal state that maximizes the minimum
# distance the reacher gets to a goal or minimize the number of rotations.
smooth_details_r3 = []
random_details_r3 = []
ns_details_r3 = []
def compare_s_ns(TM, TM_ns, K, num_sample, r, k):
# This is the same routine as implemented in sample_opt.
# Here, I re-implement it as I am comparing the different methods, and
# hence the sampled X needs to remain the same.
np.random.seed(r)
for l in range(K):
print("Iteration:", l)
X = sample_from(num_sample, bounds)
smooth_vals = TM.f_acqu.evaluate(X, k=k)
m_ns, v_ns = TM_ns.ns_GP.predict(X)
ns_vals = m_ns - k*np.sqrt(v_ns)
k_smooth = smooth_vals.argmin()
k_ns = ns_vals.argmin()
print(TM.f_acqu.eval_robustness(TM.system_under_test(X[k_smooth])), \
TM.f_acqu.eval_robustness(TM.system_under_test(X[k_ns])))
TM.f_acqu.update_GPs(np.vstack((TM.f_acqu.children[0].GP.X, \
|
np.atleast_2d(X[k_smooth])
|
numpy.atleast_2d
|
# Configure paths
import os, sys, subprocess
sys.path.append('../scripts')
sys.path.append('../shape_recognition')
sys.path.append('../shape_recognition/libraries/general')
sys.path.append('../shape_recognition/libraries/iLimb')
sys.path.append('../shape_recognition/libraries/UR10')
sys.path.append('../shape_recognition/libraries/neuromorphic')
# Import libraries
# PyQt
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.uic import loadUiType
# Standard
import numpy as np
import tensorflow as tf
import pyqtgraph as pg
from threading import Thread
import time
from copy import copy
from collections import deque
import serial
# Switch to using white background and black foreground
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
# Custom
from iLimb import *
from UR10 import *
from threadhandler import ThreadHandler
from serialhandler import list_serial_ports
from pcd_io import save_point_cloud
from ur10_simulation import ur10_simulator
from iLimb_trajectory import get_coords
from visualize import render3D
from detect import detect_shape
from vcnn import vCNN
from dataprocessing import MovingAverage
# Load UI file
Ui_MainWindow, QMainWindow = loadUiType('formMain_gui.ui')
# Constants
MAPPING = {'index':0, 'thumb':1}
THRESHOLD = {'index':0.01, 'thumb':0.01}
# iLimb dimensions (mm)
IDX_TO_BASE = 185 + 40
THB_TO_BASE = 105 + 30
IDX_0 = 50
IDX_1 = 30
THB = 65
""" Abstract class for storing state variables """
class STATE:
ROTATION_ANGLE = 30
ROTATION_POS = 0 # 0,1,2,3
ROTATION_DIR = -1 # -1/1
NUM_POINTS = 0
CONTACT_POINTS = []
CONTROL_POS = [0 for _ in range(5)]
FINGER_POS = {'index':[], 'thumb':[]}
XYZR = []
UNIT_VECTOR = []
STOP = False
HEIGHT = 0
ESTIMATED_HEIGHT = 200
STARTED = False
class port:
def __init__(self, widget):
self.view = widget
def write(self, *args):
self.view.append(*args)
def flush(self, *args):
pass
""" Main class for controlling the GUI """
class FormMain(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
# Constructor
super(FormMain, self).__init__()
self.setupUi(self)
# Handlers
self.ur10 = None
self.iLimb = None
# Set available ports
self.port_ur10.setText("10.1.1.6")
self.port_iLimb.addItems(list_serial_ports())
self.port_tactile.addItems(list_serial_ports())
# For tactile values
self.ser = None
self.dataQueue = deque([], maxlen=1000)
self.receiveThread = ThreadHandler(_worker=self.receive)
self.MIN = np.array([0,0])
self.MAX = np.array([4096, 4096])
self.mvaFilt = [MovingAverage(_windowSize = 1000), MovingAverage(_windowSize=1000)]
# Connect buttons to callback functions
self.initialize.clicked.connect(self.init_handlers)
self.configure.clicked.connect(self.configure_handlers)
self.init_main.clicked.connect(self.start_main)
self.stop_main.clicked.connect(self.break_main)
self.toHome.clicked.connect(self.move_to_home)
self.toBase.clicked.connect(lambda: self.move_to_base(t=5))
self.moveUp.clicked.connect(lambda: self.move_vertical(_dir=1))
self.moveDown.clicked.connect(lambda: self.move_vertical(_dir=-1))
self.cw.clicked.connect(self.rotate_hand_CW)
self.ccw.clicked.connect(self.rotate_hand_CCW)
self.toPose.clicked.connect(self.move_iLimb_to_pose)
self.pinch.clicked.connect(lambda: self.close_hand())
self.moveAway.clicked.connect(lambda: self.move_away())
self.startSensors.clicked.connect(lambda: [ self.sensors_timer.start(0), self.receiveThread.start() ])
self.stopSensors.clicked.connect(lambda: [ self.sensors_timer.stop(), self.receiveThread.pause()])
self.calibrate.clicked.connect(self.calibrate_sensors)
self.visualize.clicked.connect(lambda: [self.save_points(), render3D('run.pcd', stage=1)])
self.convexHull.clicked.connect(lambda: [self.save_points(), render3D('run.pcd', stage=2)])
self.detectShape.clicked.connect(self.recognize_shape)
self.clear.clicked.connect(self.reset_stored_values)
# Initialize PoV graphs
views = [self.view0, self.view1, self.view2, self.view3, self.view4, self.view5]
self.povBoxes = []
self.povPlots = []
for view in views:
view.ci.layout.setContentsMargins(0,0,0,0)
view.ci.layout.setSpacing(0)
self.povBoxes.append(view.addPlot())
self.povPlots.append(pg.ScatterPlotItem())
self.povBoxes[-1].addItem(self.povPlots[-1])
# Initialize tactile sensor graphs
self.timestep = [0]
self.sensorData = [[], []]
self.sensorBoxes = []
self.sensorPlots = []
for view in [self.sensor_index, self.sensor_thumb]:
view.ci.layout.setContentsMargins(0,0,0,0)
view.ci.layout.setSpacing(0)
self.sensorBoxes.append(view.addPlot())
self.sensorPlots.append(pg.PlotCurveItem(pen=pg.mkPen('b',width=1)))
self.sensorBoxes[-1].addItem(self.sensorPlots[-1])
self.sensorBoxes[-1].setXRange(min=0, max=20, padding=0.1)
# self.sensorBoxes[-1].setYRange(min=0, max=1, padding=0.1)
self.pov_timer = QtCore.QTimer()
self.pov_timer.timeout.connect(self.update_pov)
self.sensors_timer = QtCore.QTimer()
self.sensors_timer.timeout.connect(self.update_sensor_readings)
self.main_thread = Thread(target=self._palpation_routine)
self.main_thread.daemon = True
# Redirect console output to textBrowser
sys.stdout = port(self.textBrowser)
# Create TensorFlow session and load pretrained model
self.load_session()
""" Function to initialize handler objects """
def init_handlers(self):
# Create handlers
print('Initializing handlers ...')
self.ur10 = UR10Controller(self.port_ur10.text())
print('UR10 done.')
self.iLimb = iLimbController(self.port_iLimb.currentText())
self.iLimb.connect()
print('iLimb done.')
self.ser = serial.Serial(self.port_tactile.currentText(),117964800)
self.ser.flushInput()
self.ser.flushOutput()
print('TactileBoard done')
""" Functions to set all handlers to default configuration """
def move_to_home(self):
print('Setting UR10 to default position ...')
UR10pose = URPoseManager()
UR10pose.load('shape_recog_home.urpose')
UR10pose.moveUR(self.ur10,'home_j',5)
time.sleep(5.2)
def move_iLimb_to_pose(self):
print('Setting iLimb to default pose ...')
self.iLimb.setPose('openHand')
time.sleep(3)
self.iLimb.control(['thumbRotator'], ['position'], [700])
time.sleep(3)
def calibrate_sensors(self):
self.receiveThread.start()
print('Calibrating tactile sensors ...')
# Clear data queue
self.dataQueue.clear()
# Wait till queue has sufficient readings
while len(self.dataQueue) < 500:
pass
# Calculate lower and upper bounds
samples = np.asarray(copy(self.dataQueue))
self.MIN = np.mean(samples, axis=0)
self.MAX = self.MIN + 500
self.dataQueue.clear()
# Set Y-range
for box in self.sensorBoxes:
box.setYRange(min=0, max=1, padding=0.1)
print("Done")
def configure_handlers(self):
self.move_to_home()
self.move_iLimb_to_pose()
self.calibrate_sensors()
print('Done.')
""" Function to create and load pretrained model """
def load_session(self):
self.model = vCNN()
self.session = tf.Session(graph=self.model.graph)
with self.session.as_default():
with self.session.graph.as_default():
saver = tf.train.Saver(max_to_keep=3)
saver.restore(self.session, tf.train.latest_checkpoint('../shape_recognition/save'))
""" Function to clear all collected values """
def reset_stored_values(self):
STATE.NUM_POINTS = 0
STATE.CONTACT_POINTS = []
STATE.CONTROL_POS = [0 for _ in range(5)]
STATE.FINGER_POS = {'index':[], 'thumb':[]}
STATE.XYZR = []
STATE.UNIT_VECTOR = []
""" Function to close fingers until all fingers touch surface """
def close_hand(self, fingers=['index', 'thumb']):
touched = [False] * len(fingers)
touched_once = False
fingerArray = [[x, MAPPING[x], THRESHOLD[x]] for x in fingers]
while not all(touched):
time.sleep(0.005)
q = self.get_sensor_data()
for _ in range(len(q)):
tactileSample = q.popleft()
touched = self.iLimb.doFeedbackPinchTouch(tactileSample, fingerArray, 1)
# update control_pos for fingers that have touched a surface
for i in range(len(fingerArray)):
if touched[i]:
touched_once = True
STATE.CONTROL_POS[fingerArray[i][1]] = self.iLimb.controlPos
#----------------------------------------------------------
# Collect information
STATE.FINGER_POS[fingerArray[i][0]].append(self.iLimb.controlPos)
#----------------------------------------------------------
# Self-touching condition
# Can be modified later
if self.iLimb.controlPos > 200 and not touched_once:
return False
elif self.iLimb.controlPos > 200 and touched_once:
for i in range(len(fingerArray)):
if not touched[i]:
#----------------------------------------------------------
# Collect information
STATE.FINGER_POS[fingerArray[i][0]].append(-1)
#----------------------------------------------------------
return True
if all(touched):
return True
else:
# update fingerArray
fingerArray = [fingerArray[i] for i in range(len(touched)) if not touched[i]]
""" Function to calculate coordinates of points of contact """
def compute_coordinates(self):
self.ur10.read_joints_and_xyzR()
xyzR = copy(self.ur10.xyzR)
joints = copy(self.ur10.joints)
sim = ur10_simulator()
sim.set_joints(joints)
_ = sim.joints2pose()
_, rm = sim.get_Translation_and_Rotation_Matrix()
# Calculate the direction in which the end effector is pointing
        # Value corresponding to the z-direction is ignored
direction = rm[:2,2] # x and y direction vector only
direction /= np.linalg.norm(direction)
# Calculate unit vector direction
dir_ang = np.arctan(abs(direction[1]/direction[0]))
if direction[0] < 0:
if direction[1] < 0:
dir_ang += np.pi
else:
dir_ang = np.pi - dir_ang
else:
if direction[1] < 0:
dir_ang = 2*np.pi - dir_ang
# Find point of contact for index finger
idx_control = STATE.CONTROL_POS[MAPPING['index']]
if idx_control > 0:
theta = 30 + 60/500 * idx_control
if idx_control < 210:
# Normal circular motion
rel_theta = 30
else:
rel_theta = 30 + 60/290 * (idx_control - 210)
# rel_theta = 30 + 60/500 * idx_control
axis = IDX_0 * np.cos(np.deg2rad(theta)) + IDX_1 * np.cos(np.deg2rad(theta+rel_theta))
perp = IDX_0 * np.sin(np.deg2rad(theta)) + IDX_1 * np.sin(np.deg2rad(theta+rel_theta))
axis += IDX_TO_BASE
pt_1 = [axis * np.cos(dir_ang) - perp * np.sin(dir_ang) + xyzR[0],
axis * np.sin(dir_ang) + perp * np.cos(dir_ang) + xyzR[1],
xyzR[2]]
STATE.NUM_POINTS += 1
STATE.CONTACT_POINTS.append(pt_1)
# Find point of contact for thumb
thb_control = STATE.CONTROL_POS[MAPPING['thumb']]
if thb_control > 0:
theta = 90 * (1 - thb_control/500)
axis = THB * np.cos(np.deg2rad(theta)) + THB_TO_BASE
perp = THB * np.sin(np.deg2rad(theta))
pt_2 = [axis * np.cos(dir_ang) - perp * np.sin(dir_ang) + xyzR[0],
axis * np.sin(dir_ang) + perp * np.cos(dir_ang) + xyzR[1],
xyzR[2]]
STATE.NUM_POINTS += 1
STATE.CONTACT_POINTS.append(pt_2)
#--------------------------------------------------
# Collect information
STATE.XYZR.append(xyzR)
STATE.UNIT_VECTOR.append(direction)
#--------------------------------------------------
""" Functions to rotate hand for next reading """
def rotate_hand_CCW(self):
self.ur10.read_joints()
joints = copy(self.ur10.joints)
if STATE.ROTATION_POS < 180//STATE.ROTATION_ANGLE - 1:
STATE.ROTATION_POS += 1
joints[4] += STATE.ROTATION_ANGLE * -1
xyzR = self.ur10.move_joints_with_grasp_constraints(joints, dist_pivot=220, grasp_pivot=60, constant_axis='z')
self.ur10.movej(xyzR, 3)
time.sleep(3.2)
def rotate_hand_CW(self):
self.ur10.read_joints()
joints = copy(self.ur10.joints)
if STATE.ROTATION_POS > 0:
STATE.ROTATION_POS -= 1
joints[4] += STATE.ROTATION_ANGLE * 1
xyzR = self.ur10.move_joints_with_grasp_constraints(joints, dist_pivot=220, grasp_pivot=60, constant_axis='z')
self.ur10.movej(xyzR, 3)
time.sleep(3.2)
def rotate_hand(self):
# Boundary checks
if STATE.ROTATION_POS == 0 and STATE.ROTATION_DIR == -1:
STATE.ROTATION_DIR = 1
if STATE.ROTATION_POS == 180//STATE.ROTATION_ANGLE - 1 and STATE.ROTATION_DIR == 1:
STATE.ROTATION_DIR = -1
# Rotate the hand according to direction
if STATE.ROTATION_DIR == 1:
self.rotate_hand_CCW()
else:
self.rotate_hand_CW()
""" Function to move hand in vertical direction """
def move_vertical(self, _dir=1):
# move one step up while palpating
self.ur10.read_joints_and_xyzR()
x, y, z, rx, ry, rz = copy(self.ur10.xyzR)
new_joint_pos = np.array([x, y, z+10*_dir, rx, ry, rz])
self.ur10.movej(new_joint_pos, 0.5)
time.sleep(0.7)
STATE.HEIGHT += 10*_dir
""" Function to move hand away from the object """
def move_away(self, fingers=['thumb', 'index']):
self.iLimb.control(fingers, ['position']*len(fingers), [0]*len(fingers))
time.sleep(1)
""" Function to move UR10 to base """
def move_to_base(self, t=1):
self.ur10.read_joints_and_xyzR()
x, y, z, rx, ry, rz = copy(self.ur10.xyzR)
new_joint_pos =
|
np.array([x, y, -200, rx, ry, rz])
|
numpy.array
|
# Copyright 2021-2022 The DADApy Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains some essential plotting functions."""
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from matplotlib import cm
from matplotlib.collections import LineCollection
from scipy import cluster
from sklearn import manifold
def plot_ID_line_fit_estimation(Data, decimation=0.9, fraction_used=0.9):
"""Plot the 2NN scatter plot and line fit."""
mus = Data.distances[:, 2] / Data.distances[:, 1]
idx = np.arange(mus.shape[0])
idx = np.random.choice(
idx, size=(int(np.around(Data.N * decimation))), replace=False
)
mus = mus[idx]
    mus = np.sort(mus)
Nele_eff = int(np.around(fraction_used * Data.N, decimals=0))
x = np.log(mus)
y = -np.log(1.0 - np.arange(0, mus.shape[0]) / mus.shape[0])
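    # Two-NN relation: under local uniformity F(mu) = 1 - mu**(-d), so
    # -log(1 - F(mu)) = d * log(mu) and the fitted slope estimates the intrinsic dimension d.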
x_, y_ = np.atleast_2d(x[:Nele_eff]).T, y[:Nele_eff]
slope, residuals, rank, s = np.linalg.lstsq(
x_, y_, rcond=None
) # x[:Nele_eff, None]?
plt.plot(x, y, "o")
plt.plot(x[:Nele_eff], y[:Nele_eff], "o")
plt.plot(x, x * slope, "-")
print(
"slope is {:f}, average residual is {:f}".format(
slope[0], residuals[0] / Nele_eff
)
)
plt.xlabel("log(mu)")
plt.ylabel("-log(1-F(mu))")
plt.show()
# plt.savefig('ID_line_fit_plot.png')
def plot_SLAn(Data, linkage="single"):
"""Plot a basic visualisation of the density peaks."""
assert Data.cluster_assignment is not None
nd = int((Data.N_clusters * Data.N_clusters - Data.N_clusters) / 2)
Dis = np.empty(nd, dtype=float)
nl = 0
Fmax = max(Data.log_den)
Rho_bord_m = np.copy(Data.log_den_bord)
for i in range(Data.N_clusters - 1):
for j in range(i + 1, Data.N_clusters):
Dis[nl] = Fmax - Rho_bord_m[i][j]
nl = nl + 1
if linkage == "single":
DD = sp.cluster.hierarchy.single(Dis)
elif linkage == "complete":
DD = sp.cluster.hierarchy.complete(Dis)
elif linkage == "average":
DD = sp.cluster.hierarchy.average(Dis)
elif linkage == "weighted":
DD = sp.cluster.hierarchy.weighted(Dis)
else:
print("ERROR: select a valid linkage criterion")
fig, ax = plt.subplots(nrows=1, ncols=1) # create figure & 1 axis
dn = sp.cluster.hierarchy.dendrogram(DD)
# fig.savefig('dendrogramm.png') # save the figure to file
plt.show()
def plot_MDS(Data, cmap="viridis", savefig=""):
"""Plot a multi-dimensional scaling visualisation of the density peaks."""
Fmax = max(Data.log_den)
Rho_bord_m = np.copy(Data.log_den_bord)
d_dis = np.zeros((Data.N_clusters, Data.N_clusters), dtype=float)
model = manifold.MDS(n_components=2, n_jobs=None, dissimilarity="precomputed")
for i in range(Data.N_clusters):
for j in range(Data.N_clusters):
d_dis[i][j] = Fmax - Rho_bord_m[i][j]
for i in range(Data.N_clusters):
d_dis[i][i] = 0.0
out = model.fit_transform(d_dis)
fig, ax = plt.subplots(nrows=1, ncols=1)
s = []
col = []
for i in range(Data.N_clusters):
s.append(20.0 * np.sqrt(len(Data.cluster_indices[i])))
col.append(i)
plt.scatter(out[:, 0], out[:, 1], s=s, c=col, cmap=cmap)
left, right = plt.xlim()
xr = right - left
plt.xlim((left - xr * 0.05, right + xr * 0.05))
bottom, up = plt.ylim()
yr = up - bottom
plt.ylim((bottom - yr * 0.05, up + yr * 0.05))
plt.xticks([])
plt.yticks([])
cmal = cm.get_cmap(cmap, Data.N_clusters)
colors = cmal(np.arange(0, cmal.N))
for i in range(Data.N_clusters):
cc = "k"
r = colors[i][0]
g = colors[i][1]
b = colors[i][2]
luma = (0.2126 * r + 0.7152 * g + 0.0722 * b) * 255
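            # Rec. 709 luma of the cluster colour; dark colours get white labels below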
if luma < 156:
cc = "w"
plt.annotate(
i,
(out[i, 0], out[i, 1]),
horizontalalignment="center",
verticalalignment="center",
c=cc,
weight="bold",
)
# for i in range(Data.N_clusters):
# ax.annotate(i, (out[i, 0], out[i, 1]))
# Add edges
rr = np.amax(Rho_bord_m)
if rr > 0.0:
Rho_bord_m = Rho_bord_m / rr * 100.0
start_idx, end_idx = np.where(out)
segments = [
[out[i, :], out[j, :]] for i in range(len(out)) for j in range(len(out))
]
values = np.abs(Rho_bord_m)
lc = LineCollection(segments, zorder=0, norm=plt.Normalize(0, values.max()))
lc.set_array(Rho_bord_m.flatten())
lc.set_edgecolor(np.full(len(segments), "black"))
lc.set_facecolor(np.full(len(segments), "black"))
lc.set_linewidths(0.02 * Rho_bord_m.flatten())
ax.add_collection(lc)
if savefig != "":
plt.savefig(savefig)
plt.show()
def plot_matrix(Data, savefig=""):
"""Plot a matrix of density peaks and density saddle points intensity."""
Rho_bord_m = np.copy(Data.log_den_bord)
topography = np.copy(Rho_bord_m)
for j in range(Data.N_clusters):
topography[j, j] = Data.log_den[Data.cluster_centers[j]]
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.imshow(topography, cmap="gray_r", interpolation=None)
plt.xticks(np.arange(0, Data.N_clusters, step=1))
plt.yticks(np.arange(0, Data.N_clusters, step=1))
if savefig != "":
plt.savefig(savefig)
plt.show()
def plot_DecGraph(Data, savefig=""):
"""Plot the decision graph for the Density Peak clustering method."""
plt.xlabel(r"$\rho$")
plt.ylabel(r"$\delta$")
plt.scatter(Data.log_den, Data.delta)
if savefig != "":
plt.savefig(savefig)
plt.show()
def get_dendrogram(Data, cmap="viridis", savefig="", logscale=True):
"""Generate a visualisation of the topography computed with ADP.
    This visualisation corresponds to a hierarchy of the clusters built
    with single linkage, using as similarity measure the density at the
    border between clusters.
    Unlike classical dendrograms, where all branches have the same height,
    here the height of each branch is proportional to the density of the
    cluster centre.
To convey more information, the distance in the x-axis between
clusters is proportional to the population (or its logarithm). TODO: Alex, is this sentence true?
Args:
Data: A dadapy data object for which ADP has been already run.
cmap: (optional) The color map for representing the different clusters,
the default is "viridis".
savefig: (str, optional) A string with the name of the file in which the dendrogram
will be saved. The default is empty, so no file is generated.
logscale: (bool, optional) Makes the distances in the x-axis between clusters proportional
to the logarithm of the population of the clusters instead of
proportional to the population itself. In very unbalanced clusterings,
it makes the dendrogram more human readable. The default is True.
Returns:
"""
# Prepare some auxiliary lists
e1 = []
e2 = []
d12 = []
L = []
Li1 = []
Li2 = []
Ldis = []
Fmax = max(Data.log_den)
Rho_bord_m = np.copy(Data.log_den_bord)
    # Obtain populations of the clusters for fine-tuning the x-axis
pop = np.zeros((Data.N_clusters), dtype=int)
for i in range(Data.N_clusters):
pop[i] = len(Data.cluster_indices[i])
if logscale:
pop[i] =
|
np.log(pop[i])
|
numpy.log
|
''' Agents: stop/random/shortest/seq2seq '''
import json
import os
import sys
import numpy as np
import random
import time
import pickle
import torch
import torch.nn as nn
import torch.distributions as D
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import copy
from env import debug_beam
from utils import padding_idx, to_contiguous, clip_gradient
from agent_utils import basic_actions, sort_batch, teacher_action, discount_rewards, backchain_inference_states, path_element_from_observation, InferenceState, WorldState, least_common_viewpoint_path
from collections import Counter, defaultdict
import pdb
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
testing_settingA=False
# region Simple Agents
class BaseAgent(object):
''' Base class for an R2R agent to generate and save trajectories. '''
def __init__(self, env, results_path, seed=1):
self.env = env
self.results_path = results_path
if seed != 'resume': random.seed(seed)
self.results = {}
self.losses = [] # For learning agents
self.testing_settingA = False
def write_results(self, dump_file=False):
if '_' in list(self.results.keys())[0]:
#if testing_settingA:
if self.testing_settingA:
# choose one from three according to prob
for id in self.results:
bestp, best = self.results[id][1], self.results[id]
for ii in range(4):
temp_id = "%s_%d" % (id[:-2], ii)
if temp_id in self.results and self.results[temp_id][1] > bestp:
bestp = self.results[temp_id][1]
best = self.results[temp_id]
self.results[id] = best
output = [{'instr_id': k, 'trajectory': v[0]} for k, v in self.results.items()]
else:
output = [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]
else:
output = [{'instr_id':'%s_%d' % (k, i), 'trajectory': v} for k,v in self.results.items() for i in range(self.env.traj_n_sents[k])]
if dump_file:
with open(self.results_path, 'w') as f:
json.dump(output, f)
return output
def rollout(self, beam_size=1):
''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self, beam_size=1, successors=1, speaker=(None,None,None,None)):
self.env.reset_epoch()
self.losses = []
self.results = {}
# We rely on env showing the entire batch before repeating anything
#print 'Testing %s' % self.__class__.__name__
speaker, speaker_weights, speaker_merge, evaluator = speaker
looped = False
batch_i, index_count = 0, [Counter() for _ in range(len(speaker_weights))] if speaker else []# for beam search
while True:
trajs = self.rollout(beam_size, successors)
if beam_size > 1 or debug_beam:
trajs, completed, traversed_list = trajs
for ti, traj in enumerate(trajs):
if (beam_size == 1 and debug_beam) or (beam_size>1 and speaker is None):
traj = traj[0]
elif beam_size>1 and (speaker is not None):#use speaker
traj = speaker_rank(speaker, speaker_weights, speaker_merge, traj, completed[ti], traversed_list[ti] if traversed_list else None, index_count)
else:
assert (beam_size == 1 and not debug_beam)
if traj['instr_id'] in self.results:
looped = True
else:
#if testing_settingA:
if self.testing_settingA:
self.results[traj['instr_id']] = (traj['path'], traj['prob']) # choose one from three according to prob
else:
self.results[traj['instr_id']] = traj['path']
if looped:
break
if beam_size>1: print('batch',batch_i)
batch_i+=1
# if use speaker, find best weight
if beam_size>1 and (speaker is not None): # speaker's multiple choices
best_sr, best_speaker_weight_i = -1, -1
for spi, speaker_weight in enumerate(speaker_weights):
if '_' in list(self.results.keys())[0]:
output = [{'instr_id': k, 'trajectory': v[spi]} for k, v in self.results.items()]
else:
output = [{'instr_id': '%s_%d' % (k, i), 'trajectory': v[spi]} for k, v in self.results.items() for i in
range(self.env.traj_n_sents[k])]
score_summary, _ = evaluator.score_output(output)
data_log = defaultdict(list)
for metric, val in score_summary.items():
data_log['%s %s' % (''.join(evaluator.splits), metric)].append(val)
print(index_count[spi])
print(speaker_weights[spi], '\n'.join([str((k, round(v[0], 4))) for k, v in sorted(data_log.items())]))
sr = score_summary['success_rate']
if sr>best_sr:
best_sr, best_speaker_weight_i = sr, spi
print('best sr:',best_sr,' speaker weight:',speaker_weights[best_speaker_weight_i])
print('best sr counter', index_count[best_speaker_weight_i])
self.results = {k: v[best_speaker_weight_i] for k, v in self.results.items()}
def speaker_rank(speaker, speaker_weights, speaker_merge, beam_candidates, this_completed, traversed_lists, index_count): # todo: this_completed is not sorted!! so not corresponding to beam_candidates
cand_obs, cand_actions, multi = [], [], isinstance(beam_candidates[0]['instr_encoding'], list)
cand_instr = [[] for _ in beam_candidates[0]['instr_encoding']] if multi else [] # else should be np.narray
for candidate in beam_candidates:
cand_obs.append(candidate['observations'])
cand_actions.append(candidate['actions'])
if multi:
for si, encoding in enumerate(candidate['instr_encoding']):
cand_instr[si].append(np.trim_zeros(encoding)[:-1])
else:
cand_instr.append(np.trim_zeros(candidate['instr_encoding'])[:-1])
if multi:
speaker_scored_candidates = [[] for _ in (beam_candidates)]
for si, sub_cand_instr in enumerate(cand_instr):
speaker_scored_candidates_si, _ = \
speaker._score_obs_actions_and_instructions(
cand_obs, cand_actions, sub_cand_instr, feedback='teacher')
for sc_i, sc in enumerate(speaker_scored_candidates_si):
speaker_scored_candidates[sc_i].append(sc)
else:
speaker_scored_candidates, _ = \
speaker._score_obs_actions_and_instructions(
cand_obs, cand_actions, cand_instr, feedback='teacher')
assert len(speaker_scored_candidates) == len(beam_candidates)
follower_scores = []
speaker_scores = []
score_merge = {'mean':np.mean,'max':np.max,'min':np.min}[speaker_merge]
    for i, candidate in enumerate(beam_candidates):  # unlike Speaker-Follower, our beam_candidates is not nested; we already received a subset outside this function, so there is no need to flatten it before enumerating
speaker_scored_candidate = speaker_scored_candidates[i]
if multi:
assert candidate['instr_id'] == speaker_scored_candidate[0]['instr_id']
candidate['speaker_score'] = score_merge([s['score'] for s in speaker_scored_candidate])
else:
assert candidate['instr_id'] == speaker_scored_candidate['instr_id']
candidate['speaker_score'] = speaker_scored_candidate['score']
candidate['follower_score'] = candidate['score']
del candidate['observations']
if traversed_lists:# physical_traversal:
last_traversed = traversed_lists[-1]
candidate_inf_state = \
this_completed[i]
path_from_last_to_next = least_common_viewpoint_path(
last_traversed, candidate_inf_state)
assert path_from_last_to_next[0].world_state.viewpointId \
== last_traversed.world_state.viewpointId
assert path_from_last_to_next[-1].world_state.viewpointId \
== candidate_inf_state.world_state.viewpointId
inf_traj = (traversed_lists +
path_from_last_to_next[1:])
physical_trajectory = [
path_element_from_observation(inf_state.observation)
for inf_state in inf_traj]
# make sure the viewpointIds match
assert (physical_trajectory[-1][0] ==
candidate['path'][-1][0])
candidate['path'] = physical_trajectory
follower_scores.append(candidate['follower_score'])
speaker_scores.append(candidate['speaker_score'])
speaker_std = np.std(speaker_scores)
follower_std = np.std(follower_scores)
instr_id = beam_candidates[0]['instr_id']
result_path = []
for spi, speaker_weight in enumerate(speaker_weights):
speaker_scaled_weight = float(speaker_weight) / speaker_std
follower_scaled_weight = (1 - float(speaker_weight)) / follower_std
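        # Rank candidates by a z-scored convex combination of the two model scores:
        #   (w / std_speaker) * speaker_score + ((1 - w) / std_follower) * follower_score,
        # mirroring the Speaker-Follower pragmatic reranking scheme.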
best_ix, best_cand = max(
enumerate(beam_candidates),
key=lambda tp: (
tp[1]['speaker_score'] * speaker_scaled_weight +
tp[1]['follower_score'] * follower_scaled_weight))
result_path.append(best_cand['path'])
index_count[spi][best_ix] += 1
return {'instr_id': instr_id, 'path': result_path}
class StopAgent(BaseAgent):
''' An agent that doesn't move! '''
def rollout(self, beam_size=1):
world_states = self.env.reset()
obs = np.array(self.env._get_obs(world_states))
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in obs]
return traj
class RandomAgent(BaseAgent):
''' An agent that picks a random direction then tries to go straight for
five viewpoint steps and then stops. '''
def __init__(self, env, results_path):
super(RandomAgent, self).__init__(env, results_path)
random.seed(1)
def rollout(self, beam_size=1):
world_states = self.env.reset()
obs = self.env._get_obs(world_states)
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in obs]
self.steps = random.sample(list(range(-11,1)), len(obs))
ended = [False] * len(obs)
for t in range(30):
actions = []
for i,ob in enumerate(obs):
if self.steps[i] >= 5:
actions.append((0, 0, 0)) # do nothing, i.e. end
ended[i] = True
elif self.steps[i] < 0:
actions.append((0, 1, 0)) # turn right (direction choosing)
self.steps[i] += 1
elif len(ob['navigableLocations']) > 1:
actions.append((1, 0, 0)) # go forward
self.steps[i] += 1
else:
actions.append((0, 1, 0)) # turn right until we can go forward
obs = self.env._get_obs(self.env.step(actions))
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
return traj
class ShortestAgent(BaseAgent):
''' An agent that always takes the shortest path to goal. '''
def rollout(self, beam_size=1):
world_states = self.env.reset()
obs = np.array(self.env._get_obs(world_states))
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in obs]
ended = np.array([False] * len(obs))
while True:
actions = [ob['teacher'] for ob in obs]
obs = self.env._get_obs(self.env.step(actions))
for i,a in enumerate(actions):
if a == (0, 0, 0):
ended[i] = True
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
if ended.all():
break
return traj
class ShortestCollectAgent(BaseAgent):
''' An agent that always takes the shortest path to goal and records the teacher actions along it for data collection. '''
def __init__(self, env, results_path, max_episode_len, name=""):
super(ShortestCollectAgent, self).__init__(env, results_path)
self.episode_len = max_episode_len
self.name = name
def collect(self):
idx = 0
total_traj = len(self.env.data)
data = list()
while len(data) < total_traj:
traj = self.rollout()
data.extend(traj)
print("Collected %d shortest paths so far" % len(data))
file_name = "/shortest_{}.json".format(self.name)
with open(self.results_path + file_name, 'w+') as f:
json.dump(data, f)
def rollout(self, beam_size=1):
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['scan'], ob['viewpoint'], ob['viewIndex'],ob['heading'], ob['elevation'])],
'teacher_actions':[],
'teacher_action_emd':[],
'instr_encoding':ob['instr_encoding'].tolist()
} for ob in obs]
ended = np.array([False] * len(obs))
#while True:
for t in range(self.episode_len):
actions = [ob['teacher'] for ob in obs]
for i,a in enumerate(actions):
if not ended[i]:
traj[i]['teacher_actions'].append(a)
if a == 0:
traj[i]['teacher_action_emd'].append((-1,90,90))
else:
traj[i]['teacher_action_emd'].append((obs[i]['adj_loc_list'][a]['absViewIndex'], obs[i]['adj_loc_list'][a]['rel_heading'],obs[i]['adj_loc_list'][a]['rel_elevation']))
obs = self.env._get_obs(self.env.step(actions, obs))
for i,a in enumerate(actions):
if a == (0, 0, 0) or a == 0:
ended[i] = True
for i,ob in enumerate(obs):
if not ended[i]:
traj[i]['path'].append((ob['scan'], ob['viewpoint'], ob['viewIndex'],ob['heading'], ob['elevation']))
if ended.all():
break
return traj
# endregion
class Seq2SeqAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
model_actions, env_actions = basic_actions()
feedback_options = ['teacher', 'argmax', 'sample']
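# model_actions / env_actions come from basic_actions(); feedback_options selects how actions
# are produced during a rollout (see get_next): teacher forcing, greedy argmax, or sampling.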
def __init__(self, env, results_path, encoder, decoder, seed, aux_ratio, decoder_init,
params=None, monotonic=False, episode_len=20, state_factored=False, accu_n_iters = 0): # , subgoal
super(Seq2SeqAgent, self).__init__(env, results_path, seed=seed)
self.encoder, self.decoder = encoder, decoder # encoder2 is only for self_critic
self.encoder2, self.decoder2 = None, None
self.monotonic = monotonic
if self.monotonic:
self.copy_seq2seq()
self.episode_len = episode_len
self.losses = []
self.losses_ctrl_f = [] # For learning auxiliary tasks
self.aux_ratio = aux_ratio
self.decoder_init = decoder_init
self.clip_gradient = params['clip_gradient']
self.clip_gradient_norm = params['clip_gradient_norm']
self.reward_func = params['reward_func']
self.schedule_ratio = params['schedule_ratio']
self.temp_alpha = params['temp_alpha']
self.testing_settingA = params['test_A']
if self.decoder.action_space == 6:
self.ignore_index = self.model_actions.index('<ignore>')
else:
self.ignore_index = -1
self.criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index)
if self.decoder.ctrl_feature:
assert self.decoder.action_space == -1 # currently only implement this
self.criterion_ctrl_f = nn.MSELoss() # todo: MSE or ?
self.state_factored = state_factored
self.accu_n_iters = accu_n_iters
@staticmethod
def n_inputs():
return len(Seq2SeqAgent.model_actions)
@staticmethod
def n_outputs():
return len(Seq2SeqAgent.model_actions)-2 # Model doesn't output start or ignore
def _sort_batch(self, obs):
sorted_tensor, mask, seq_lengths, perm_idx = sort_batch(obs)
if isinstance(sorted_tensor, list):
sorted_tensors, masks, seqs_lengths = [], [], []
for i in range(len(sorted_tensor)):
sorted_tensors.append(Variable(sorted_tensor[i], requires_grad=False).long().to(device))
masks.append(mask[i].byte().to(device))
seqs_lengths.append(seq_lengths[i])
return sorted_tensors, masks, seqs_lengths, perm_idx
return Variable(sorted_tensor, requires_grad=False).long().to(device), \
mask.byte().to(device), \
list(seq_lengths), list(perm_idx)
def _feature_variable(self, obs):
''' Extract precomputed features into variable. '''
#feature_size = obs[0]['feature'].shape[0]
#features = np.empty((len(obs),feature_size), dtype=np.float32)
if isinstance(obs[0]['feature'],tuple): # todo?
features_pano = np.repeat(np.expand_dims(np.zeros_like(obs[0]['feature'][0], dtype=np.float32), 0), len(obs), axis=0) # jolin
features = np.repeat(np.expand_dims(np.zeros_like(obs[0]['feature'][1], dtype=np.float32), 0), len(obs), axis=0) # jolin
for i,ob in enumerate(obs):
features_pano[i] = ob['feature'][0]
features[i] = ob['feature'][1]
return (Variable(torch.from_numpy(features_pano), requires_grad=False).to(device),
Variable(torch.from_numpy(features), requires_grad=False).to(device))
else:
features = np.repeat(np.expand_dims(np.zeros_like(obs[0]['feature'], dtype=np.float32),0),len(obs),axis=0) # jolin
for i,ob in enumerate(obs):
features[i] = ob['feature']
return Variable(torch.from_numpy(features), requires_grad=False).to(device)
def get_next(self, feedback, target, logit):
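''' Select the next action ids from the logits according to the feedback mode:
'teacher' returns the ground-truth target (teacher forcing), 'argmax' the detached
greedy argmax, and 'sample' a sample from the softmax distribution over actions. '''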
if feedback == 'teacher':
a_t = target # teacher forcing
elif feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
elif feedback == 'sample':
probs = F.softmax(logit, dim=1)
m = D.Categorical(probs)
a_t = m.sample() # sampling an action from model
else:
sys.exit('Invalid feedback option')
return a_t
def _action_variable(self, obs):
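''' Build a zero-padded action-embedding tensor of shape (batch, max_num_actions, dim)
together with a validity mask for the variable-size navigable-action space. '''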
# get the maximum number of actions over all samples in this batch
max_num_a = -1
for i, ob in enumerate(obs):
max_num_a = max(max_num_a, len(ob['adj_loc_list']))
is_valid = np.zeros((len(obs), max_num_a), np.float32)
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros((len(obs), max_num_a, action_embedding_dim), dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
num_a = len(adj_loc_list)
is_valid[i, 0:num_a] = 1.
action_embeddings[i, :num_a, :] = ob['action_embedding'] #bug: todo
#for n_a, adj_dict in enumerate(adj_loc_list):
# action_embeddings[i, :num_a, :] = ob['action_embedding']
return (Variable(torch.from_numpy(action_embeddings), requires_grad=False).to(device),
Variable(torch.from_numpy(is_valid), requires_grad=False).to(device),
is_valid)
def _teacher_action_variable(self, obs):
# gather the embedding of the teacher action for each sample in this batch
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros((len(obs), action_embedding_dim), dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
action_embeddings[i, :] = ob['action_embedding'][ob['teacher']] #bug: todo
#for n_a, adj_dict in enumerate(adj_loc_list):
# action_embeddings[i, :num_a, :] = ob['action_embedding']
return Variable(torch.from_numpy(action_embeddings), requires_grad=False).to(device)
def _teacher_action(self, obs, ended):
a = teacher_action(self.model_actions, self.decoder.action_space, obs, ended, self.ignore_index)
return Variable(a, requires_grad=False).to(device)
def _teacher_feature(self, obs, ended):#, max_num_a):
''' Extract teacher look ahead auxiliary features into variable. '''
# todo: 6 action space
ctrl_features_dim = -1
for i, ob in enumerate(obs): # todo: whether include <stop> ?
# max_num_a = max(max_num_a, len(ob['ctrl_features']))
if ctrl_features_dim<0 and len(ob['ctrl_features']):
ctrl_features_dim = ob['ctrl_features'].shape[-1] #[0].shape[-1]
break
#is_valid no need to create. already created
ctrl_features_tensor = np.zeros((len(obs), ctrl_features_dim), dtype=np.float32)
for i, ob in enumerate(obs):
if not ended[i]:
ctrl_features_tensor[i, :] = ob['ctrl_features']
return Variable(torch.from_numpy(ctrl_features_tensor), requires_grad=False).to(device)
def rollout(self, beam_size=1, successors=1):
if beam_size ==1 and not debug_beam:
if self.encoder.__class__.__name__ in ['BertImgEncoder','MultiVilBertEncoder','BertAddEncoder','MultiVilAddEncoder','MultiAddLoadEncoder', 'HugAddEncoder','MultiHugAddEncoder']:
return self.bert_rollout_with_loss()
elif self.encoder.__class__.__name__ in ['BertLangEncoder']:
return self.langbert_rollout_with_loss()
else:
return self.rollout_with_loss()
# beam
with torch.no_grad():
if self.state_factored:
beams = self.state_factored_search(beam_size, successors, first_n_ws_key=4)
else:
beams = self.beam_search(beam_size)
return beams
def state_factored_search(self, completion_size, successor_size, first_n_ws_key=4):
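''' Beam search factored over physical world states: at most one inference state is cached per
world-state key (the first first_n_ws_key fields of the world state), keeping only the
highest-scoring hypothesis for a revisited location; up to successor_size unexpanded states are
expanded per instance each step, until completion_size completed hypotheses have been collected.
This appears to follow the state-factored search used in speaker-follower style decoding. '''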
assert self.decoder.panoramic
world_states = self.env.reset(sort=True)
initial_obs = (self.env._get_obs(world_states))
batch_size = len(world_states)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch([o for ob in initial_obs for o in ob])
world_states = [[world_state for f, world_state in states] for states in world_states]
ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
completed = []
completed_holding = []
for _ in range(batch_size):
completed.append({})
completed_holding.append({})
state_cache = [
{ws[0][0:first_n_ws_key]: (InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=None,
last_action=-1,
last_action_embedding=self.decoder.u_begin,
action_count=0,
score=0.0, h_t=h_t[i], c_t=c_t[i], last_alpha=None), True)}
for i, (ws, o) in enumerate(zip(world_states, initial_obs))
]
beams = [[inf_state for world_state, (inf_state, expanded) in sorted(instance_cache.items())]
for instance_cache in state_cache]  # sorting is a no-op here since each instance_cache contains exactly one entry at this point
last_expanded_list = []
traversed_lists = []
for beam in beams:
assert len(beam)==1
first_state = beam[0]
last_expanded_list.append(first_state)
traversed_lists.append([first_state])
def update_traversed_lists(new_visited_inf_states):
assert len(new_visited_inf_states) == len(last_expanded_list)
assert len(new_visited_inf_states) == len(traversed_lists)
for instance_index, instance_states in enumerate(new_visited_inf_states):
last_expanded = last_expanded_list[instance_index]
# todo: if this passes, shouldn't need traversed_lists
assert last_expanded.world_state.viewpointId == traversed_lists[instance_index][-1].world_state.viewpointId
for inf_state in instance_states:
path_from_last_to_next = least_common_viewpoint_path(last_expanded, inf_state)
# path_from_last should include last_expanded's world state as the first element, so check and drop that
assert path_from_last_to_next[0].world_state.viewpointId == last_expanded.world_state.viewpointId
assert path_from_last_to_next[-1].world_state.viewpointId == inf_state.world_state.viewpointId
traversed_lists[instance_index].extend(path_from_last_to_next[1:])
last_expanded = inf_state
last_expanded_list[instance_index] = last_expanded
# Do a sequence rollout and calculate the loss
while any(len(comp) < completion_size for comp in completed):
beam_indices = []
u_t_list = []
h_t_list = []
c_t_list = []
flat_obs = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
u_t_list.append(inf_state.last_action_embedding)
h_t_list.append(inf_state.h_t.unsqueeze(0))
c_t_list.append(inf_state.c_t.unsqueeze(0))
flat_obs.append(inf_state.observation)
u_t_prev = torch.stack(u_t_list, dim=0)
assert len(u_t_prev.shape) == 2
# Image features from obs
# if self.decoder.panoramic:
f_t_all, f_t = self._feature_variable(flat_obs)
# Action feature from obs
# if self.decoder.action_space == 6:
# u_t_features, is_valid = np.zeros((batch_size, 1)), None
# else:
u_t_features, is_valid, is_valid_numpy = self._action_variable(flat_obs)
h_t = torch.cat(h_t_list, dim=0)
c_t = torch.cat(c_t_list, dim=0)
h_t, c_t, alpha, logit, pred_f = self.decoder(None, u_t_prev, u_t_features, f_t,
f_t_all, h_t, c_t, [ctx_si[beam_indices] for ctx_si in ctx] if isinstance(ctx, list) else ctx[beam_indices],
[seq_mask_si[beam_indices] for seq_mask_si in seq_mask] if isinstance(ctx, list) else seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
masked_logit = logit
log_probs = F.log_softmax(logit, dim=1).data
# force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
_, action_indices = masked_logit.data.topk(logit.size()[1], dim=1) # todo: fix this
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states) in enumerate(zip(beams, world_states)):
successors = []
end_index = start_index + len(beam)
assert len(beam_world_states) == len(beam)
if beam:
for inf_index, (inf_state, world_state, action_score_row) in \
enumerate(zip(beam, beam_world_states, log_probs[start_index:end_index])):
flat_index = start_index + inf_index
for action_index, action_score in enumerate(action_score_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state, world_state=world_state,
observation=flat_obs[flat_index],
flat_index=None,
last_action=action_index,
last_action_embedding=u_t_features[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=float(inf_state.score + action_score),
h_t=h_t[flat_index], c_t=c_t[flat_index],
last_alpha=[alpha_si[flat_index].data for alpha_si in alpha] if isinstance(alpha, list) else alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states = self.env.step(successor_env_actions, successor_last_obs, successor_world_states)
successor_world_states = [[world_state for f, world_state in states] for states in successor_world_states]
acc = []
for ttt in zip(all_successors, successor_world_states):
mapped = [inf._replace(world_state=ws) for inf, ws in zip(*ttt)]
acc.append(mapped)
all_successors = acc
assert len(all_successors) == len(state_cache)
new_beams = []
for beam_index, (successors, instance_cache) in enumerate(zip(all_successors, state_cache)):
# early stop if we've already built a sizable completion list
instance_completed = completed[beam_index]
instance_completed_holding = completed_holding[beam_index]
if len(instance_completed) >= completion_size:
new_beams.append([])
continue
for successor in successors:
ws_keys = successor.world_state[0:first_n_ws_key]
if successor.last_action == 0 or successor.action_count == self.episode_len:
if ws_keys not in instance_completed_holding or instance_completed_holding[ws_keys][
0].score < successor.score:
instance_completed_holding[ws_keys] = (successor, False)
else:
if ws_keys not in instance_cache or instance_cache[ws_keys][0].score < successor.score:
instance_cache[ws_keys] = (successor, False)
# third value: did this come from completed_holding?
uncompleted_to_consider = ((ws_keys, inf_state, False) for (ws_keys, (inf_state, expanded)) in
instance_cache.items() if not expanded)
completed_to_consider = ((ws_keys, inf_state, True) for (ws_keys, (inf_state, expanded)) in
instance_completed_holding.items() if not expanded)
import itertools
import heapq
to_consider = itertools.chain(uncompleted_to_consider, completed_to_consider)
ws_keys_and_inf_states = heapq.nlargest(successor_size, to_consider, key=lambda pair: pair[1].score)
new_beam = []
for ws_keys, inf_state, is_completed in ws_keys_and_inf_states:
if is_completed:
assert instance_completed_holding[ws_keys] == (inf_state, False)
instance_completed_holding[ws_keys] = (inf_state, True)
if ws_keys not in instance_completed or instance_completed[ws_keys].score < inf_state.score:
instance_completed[ws_keys] = inf_state
else:
instance_cache[ws_keys] = (inf_state, True)
new_beam.append(inf_state)
if len(instance_completed) >= completion_size:
new_beams.append([])
else:
new_beams.append(new_beam)
beams = new_beams
# Early exit if all ended
if not any(beam for beam in beams):
break
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
successor_obs = np.array(self.env._get_obs(self.env.world_states2feature_states(world_states)))
acc = []
for tttt in zip(beams, successor_obs):
mapped = [inf._replace(observation=o) for inf, o in zip(*tttt)]
acc.append(mapped)
beams = acc
update_traversed_lists(beams)
completed_list = []
for this_completed in completed:
completed_list.append(sorted(this_completed.values(), key=lambda t: t.score, reverse=True)[:completion_size])
completed_ws = [
[inf_state.world_state for inf_state in comp_l]
for comp_l in completed_list
]
completed_obs = np.array(self.env._get_obs(self.env.world_states2feature_states(completed_ws)))
accu = []
for ttttt in zip(completed_list, completed_obs):
mapped = [inf._replace(observation=o) for inf, o in zip(*ttttt)]
accu.append(mapped)
completed_list = accu
update_traversed_lists(completed_list)
trajs = []
for this_completed in completed_list:
assert this_completed
this_trajs = []
for inf_state in this_completed:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'path': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
return trajs, completed_list, traversed_lists
def beam_search(self, beam_size):
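''' Standard beam search over the panoramic action space: keep up to beam_size hypotheses per
instance, moving a hypothesis to the completed list once it emits action 0 (stop) or reaches the
episode length limit; completed hypotheses are returned sorted by accumulated log-prob. '''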
assert self.decoder.panoramic
# assert self.env.beam_size >= beam_size
world_states = self.env.reset(True) # [(feature, state)]
obs = np.array(self.env._get_obs(world_states))
batch_size = len(world_states)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch([o for ob in obs for o in ob])
world_states = [[world_state for f, world_state in states] for states in world_states]
ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
completed = []
for _ in range(batch_size):
completed.append([])
beams = [[InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=i,
last_action=-1,
last_action_embedding=self.decoder.u_begin,
action_count=0,
score=0.0,
h_t=None, c_t=None,
last_alpha=None)]
for i, (ws, o) in enumerate(zip(world_states, obs))]
#
# batch_size x beam_size
for t in range(self.episode_len):
flat_indices = []
beam_indices = []
u_t_list = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
flat_indices.append(inf_state.flat_index)
u_t_list.append(inf_state.last_action_embedding)
u_t_prev = torch.stack(u_t_list, dim=0)
flat_obs = [ob for obs_beam in obs for ob in obs_beam]
# Image features from obs
# if self.decoder.panoramic:
f_t_all, f_t = self._feature_variable(flat_obs)
# Action feature from obs
# if self.decoder.action_space == 6:
# u_t_features, is_valid = np.zeros((batch_size, 1)), None
# else:
u_t_features, is_valid, is_valid_numpy = self._action_variable(flat_obs)
h_t, c_t, alpha, logit, pred_f = self.decoder(None, u_t_prev, u_t_features,
f_t, f_t_all, h_t[flat_indices], c_t[flat_indices],
[ctx_si[beam_indices] for ctx_si in ctx] if isinstance(ctx, list) else ctx[beam_indices],
[seq_mask_si[beam_indices] for seq_mask_si in seq_mask] if isinstance(ctx, list) else seq_mask[beam_indices])
# Mask outputs where agent can't move forward
logit[is_valid == 0] = -float('inf')
masked_logit = logit # for debug
log_probs = F.log_softmax(logit, dim=1).data
_, action_indices = masked_logit.data.topk(min(beam_size, logit.size()[1]), dim=1)
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
new_beams = []
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states, beam_obs) in enumerate(zip(beams, world_states, obs)):
successors = []
end_index = start_index + len(beam)
assert len(beam_obs) == len(beam) and len(beam_world_states) == len(beam)
if beam:
for inf_index, (inf_state, world_state, ob, action_score_row, action_index_row) in \
enumerate(zip(beam, beam_world_states, beam_obs, action_scores[start_index:end_index],
action_indices[start_index:end_index])):
flat_index = start_index + inf_index
for action_score, action_index in zip(action_score_row, action_index_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state,
world_state=world_state,
# will be updated later after successors are pruned
observation=ob, # will be updated later after successors are pruned
flat_index=flat_index,
last_action=action_index,
last_action_embedding=u_t_features[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=float(inf_state.score + action_score), h_t=None, c_t=None,
last_alpha=[alpha_si[flat_index].data for alpha_si in alpha] if isinstance(alpha, list) else alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)[:beam_size]
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states=self.env.step(successor_env_actions, successor_last_obs, successor_world_states)
successor_obs = np.array(self.env._get_obs(successor_world_states))
successor_world_states = [[world_state for f, world_state in states] for states in successor_world_states]
acc = []
for ttt in zip(all_successors, successor_world_states, successor_obs):
mapped = [inf._replace(world_state=ws, observation=o) for inf, ws, o in zip(*ttt)]
acc.append(mapped)
all_successors=acc
for beam_index, successors in enumerate(all_successors):
new_beam = []
for successor in successors:
if successor.last_action == 0 or t == self.episode_len - 1:
completed[beam_index].append(successor)
else:
new_beam.append(successor)
if len(completed[beam_index]) >= beam_size:
new_beam = []
new_beams.append(new_beam)
beams = new_beams
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
obs = [
[inf_state.observation for inf_state in beam]
for beam in beams
]
# Early exit if all ended
if not any(beam for beam in beams):
break
trajs = []
for this_completed in completed:
assert this_completed
this_trajs = []
for inf_state in sorted(this_completed, key=lambda t: t.score, reverse=True)[:beam_size]:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'path': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
traversed_lists = None # todo
return trajs, completed, traversed_lists
def rollout_with_loss(self):
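''' Single-beam rollout that accumulates the cross-entropy loss against teacher actions (plus an
optional auxiliary feature-prediction loss), choosing actions according to the current feedback
mode (teacher forcing / argmax / sampling, optionally with scheduled sampling). '''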
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
batch_size = len(obs)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
# When there are multiple sentences, perm_idx is simply range(batch_size);
# the per-sentence permutation for the i-th sentence (i = 1, 2 or 3) is carried inside seq_lengths instead,
# i.e. seq_lengths = [(seq_lengths, perm_idx), (seq_lengths, perm_idx), (seq_lengths, perm_idx)]
# Record starting point
traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs]
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
#h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
c_t = torch.zeros_like(c_t) # debug
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if self.decoder.action_space == -1:
u_t_prev = self.decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() * self.model_actions.index('<start>'), requires_grad=False).to(device)
ended = np.array([False] * batch_size)  # Indices match the permutation of the model, not the env
# Do a sequence rollout and calculate the loss
self.loss = 0
self.loss_ctrl_f = 0
env_action = [None] * batch_size
# for plot
#all_alpha = []
action_scores = np.zeros((batch_size,))
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if self.decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if self.decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Supervised training
target = self._teacher_action(perm_obs, ended)
h_t, c_t, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, h_t, c_t, ctx, seq_mask)
# all_alpha.append(alpha)
# Mask outputs where agent can't move forward
if self.decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else:
logit[is_valid == 0] = -float('inf')
# if self.decoder.ctrl_feature:
# pred_f[is_valid == 0] = 0
if self.temp_alpha != 0: # add temperature
logit = logit/self.temp_alpha
self.loss += self.criterion(logit, target)
# Auxiliary training
if self.decoder.ctrl_feature:
target_f = self._teacher_feature(perm_obs, ended)#, is_valid.shape[-1])
self.loss_ctrl_f += self.aux_ratio * self.criterion_ctrl_f(pred_f, target_f)
# todo: add auxiliary tasks to sc-rl training?
# Determine next model inputs
# scheduled sampling
if self.schedule_ratio >= 0 and self.schedule_ratio <= 1:
sample_feedback = random.choices(['sample', 'teacher'], [self.schedule_ratio, 1 - self.schedule_ratio], k=1)[0]  # scheduled sampling
if self.feedback != 'argmax': # ignore test case
self.feedback = sample_feedback
a_t = self.get_next(self.feedback, target, logit)
# setting A
if self.testing_settingA:
log_probs = F.log_softmax(logit, dim=1).data
action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
# log_probs = F.log_softmax(logit, dim=1).data
# action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
a_t_prev = a_t
if self.decoder.action_space != 6: # update the previous action
u_t_prev = u_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if action_idx == 0:
ended[i] = True
env_action[idx] = action_idx
# state transitions
new_states = self.env.step(env_action, obs)
obs = np.array(self.env._get_obs(new_states))
#obs = np.array(self.env._get_obs(self.env.step(env_action, obs))) # , sub_stages
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.testing_settingA:
if not ended[i]:
action_scores[idx] = action_scores[idx] + action_score[i]
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
ended[i] = True
else:
if action_idx == 0:
ended[i] = True
# Early exit if all ended
if ended.all(): break
# episode_len is just a constant so it doesn't matter
self.losses.append(self.loss.item()) # / self.episode_len)
if self.decoder.ctrl_feature:
self.losses_ctrl_f.append(self.loss_ctrl_f.item()) # / self.episode_len)
# with open('preprocess/alpha.pkl', 'wb') as alpha_f: # TODO: remove for release!!!!
# pickle.dump(all_alpha, alpha_f)
# choose one from three according to prob
# for t,p in zip(traj, action_scores):
# t['prob'] = p
# choose one from three according to prob
if self.testing_settingA:
for t, p in zip(traj, action_scores):
t['prob'] = p
return traj
def img_shrink(self, feat_all):
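''' Appears to shrink feat_all to a third of its channel dimension: the last 128 channels (the
action/orientation embedding) are tiled twice before the stride-3 subsampling so that part keeps
its relative weight, while the image channels are downsampled by a factor of three. '''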
feat_dim = feat_all.shape[-1]
f_t, act_t = feat_all[:,:, :feat_dim-128], feat_all[:,:,-128:]
shrink = torch.cat([f_t, act_t, act_t], -1)[:,:,::3]
return shrink
def bert_rollout_with_loss(self):
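''' Rollout for the BERT-style multimodal encoders: the instruction is re-encoded together with
the panoramic image features at every time step, the encoder hidden/cell states initialize the
decoder at t == 0, and afterwards the decoder is unrolled like a regular LSTM. '''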
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
batch_size = len(obs)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
# When there are multiple sentences, perm_idx is simply range(batch_size);
# the per-sentence permutation for the i-th sentence (i = 1, 2 or 3) is carried inside seq_lengths instead,
# i.e. seq_lengths = [(seq_lengths, perm_idx), (seq_lengths, perm_idx), (seq_lengths, perm_idx)]
# Record starting point
traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs]
## Forward through encoder, giving initial hidden state and memory cell for decoder
#f_t = self._feature_variable(perm_obs)
#if self.decoder.panoramic: f_t_all, f_t = f_t
#else: f_t_all = np.zeros((batch_size, 1))
#ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths,f_t_all=f_t_all)
###ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, seq_lengths,f_t_all=f_t_all)
#if not self.decoder_init:
# h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
# #c_t = torch.zeros_like(c_t) # debug
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if self.decoder.action_space == -1:
u_t_prev = self.decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() * self.model_actions.index('<start>'), requires_grad=False).to(device)
ended = np.array([False] * batch_size)  # Indices match the permutation of the model, not the env
# Do a sequence rollout and calculate the loss
self.loss = 0
self.loss_ctrl_f = 0
env_action = [None] * batch_size
# for plot
#all_alpha = []
action_scores = np.zeros((batch_size,))
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if self.decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if self.decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Supervised training
target = self._teacher_action(perm_obs, ended)
if self.encoder.__class__.__name__ in ['MultiVilAddEncoder','MultiAddLoadEncoder','MultiHugAddEncoder']:
ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, seq_lengths, f_t_all=f_t_all)
else:
ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, torch.tensor(seq_lengths), f_t_all=f_t_all)
if t == 0: # use encoder's ht and ct as init
de_ht, de_ct, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, en_ht, en_ct, ctx, vl_mask)
else: # otherwise unroll as lstm
de_ht, de_ct, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, de_ht, de_ct, ctx, vl_mask)
# all_alpha.append(alpha)
# Mask outputs where agent can't move forward
if self.decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else:
logit[is_valid == 0] = -float('inf')
# if self.decoder.ctrl_feature:
# pred_f[is_valid == 0] = 0
if self.temp_alpha != 0: # add temperature
logit = logit/self.temp_alpha
self.loss += self.criterion(logit, target)
# Auxiliary training
if self.decoder.ctrl_feature:
target_f = self._teacher_feature(perm_obs, ended)#, is_valid.shape[-1])
self.loss_ctrl_f += self.aux_ratio * self.criterion_ctrl_f(pred_f, target_f)
# todo: add auxiliary tasks to sc-rl training?
# Determine next model inputs
# scheduled sampling
if self.schedule_ratio >= 0 and self.schedule_ratio <= 1:
sample_feedback = random.choices(['sample', 'teacher'], [self.schedule_ratio, 1 - self.schedule_ratio], k=1)[0]  # scheduled sampling
if self.feedback != 'argmax': # ignore test case
self.feedback = sample_feedback
a_t = self.get_next(self.feedback, target, logit)
# setting A
if self.testing_settingA:
log_probs = F.log_softmax(logit, dim=1).data
action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
# log_probs = F.log_softmax(logit, dim=1).data
# action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
a_t_prev = a_t
if self.decoder.action_space != 6: # update the previous action
u_t_prev = u_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
# # sub_stages[idx] = max(sub_stages[idx]-1, 0)
# # ended[i] = (sub_stages[idx]==0)
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if action_idx == 0:
# # sub_stages[idx] = max(sub_stages[idx] - 1, 0)
# # ended[i] = (sub_stages[idx] == 0)
ended[i] = True
env_action[idx] = action_idx
# state transitions
new_states = self.env.step(env_action, obs)
obs = np.array(self.env._get_obs(new_states))
#obs = np.array(self.env._get_obs(self.env.step(env_action, obs))) # , sub_stages
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.testing_settingA:
if not ended[i]:
action_scores[idx] = action_scores[idx] + action_score[i]
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
ended[i] = True
else:
if action_idx == 0:
ended[i] = True
# for i,idx in enumerate(perm_idx):
# action_idx = a_t[i].item()
# # if not ended[i]:
# # action_scores[idx] = action_scores[idx] + action_score[i]
# if self.decoder.action_space == 6:
# if action_idx == self.model_actions.index('<end>'):
# ended[i] = True
# else:
# if action_idx == 0:
# ended[i] = True
# Early exit if all ended
if ended.all(): break
# episode_len is just a constant so it doesn't matter
self.losses.append(self.loss.item()) # / self.episode_len)
if self.decoder.ctrl_feature:
self.losses_ctrl_f.append(self.loss_ctrl_f.item()) # / self.episode_len)
# with open('preprocess/alpha.pkl', 'wb') as alpha_f: # TODO: remove for release!!!!
# pickle.dump(all_alpha, alpha_f)
# choose one from three according to prob
# for t,p in zip(traj, action_scores):
# t['prob'] = p
# choose one from three according to prob
if self.testing_settingA:
for t, p in zip(traj, action_scores):
t['prob'] = p
return traj
def langbert_rollout_with_loss(self):
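''' Rollout for the language-only BERT encoder: the instruction is encoded once without visual
features (f_t_all=None), and the decoder is then unrolled step by step as in
bert_rollout_with_loss. '''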
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
batch_size = len(obs)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
# When there are multiple sentences, perm_idx is simply range(batch_size);
# the per-sentence permutation for the i-th sentence (i = 1, 2 or 3) is carried inside seq_lengths instead,
# i.e. seq_lengths = [(seq_lengths, perm_idx), (seq_lengths, perm_idx), (seq_lengths, perm_idx)]
# Record starting point
traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs]
## Forward through encoder, giving initial hidden state and memory cell for decoder
#f_t = self._feature_variable(perm_obs)
#if self.decoder.panoramic: f_t_all, f_t = f_t
#else: f_t_all = np.zeros((batch_size, 1))
#ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths,f_t_all=f_t_all)
ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, seq_lengths,f_t_all=None)
#if not self.decoder_init:
# h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
# #c_t = torch.zeros_like(c_t) # debug
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if self.decoder.action_space == -1:
u_t_prev = self.decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() * self.model_actions.index('<start>'), requires_grad=False).to(device)
ended = np.array([False] * batch_size)  # Indices match the permutation of the model, not the env
# Do a sequence rollout and calculate the loss
self.loss = 0
self.loss_ctrl_f = 0
env_action = [None] * batch_size
# for plot
#all_alpha = []
action_scores = np.zeros((batch_size,))
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if self.decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if self.decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Supervised training
target = self._teacher_action(perm_obs, ended)
#ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, torch.tensor(seq_lengths), f_t_all=f_t_all)
if t == 0: # use encoder's ht and ct as init
de_ht, de_ct, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, en_ht, en_ct, ctx, vl_mask)
else: # otherwise unroll as lstm
de_ht, de_ct, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, de_ht, de_ct, ctx, vl_mask)
# all_alpha.append(alpha)
# Mask outputs where agent can't move forward
if self.decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else:
logit[is_valid == 0] = -float('inf')
# if self.decoder.ctrl_feature:
# pred_f[is_valid == 0] = 0
if self.temp_alpha != 0: # add temperature
logit = logit/self.temp_alpha
self.loss += self.criterion(logit, target)
# Auxiliary training
if self.decoder.ctrl_feature:
target_f = self._teacher_feature(perm_obs, ended)#, is_valid.shape[-1])
self.loss_ctrl_f += self.aux_ratio * self.criterion_ctrl_f(pred_f, target_f)
# todo: add auxiliary tasks to sc-rl training?
# Determine next model inputs
# scheduled sampling
if self.schedule_ratio >= 0 and self.schedule_ratio <= 1:
sample_feedback = random.choices(['sample', 'teacher'], [self.schedule_ratio, 1 - self.schedule_ratio], k=1)[0]  # scheduled sampling
if self.feedback != 'argmax': # ignore test case
self.feedback = sample_feedback
a_t = self.get_next(self.feedback, target, logit)
# setting A
if self.testing_settingA:
log_probs = F.log_softmax(logit, dim=1).data
action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
# log_probs = F.log_softmax(logit, dim=1).data
# action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
a_t_prev = a_t
if self.decoder.action_space != 6: # update the previous action
u_t_prev = u_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
# # sub_stages[idx] = max(sub_stages[idx]-1, 0)
# # ended[i] = (sub_stages[idx]==0)
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if action_idx == 0:
# # sub_stages[idx] = max(sub_stages[idx] - 1, 0)
# # ended[i] = (sub_stages[idx] == 0)
ended[i] = True
env_action[idx] = action_idx
# state transitions
new_states = self.env.step(env_action, obs)
obs = np.array(self.env._get_obs(new_states))
#obs = np.array(self.env._get_obs(self.env.step(env_action, obs))) # , sub_stages
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.testing_settingA:
if not ended[i]:
action_scores[idx] = action_scores[idx] + action_score[i]
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
ended[i] = True
else:
if action_idx == 0:
ended[i] = True
# for i,idx in enumerate(perm_idx):
# action_idx = a_t[i].item()
# # if not ended[i]:
# # action_scores[idx] = action_scores[idx] + action_score[i]
# if self.decoder.action_space == 6:
# if action_idx == self.model_actions.index('<end>'):
# ended[i] = True
# else:
# if action_idx == 0:
# ended[i] = True
# Early exit if all ended
if ended.all(): break
# episode_len is just a constant so it doesn't matter
self.losses.append(self.loss.item()) # / self.episode_len)
if self.decoder.ctrl_feature:
self.losses_ctrl_f.append(self.loss_ctrl_f.item()) # / self.episode_len)
# with open('preprocess/alpha.pkl', 'wb') as alpha_f: # TODO: remove for release!!!!
# pickle.dump(all_alpha, alpha_f)
# choose one from three according to prob
# for t,p in zip(traj, action_scores):
# t['prob'] = p
# choose one from three according to prob
if self.testing_settingA:
for t, p in zip(traj, action_scores):
t['prob'] = p
return traj
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, beam_size=1, successors=1, speaker=(None,None,None,None)):
''' Evaluate once on each instruction in the current environment '''
if not allow_cheat: # permitted for purpose of calculating validation loss only
assert feedback in ['argmax', 'sample'] # no cheating by using teacher at test time!
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
super(Seq2SeqAgent, self).test(beam_size, successors, speaker)
def train(self, encoder_optimizer, decoder_optimizer, n_iters, aux_n_iter, feedback='teacher'):
''' Train for a given number of iterations '''
assert feedback in self.feedback_options
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.losses = []
self.losses_ctrl_f = []
epo_inc = 0
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
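# accu_n_iters == 0: zero the gradients and step the optimizers after every rollout; otherwise
# accumulate gradients (loss divided by accu_n_iters) and only step every accu_n_iters iterations.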
for iter in range(1, n_iters + 1):
if self.accu_n_iters == 0:
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
self.rollout()
if (not self.decoder.ctrl_feature) or (iter % aux_n_iter):
self.loss.backward()
else:
self.loss_ctrl_f.backward()
if self.clip_gradient != 0: # clip gradient
clip_gradient(encoder_optimizer, self.clip_gradient)
clip_gradient(decoder_optimizer, self.clip_gradient)
if self.clip_gradient_norm > 0: # clip gradient norm
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip_gradient_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip_gradient_norm)
encoder_optimizer.step()
decoder_optimizer.step()
epo_inc += self.env.epo_inc
else:
self.rollout()
if (not self.decoder.ctrl_feature) or (iter % aux_n_iter):
self.loss /= self.accu_n_iters
self.loss.backward()
else:
self.loss_ctrl_f.backward()
if iter % self.accu_n_iters == 0:
if self.clip_gradient != 0: # clip gradient
clip_gradient(encoder_optimizer, self.clip_gradient)
clip_gradient(decoder_optimizer, self.clip_gradient)
if self.clip_gradient_norm > 0: # clip gradient norm
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip_gradient_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip_gradient_norm)
encoder_optimizer.step()
decoder_optimizer.step()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
epo_inc += self.env.epo_inc
return epo_inc
"""
def train(self, encoder_optimizer, decoder_optimizer, n_iters, aux_n_iter, feedback='teacher'):
''' Train for a given number of iterations '''
assert feedback in self.feedback_options
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.losses = []
self.losses_ctrl_f = []
epo_inc = 0
for iter in range(1, n_iters + 1):
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
self.rollout()
if (not self.decoder.ctrl_feature) or (iter % aux_n_iter):
self.loss.backward()
else:
self.loss_ctrl_f.backward()
if self.clip_gradient != 0: # clip gradient
clip_gradient(encoder_optimizer, self.clip_gradient)
clip_gradient(decoder_optimizer, self.clip_gradient)
if self.clip_gradient_norm > 0: # clip gradient norm
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip_gradient_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip_gradient_norm)
encoder_optimizer.step()
decoder_optimizer.step()
epo_inc += self.env.epo_inc
return epo_inc
"""
def rollout_notrain(self, n_iters): # jolin
epo_inc = 0
for iter in range(1, n_iters + 1):
self.env._next_minibatch(False)
epo_inc += self.env.epo_inc
return epo_inc
def rl_rollout(self, obs, perm_obs, seq, seq_mask, seq_lengths, perm_idx, feedback,
encoder, decoder):
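''' Rollout used for self-critical RL: with feedback 'sample1' actions are sampled and their
per-step log-probs and a step-validity mask are recorded, with 'argmax1' the greedy baseline is
decoded (mask and log-probs are None). Returns the trajectories, the mask, the log-probs, and a
dict mapping instr_id to the predicted path. '''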
batch_size = len(perm_obs)
# Record starting point
traj = [{'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs]
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx, h_t, c_t, seq_mask = encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if decoder.action_space==-1:
u_t_prev = decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() *
self.model_actions.index('<start>'), requires_grad=False).to(device)
ended=np.array([False] * batch_size)
# Do a sequence rollout but don't accumulate the supervised loss; the policy-gradient loss is computed later
# self.loss = 0
env_action = [None] * batch_size
# Initialize seq log probs for policy gradient
if feedback == 'sample1':
seqLogprobs = h_t.new_zeros(batch_size, self.episode_len)
mask = np.ones((batch_size, self.episode_len))
elif feedback == 'argmax1':
seqLogprobs, mask = None, None
else:
raise NotImplementedError('other feedback not supported.')
# only for supervised auxiliary tasks
#assert (not self.decoder.ctrl_feature) # not implemented
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Decoding actions
h_t, c_t, alpha, logit, pred_f = decoder(a_t_prev, u_t_prev, u_t_features,
f_t, f_t_all, h_t, c_t, ctx, seq_mask)
# Mask outputs where agent can't move forward
if decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else: logit[is_valid == 0] = -float('inf')
# Supervised training
# target = self._teacher_action(perm_obs, ended)
# self.loss += self.criterion(logit, target)
# Determine next model inputs
if feedback == 'argmax1':
_, a_t = logit.max(1)
elif feedback == 'sample1':
logprobs = F.log_softmax(logit, dim=1)
probs = torch.exp(logprobs.data)
m = D.Categorical(probs)
a_t = m.sample() # sampling an action from model
sampleLogprobs = logprobs.gather(1, a_t.unsqueeze(1))
else:
sys.exit('invalid feedback method %s'%feedback)
# if self.decoder.panoramic:
# a_t_feature = all_a_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if decoder.action_space == 6:
if ended[i] and mask is not None:
mask[i, t] = 0
elif action_idx == self.model_actions.index('<end>'):
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if ended[i] and mask is not None:
mask[i, t] = 0
elif action_idx == 0: ended[i] = True
env_action[idx] = action_idx
obs = np.array(self.env._get_obs(self.env.step(env_action, obs)))
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'],
ob['heading'], ob['elevation']))
if seqLogprobs is not None: # feedback == 'sample1'
seqLogprobs[:, t] = sampleLogprobs.view(-1)
# Early exit if all ended
if ended.all(): break
path_res = {}
for t in traj:
path_res[t['instr_id']] = t['path']
return traj, mask, seqLogprobs, path_res
def rl_train(self, train_Eval, encoder_optimizer, decoder_optimizer,
n_iters, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale):
''' jolin: self-critical finetuning'''
self.losses = []
epo_inc = 0
self.encoder.train()
self.decoder.train()
for iter in range(1, n_iters + 1): # n_iters=interval
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# copy from self.rollout():
# one minibatch (100 instructions)
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
epo_inc += self.env.epo_inc
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
gen_traj, mask, seqLogprobs, gen_results = self.rl_rollout(obs, perm_obs, seq, seq_mask,
seq_lengths, perm_idx, 'sample1',
self.encoder, self.decoder)
# jolin: get greedy decoding baseline
# Just like self.test(use_dropout=False, feedback='argmax').
# But we should not do env.reset_epoch(), because we do not
# test the whole split. So DON'T reuse test()!
world_states = self.env.reset_batch()
obs = np.array(self.env._get_obs(world_states))# for later 'sample' feedback batch
perm_obs = obs[perm_idx]
if self.monotonic:
encoder2, decoder2 = self.encoder2, self.decoder2
else:
self.encoder.eval()
self.decoder.eval()
encoder2, decoder2 = self.encoder, self.decoder
with torch.no_grad():
greedy_traj, _, _, greedy_res = self.rl_rollout(obs, perm_obs, seq, seq_mask,
seq_lengths, perm_idx, 'argmax1',
encoder2, decoder2)
if not self.monotonic:
self.encoder.train()
self.decoder.train()
# jolin: get self-critical reward
reward = self.get_self_critical_reward(gen_traj, train_Eval, gen_results, greedy_res, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale)
# jolin: RewardCriterion
self.loss = self.PG_reward_criterion(seqLogprobs, reward, mask)
self.losses.append(self.loss.item())
self.loss.backward()
#clip_gradient(encoder_optimizer)
#clip_gradient(decoder_optimizer)
if self.clip_gradient != 0: # clip gradient
clip_gradient(encoder_optimizer, self.clip_gradient)
clip_gradient(decoder_optimizer, self.clip_gradient)
if self.clip_gradient_norm > 0: # clip gradient norm
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip_gradient_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip_gradient_norm)
encoder_optimizer.step()
decoder_optimizer.step()
return epo_inc
def PG_reward_criterion(self, seqLogprobs, reward, mask):
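''' REINFORCE-style loss: -log pi(a_t) * reward, masked to valid time steps and averaged over
the number of unmasked steps. '''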
# jolin: RewardCriterion
input = to_contiguous(seqLogprobs).view(-1)
reward = to_contiguous(torch.from_numpy(reward).float().to(device)).view(-1)
#mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1)).view(-1)
mask = to_contiguous(torch.from_numpy(mask).float().to(device)).view(-1)
output = - input * reward * mask
loss = torch.sum(output) / torch.sum(mask)
return loss
def get_self_critical_reward(self, traj, Eval, gen_results, greedy_res, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale):
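''' Self-critical reward: score the sampled and the greedy trajectories with the evaluator;
depending on reward_func, the per-step reward is the success-rate or SPL difference
(sampled minus greedy), the raw SPL, or SPL credited to the final step only. Optionally adds
discounted immediate rewards and subtracts a trajectory-length penalty. '''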
# get self-critical reward
instr_id_order = [t['instr_id'] for t in traj]
gen_scores = Eval.score_batch(gen_results, instr_id_order)
greedy_scores = Eval.score_batch(greedy_res, instr_id_order)
# normal score
gen_hits = (np.array(gen_scores['nav_errors']) <= 3.0).astype(float)
greedy_hits = (np.array(greedy_scores['nav_errors']) <= 3.0).astype(float)
gen_lengths = (np.array(gen_scores['trajectory_lengths'])).astype(float)
# sr_sc
hits = gen_hits - greedy_hits
#reward = np.repeat(hits[:, np.newaxis], self.episode_len, 1)*sc_reward_scale
# spl
gen_spls = (np.array(gen_scores['spl'])).astype(float)
greedy_spls = (np.array(greedy_scores['spl'])).astype(float)
ave_steps = (np.array(gen_scores['trajectory_steps'])).sum()/float(len(instr_id_order))
steps = (np.array(gen_scores['trajectory_steps']) - self.episode_len).sum()
if self.reward_func == 'sr_sc':
reward = np.repeat(hits[:, np.newaxis], self.episode_len, 1)*sc_reward_scale
elif self.reward_func == 'spl':
reward = np.repeat(gen_spls[:, np.newaxis], self.episode_len, 1) * sc_reward_scale
elif self.reward_func == 'spl_sc':
# spl_sc
spl_sc = gen_spls - greedy_spls
reward = np.repeat(spl_sc[:, np.newaxis], self.episode_len, 1) * sc_reward_scale
elif self.reward_func == 'spl_last': # does not work
tj_steps = [s - 1 for s in gen_scores['trajectory_steps']] # tj_steps
reward = np.zeros((gen_spls.shape[0], self.episode_len), dtype=float)
reward[range(gen_spls.shape[0]), tj_steps] = gen_scores['spl']
elif self.reward_func == 'spl_last_sc':
tj_steps = [s - 1 for s in gen_scores['trajectory_steps']] # tj_steps
reward = np.zeros((gen_spls.shape[0], self.episode_len), dtype=float)
reward[range(gen_spls.shape[0]), tj_steps] = [x - y for x, y in zip(gen_scores['spl'], greedy_scores['spl'])]
elif self.reward_func == 'spl_psc': # test
tj_steps = [s - 1 for s in gen_scores['trajectory_steps']] # tj_steps
reward = np.full((gen_spls.shape[0], self.episode_len), -sc_length_scale, dtype=float) # penalty
reward[range(gen_spls.shape[0]), tj_steps] = gen_scores['spl']
# discounted immediate reward
if sc_discouted_immediate_r_scale>0:
discounted_r = discount_rewards(gen_scores['immediate_rewards'], self.episode_len) * sc_discouted_immediate_r_scale
reward = reward + discounted_r
# penalty for length
if sc_length_scale:
length_penalty = np.repeat(gen_lengths[:, np.newaxis], self.episode_len, 1)*sc_length_scale
reward = reward - length_penalty
return reward
def save(self, encoder_path, decoder_path):
''' Snapshot models '''
write_num = 0
while (write_num < 10):
try:
torch.save(self.encoder.state_dict(), encoder_path)
torch.save(self.decoder.state_dict(), decoder_path)
if torch.cuda.is_available():
torch.save(torch.cuda.random.get_rng_state(), decoder_path + '.rng.gpu')
torch.save(torch.random.get_rng_state(), decoder_path + '.rng')
with open(decoder_path + '.rng2', 'wb') as f:
pickle.dump(random.getstate(), f)
break
except:
write_num += 1
def delete(self, encoder_path, decoder_path):
''' Delete models '''
os.remove(encoder_path)
os.remove(decoder_path)
os.remove(decoder_path+'.rng.gpu')
os.remove(decoder_path+'.rng')
os.remove(decoder_path+'.rng2')
def load(self, encoder_path, decoder_path):
''' Loads parameters (but not training state) '''
self.encoder.load_state_dict(torch.load(encoder_path, 'cuda:0' if torch.cuda.is_available() else 'cpu'))
self.decoder.load_state_dict(torch.load(decoder_path, 'cuda:0' if torch.cuda.is_available() else 'cpu'), strict=False)
self.encoder.to(device)
self.decoder.to(device)
if self.monotonic:
self.copy_seq2seq()
try:
with open(decoder_path+'.rng2','rb') as f:
random.setstate(pickle.load(f))
torch.random.set_rng_state(torch.load(decoder_path + '.rng'))
torch.cuda.random.set_rng_state(torch.load(decoder_path + '.rng.gpu'))
except FileNotFoundError:
print('Warning: failed to find random seed file')
def copy_seq2seq(self):
self.encoder2=copy.deepcopy(self.encoder)
self.decoder2=copy.deepcopy(self.decoder)
self.encoder2.eval()
self.decoder2.eval()
for param in self.encoder2.parameters():
param.requires_grad = False
for param in self.decoder2.parameters():
param.requires_grad = False
class PretrainVLAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
model_actions, env_actions = basic_actions()
feedback_options = ['teacher', 'argmax', 'sample']
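# feedback controls how the next action is picked during a rollout:
#   'teacher' - ground-truth action (teacher forcing)
#   'argmax'  - highest-scoring action (student forcing)
#   'sample'  - sample from the predicted action distribution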
def __init__(self, env, results_path, encoder, decoder, seed, aux_ratio, decoder_init,
params=None, monotonic=False, episode_len=20, state_factored=False): # , subgoal
super(PretrainVLAgent, self).__init__(env, results_path, seed=seed)
self.encoder, self.decoder = encoder, decoder # encoder2 is only for self_critic
self.encoder2, self.decoder2 = None, None
self.monotonic = monotonic
if self.monotonic:
self.copy_seq2seq()
self.episode_len = episode_len
self.losses = []
self.losses_ctrl_f = [] # For learning auxiliary tasks
self.aux_ratio = aux_ratio
self.decoder_init = decoder_init
self.clip_gradient = params['clip_gradient']
self.clip_gradient_norm = params['clip_gradient_norm']
self.reward_func = params['reward_func']
self.schedule_ratio = params['schedule_ratio']
self.temp_alpha = params['temp_alpha']
self.testing_settingA = params['test_A']
if self.decoder.action_space == 6:
self.ignore_index = self.model_actions.index('<ignore>')
else:
self.ignore_index = -1
self.criterion = nn.CrossEntropyLoss(ignore_index=self.ignore_index)
if self.decoder.ctrl_feature:
assert self.decoder.action_space == -1 # currently only implemented for the panoramic action space
self.criterion_ctrl_f = nn.MSELoss() # todo: MSE or ?
self.state_factored = state_factored
@staticmethod
def n_inputs():
return len(PretrainVLAgent.model_actions)
@staticmethod
def n_outputs():
return len(PretrainVLAgent.model_actions)-2 # Model doesn't output start or ignore
def _sort_batch(self, obs):
sorted_tensor, mask, seq_lengths, perm_idx = sort_batch(obs)
if isinstance(sorted_tensor, list):
sorted_tensors, masks, seqs_lengths = [], [], []
for i in range(len(sorted_tensor)):
sorted_tensors.append(Variable(sorted_tensor[i], requires_grad=False).long().to(device))
masks.append(mask[i].byte().to(device))
seqs_lengths.append(seq_lengths[i])
return sorted_tensors, masks, seqs_lengths, perm_idx
return Variable(sorted_tensor, requires_grad=False).long().to(device), \
mask.byte().to(device), \
list(seq_lengths), list(perm_idx)
def _feature_variable(self, obs):
''' Extract precomputed features into variable. '''
#feature_size = obs[0]['feature'].shape[0]
#features = np.empty((len(obs),feature_size), dtype=np.float32)
if isinstance(obs[0]['feature'],tuple): # todo?
features_pano = np.repeat(np.expand_dims(np.zeros_like(obs[0]['feature'][0], dtype=np.float32), 0), len(obs), axis=0) # jolin
features = np.repeat(np.expand_dims(np.zeros_like(obs[0]['feature'][1], dtype=np.float32), 0), len(obs), axis=0) # jolin
for i,ob in enumerate(obs):
features_pano[i] = ob['feature'][0]
features[i] = ob['feature'][1]
return (Variable(torch.from_numpy(features_pano), requires_grad=False).to(device),
Variable(torch.from_numpy(features), requires_grad=False).to(device))
else:
features = np.repeat(np.expand_dims(np.zeros_like(obs[0]['feature'], dtype=np.float32),0),len(obs),axis=0) # jolin
for i,ob in enumerate(obs):
features[i] = ob['feature']
return Variable(torch.from_numpy(features), requires_grad=False).to(device)
def get_next(self, feedback, target, logit):
if feedback == 'teacher':
a_t = target # teacher forcing
elif feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
elif feedback == 'sample':
probs = F.softmax(logit, dim=1)
m = D.Categorical(probs)
a_t = m.sample() # sampling an action from model
else:
sys.exit('Invalid feedback option')
return a_t
def _action_variable(self, obs):
# get the maximum number of actions of all sample in this batch
max_num_a = -1
for i, ob in enumerate(obs):
max_num_a = max(max_num_a, len(ob['adj_loc_list']))
is_valid = np.zeros((len(obs), max_num_a), np.float32)
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros((len(obs), max_num_a, action_embedding_dim), dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
num_a = len(adj_loc_list)
is_valid[i, 0:num_a] = 1.
action_embeddings[i, :num_a, :] = ob['action_embedding'] #bug: todo
#for n_a, adj_dict in enumerate(adj_loc_list):
# action_embeddings[i, :num_a, :] = ob['action_embedding']
return (Variable(torch.from_numpy(action_embeddings), requires_grad=False).to(device),
Variable(torch.from_numpy(is_valid), requires_grad=False).to(device),
is_valid)
def _teacher_action_variable(self, obs):
# get the maximum number of actions of all sample in this batch
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros((len(obs), action_embedding_dim), dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
action_embeddings[i, :] = ob['action_embedding'][ob['teacher']] #bug: todo
#for n_a, adj_dict in enumerate(adj_loc_list):
# action_embeddings[i, :num_a, :] = ob['action_embedding']
return Variable(torch.from_numpy(action_embeddings), requires_grad=False).to(device)
def _teacher_action(self, obs, ended):
a = teacher_action(self.model_actions, self.decoder.action_space, obs, ended, self.ignore_index)
return Variable(a, requires_grad=False).to(device)
def _teacher_feature(self, obs, ended):#, max_num_a):
''' Extract teacher look ahead auxiliary features into variable. '''
# todo: 6 action space
ctrl_features_dim = -1
for i, ob in enumerate(obs): # todo: whether include <stop> ?
# max_num_a = max(max_num_a, len(ob['ctrl_features']))
if ctrl_features_dim<0 and len(ob['ctrl_features']):
ctrl_features_dim = ob['ctrl_features'].shape[-1] #[0].shape[-1]
break
#is_valid no need to create. already created
ctrl_features_tensor = np.zeros((len(obs), ctrl_features_dim), dtype=np.float32)
for i, ob in enumerate(obs):
if not ended[i]:
ctrl_features_tensor[i, :] = ob['ctrl_features']
return Variable(torch.from_numpy(ctrl_features_tensor), requires_grad=False).to(device)
def rollout(self, beam_size=1, successors=1):
if beam_size == 1 and not debug_beam:
if self.encoder.__class__.__name__ == 'BertImgEncoder':
return self.pretrain_rollout_with_loss()
else:
return self.rollout_with_loss()
# beam
with torch.no_grad():
if self.state_factored:
beams = self.state_factored_search(beam_size, successors, first_n_ws_key=4)
else:
beams = self.beam_search(beam_size)
return beams
def state_factored_search(self, completion_size, successor_size, first_n_ws_key=4):
assert self.decoder.panoramic
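# State-factored search: keep at most one InferenceState per world-state key
# (the first first_n_ws_key fields), repeatedly expand the successor_size
# best unexpanded states, and stop once every instance has accumulated
# completion_size completed trajectories.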
world_states = self.env.reset(sort=True)
initial_obs = (self.env._get_obs(world_states))
batch_size = len(world_states)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch([o for ob in initial_obs for o in ob])
world_states = [[world_state for f, world_state in states] for states in world_states]
ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
completed = []
completed_holding = []
for _ in range(batch_size):
completed.append({})
completed_holding.append({})
state_cache = [
{ws[0][0:first_n_ws_key]: (InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=None,
last_action=-1,
last_action_embedding=self.decoder.u_begin,
action_count=0,
score=0.0, h_t=h_t[i], c_t=c_t[i], last_alpha=None), True)}
for i, (ws, o) in enumerate(zip(world_states, initial_obs))
]
beams = [[inf_state for world_state, (inf_state, expanded) in sorted(instance_cache.items())]
for instance_cache in state_cache] # sorting is a noop here since each instance_cache should only contain one
last_expanded_list = []
traversed_lists = []
for beam in beams:
assert len(beam)==1
first_state = beam[0]
last_expanded_list.append(first_state)
traversed_lists.append([first_state])
def update_traversed_lists(new_visited_inf_states):
assert len(new_visited_inf_states) == len(last_expanded_list)
assert len(new_visited_inf_states) == len(traversed_lists)
for instance_index, instance_states in enumerate(new_visited_inf_states):
last_expanded = last_expanded_list[instance_index]
# todo: if this passes, shouldn't need traversed_lists
assert last_expanded.world_state.viewpointId == traversed_lists[instance_index][-1].world_state.viewpointId
for inf_state in instance_states:
path_from_last_to_next = least_common_viewpoint_path(last_expanded, inf_state)
# path_from_last should include last_expanded's world state as the first element, so check and drop that
assert path_from_last_to_next[0].world_state.viewpointId == last_expanded.world_state.viewpointId
assert path_from_last_to_next[-1].world_state.viewpointId == inf_state.world_state.viewpointId
traversed_lists[instance_index].extend(path_from_last_to_next[1:])
last_expanded = inf_state
last_expanded_list[instance_index] = last_expanded
# Expand beams until every instance has collected completion_size completed paths
while any(len(comp) < completion_size for comp in completed):
beam_indices = []
u_t_list = []
h_t_list = []
c_t_list = []
flat_obs = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
u_t_list.append(inf_state.last_action_embedding)
h_t_list.append(inf_state.h_t.unsqueeze(0))
c_t_list.append(inf_state.c_t.unsqueeze(0))
flat_obs.append(inf_state.observation)
u_t_prev = torch.stack(u_t_list, dim=0)
assert len(u_t_prev.shape) == 2
# Image features from obs
# if self.decoder.panoramic:
f_t_all, f_t = self._feature_variable(flat_obs)
# Action feature from obs
# if self.decoder.action_space == 6:
# u_t_features, is_valid = np.zeros((batch_size, 1)), None
# else:
u_t_features, is_valid, is_valid_numpy = self._action_variable(flat_obs)
h_t = torch.cat(h_t_list, dim=0)
c_t = torch.cat(c_t_list, dim=0)
h_t, c_t, alpha, logit, pred_f = self.decoder(None, u_t_prev, u_t_features, f_t,
f_t_all, h_t, c_t, [ctx_si[beam_indices] for ctx_si in ctx] if isinstance(ctx, list) else ctx[beam_indices],
[seq_mask_si[beam_indices] for seq_mask_si in seq_mask] if isinstance(ctx, list) else seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
# # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
masked_logit = logit
log_probs = F.log_softmax(logit, dim=1).data
# force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
_, action_indices = masked_logit.data.topk(logit.size()[1], dim=1) # todo: fix this
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states) in enumerate(zip(beams, world_states)):
successors = []
end_index = start_index + len(beam)
assert len(beam_world_states) == len(beam)
if beam:
for inf_index, (inf_state, world_state, action_score_row) in \
enumerate(zip(beam, beam_world_states, log_probs[start_index:end_index])):
flat_index = start_index + inf_index
for action_index, action_score in enumerate(action_score_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state, world_state=world_state,
observation=flat_obs[flat_index],
flat_index=None,
last_action=action_index,
last_action_embedding=u_t_features[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=float(inf_state.score + action_score),
h_t=h_t[flat_index], c_t=c_t[flat_index],
last_alpha=[alpha_si[flat_index].data for alpha_si in alpha] if isinstance(alpha, list) else alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states = self.env.step(successor_env_actions, successor_last_obs, successor_world_states)
successor_world_states = [[world_state for f, world_state in states] for states in successor_world_states]
acc = []
for ttt in zip(all_successors, successor_world_states):
mapped = [inf._replace(world_state=ws) for inf, ws in zip(*ttt)]
acc.append(mapped)
all_successors = acc
assert len(all_successors) == len(state_cache)
new_beams = []
for beam_index, (successors, instance_cache) in enumerate(zip(all_successors, state_cache)):
# early stop if we've already built a sizable completion list
instance_completed = completed[beam_index]
instance_completed_holding = completed_holding[beam_index]
if len(instance_completed) >= completion_size:
new_beams.append([])
continue
for successor in successors:
ws_keys = successor.world_state[0:first_n_ws_key]
if successor.last_action == 0 or successor.action_count == self.episode_len:
if ws_keys not in instance_completed_holding or instance_completed_holding[ws_keys][
0].score < successor.score:
instance_completed_holding[ws_keys] = (successor, False)
else:
if ws_keys not in instance_cache or instance_cache[ws_keys][0].score < successor.score:
instance_cache[ws_keys] = (successor, False)
# third value: did this come from completed_holding?
uncompleted_to_consider = ((ws_keys, inf_state, False) for (ws_keys, (inf_state, expanded)) in
instance_cache.items() if not expanded)
completed_to_consider = ((ws_keys, inf_state, True) for (ws_keys, (inf_state, expanded)) in
instance_completed_holding.items() if not expanded)
import itertools
import heapq
to_consider = itertools.chain(uncompleted_to_consider, completed_to_consider)
ws_keys_and_inf_states = heapq.nlargest(successor_size, to_consider, key=lambda pair: pair[1].score)
new_beam = []
for ws_keys, inf_state, is_completed in ws_keys_and_inf_states:
if is_completed:
assert instance_completed_holding[ws_keys] == (inf_state, False)
instance_completed_holding[ws_keys] = (inf_state, True)
if ws_keys not in instance_completed or instance_completed[ws_keys].score < inf_state.score:
instance_completed[ws_keys] = inf_state
else:
instance_cache[ws_keys] = (inf_state, True)
new_beam.append(inf_state)
if len(instance_completed) >= completion_size:
new_beams.append([])
else:
new_beams.append(new_beam)
beams = new_beams
# Early exit if all ended
if not any(beam for beam in beams):
break
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
successor_obs = np.array(self.env._get_obs(self.env.world_states2feature_states(world_states)))
acc = []
for tttt in zip(beams, successor_obs):
mapped = [inf._replace(observation=o) for inf, o in zip(*tttt)]
acc.append(mapped)
beams = acc
update_traversed_lists(beams)
completed_list = []
for this_completed in completed:
completed_list.append(sorted(this_completed.values(), key=lambda t: t.score, reverse=True)[:completion_size])
completed_ws = [
[inf_state.world_state for inf_state in comp_l]
for comp_l in completed_list
]
completed_obs = np.array(self.env._get_obs(self.env.world_states2feature_states(completed_ws)))
accu = []
for ttttt in zip(completed_list, completed_obs):
mapped = [inf._replace(observation=o) for inf, o in zip(*ttttt)]
accu.append(mapped)
completed_list = accu
update_traversed_lists(completed_list)
trajs = []
for this_completed in completed_list:
assert this_completed
this_trajs = []
for inf_state in this_completed:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'path': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
return trajs, completed_list, traversed_lists
def beam_search(self, beam_size):
assert self.decoder.panoramic
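# Standard beam search: at each step expand every surviving hypothesis with its
# top-scoring valid actions, keep the beam_size best per instance, and move
# hypotheses that stop (action 0) or hit the episode limit into `completed`.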
# assert self.env.beam_size >= beam_size
world_states = self.env.reset(True) # [(feature, state)]
obs = np.array(self.env._get_obs(world_states))
batch_size = len(world_states)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch([o for ob in obs for o in ob])
world_states = [[world_state for f, world_state in states] for states in world_states]
ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
completed = []
for _ in range(batch_size):
completed.append([])
beams = [[InferenceState(prev_inference_state=None,
world_state=ws[0],
observation=o[0],
flat_index=i,
last_action=-1,
last_action_embedding=self.decoder.u_begin,
action_count=0,
score=0.0,
h_t=None, c_t=None,
last_alpha=None)]
for i, (ws, o) in enumerate(zip(world_states, obs))]
#
# batch_size x beam_size
for t in range(self.episode_len):
flat_indices = []
beam_indices = []
u_t_list = []
for beam_index, beam in enumerate(beams):
for inf_state in beam:
beam_indices.append(beam_index)
flat_indices.append(inf_state.flat_index)
u_t_list.append(inf_state.last_action_embedding)
u_t_prev = torch.stack(u_t_list, dim=0)
flat_obs = [ob for obs_beam in obs for ob in obs_beam]
# Image features from obs
# if self.decoder.panoramic:
f_t_all, f_t = self._feature_variable(flat_obs)
# Action feature from obs
# if self.decoder.action_space == 6:
# u_t_features, is_valid = np.zeros((batch_size, 1)), None
# else:
u_t_features, is_valid, is_valid_numpy = self._action_variable(flat_obs)
h_t, c_t, alpha, logit, pred_f = self.decoder(None, u_t_prev, u_t_features,
f_t, f_t_all, h_t[flat_indices], c_t[flat_indices],
[ctx_si[beam_indices] for ctx_si in ctx] if isinstance(ctx, list) else ctx[beam_indices],
[seq_mask_si[beam_indices] for seq_mask_si in seq_mask] if isinstance(ctx, list) else seq_mask[beam_indices])
# Mask outputs of invalid actions
logit[is_valid == 0] = -float('inf')
masked_logit = logit # for debug
log_probs = F.log_softmax(logit, dim=1).data
_, action_indices = masked_logit.data.topk(min(beam_size, logit.size()[1]), dim=1)
action_scores = log_probs.gather(1, action_indices)
assert action_scores.size() == action_indices.size()
start_index = 0
new_beams = []
assert len(beams) == len(world_states)
all_successors = []
for beam_index, (beam, beam_world_states, beam_obs) in enumerate(zip(beams, world_states, obs)):
successors = []
end_index = start_index + len(beam)
assert len(beam_obs) == len(beam) and len(beam_world_states) == len(beam)
if beam:
for inf_index, (inf_state, world_state, ob, action_score_row, action_index_row) in \
enumerate(zip(beam, beam_world_states, beam_obs, action_scores[start_index:end_index],
action_indices[start_index:end_index])):
flat_index = start_index + inf_index
for action_score, action_index in zip(action_score_row, action_index_row):
if is_valid_numpy[flat_index, action_index] == 0:
continue
successors.append(
InferenceState(prev_inference_state=inf_state,
world_state=world_state,
# will be updated later after successors are pruned
observation=ob, # will be updated later after successors are pruned
flat_index=flat_index,
last_action=action_index,
last_action_embedding=u_t_features[flat_index, action_index].detach(),
action_count=inf_state.action_count + 1,
score=float(inf_state.score + action_score), h_t=None, c_t=None,
last_alpha=[alpha_si[flat_index].data for alpha_si in alpha] if isinstance(alpha, list) else alpha[flat_index].data)
)
start_index = end_index
successors = sorted(successors, key=lambda t: t.score, reverse=True)[:beam_size]
all_successors.append(successors)
successor_world_states = [
[inf_state.world_state for inf_state in successors]
for successors in all_successors
]
successor_env_actions = [
[inf_state.last_action for inf_state in successors]
for successors in all_successors
]
successor_last_obs = [
[inf_state.observation for inf_state in successors]
for successors in all_successors
]
successor_world_states=self.env.step(successor_env_actions, successor_last_obs, successor_world_states)
successor_obs = np.array(self.env._get_obs(successor_world_states))
successor_world_states = [[world_state for f, world_state in states] for states in successor_world_states]
acc = []
for ttt in zip(all_successors, successor_world_states, successor_obs):
mapped = [inf._replace(world_state=ws, observation=o) for inf, ws, o in zip(*ttt)]
acc.append(mapped)
all_successors=acc
for beam_index, successors in enumerate(all_successors):
new_beam = []
for successor in successors:
if successor.last_action == 0 or t == self.episode_len - 1:
completed[beam_index].append(successor)
else:
new_beam.append(successor)
if len(completed[beam_index]) >= beam_size:
new_beam = []
new_beams.append(new_beam)
beams = new_beams
world_states = [
[inf_state.world_state for inf_state in beam]
for beam in beams
]
obs = [
[inf_state.observation for inf_state in beam]
for beam in beams
]
# Early exit if all ended
if not any(beam for beam in beams):
break
trajs = []
for this_completed in completed:
assert this_completed
this_trajs = []
for inf_state in sorted(this_completed, key=lambda t: t.score, reverse=True)[:beam_size]:
path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
# this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory = [path_element_from_observation(ob) for ob in path_observations]
this_trajs.append({
'instr_id': path_observations[0]['instr_id'],
'instr_encoding': path_observations[0]['instr_encoding'],
'path': trajectory,
'observations': path_observations,
'actions': path_actions,
'score': inf_state.score,
'scores': path_scores,
'attentions': path_attentions
})
trajs.append(this_trajs)
traversed_lists = None # todo
return trajs, completed, traversed_lists
def rollout_with_loss(self):
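# Supervised rollout: unroll the decoder for up to self.episode_len steps,
# accumulating cross-entropy loss against the teacher action at every step
# (plus an optional auxiliary feature-prediction loss), while recording the
# trajectory actually taken in the simulator.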
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
batch_size = len(obs)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
# when there are multiple sentences, perm_idx is simply range(batch_size), and the
# per-sentence permutation indices are carried inside seq_lengths, i.e.
# seq_lengths = [(seq_lengths, perm_idx), (seq_lengths, perm_idx), (seq_lengths, perm_idx)]
# Record starting point
traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs]
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
#h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
c_t = torch.zeros_like(c_t) # debug
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if self.decoder.action_space == -1:
u_t_prev = self.decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() * self.model_actions.index('<start>'), requires_grad=False).to(device)
ended = np.array([False] * batch_size) # Indices match permutation of the model, not env
# Do a sequence rollout and calculate the loss
self.loss = 0
self.loss_ctrl_f = 0
env_action = [None] * batch_size
# for plot
#all_alpha = []
action_scores = np.zeros((batch_size,))
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if self.decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if self.decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Supervised training
target = self._teacher_action(perm_obs, ended)
h_t, c_t, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, h_t, c_t, ctx, seq_mask)
# all_alpha.append(alpha)
# Mask outputs where agent can't move forward
if self.decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else:
logit[is_valid == 0] = -float('inf')
# if self.decoder.ctrl_feature:
# pred_f[is_valid == 0] = 0
if self.temp_alpha != 0: # add temperature
logit = logit/self.temp_alpha
self.loss += self.criterion(logit, target)
# Auxiliary training
if self.decoder.ctrl_feature:
target_f = self._teacher_feature(perm_obs, ended)#, is_valid.shape[-1])
self.loss_ctrl_f += self.aux_ratio * self.criterion_ctrl_f(pred_f, target_f)
# todo: add auxiliary tasks to sc-rl training?
# Determine next model inputs
# scheduled sampling
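# With probability self.schedule_ratio the model's own sample is used as the
# next input, otherwise the teacher action is used (scheduled sampling);
# 'argmax' feedback (test time) is left untouched.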
if self.schedule_ratio >= 0 and self.schedule_ratio <= 1:
sample_feedback = random.choices(['sample', 'teacher'], [self.schedule_ratio, 1 - self.schedule_ratio], k=1)[0] # schedule sampling
if self.feedback != 'argmax': # ignore test case
self.feedback = sample_feedback
a_t = self.get_next(self.feedback, target, logit)
# setting A
if self.testing_settingA:
log_probs = F.log_softmax(logit, dim=1).data
action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
# log_probs = F.log_softmax(logit, dim=1).data
# action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
a_t_prev = a_t
if self.decoder.action_space != 6: # update the previous action
u_t_prev = u_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
# # sub_stages[idx] = max(sub_stages[idx]-1, 0)
# # ended[i] = (sub_stages[idx]==0)
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if action_idx == 0:
# # sub_stages[idx] = max(sub_stages[idx] - 1, 0)
# # ended[i] = (sub_stages[idx] == 0)
ended[i] = True
env_action[idx] = action_idx
# state transitions
new_states = self.env.step(env_action, obs)
obs = np.array(self.env._get_obs(new_states))
#obs = np.array(self.env._get_obs(self.env.step(env_action, obs))) # , sub_stages
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.testing_settingA:
if not ended[i]:
action_scores[idx] = action_scores[idx] + action_score[i]
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
ended[i] = True
else:
if action_idx == 0:
ended[i] = True
# for i,idx in enumerate(perm_idx):
# action_idx = a_t[i].item()
# # if not ended[i]:
# # action_scores[idx] = action_scores[idx] + action_score[i]
# if self.decoder.action_space == 6:
# if action_idx == self.model_actions.index('<end>'):
# ended[i] = True
# else:
# if action_idx == 0:
# ended[i] = True
# Early exit if all ended
if ended.all(): break
# episode_len is just a constant so it doesn't matter
self.losses.append(self.loss.item()) # / self.episode_len)
if self.decoder.ctrl_feature:
self.losses_ctrl_f.append(self.loss_ctrl_f.item()) # / self.episode_len)
# with open('preprocess/alpha.pkl', 'wb') as alpha_f: # TODO: remove for release!!!!
# pickle.dump(all_alpha, alpha_f)
# choose one from the three according to prob
# for t,p in zip(traj, action_scores):
# t['prob'] = p
if self.testing_settingA:
for t, p in zip(traj, action_scores):
t['prob'] = p
return traj
def pretrain_rollout_with_loss(self):
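# Same supervised rollout as rollout_with_loss, except that the BertImgEncoder
# re-encodes the instruction together with the current panoramic features at
# every step, and its hidden state initializes the decoder only at t == 0.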
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
batch_size = len(obs)
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
# when there are multiple sentences, perm_idx is simply range(batch_size), and the
# per-sentence permutation indices are carried inside seq_lengths, i.e.
# seq_lengths = [(seq_lengths, perm_idx), (seq_lengths, perm_idx), (seq_lengths, perm_idx)]
# Record starting point
traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs]
## Forward through encoder, giving initial hidden state and memory cell for decoder
#f_t = self._feature_variable(perm_obs)
#if self.decoder.panoramic: f_t_all, f_t = f_t
#else: f_t_all = np.zeros((batch_size, 1))
#ctx, h_t, c_t, seq_mask = self.encoder(seq, seq_mask, seq_lengths,f_t_all=f_t_all)
###ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, seq_lengths,f_t_all=f_t_all)
#if not self.decoder_init:
# h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
# #c_t = torch.zeros_like(c_t) # debug
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if self.decoder.action_space == -1:
u_t_prev = self.decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() * self.model_actions.index('<start>'), requires_grad=False).to(device)
ended = np.array([False] * batch_size) # Indices match permutation of the model, not env
# Do a sequence rollout and calculate the loss
self.loss = 0
self.loss_ctrl_f = 0
env_action = [None] * batch_size
# for plot
#all_alpha = []
action_scores = np.zeros((batch_size,))
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if self.decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if self.decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Supervised training
target = self._teacher_action(perm_obs, ended)
ctx, en_ht, en_ct, vl_mask = self.encoder(seq, seq_mask, seq_lengths, f_t_all=f_t_all)
if t == 0: # use encoder's ht and ct as init
de_ht, de_ct, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, en_ht, en_ct, ctx, vl_mask)
else: # otherwise unroll as lstm
de_ht, de_ct, alpha, logit, pred_f = self.decoder(a_t_prev, u_t_prev, u_t_features, f_t, f_t_all, de_ht, de_ct, ctx, vl_mask)
# all_alpha.append(alpha)
# Mask outputs where agent can't move forward
if self.decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else:
logit[is_valid == 0] = -float('inf')
# if self.decoder.ctrl_feature:
# pred_f[is_valid == 0] = 0
if self.temp_alpha != 0: # add temperature
logit = logit/self.temp_alpha
self.loss += self.criterion(logit, target)
# Auxiliary training
if self.decoder.ctrl_feature:
target_f = self._teacher_feature(perm_obs, ended)#, is_valid.shape[-1])
self.loss_ctrl_f += self.aux_ratio * self.criterion_ctrl_f(pred_f, target_f)
# todo: add auxiliary tasks to sc-rl training?
# Determine next model inputs
# scheduled sampling
if self.schedule_ratio >= 0 and self.schedule_ratio <= 1:
sample_feedback = random.choices(['sample', 'teacher'], [self.schedule_ratio, 1 - self.schedule_ratio], k=1)[0] # schedule sampling
if self.feedback != 'argmax': # ignore test case
self.feedback = sample_feedback
a_t = self.get_next(self.feedback, target, logit)
# setting A
if self.testing_settingA:
log_probs = F.log_softmax(logit, dim=1).data
action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
# log_probs = F.log_softmax(logit, dim=1).data
# action_score = log_probs[torch.arange(batch_size), a_t].cpu().data.numpy()
a_t_prev = a_t
if self.decoder.action_space != 6: # update the previous action
u_t_prev = u_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
# # sub_stages[idx] = max(sub_stages[idx]-1, 0)
# # ended[i] = (sub_stages[idx]==0)
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if action_idx == 0:
# # sub_stages[idx] = max(sub_stages[idx] - 1, 0)
# # ended[i] = (sub_stages[idx] == 0)
ended[i] = True
env_action[idx] = action_idx
# state transitions
new_states = self.env.step(env_action, obs)
obs = np.array(self.env._get_obs(new_states))
#obs = np.array(self.env._get_obs(self.env.step(env_action, obs))) # , sub_stages
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if self.testing_settingA:
if not ended[i]:
action_scores[idx] = action_scores[idx] + action_score[i]
if self.decoder.action_space == 6:
if action_idx == self.model_actions.index('<end>'):
ended[i] = True
else:
if action_idx == 0:
ended[i] = True
# for i,idx in enumerate(perm_idx):
# action_idx = a_t[i].item()
# # if not ended[i]:
# # action_scores[idx] = action_scores[idx] + action_score[i]
# if self.decoder.action_space == 6:
# if action_idx == self.model_actions.index('<end>'):
# ended[i] = True
# else:
# if action_idx == 0:
# ended[i] = True
# Early exit if all ended
if ended.all(): break
# episode_len is just a constant so it doesn't matter
self.losses.append(self.loss.item()) # / self.episode_len)
if self.decoder.ctrl_feature:
self.losses_ctrl_f.append(self.loss_ctrl_f.item()) # / self.episode_len)
# with open('preprocess/alpha.pkl', 'wb') as alpha_f: # TODO: remove for release!!!!
# pickle.dump(all_alpha, alpha_f)
# choose one from the three according to prob
# for t,p in zip(traj, action_scores):
# t['prob'] = p
if self.testing_settingA:
for t, p in zip(traj, action_scores):
t['prob'] = p
return traj
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, beam_size=1, successors=1, speaker=(None,None,None,None)):
''' Evaluate once on each instruction in the current environment '''
if not allow_cheat: # permitted for purpose of calculating validation loss only
assert feedback in ['argmax', 'sample'] # no cheating by using teacher at test time!
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
else:
self.encoder.eval()
self.decoder.eval()
super(PretrainVLAgent, self).test(beam_size, successors, speaker)
def train(self, encoder_optimizer, decoder_optimizer, n_iters, aux_n_iter, feedback='teacher'):
''' Train for a given number of iterations '''
assert feedback in self.feedback_options
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.losses = []
self.losses_ctrl_f = []
epo_inc = 0
for iter in range(1, n_iters + 1):
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
self.rollout()
if (not self.decoder.ctrl_feature) or (iter % aux_n_iter):
self.loss.backward()
else:
self.loss_ctrl_f.backward()
if self.clip_gradient != 0: # clip gradient
clip_gradient(encoder_optimizer, self.clip_gradient)
clip_gradient(decoder_optimizer, self.clip_gradient)
if self.clip_gradient_norm > 0: # clip gradient norm
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip_gradient_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip_gradient_norm)
encoder_optimizer.step()
decoder_optimizer.step()
epo_inc += self.env.epo_inc
return epo_inc
def rollout_notrain(self, n_iters): # jolin
epo_inc = 0
for iter in range(1, n_iters + 1):
self.env._next_minibatch(False)
epo_inc += self.env.epo_inc
return epo_inc
def rl_rollout(self, obs, perm_obs, seq, seq_mask, seq_lengths, perm_idx, feedback,
encoder, decoder):
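# Policy-gradient rollout: the same environment loop, but instead of a
# supervised loss it records per-step log-probabilities of the sampled actions
# (seqLogprobs) and a mask of still-active episodes, which rl_train later turns
# into a REINFORCE loss weighted by the self-critical reward.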
batch_size = len(perm_obs)
# Record starting point
traj = [{'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs]
# Forward through encoder, giving initial hidden state and memory cell for decoder
ctx, h_t, c_t, seq_mask = encoder(seq, seq_mask, seq_lengths)
if not self.decoder_init:
h_t, c_t = torch.zeros_like(h_t), torch.zeros_like(c_t)
# Initial action
a_t_prev, u_t_prev = None, None # different action space
if decoder.action_space==-1:
u_t_prev = decoder.u_begin.expand(batch_size, -1)
else:
a_t_prev = Variable(torch.ones(batch_size).long() *
self.model_actions.index('<start>'), requires_grad=False).to(device)
ended=np.array([False] * batch_size)
# Do a sequence rollout but don't accumulate a supervised loss (policy gradient is used instead)
# self.loss = 0
env_action = [None] * batch_size
# Initialize seq log probs for policy gradient
if feedback == 'sample1':
seqLogprobs = h_t.new_zeros(batch_size, self.episode_len)
mask = np.ones((batch_size, self.episode_len))
elif feedback == 'argmax1':
seqLogprobs, mask = None, None
else:
raise NotImplementedError('other feedback not supported.')
# only for supervised auxiliary tasks
#assert (not self.decoder.ctrl_feature) # not implemented
for t in range(self.episode_len):
f_t = self._feature_variable(perm_obs)
# Image features from obs
if decoder.panoramic: f_t_all, f_t = f_t
else: f_t_all = np.zeros((batch_size, 1))
# Action feature from obs
if decoder.action_space == 6:
u_t_features, is_valid = np.zeros((batch_size, 1)), None
else:
u_t_features, is_valid, _ = self._action_variable(perm_obs)
# Decoding actions
h_t, c_t, alpha, logit, pred_f = decoder(a_t_prev, u_t_prev, u_t_features,
f_t, f_t_all, h_t, c_t, ctx, seq_mask)
# Mask outputs where agent can't move forward
if decoder.action_space == 6:
for i,ob in enumerate(perm_obs):
if len(ob['navigableLocations']) <= 1:
logit[i, self.model_actions.index('forward')] = -float('inf')
else: logit[is_valid == 0] = -float('inf')
# Supervised training
# target = self._teacher_action(perm_obs, ended)
# self.loss += self.criterion(logit, target)
# Determine next model inputs
if feedback == 'argmax1':
_, a_t = logit.max(1)
elif feedback == 'sample1':
logprobs = F.log_softmax(logit, dim=1)
probs = torch.exp(logprobs.data)
m = D.Categorical(probs)
a_t = m.sample() # sampling an action from model
sampleLogprobs = logprobs.gather(1, a_t.unsqueeze(1))
else:
sys.exit('invalid feedback method %s'%feedback)
# if self.decoder.panoramic:
# a_t_feature = all_a_t_features[np.arange(batch_size), a_t, :].detach()
# Updated 'ended' list and make environment action
for i,idx in enumerate(perm_idx):
action_idx = a_t[i].item()
if decoder.action_space == 6:
if ended[i] and mask is not None:
mask[i, t] = 0
elif action_idx == self.model_actions.index('<end>'):
ended[i] = True
env_action[idx] = self.env_actions[action_idx]
else:
if ended[i] and mask is not None:
mask[i, t] = 0
elif action_idx == 0: ended[i] = True
env_action[idx] = action_idx
obs = np.array(self.env._get_obs(self.env.step(env_action, obs)))
perm_obs = obs[perm_idx]
# Save trajectory output
for i,ob in enumerate(perm_obs):
if not ended[i]:
traj[i]['path'].append((ob['viewpoint'],
ob['heading'], ob['elevation']))
if seqLogprobs is not None: # feedback == 'sample1'
seqLogprobs[:, t] = sampleLogprobs.view(-1)
# Early exit if all ended
if ended.all(): break
path_res = {}
for t in traj:
path_res[t['instr_id']] = t['path']
return traj, mask, seqLogprobs, path_res
def rl_train(self, train_Eval, encoder_optimizer, decoder_optimizer,
n_iters, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale):
''' jolin: self-critical finetuning'''
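# One iteration: (1) sample a rollout with 'sample1' feedback, (2) run a greedy
# 'argmax1' rollout on the same minibatch as the baseline (frozen copies
# encoder2/decoder2 when monotonic), (3) reward = score(sample) - score(greedy),
# (4) backprop the policy-gradient loss from PG_reward_criterion.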
self.losses = []
epo_inc = 0
self.encoder.train()
self.decoder.train()
for iter in range(1, n_iters + 1): # n_iters=interval
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
# copy from self.rollout():
# one minibatch (100 instructions)
world_states = self.env.reset(False)
obs = np.array(self.env._get_obs(world_states))
epo_inc += self.env.epo_inc
# Reorder the language input for the encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
gen_traj, mask, seqLogprobs, gen_results = self.rl_rollout(obs, perm_obs, seq, seq_mask,
seq_lengths, perm_idx, 'sample1',
self.encoder, self.decoder)
# jolin: get greedy decoding baseline
# Just like self.test(use_dropout=False, feedback='argmax').
# But we should not do env.reset_epoch(), because we do not
# test the whole split. So DON'T reuse test()!
world_states = self.env.reset_batch()
obs = np.array(self.env._get_obs(world_states))# for later 'sample' feedback batch
perm_obs = obs[perm_idx]
if self.monotonic:
encoder2, decoder2 = self.encoder2, self.decoder2
else:
self.encoder.eval()
self.decoder.eval()
encoder2, decoder2 = self.encoder, self.decoder
with torch.no_grad():
greedy_traj, _, _, greedy_res = self.rl_rollout(obs, perm_obs, seq, seq_mask,
seq_lengths, perm_idx, 'argmax1',
encoder2, decoder2)
if not self.monotonic:
self.encoder.train()
self.decoder.train()
# jolin: get self-critical reward
reward = self.get_self_critical_reward(gen_traj, train_Eval, gen_results, greedy_res, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale)
# jolin: RewardCriterion
self.loss = self.PG_reward_criterion(seqLogprobs, reward, mask)
self.losses.append(self.loss.item())
self.loss.backward()
#clip_gradient(encoder_optimizer)
#clip_gradient(decoder_optimizer)
if self.clip_gradient != 0: # clip gradient
clip_gradient(encoder_optimizer, self.clip_gradient)
clip_gradient(decoder_optimizer, self.clip_gradient)
if self.clip_gradient_norm > 0: # clip gradient norm
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip_gradient_norm)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip_gradient_norm)
encoder_optimizer.step()
decoder_optimizer.step()
return epo_inc
def PG_reward_criterion(self, seqLogprobs, reward, mask):
# jolin: RewardCriterion
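# REINFORCE-style loss: loss = -sum(logprob * reward * mask) / sum(mask),
# i.e. the reward-weighted negative log-likelihood averaged over valid steps.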
input = to_contiguous(seqLogprobs).view(-1)
reward = to_contiguous(torch.from_numpy(reward).float().to(device)).view(-1)
#mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1)).view(-1)
mask = to_contiguous(torch.from_numpy(mask).float().to(device)).view(-1)
output = - input * reward * mask
loss = torch.sum(output) / torch.sum(mask)
return loss
def get_self_critical_reward(self, traj, Eval, gen_results, greedy_res, sc_reward_scale, sc_discouted_immediate_r_scale, sc_length_scale):
# get self-critical reward
instr_id_order = [t['instr_id'] for t in traj]
gen_scores = Eval.score_batch(gen_results, instr_id_order)
greedy_scores = Eval.score_batch(greedy_res, instr_id_order)
# normal score
gen_hits = (
|
np.array(gen_scores['nav_errors'])
|
numpy.array
|
import numpy as np
from scipy.ndimage import interpolation
# find the center of mass of the image
def moments(image):
# build a coordinate grid
c0, c1 = np.mgrid[:image.shape[0], :image.shape[1]]
# total sum of pixel intensities
total_image = np.sum(image)
# mu_x - first moment along x
m0 =
|
np.sum(c0 * image)
|
numpy.sum
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: conv.py
# Author: <NAME> <<EMAIL>>
import scipy.io as sio
import theano
import numpy as np
from theano.tensor.nnet import conv
from theano.tensor.signal import conv as sigconv # assumption: the elastic transform below relies on signal-space conv2d
import theano.printing as PP
import theano.tensor as T
from common import Layer
float_x = theano.config.floatX
class InputLayer(Layer):
""" A input and preprocesing layer"""
NAME = 'input'
def __init__(self, rng, input_train, input_test,input_label,
image_shape, label_shape, deal_label,
angle,
#below here, not tested:
translation,
zoom,
magnitude,
pflip,
invert_image,
sigma,
nearest):
""" deal_label: for patch predicting, need to process label
"""
if invert_image:
if input_train is not None:
input_train = 1-input_train
if input_test is not None:
input_test = 1-input_test
super(InputLayer, self).__init__(rng, input_train, input_test)
self.input_train = self.input_train.reshape(image_shape)
self.input_test = self.input_test.reshape(image_shape)
self.label_shape = label_shape
self.image_shape = image_shape
self.deal_label = deal_label
self.input_label = input_label.reshape(label_shape)
self.angle = angle
self.translation = translation
self.zoom = zoom
self.magnitude = magnitude
self.sigma = sigma
self.pflip = pflip
self.invert = invert_image
self.nearest = nearest
assert zoom > 0
self.need_proc = bool(magnitude or translation or pflip or angle or zoom != 1) # True when any augmentation is requested
# do the preprocessing
def do_preproc(input,label):
if not self.need_proc:
return input,label
if self.deal_label:
assert len(label_shape)==3
else:
outlabel=label
srs=self.rng
w = self.image_shape[-1]
h = self.image_shape[-2]
target = T.as_tensor_variable(np.indices((self.image_shape[-2], self.image_shape[-1])))
if self.deal_label:
tarlab = T.as_tensor_variable(np.indices((self.label_shape[-2], label_shape[-1])))
lw = self.label_shape[-1]
lh = self.label_shape[-2]
# Translate
if self.translation:
transln = self.translation * srs.uniform((2, 1, 1), -1)
target += transln
if self.deal_label:
tarlab += transln
# Apply elastic transform
if self.magnitude:
# Build a gaussian filter
var = self.sigma ** 2
filt = np.array([[np.exp(-.5 * (i * i + j * j) / var)
for i in range(-self.sigma, self.sigma + 1)]
for j in range(-self.sigma, self.sigma + 1)], dtype=float_x)
filt /= 2 * np.pi * var
# Elastic
elast = self.magnitude * srs.normal((2, h, w))
elast = sigconv.conv2d(elast, filt, (2, h, w), filt.shape, 'full')
elast = elast[:, self.sigma:h + self.sigma, self.sigma:w + self.sigma]
target += elast
if deal_label:
raise NotImplementedError()
# Center at 'about' half way
if self.zoom-1 or self.angle:
origin = srs.uniform((2, 1, 1), .25, .75) * \
np.array((h, w)).reshape((2, 1, 1))
if self.deal_label:
lorigin = srs.uniform((2, 1, 1), .25, .75) * \
|
np.array((lh, lw))
|
numpy.array
|
# Copyright (C) <NAME> 2021.
# Distributed under the MIT License (see the accompanying README.md and LICENSE files).
import argparse
import numpy as np
import pickle
import time
import json
import utils.dataset as dataset
import utils.pretrained_models as prtr
import utils.clicks as clk
import utils.dcg_ips as ips
import utils.ranking as rnk
import utils.evaluate as evl
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str,
help="Path to click input.")
parser.add_argument("output_path", type=str,
help="Path to output model.")
parser.add_argument("--loss", type=str,
help="Loss to optimize.",
default='dcg2')
parser.add_argument("--dataset_info_path", type=str,
default="datasets_info.txt",
help="Path to dataset info file.")
args = parser.parse_args()
print('Reading clicks from: %s' % args.input_file)
with open(args.input_file, 'rb') as f:
innp = pickle.load(f)
train_clicks = innp['train']
validation_clicks = innp['validation']
train_doc_clicks = {'clicks_per_doc': train_clicks['train_clicks_per_doc']}
select_doc_clicks = {'clicks_per_doc': train_clicks['select_clicks_per_doc']}
validation_doc_clicks = {'clicks_per_doc': validation_clicks['train_clicks_per_doc']}
bandit_lambdas = train_clicks['lambdas']
click_model = train_clicks['click_model']
eta = train_clicks['eta']
dataset_name = train_clicks['dataset_name']
fold_id = train_clicks['dataset_fold']
binarize_labels = 'binarized' in click_model
num_proc = 0
data = dataset.get_dataset_from_json_info(
dataset_name,
args.dataset_info_path,
shared_resource = False,
)
data = data.get_data_folds()[fold_id]
start = time.time()
data.read_data()
print('Time past for reading data: %d seconds' % (time.time() - start))
def process_loaded_clicks(loaded_clicks, data_split):
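# Returns (train weights, train inverse propensities, select weights, select
# inverse propensities); documents with zero observation propensity are masked
# so the reciprocal does not divide by zero.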
train_weights = compute_weights(
data_split,
loaded_clicks['train_query_freq'],
loaded_clicks['train_clicks_per_doc'],
loaded_clicks['train_observance_prop'],
)
select_weights = compute_weights(
data_split,
loaded_clicks['select_query_freq'],
loaded_clicks['select_clicks_per_doc'],
loaded_clicks['select_observance_prop'],
)
train_mask = np.equal(loaded_clicks['train_observance_prop'], 0).astype(np.float64)
select_mask = np.equal(loaded_clicks['select_observance_prop'], 0).astype(np.float64)
return (train_weights,
1./(loaded_clicks['train_observance_prop'] + train_mask),
select_weights,
1./(loaded_clicks['select_observance_prop'] + select_mask))
def compute_weights(data_split, query_freq, clicks_per_doc, observe_prop):
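# Inverse-propensity weighting per query: click counts are normalized to a
# click probability, divided by the observation propensity to correct for
# position bias, renormalized within the query, and scaled by the query's
# share of the total query frequency.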
doc_weights = np.zeros(data_split.num_docs())
for qid in np.arange(data_split.num_queries()):
q_freq = query_freq[qid]
if q_freq <= 0:
continue
s_i, e_i = data_split.query_range(qid)
q_click = clicks_per_doc[s_i:e_i]
q_obs_prop = observe_prop[s_i:e_i]
if np.sum(q_click) <= 0:
continue
click_prob = q_click.astype(np.float64)/np.amax(q_click)
unnorm_weights = click_prob/q_obs_prop
if np.sum(unnorm_weights) == 0:
norm_weights = unnorm_weights
else:
norm_weights = unnorm_weights/np.sum(unnorm_weights)
norm_weights *= float(q_freq)/
|
np.sum(query_freq)
|
numpy.sum
|
import streamlit as st, numpy as np, os, cv2, pydicom
import matplotlib.pyplot as plt
import skimage.segmentation as seg
from PIL import Image
st.set_page_config(
page_title="Brain Segmentation",
page_icon="https://www.pngfind.com/pngs/m/327-3271821_brain-png-image-brain-side-view-vector-transparent.png",
initial_sidebar_state="expanded",
)
st.write("""
# Brain Segmentation
The following is the algorithm used for Brain Segmentation
""")
IMAGE_PATHS = os.listdir("dicom")
option = st.sidebar.selectbox('Pilih File Dicom?',IMAGE_PATHS)
st.sidebar.write('You selected:', option)
st.sidebar.subheader('Parameter Threshold')
foreground = st.sidebar.slider('Berapa Foreground?', 0, 128, 255)
nilai_threshold = st.sidebar.slider('Berapa Threshold?', 141, 161, 155)
iterasi = st.sidebar.slider('Berapa Iterasi?', 0, 10, 4)
ukuran = st.sidebar.slider('Berapa ukuran?', 0, 10, 4)
start_ukuran, end_ukuran = st.sidebar.select_slider(
'Select Range?',
options=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
value=(1, ukuran-1))
st.sidebar.subheader('Parameter Cluster')
nilai_iterasi = st.sidebar.slider('Berapa Iterasi cluster?', 6, 100, 50)
nilai_cluster = st.sidebar.slider('Berapa Cluster?', 3, 10, 4)
nilai_repetition = st.sidebar.slider('Berapa Repetition?', 9, 98, 10)
def bukadata(file):
# get the data
d = pydicom.read_file('dicom/'+file)
file = np.array(d.pixel_array)
img = file
img_2d = img.astype(float)
img_2d_scaled = (np.maximum(img_2d,0) / img_2d.max()) * foreground
img_2d_scaled = np.uint8(img_2d_scaled)
hasil = img_2d_scaled
st.image(hasil, caption='Gambar Origin', use_column_width=True)
return hasil
def otsuthreshold(image):
#OTSU THRESHOLDING
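# Binarize with Otsu's threshold, then keep only the largest connected
# component (assumed to be the brain). Note: slicing the 4-tuple returned by
# cv2.connectedComponentsWithStats with [start_ukuran:end_ukuran] only yields
# (labels, stats) for the default slider values (1, 3).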
_,binarized = cv2.threshold(image, 0, foreground, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
foreground_value = foreground
mask = np.uint8(binarized == foreground_value)
labels, stats = cv2.connectedComponentsWithStats(mask, ukuran)[start_ukuran:end_ukuran]
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
binarized = np.zeros_like(binarized)
binarized[labels == largest_label] = foreground_value
st.image(binarized, caption='Otsu Image', use_column_width=True)
return binarized
def gaussianthreshold(image):
gaussian = cv2.adaptiveThreshold(image, foreground, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,nilai_threshold, 1)
# masking(gaussian)
foreground_value = foreground
mask = np.uint8(gaussian == foreground_value)
labels, stats = cv2.connectedComponentsWithStats(mask, ukuran)[start_ukuran:end_ukuran]
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
gaussian = np.zeros_like(gaussian)
gaussian[labels == largest_label] = foreground_value
st.image(gaussian, caption='Gaussian Image', use_column_width=True)
return gaussian
def erosion(image):
# erosion from otsu
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(end_ukuran,end_ukuran))
erosion = cv2.erode(image,kernel,iterations = iterasi)
foreground_value = foreground
mask = np.uint8(erosion == foreground_value)
labels, stats = cv2.connectedComponentsWithStats(mask, ukuran)[start_ukuran:end_ukuran]
largest_label = 1 + np.argmax(stats[start_ukuran:, cv2.CC_STAT_AREA])
erosion = np.zeros_like(erosion)
erosion[labels == largest_label] = foreground_value
st.image(erosion, caption='Erosion Image', use_column_width=True)
return erosion
def opening(image):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(end_ukuran,end_ukuran))
opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel, iterations= iterasi)
foreground_value = foreground
mask = np.uint8(opening == foreground_value)
labels, stats = cv2.connectedComponentsWithStats(mask, ukuran)[start_ukuran:end_ukuran]
largest_label = 1 + np.argmax(stats[start_ukuran:, cv2.CC_STAT_AREA])
opening = np.zeros_like(opening)
opening[labels == largest_label] = foreground_value
st.image(opening, caption='Opening Image', use_column_width=True)
return opening
def closing(image):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(end_ukuran,end_ukuran))
closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations= iterasi)
foreground_value = foreground
mask_closing = np.uint8(closing >= foreground_value)
labels, stats = cv2.connectedComponentsWithStats(mask_closing, ukuran)[start_ukuran:end_ukuran]
largest_label = 1 +
|
np.argmax(stats[start_ukuran:, cv2.CC_STAT_AREA])
|
numpy.argmax
|
# Author: <NAME>
# <NAME>
# License: BSD 3 clause
import argparse
import sys
from pathlib import Path
import os
import numpy as np
import matplotlib.pylab as plt
from matplotlib import patches as patches
from sklearn.metrics.pairwise import euclidean_distances
try:
import joblib
except ImportError:
from sklearn.externals import joblib
import ot
from smoothot.dual_solvers import solve_semi_dual, get_plan_from_semi_dual
from smoothot.dual_solvers import NegEntropy, SquaredL2
import dataset
# make needed directories if they do not already exist
root_dir = os.path.dirname(os.path.abspath(__file__))
Path(os.path.join(root_dir, 'images')).mkdir(exist_ok=True)
Path(os.path.join(root_dir, 'res')).mkdir(exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_colors', type=int, default=256, help='number of color clusters')
parser.add_argument('--method', type=str, default='l2_sd', help='OT method')
parser.add_argument('--gamma', type=float, default=1.0, help='regularization parameter')
parser.add_argument('--max_iter', type=int, default=1000)
parser.add_argument('--img1', type=str, default='comunion')
parser.add_argument('--img2', type=str, default='autumn')
args = parser.parse_args()
def map_img(T, img1, img2, weights1):
"""Transfer colors from img2 to img1"""
return np.dot(T / weights1[:, np.newaxis], img2)
n_colors = args.n_colors
method = args.method
gamma = args.gamma
max_iter = args.max_iter
img1 = args.img1
img2 = args.img2
pair = img1+'-'+img2
print("images:", img1, '|', img2)
print("n_colors:", n_colors)
print("gamma:", gamma)
print("max_iter:", max_iter)
print()
hist1, hist2, C, centers1, centers2, labels1, labels2, shape1, shape2 = \
dataset.load_color_transfer(img1=img1, img2=img2, n_colors=n_colors,
transpose=False)
m = len(hist1)
n = len(hist2)
# Obtain transportation plan.
if method == "l2_sd":
regul = SquaredL2(gamma=gamma)
alpha = solve_semi_dual(hist1, hist2, C, regul, max_iter=max_iter, tol=1e-6)
T = get_plan_from_semi_dual(alpha, hist2, C, regul)
name = "Squared 2-norm"
elif method == "ent_sd":
regul = NegEntropy(gamma=gamma)
alpha = solve_semi_dual(hist1, hist2, C, regul, max_iter=max_iter, tol=1e-6)
T = get_plan_from_semi_dual(alpha, hist2, C, regul)
name = "Entropy"
elif method == "lp":
T = ot.emd(hist1, hist2, C)
name = "Unregularized"
else:
raise ValueError("Invalid method")
sparsity = np.sum(T > 1e-10) / T.size
print("Sparsity:", sparsity)
T1 = np.sum(T, axis=1)
Tt1 = np.sum(T, axis=0)
err_a = hist1 - np.sum(T, axis=1)
err_b = hist2 - np.sum(T, axis=0)
print("Marginal a", np.dot(err_a, err_a))
print("Marginal b", np.dot(err_b, err_b))
#print(np.sum(T * C) + 0.5 / gamma * np.dot(err_a, err_a) + 0.5 / gamma * np.dot(err_b, err_b))
print('Objective value:', np.sum(T * C))
T_ = ot.emd(hist1, hist2, C)
print('Unregularized objective value:', np.sum(T_ * C))
img1 = centers1[labels1]
img2 = centers2[labels2]
centers1_mapped = map_img(T, centers1, centers2, T1)
img1_mapped = centers1_mapped[labels1]
centers2_mapped = map_img(T.T, centers2, centers1, Tt1)
img2_mapped = centers2_mapped[labels2]
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(221)
ax.imshow(img1.reshape(shape1))
ax.axis("off")
ax = fig.add_subplot(222)
ax.imshow(img2.reshape(shape2))
ax.axis("off")
ax = fig.add_subplot(223)
ax.imshow(img1_mapped.reshape(shape1))
ax.axis("off")
ax = fig.add_subplot(224)
ax.imshow(img2_mapped.reshape(shape2))
ax.axis("off")
plt.tight_layout()
# plot original and transformed images
out = "%s/images/%s_%d_%s_%0.3e.jpg" % (root_dir, method, n_colors, pair, gamma)
plt.savefig(out)
print()
print('Saved image to:', out)
out = "%s/res/img_%s_%d_%s_%0.3e.pkl" % (root_dir, method, n_colors, pair, gamma)
tup = (img1_mapped.reshape(shape1), img2_mapped.reshape(shape2), sparsity)
joblib.dump(tup, out)
print('Saved pickle to:', out)
plt.show()
if n_colors <= 32:
# plot transport plan and color histogram
def draw_blocks(ax, T):
# find contiguous chunks between coefficients
for k, attn_row in enumerate(T):
brk = np.diff(attn_row)
brk = np.where(brk != 0)[0]
brk = np.append(0, brk + 1)
brk = np.append(brk, T.shape[0])
right_border = True
for s, t in zip(brk[:-1], brk[1:]):
if attn_row[s:t].sum() == 0:
right_border = False
continue
lines = [(s, k), (t, k), (t, k + 1), (s, k + 1)]
lines = np.array(lines, dtype=float)  # plain float, since the np.float alias is deprecated
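# Hedged side note (illustrative toy row only): how the breakpoint logic above splits one row of
# the plan into contiguous constant chunks using np.diff and np.where.
_row = np.array([0.0, 0.0, 0.3, 0.3, 0.0])
_brk = np.where(np.diff(_row) != 0)[0]        # indices just before a change: [1, 3]
_brk = np.append(0, _brk + 1)                 # chunk starts: [0, 2, 4]
_brk = np.append(_brk, _row.shape[0])         # add the row end: [0, 2, 4, 5]
_chunks = list(zip(_brk[:-1], _brk[1:]))      # [(0, 2), (2, 4), (4, 5)]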
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from careless.models.merging.surrogate_posteriors import RiceWoolfson
from careless.models.priors.empirical import *
import numpy as np
from careless.utils.device import disable_gpu
status = disable_gpu()
assert status
observed = np.random.choice([True, False], 100)
observed[0] = True #just in case
observed[1] = False #just in case
Fobs,SigFobs = np.random.random((2, 100)).astype(np.float32)
Fobs[~observed] = 1.
SigFobs[~observed] = 1.
def ReferencePrior_test(p, ref, mc_samples):
#This part checks indexing and gradient numerics
q = tfd.TruncatedNormal( #<-- use this dist because RW has positive support
tf.Variable(Fobs),
tfp.util.TransformedVariable(
SigFobs,
tfp.bijectors.Softplus(),
),
low=1e-5,
high=1e10,
)
with tf.GradientTape() as tape:
z = q.sample(mc_samples)
log_probs = p.log_prob(z)
grads = tape.gradient(log_probs, q.trainable_variables)
assert np.all(np.isfinite(log_probs))
for grad in grads:
assert np.all(np.isfinite(grad))
assert np.all(log_probs.numpy()[...,~observed] == 0.)
#This tests that the observed values follow the correct distribution
z = ref.sample(mc_samples)
expected = ref.log_prob(z).numpy()[...,observed]
result = p.log_prob(z).numpy()[...,observed]
assert np.allclose(expected, result, atol=1e-5)
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for methods in utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from qkeras import *
from qkeras.utils import get_model_sparsity
from qkeras.utils import model_quantize
def create_quantized_network():
"""Creates a simple quantized conv net model."""
# Create a simple model
xi = Input((28, 28, 1))
x = Conv2D(32, (3, 3))(xi)
x = Activation("relu")(x)
x = Conv2D(32, (3, 3), activation="relu")(x)
x = Activation("softmax")(x)
model = Model(inputs=xi, outputs=x)
# Quantize the model
quantizer_config = {
"QConv2D": {
"kernel_quantizer": "quantized_bits(4)",
"bias_quantizer": "quantized_bits(4)"
},
"QActivation": {
"relu": "ternary"
}
}
activation_bits = 4
qmodel = model_quantize(model, quantizer_config, activation_bits)
return qmodel
def create_quantized_po2_network():
"""Creates a simple quantized conv net model with po2 quantizers."""
xi = Input((28, 28, 1))
x = QConv2D(32, (3, 3), kernel_quantizer=quantized_po2(4))(xi)
x = QActivation(quantized_bits(8))(x)
x = QConv2D(32, (3, 3), kernel_quantizer=quantized_po2(4))(x)
x = QActivation(quantized_bits(8))(x)
qmodel = Model(xi, x, name='simple_po2_qmodel')
return qmodel
def set_network_sparsity(model, sparsity):
"""Set the sparsity of the given model using random weights."""
for layer in model.layers:
new_weights = []
for w in layer.get_weights():
# Create weights with desired sparsity
sparse_weights = np.random.rand(w.size)+0.1
sparse_weights[:int(w.size*sparsity)] = 0
np.random.shuffle(sparse_weights)
new_weights.append(sparse_weights.reshape(w.shape))
layer.set_weights(new_weights)
return model
def test_get_model_sparsity():
"""Tests if the method get_model_sparsity in utils.py works correctly."""
qmodel = create_quantized_network()
# Generate sparsity levels to test
sparsity_levels = np.concatenate((np.random.rand(10), [1.0, 0.0])).round(2)
# Test various sparsity levels
for true_sparsity in sparsity_levels:
qmodel = set_network_sparsity(qmodel, true_sparsity)
calc_sparsity = get_model_sparsity(qmodel)
assert np.abs(calc_sparsity - true_sparsity) < 0.01
def test_get_po2_model_sparsity():
"""Tests get_model_sparsity on a po2-quantized model.
Models quantized with po2 quantizers should have a sparsity near 0 because
if the exponent is set to 0, the value of the weight will equal 2^0 == 1 != 0
"""
qmodel = create_quantized_po2_network()
# Generate sparsity levels to test
sparsity_levels = np.concatenate((np.random.rand(10), [1.0, 0.0])).round(2)
# Test various sparsity levels
for set_sparsity in sparsity_levels:
qmodel = set_network_sparsity(qmodel, set_sparsity)
calc_sparsity = get_model_sparsity(qmodel)
assert np.abs(calc_sparsity - 0) < 0.01  # po2-quantized weights have no exact zeros, so sparsity should stay near 0
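# Illustrative aside (not part of the qkeras test suite): power-of-two quantization maps every
# weight to +/- 2^k, so even an exact zero is forced to the nearest representable power of two
# and the measured sparsity collapses to ~0. A rough numpy emulation of that rounding:
def _po2_round(w, max_exp=4):
    sign = np.where(w >= 0, 1.0, -1.0)
    exp = np.clip(np.round(np.log2(np.maximum(np.abs(w), 1e-8))), -max_exp, max_exp)
    return sign * 2.0 ** exp
_w = np.concatenate([np.zeros(50), np.random.uniform(-1, 1, 50)])
_q = _po2_round(_w)
print("sparsity before:", np.mean(_w == 0), "after po2 rounding:", np.mean(_q == 0))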
import os, sys, math, time
import pandas as pd, numpy as np
data_filepath = sys.argv[1]#"C:/Phoenix/School/Harvard/Research/Beiwe/Studies/John_Schizophrenia/Data/2017.01.09"
results_filepath = sys.argv[2]#"C:/Phoenix/School/Harvard/Research/Beiwe/Studies/John_Schizophrenia/Output/Preprocessed_Data/Individual/4noygnj9/accelerometer_bursts.txt"
patient_name = sys.argv[3]#"4noygnj9"
stream = sys.argv[4] #"accelerometer"
milliseconds = int(sys.argv[5]) # 30000
def find_changes(G, patient, timestamps, UTCs, change_val):
change = np.where(np.diff(timestamps) > change_val)[0]  # indices where the gap between consecutive timestamps exceeds change_val
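# Small illustrative sketch (toy values, not from the original Beiwe script): using np.diff to
# find where consecutive timestamps are separated by more than a threshold, which is how bursts
# are split; 30000 ms mirrors the example value noted next to the milliseconds argument above.
_ts = np.array([0, 1000, 2000, 60000, 61000])     # toy millisecond timestamps
_gap_idx = np.where(np.diff(_ts) > 30000)[0]      # -> array([2]): gap between 2000 and 60000
_burst_starts = np.append(0, _gap_idx + 1)        # first index of each burst: [0, 3]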
"""Central data base keeping track of positions, velocities, relative positions, and distances of all simulated fishes
"""
import math
import random
import numpy as np
from scipy.spatial.distance import cdist
import sys
U_LED_DX = 86 # [mm] leds x-distance on BlueBot
U_LED_DZ = 86 # [mm] leds z-distance on BlueBot
class Environment():
"""Simulated fish environment
Fish get their visible neighbors and corresponding relative positions and distances from here. Fish also update their own positions after moving in here. Environmental tracking data is used for simulation analysis.
"""
def __init__(self, pos, vel, fish_specs, arena):
# Arguments
self.pos = pos # x, y, z, phi; [no_robots X 4]
self.vel = vel # pos_dot
self.v_range = fish_specs[0] # visual range, [mm]
self.w_blindspot = fish_specs[1] # width of blindspot, [mm]
self.r_sphere = fish_specs[2] # radius of blocking sphere for occlusion, [mm]
self.n_magnitude = fish_specs[3] # visual noise magnitude, [% of distance]
self.arena_size = arena # x, y, z
# Parameters
self.no_robots = self.pos.shape[0]
self.no_states = self.pos.shape[1]
# Initialize robot states
self.init_states()
# Initialize tracking
self.init_tracking()
# Initialize LEDs
#self.leds_pos = [np.zeros((3,3))]*self.no_robots # empty init, filled with update_leds() below
#for robot in range(self.no_robots):
# self.update_leds(robot)
def log_to_file(self, filename):
"""Logs tracking data to file
"""
#np.savetxt('./logfiles/{}_data.txt'.format(filename), self.tracking, fmt='%.2f', delimiter=',')
np.savetxt('./logfiles/{}.txt'.format(filename), self.tracking, fmt='%.2f', delimiter=',')
def init_tracking(self):
"""Initializes tracking
"""
pos = np.reshape(self.pos, (1,self.no_robots*self.no_states))
vel = np.reshape(self.vel, (1,self.no_robots*self.no_states))
self.tracking = np.concatenate((pos,vel), axis=1)
self.updates = 0
def update_tracking(self):
"""Updates tracking after every fish took a turn
"""
pos = np.reshape(self.pos, (1,self.no_robots*self.no_states))
vel = np.reshape(self.vel, (1,self.no_robots*self.no_states))
current_state = np.concatenate((pos,vel), axis=1)
self.tracking = np.concatenate((self.tracking,current_state), axis=0)
def update_leds(self, source_index):
""" Updates the position of the three leds based on self.pos, which is the position of led1
"""
pos = self.pos[source_index,:3]
phi = self.pos[source_index,3]
x1 = pos[0]
x2 = x1
x3 = x1 + math.cos(phi)*U_LED_DX
y1 = pos[1]
y2 = y1
y3 = y1 + math.sin(phi)*U_LED_DX
z1 = pos[2]
z2 = z1 + U_LED_DZ
z3 = z1
self.leds_pos[source_index] = np.array([[x1, x2, x3],[y1, y2, y3],[z1, z2, z3]])
def init_states(self):
"""Initializes fish positions and velocities
"""
# Restrict initial positions to arena size
self.pos[:,0] = np.clip(self.pos[:,0], 0, self.arena_size[0])
self.pos[:,1] = np.clip(self.pos[:,1], 0, self.arena_size[1])
self.pos[:,2] = np.clip(self.pos[:,2], 0, self.arena_size[2])
# Initial relative positions
a_ = np.reshape(self.pos, (1, self.no_robots*self.no_states))
a = np.tile(a_, (self.no_robots,1))
b = np.tile(self.pos, (1,self.no_robots))
self.rel_pos = a - b # [4*no_robots X no_robots]
# Initial distances
self.dist = cdist(self.pos[:,:3], self.pos[:,:3], 'euclidean') # without phi; [no_robots X no_robots]
def update_states(self, source_id, pos, vel): # add noise
"""Updates a fish state and affected realtive positions and distances
"""
# Position and velocity
self.pos[source_id,0] = np.clip(pos[0], 0, self.arena_size[0])
self.pos[source_id,1] = np.clip(pos[1], 0, self.arena_size[1])
self.pos[source_id,2] = np.clip(pos[2], 0, self.arena_size[2])
self.pos[source_id,3] = pos[3]
self.vel[source_id,:] = vel
# Relative positions
pos_others = np.reshape(self.pos, (1,self.no_robots*self.no_states))
pos_self = np.tile(self.pos[source_id,:], (1,self.no_robots))
rel_pos = pos_others - pos_self
self.rel_pos[source_id,:] = rel_pos # row
rel_pos_ = np.reshape(rel_pos, (self.no_robots, self.no_states))
self.rel_pos[:,source_id*self.no_states:source_id*self.no_states+self.no_states] = -rel_pos_ # columns
# Relative distances
dist = np.linalg.norm(rel_pos_[:,:3], axis=1) # without phi
self.dist[source_id,:] = dist
self.dist[:,source_id] = dist.T
# Update LEDs
#self.update_leds(source_id)
# Update tracking
self.updates += 1
if self.updates >= self.no_robots:
self.updates = 0
self.update_tracking()
def get_robots(self, source_id, visual_noise=False):
"""Provides visible neighbors and relative positions and distances to a fish
"""
robots = set(range(self.no_robots)) # all robots
robots.discard(source_id) # discard self
rel_pos = np.reshape(self.rel_pos[source_id], (self.no_robots, self.no_states))
return (robots, rel_pos, self.dist[source_id])
# perfect vision here
'''
self.visual_range(source_id, robots)
self.blind_spot(source_id, robots, rel_pos)
self.occlusions(source_id, robots, rel_pos)
leds = self.calc_relative_leds(source_id, robots)
if self.n_magnitude: # no overwrites of self.rel_pos and self.dist
n_rel_pos, n_dist = self.visual_noise(source_id, rel_pos)
return (robots, n_rel_pos, n_dist, leds)
return (robots, rel_pos, self.dist[source_id], leds)
'''
def visual_range(self, source_id, robots):
"""Deletes fishes outside of visible range
"""
conn_drop = 0.005
candidates = robots.copy()
for robot in candidates:
d_robot = self.dist[source_id][robot]
x = conn_drop * (d_robot - self.v_range)
if x < -5:
sigmoid = 1
elif x > 5:
sigmoid = 0
else:
sigmoid = 1 / (1 + math.exp(x))
prob = random.random()
if sigmoid < prob:
robots.remove(robot)
def blind_spot(self, source_id, robots, rel_pos):
"""Omits fishes within the blind spot behind own body
"""
r_blockage = self.w_blindspot/2
phi = self.pos[source_id,3]
phi_xy = [math.cos(phi), math.sin(phi)]
mag_phi = np.linalg.norm(phi_xy)
candidates = robots.copy()
for robot in candidates:
dot = np.dot(phi_xy, rel_pos[robot,:2])
if dot < 0:
d_robot = np.linalg.norm(rel_pos[robot,:2])
angle = abs(math.acos(dot / (mag_phi * d_robot))) - math.pi / 2 # cos(a-b) = ca*cb+sa*sb = sa
if math.cos(angle) * d_robot < r_blockage:
robots.remove(robot)
def occlusions(self, source_id, robots, rel_pos):
"""Omits invisible fishes occluded by others
"""
rel_dist = self.dist[source_id]
id_by_dist = np.argsort(rel_dist)
n_valid = []
for robot in id_by_dist[1:]:
if not robot in robots:
continue
occluded = False
d_robot = rel_dist[robot]
if d_robot == 0: # "collision"
continue
coord_robot = rel_pos[robot,:3]
for verified in n_valid:
d_verified = rel_dist[verified]
coord_verified = rel_pos[verified,:3]
theta_min = math.atan(self.r_sphere / d_verified)
theta = abs(math.acos(np.dot(coord_robot, coord_verified) / (d_robot * d_verified)))
if theta < theta_min:
occluded = True
robots.remove(robot)
if not robots:
return
break
if not occluded:
n_valid.append(robot)
def visual_noise(self, source_id, rel_pos):
"""Adds visual noise
"""
magnitudes = self.n_magnitude * np.array([self.dist[source_id]]).T
noise = magnitudes * (np.random.rand(self.no_robots, self.no_states) - 0.5) # zero-mean uniform noise
n_rel_pos = rel_pos + noise
n_dist = np.linalg.norm(n_rel_pos[:,:3], axis=1) # new dist without phi
return (n_rel_pos, n_dist)
def see_circlers(self, source_id, robots, rel_pos, sensing_angle):
'''For circle formation
'''
phi = self.pos[source_id,3]
phi_xy = [math.cos(phi), math.sin(phi)]
mag_phi = np.linalg.norm(phi_xy)
candidates = robots.copy()
for robot in candidates:
dot = np.dot(phi_xy, rel_pos[robot,:2])
if dot > 0:
d_robot = np.linalg.norm(rel_pos[robot,:2])
angle = abs(math.acos(dot / (mag_phi * d_robot)))
if (angle*180/math.pi) < (sensing_angle/2):
return True
return False
def rot_global_to_robot(self, phi):
"""Rotate global coordinates to robot coordinates. Used before simulation of dynamics.
"""
return np.array([[math.cos(phi), math.sin(phi), 0], [-math.sin(phi), math.cos(phi), 0], [0, 0, 1]])
def rot_robot_to_global(self, phi):
"""Rotate robot coordinates to global coordinates. Used after simulation of dynamics.
"""
return np.array([[math.cos(phi), -math.sin(phi), 0], [math.sin(phi), math.cos(phi), 0], [0, 0, 1]])
def calc_reflections(self, leds_list):
"""Calculates the position of the reflected leds
"""
refl_list = []
for led in leds_list:
if led[2] > 10: # at least 10 mm below surface to have a reflection
refl = led + np.array([0, 0, -2*led[2]])
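# Illustrative check (not part of the Environment class): reflecting a point across the water
# surface at z = 0 keeps x and y and flips the sign of z, which is what the offset above does.
_led = np.array([10.0, 20.0, 30.0])            # an LED 30 mm below the surface
_refl = _led + np.array([0, 0, -2 * _led[2]])  # -> [10., 20., -30.]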
import random as random1 # importing as random created name conflict
import numpy as np
import pycxsimulator
from pylab import *
import math
from matplotlib import colors
import matplotlib.pyplot as plt
import nn_toolbox # Another script
class RabbitClass:
""" Rabbit class to represent each rabbit """
def __init__(self,
grid_size,
speed=None, # None -> fresh random draw per rabbit (a default expression would be evaluated only once)
mass=None, # None -> fresh random draw per rabbit
list_of_experiences=None, # None -> a new list per rabbit (avoids the shared mutable default)
list_of_experienced_consequences=None,
health=500):
"""
Description
Initialise Rabbit class instance
Arguments:
grid_size - integer, size of grid in simulation
speed - integer, speed of rabbits
mass - integer, mass of rabbits
list_of_experiences - list
list_of_experienced_consequences, list
health - float, health of rabbit at birth
Returns:
none
"""
self.speed = int(round(speed if speed is not None else 8 + np.random.randn() * 2, 0))
if self.speed < 1: # Lower limit for speed is 1
self.speed = 1
self.health = health
self.mass = int(round(mass if mass is not None else 4 + np.random.randn() * 2, 0))
if self.mass < 1: # Lower limit for mass is 1
self.mass = 1
if self.mass > 10: # Upper limit for mass is 10
self.mass = 10
self.digestion_efficiency = 0.5 / (1 + np.exp(-2 * self.mass + 7)) + 0.5 # Sigmoid like function
self.health = health
self.layer_dims = [6, 4, 1] # 6 inputs, 4 hidden nodes in the single hidden layer, 1 output node
self.list_of_experiences = list_of_experiences if list_of_experiences is not None else []
self.list_of_experienced_consequences = list_of_experienced_consequences if list_of_experienced_consequences is not None else []
self.weights = nn_toolbox.initialize_parameters_deep(self.layer_dims)
if self.list_of_experiences != []: # If born with memories, learn from them at initiation
RabbitClass.learn(self)
self.position = [random1.randint(0, grid_size-1), random1.randint(0, grid_size-1)] # Random position
self.grid_size = grid_size
self.iq = 50 # Default IQ
self.genotype = [] # Default genotype
def get_speed(self):
""" Return rabbit speed """
return self.speed
def get_mass(self):
""" Return rabbit mass """
return self.mass
def set_speed(self, speed):
""" Set rabbit speed """
self.speed = speed
def get_position(self):
""" Return rabbit position """
return self.position[0], self.position[1]
def get_health(self):
""" Return rabbit health """
return self.health
def get_genotype(self):
""" Return rabbit genotype """
return self.genotype
def get_iq(self):
""" Return rabbit IQ """
return self.iq
def update_genotype(self):
""" Update rabbit IQ """
self.genotype = [self.speed, self.mass, [self.list_of_experiences, self.list_of_experienced_consequences]]
def update_iq(self, all_flower_information):
"""
Description
Benchmark rabbit intelligence and save it as rabbit IQ
Arguments:
all_flower_information - dict, information on flowers
Returns:
accuracy - float, 0..100, rabbit intelligence
"""
# Construct training examples
for flower in all_flower_information.values(): # Loop for flower
for flower_size in range(1, 5+1): # Loop for flower size
flower_with_size = flower.copy()
flower_with_size['nutrition value'] = flower['nutrition value'][flower_size - 1]
flower_with_size['flower size'] = flower_size
x = RabbitClass.encode_input(self, flower_with_size)
if flower_with_size['nutrition value'] > 0:
y = 1
else:
y = 0
# Add experience
self.list_of_experiences.append(list(x[0]))
self.list_of_experienced_consequences.append(y)
# Format experiences and experienced consequence to numpy array
X = np.array(self.list_of_experiences)
Y = np.array(self.list_of_experienced_consequences)
X = X.T
Y = Y.reshape((X.shape[1], 1))
Y = np.squeeze(Y.T)
Y_p = nn_toolbox.predict(X, self.weights)
# Calculate accuracy of prediction
m = len(Y)
P = np.sum(Y)
N = m - P
Tp = np.dot(Y_p.T, Y)
Fp = np.sum(Y_p) - Tp  # predicted positives that are not true positives
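# Illustrative aside (toy labels, not from the original rabbit model): with 0/1 labels Y and 0/1
# predictions Y_p, a dot product counts the true positives and the remaining predicted positives
# are false positives.
_Y = np.array([1, 1, 0, 0, 1])
_Yp = np.array([1, 0, 1, 0, 1])
_tp = np.dot(_Yp, _Y)            # 2 correct positive predictions
_fp = np.sum(_Yp) - _tp          # 1 positive prediction on a negative example
_accuracy = np.mean(_Y == _Yp)   # 0.6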
"""
concavity_automator contains multiple routines automating the concavity-constraining method for landscapes
"""
import lsdtopytools as lsd
import numpy as np
import numba as nb
import pandas as pd
from matplotlib import pyplot as plt
import sys
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import math
from lsdtopytools.numba_tools import travelling_salesman_algortihm, remove_outliers_in_drainage_divide
import random
import matplotlib.gridspec as gridspec
from multiprocessing import Pool, current_process
from scipy import spatial,stats
import numba as nb
import copy
from pathlib import Path
import pylab as pl
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
def norm_by_row(A):
"""
Subfunction used for vectorised normalisation of disorder by the max of each row using the apply_along_axis function
B.G
"""
return A/A.max()
def norm_by_row_by_range(A):
"""
Subfunction used for vectorised normalisation of disorder by the range across concavities using the apply_along_axis function
B.G
"""
return (A - A.min())/(A.max() - A.min())
def numfmt(x, pos):
"""
Plotting subfunction to automate tick formatting from metres to kilometres
B.G
"""
s = '{:d}'.format(int(round(x / 1000.0)))
return s
def get_best_bit_and_err_from_Dstar(thetas, medD, fstD, thdD):
"""
Takes output from the concavity calculation to calculate the best-fit theta and its error
"""
# Calculating the index of the minimum median disorder to get the best-fit
index_of_BF = np.argmin(medD)
# Getting the Dstar value of the best-fit
dstar_val = medD[index_of_BF]
# Getting the actual best-fit
BF = thetas[index_of_BF]
# Preformatting 2 arrays for calculating the error: I am only interested in the first half for the first error and the second half for the second
A = np.copy(fstD)
A[index_of_BF+1:] = 9999
B = np.copy(fstD)
B[:index_of_BF] = 9999
# calculating the error by extracting the closest theta with a Dstar close to the median best fit ones
err = ( thetas[np.abs(A - dstar_val).argmin()] , thetas[np.abs(B - dstar_val).argmin()] )
# Returning a tuple with [0] being the best fit and [1] another tuple of errors
return BF,err
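# Hand-checkable toy run (illustrative numbers only) of the logic above: the best fit is the
# theta with the lowest median D*, and the error bounds are the thetas whose first-quartile D*
# comes closest to that minimum median value on either side of the best fit. The fourth argument
# (thdD) is unused by the function, so None is passed here.
_thetas = np.array([0.2, 0.3, 0.4, 0.5])
_medD = np.array([0.6, 0.3, 0.5, 0.7])      # minimum at theta = 0.3
_fstD = np.array([0.31, 0.10, 0.29, 0.50])
_bf, _err = get_best_bit_and_err_from_Dstar(_thetas, _medD, _fstD, None)
# _bf == 0.3, _err == (0.2, 0.4)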
def process_basin(ls, **kwargs):
"""
Main function processing the concavity. It looks a bit convoluted but it is required for clean multiprocessing.
Takes at least one argument: ls, which is a list of arguments
ls[0] -> the number of the basin (heavily used by automatic multiprocessing)
ls[1] -> the X coordinate of the basin outlet
ls[2] -> the Y coordinate of the basin outlet
ls[3] -> area_threshold used for the analysis
ls[4] -> prefix before the number of the basin to read the input file
Also takes optional kwargs arguments:
ignore_numbering: just use the prefix as the name for the DEM
extension: if your extension is not .tif, you can give it here WITHOUT THE DOT
overwrite_dem_name: used if you want to use the function from outside the automations: you need to provide the dem name WITH THE EXTENSION
"""
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
print("Processing basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if("extension" not in kwargs):
kwargs["extension"] = "tif"
if("n_tribs_by_combo" not in kwargs):
kwargs["n_tribs_by_combo"] = 4
if(kwargs["ignore_numbering"] == True):
name = prefix
else:
name = prefix + "%s"%(number)
if(kwargs["precipitation_raster"] == ""):
precipitation = False
else:
precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
dem_name ="%s.%s"%(name,kwargs["extension"])
if("overwrite_dem_name" in kwargs):
dem_name = kwargs["overwrite_dem_name"]
MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# Extracting basins
if(precipitation):
MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
else:
MD.CommonFlowRoutines()
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
print("River extracted")
MD.DefineCatchment( method="from_XY", X_coords = [X], Y_coords = [Y], coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels")
MD.df_base_river.to_feather("%s_rivers.feather"%(name))
print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, kwargs["n_tribs_by_combo"])
print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
np.save("%s_disorder_tot.npy"%(name), results)
XY = MD.cppdem.query_xy_for_each_basin()["0"]
tdf = pd.DataFrame(XY)
tdf.to_feather("%s_XY.feather"%(name))
return 0
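# Hedged usage sketch for process_basin, mirroring how the automation functions further down call
# it; the outlet coordinates, area threshold and prefix below are illustrative placeholders only.
#   ls = [basin_number, X_outlet, Y_outlet, area_threshold, raster_prefix]
#   process_basin([0, 432150.0, 4200000.0, 2000, "basin_"],
#                 n_tribs_by_combo=4, precipitation_raster="")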
def theta_quick_constrain_single_basin(MD,X_coordinate_outlet = 0, Y_coordinate_outlet = 0, area_threshold = 1500):
"""
Quick version of the concavity-constraining routine, run on a single basin from an already-loaded LSDDEM object.
Takes:
MD -> an lsdtopytools LSDDEM object with the flow routines already run
X_coordinate_outlet, Y_coordinate_outlet -> coordinates of the basin outlet
area_threshold -> drainage area threshold used to extract the river network
Returns the per-combination best-fit concavities (all_disorder).
"""
# number = ls[0]
# X = ls[1]
# Y = ls[2]
# area_threshold = ls[3]
# prefix = ls[4]
# print("Processing basin ", number, " with proc ", current_process())
# if("ignore_numbering" not in kwargs):
# kwargs["ignore_numbering"] = False
# if("extension" not in kwargs):
# kwargs["extension"] = "tif"
# if("n_tribs_by_combo" not in kwargs):
# kwargs["n_tribs_by_combo"] = 4
# if(kwargs["ignore_numbering"] == True):
# name = prefix
# else:
# name = prefix + "%s"%(number)
# if(kwargs["precipitation_raster"] == ""):
# precipitation = False
# else:
# precipitation = True
# I spent a significant amount of time preprocessing it, see SM
n_rivers = 0
# dem_name ="%s.%s"%(name,kwargs["extension"])
# if("overwrite_dem_name" in kwargs):
# dem_name = kwargs["overwrite_dem_name"]
# MD = lsd.LSDDEM(file_name = dem_name, already_preprocessed = True)
# # Extracting basins
# if(precipitation):
# MD.CommonFlowRoutines( ingest_precipitation_raster = kwargs["precipitation_raster"], precipitation_raster_multiplier = 1, discharge = True)
# else:
# MD.CommonFlowRoutines()
# print("Experimental function (Gailleton et al., submitted), if it crashes restart from a clean LSDDEM object with only the flow routines processed.")
MD.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_threshold)
# print("River pre-extracted")
MD.DefineCatchment( method="from_XY", X_coords = X_coordinate_outlet, Y_coords = Y_coordinate_outlet, coord_search_radius_nodes = 10 )#, X_coords = [X_coordinates_outlets[7]], Y_coords = [Y_coordinates_outlets[7]])
# print("CAtchment defined")
MD.GenerateChi(theta = 0.4, A_0 = 1)
# print("River_network_generated")
n_rivers = MD.df_base_river.source_key.unique().shape[0]
print("DEBUG::You have", n_rivers, "rivers and",MD.df_base_river.shape[0],"river pixels \n")
# MD.df_base_river.to_feather("%s_rivers.feather"%(name))
# print("Starting the movern calculation")
MD.cppdem.calculate_movern_disorder(0.05, 0.025, 38, 1, area_threshold, 4)
# print("DONE with movern, let's format the output")
OVR_dis = MD.cppdem.get_disorder_dict()[0]
OVR_tested = MD.cppdem.get_disorder_vec_of_tested_movern()
# pd.DataFrame({"overall_disorder":OVR_dis, "tested_movern":OVR_tested }).to_feather("%s_overall_test.feather"%(name))
normalizer = MD.cppdem.get_n_pixels_by_combinations()[0]
# np.save("%s_disorder_normaliser.npy"%(name), normalizer)
all_disorder = MD.cppdem.get_best_fits_movern_per_BK()
# np.save("%s_concavity_tot.npy"%(name), all_disorder[0])
# print("Getting results")
results = np.array(MD.cppdem.get_all_disorder_values()[0])
# np.save("%s_disorder_tot.npy"%(name), results)
# XY = MD.cppdem.query_xy_for_each_basin()["0"]
# tdf = pd.DataFrame(XY)
# tdf.to_feather("%s_XY.feather"%(name))
# print("\n\n")
try:
from IPython.display import display, Markdown, Latex
todusplay = r"""
**Thanks for constraining** $\theta$ with the disorder algorithm from _Mudd et al., 2018_ and _Gailleton et al., submitted_.
Keep in mind that it is not straightforward and that the "best fit" we suggest is most of the time the "least worst" value maximising the collinearity in $\chi$ space.
Especially in large, complex basins, several values of $\theta$ actually fit different areas, and the best fit is just an attempt to make everyone happy where that is not necessarily possible.
$\theta$ constraining results:
median $\theta$ | $1^{st}$ Q | $3^{rd}$ Q
--- | --- | ---
%s | %s | %s
"""%(round(np.nanmedian(all_disorder[0]),3), round(np.nanpercentile(all_disorder[0],25),3), round(np.nanpercentile(all_disorder[0],75),3))
display(Markdown(todusplay))
except:
pass
return all_disorder
def get_median_first_quartile_Dstar(ls):
"""
Function which post-processes results from one analysis to return the median and first-quartile curves of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D* for ", ls)
name_to_load = ls
# loading the file containing ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
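# Illustrative aside (toy numbers, not project data): what the normalisation above does to a
# stack of disorder curves. Each row (one tributary combination) is divided by its own maximum
# before the column-wise median is taken, so curves with different absolute disorder become comparable.
_curves = np.array([[2.0, 4.0, 8.0], [1.0, 3.0, 2.0]])
_norm = np.apply_along_axis(norm_by_row, 1, _curves)   # rows become [0.25, 0.5, 1.0] and [~0.33, 1.0, ~0.67]
_med = np.apply_along_axis(np.median, 0, _norm)        # column-wise median of the normalised rows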
def get_median_first_quartile_Dstar_r(ls):
"""
Function which post-processes results from one analysis to return the median and first-quartile curves of all best-fits
param:
ls: full prefix (= including basin number if needed)
B.G
"""
print("Normalising D*_r for ", ls)
name_to_load = ls
# loading the file containing ALL the data
all_data = np.load(name_to_load + "_disorder_tot.npy")
if(all_data.shape[0]>1):
# normalise by max each row
all_data = np.apply_along_axis(norm_by_row_by_range,1,all_data)
# Median by column
ALLDmed = np.apply_along_axis(np.median,0,all_data)
# Percentile by column
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,all_data)
else:
return name_to_load
return ALLDmed, ALLDfstQ, ls
def plot_single_theta(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots the global map with each basin colored by D^*.
Needs the post-processing function to pre-analyse the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these figures take time to plot.
param: ls -> a tuple (theta, prefix)
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading the rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
print("plotting D*_r for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading the rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_disorder_by_range_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def plot_min_D_star_map(ls, **kwargs):
"""
For a multiple-basin analysis on the same DEM, this plots the global map with each basin colored by its minimum D^* across all tested concavities.
Needs the post-processing function to pre-analyse the outputs.
The layout of this function might seem a bit convoluted, but it makes multiprocessing easy, as these figures take time to plot.
param: ls -> a tuple (theta, prefix)
"""
this_theta = ls[0]
prefix = ls[1]
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting D* for theta", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = pd.read_csv(prefix + "all_raster_names.csv")
thetas = np.round(pd.read_feather(df["raster_name"].iloc[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# For each raster, I am reading the rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = 1e12
for tval in thetas:
valtest = df["D*_%s"%tval][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
if(valtest<val):
val=valtest
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig(prefix + "MAP_minimum_disorder_across_theta_%s.png"%(this_theta), dpi = 500)
plt.close(fig)
def post_process_analysis_for_Dstar(prefix, n_proc = 1, base_raster_full_name = "SEC_PP.tif"):
# Loading the list of raster
df = pd.read_csv(prefix + "all_raster_names.csv")
# Preparing the multiprocessing
d_of_med = {}
d_of_fst = {}
d_of_med_r = {}
d_of_fst_r = {}
params = df["raster_name"].tolist()
ras_to_ignore = {}
ras_to_ignore_list = []
for i in params:
ras_to_ignore[i] = False
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med[gut.get()[2]] = gut.get()[0]
d_of_fst[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# running the multiprocessing
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(get_median_first_quartile_Dstar_r, args = (i,)))
for gut in fprocesses:
gut.wait()
# getting the results in the right dictionaries
for gut in fprocesses:
# print(gut.get())
if(isinstance(gut.get(),tuple)):
d_of_med_r[gut.get()[2]] = gut.get()[0]
d_of_fst_r[gut.get()[2]] = gut.get()[1]
else:
# print("IGNORING",gut.get() )
ras_to_ignore[gut.get()] = True
ras_to_ignore_list.append(gut.get())
# Getting the list of thetas tested
thetas = np.round(pd.read_feather(params[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
df["best_fit"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["best_fit_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_neg_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["err_pos_norm_by_range"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Preparing my dataframe to ingest
for t in thetas:
df["D*_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["D*_r_%s"%t] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# Ingesting the results
for i in range(df.shape[0]):
if(ras_to_ignore[df["raster_name"].iloc[i]]):
continue
BF,err = get_best_bit_and_err_from_Dstar(thetas, d_of_med[df["raster_name"].iloc[i]], d_of_fst[df["raster_name"].iloc[i]], 10)
BF_r,err_r = get_best_bit_and_err_from_Dstar(thetas, d_of_med_r[df["raster_name"].iloc[i]], d_of_fst_r[df["raster_name"].iloc[i]], 10)
df["best_fit"].iloc[i] = BF
df["err_neg"].iloc[i] = err[0]
df["err_pos"].iloc[i] = err[1]
df["best_fit_norm_by_range"].iloc[i] = BF_r
df["err_neg_norm_by_range"].iloc[i] = err_r[0]
df["err_pos_norm_by_range"].iloc[i] = err_r[1]
for t in range(thetas.shape[0]):
df["D*_%s"%thetas[t]].iloc[i] = d_of_med[df["raster_name"].iloc[i]][t]
df["D*_r_%s"%thetas[t]].iloc[i] = d_of_med_r[df["raster_name"].iloc[i]][t]
# Getting the hillshade
mydem = lsd.LSDDEM(file_name = base_raster_full_name,already_preprocessed = True)
HS = mydem.get_hillshade(altitude = 45, angle = 315, z_exageration = 1)
mydem.save_array_to_raster_extent( HS, name = prefix + "HS", save_directory = "./")
# will add X-Y to the summary dataframe
df["X_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["X_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_median"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_firstQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
df["Y_thirdtQ"] = pd.Series(np.zeros(df.shape[0]), index = df.index)
# I do not multiprocess here: it would require loading the mother raster for each process and would eat a lot of memory
for i in params:
if(ras_to_ignore[i]):
continue
XY = pd.read_feather(i + "_XY.feather")
row,col = mydem.cppdem.query_rowcol_from_xy(XY["X"].values, XY["Y"].values)
np.save(i + "_row.npy", row)
np.save(i + "_col.npy", col)
df["X_median"][df["raster_name"] == i] = XY["X"].median()
df["X_firstQ"][df["raster_name"] == i] = XY["X"].quantile(0.25)
df["X_thirdtQ"][df["raster_name"] == i] = XY["X"].quantile(0.75)
df["Y_median"][df["raster_name"] == i] = XY["Y"].median()
df["Y_firstQ"][df["raster_name"] == i] = XY["Y"].quantile(0.25)
df["Y_thirdtQ"][df["raster_name"] == i] = XY["Y"].quantile(0.75)
#Removing the unwanted
df = df[~df["raster_name"].isin(ras_to_ignore_list)]
# Saving the DataFrame
df.to_csv(prefix +"summary_results.csv", index = False)
print("Done with the post processing")
def plot_main_figures(prefix, **kwargs):
# Loading the list of raster
dfrast = pd.read_csv(prefix + "all_raster_names.csv")
df = pd.read_csv(prefix +"summary_results.csv")
# Creating the folder
Path("./%s_figures"%(prefix)).mkdir(parents=True, exist_ok=True)
print("Printing your histograms first")
fig, ax = plt.subplots()
ax.grid(ls = "--")
ax.hist(df["best_fit"], bins = 19, histtype = "stepfilled", edgecolor = "k", facecolor = "orange", lw = 2)
ax.set_xlabel(r"$\theta$")
plt.tight_layout()
plt.savefig("./%s_figures/%shistogram_all_fits.png"%(prefix, prefix), dpi = 500)
plt.close(fig)
print("Building the IQ CDF")
IQR,bin_edge = np.histogram(df["err_pos"].values - df["err_neg"].values)
fig, ax = plt.subplots()
CSIQR = np.cumsum(IQR)
CSIQR = CSIQR/np.nanmax(CSIQR)*100
bin_edge = bin_edge[1:] - np.diff(bin_edge)
ax.plot(bin_edge, CSIQR, lw = 2, color = "k", alpha = 1)
# ax.axhspan(np.percentile(CSIQR,25),np.percentile(CSIQR,75), lw = 0, color = "r", alpha = 0.2)
ax.fill_between(bin_edge,0,CSIQR, lw = 0, color = "k", alpha = 0.1)
ax.set_xlabel(r"IQR $\theta$ best-fit")
ax.set_ylabel(r"%")
ax.grid(ls = "--", lw = 1)
plt.savefig("./%s_figures/%sCDF_IQR.png"%(prefix, prefix), dpi = 500)
plt.close(fig)
print("plotting the map of best-fit")
# Loading the small summary df
df = pd.read_csv(prefix +"summary_results.csv")
# Loading the HillShade
HS = lsd.raster_loader.load_raster(prefix + "HS.tif")
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting best-fit")
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Normalising the Hillshade and taking care of the no data
HS["array"] = HS["array"]/HS["array"].max()
HS["array"][HS["array"]<0] = np.nan
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading the rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["best_fit"][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "RdYlBu_r", zorder = 2, alpha = 0.75, vmin = 0.1, vmax = 0.9)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig("./%s_figures/"%(prefix) +prefix + "MAP_best_fit.png", dpi = 500)
plt.close(fig)
a = np.array([[0,1]])
pl.figure(figsize=(9, 1.5))
img = pl.imshow(a, cmap="RdYlBu_r")
pl.gca().set_visible(False)
cax = pl.axes([0.1, 0.2, 0.8, 0.6])
pl.colorbar(orientation="horizontal", cax=cax)
pl.title(r"$\theta$ best-fit")
pl.savefig("./%s_figures/"%(prefix) +"colorbar_mapbest_fit.png")
pl.close(fig)
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting min theta")
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
df_theta = pd.read_csv(prefix + "all_raster_names.csv")
thetas = np.round(pd.read_feather(df["raster_name"].iloc[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# For each raster, I am reading the rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = 1e12
for tval in thetas:
valtest = df["D*_%s"%tval][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so...
if(valtest<val):
val=valtest
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.05, vmax = 0.65)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig("./%s_figures/"%(prefix) +prefix + "min_Dstar_for_each_basins.png", dpi = 500)
plt.close(fig)
a = np.array([[0,1]])
pl.figure(figsize=(9, 1.5))
img = pl.imshow(a, cmap="gnuplot2", vmin = 0.05, vmax = 0.65)
pl.gca().set_visible(False)
cax = pl.axes([0.1, 0.2, 0.8, 0.6])
pl.colorbar(orientation="horizontal", cax=cax, label = r"Min. $D^{*}$")
pl.title(r"Min. $D^{*}$")
pl.savefig("./%s_figures/"%(prefix) +"colorbar_map_minDstar.png")
pl.close(fig)
# Formatting ticks
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
print("plotting best-fit theta range yo")
min_theta = 99999
min_Dsum = 1e36
for this_theta in thetas:
this_sum = np.sum(df["D*_r_%s"%this_theta].values)
if(this_sum < min_Dsum):
min_theta = this_theta
min_Dsum = this_sum
this_theta = min_theta
print("Which is ", this_theta)
# Getting the Figure and the ticks right
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
# Plotting the hillshade
ax.imshow(HS["array"], extent = HS["extent"], cmap = "gray", vmin= 0.2, vmax = 0.8)
# Building the array of concavity
A = np.zeros(HS["array"].shape)
A[:,:] = np.nan
# For each raster, I am reading the rows and cols corresponding to the main raster and plotting it with the requested value
for name in df["raster_name"]:
row = np.load(name + "_row.npy")
col = np.load(name + "_col.npy")
val = df["D*_r_%s"%this_theta][df["raster_name"] == name].values[0] # A wee convoluted but it work and it is fast so... A[row,col] = val
A[row,col] = val
# PLOTTING THE D*
ax.imshow(A, extent = HS["extent"], cmap= "gnuplot2", zorder = 2, alpha = 0.75, vmin = 0.05, vmax = 0.65)
# You may want to change the extents of the plot
if("xlim" in kwargs):
ax.set_xlim(kwargs["xlim"])
if("ylim" in kwargs):
ax.set_ylim(kwargs["ylim"])
ax.set_xlabel("Easting (km)")
ax.set_ylabel("Northing (km)")
# Saving the figure
plt.tight_layout()#
plt.savefig("./%s_figures/"%(prefix) +prefix + "MAP_D_star_range_theta_%s.png" % this_theta, dpi = 500)
plt.close(fig)
a = np.array([[0,1]])
pl.figure(figsize=(9, 1.5))
img = pl.imshow(a, cmap="gnuplot2", vmin = 0.05, vmax = 0.65)
pl.gca().set_visible(False)
cax = pl.axes([0.1, 0.2, 0.8, 0.6])
pl.colorbar(orientation="horizontal", cax=cax, label = r"$D^{*}_{r}$")
pl.title(r"$D^{*}_{r}$")
pl.savefig("./%s_figures/"%(prefix) +"colorbar_map_Dstar_range.png")
pl.close(fig)
def plot_Dstar_maps_for_all_concavities(prefix, n_proc = 1):
# Loading the list of raster
df = pd.read_csv(prefix + "all_raster_names.csv")
params = df["raster_name"].tolist()
thetas = np.round(pd.read_feather(params[0] + "_overall_test.feather")["tested_movern"].values,decimals = 3)
# running the multiprocessing
params = []
for t in thetas:
params.append((t,prefix))
# plot_single_theta(params[0])
with Pool(n_proc) as p:
fprocesses = []
for i in params:
fprocesses.append(p.apply_async(plot_single_theta, args = (i,)))
for gut in fprocesses:
gut.wait()
plot_min_D_star_map(params[0])
def plot_basin(ls, **kwargs):
number = ls[0]
X = ls[1]
Y = ls[2]
area_threshold = ls[3]
prefix = ls[4]
nbins = None
if("nbins" in kwargs):
nbins = kwargs["nbins"]
print("Plotting basin ", number, " with proc ", current_process())
if("ignore_numbering" not in kwargs):
kwargs["ignore_numbering"] = False
if(kwargs["ignore_numbering"]):
name = prefix
else:
name = prefix + "%s"%(number)
# Alright, loading the previous datasets
df_rivers = pd.read_feather("%s_rivers.feather"%(name))
df_overall = pd.read_feather("%s_overall_test.feather"%(name))
all_concavity_best_fits = np.load("%s_concavity_tot.npy"%(name))
all_disorders = np.load("%s_disorder_tot.npy"%(name))
XY = pd.read_feather("%s_XY.feather"%(name))
thetas = df_overall["tested_movern"].values
if nbins is None:
nbins = (thetas.shape[0], 50)
res_dict = {}
# Plotting the different figures for the basin
# First, normalising the disorder
AllDval = np.apply_along_axis(norm_by_row,1,all_disorders)
AllDthet = np.tile(thetas,(all_disorders.shape[0],1))
ALLDmed = np.apply_along_axis(np.median,0,AllDval)
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,AllDval)
ALLDthdQ = np.apply_along_axis(lambda z: np.percentile(z,75),0,AllDval)
AllDval = AllDval.ravel()
AllDthet = AllDthet.ravel()
# Then, plotting it:
###### First, plotting the 2D histogram of normalised disorder values
fig,ax = plt.subplots()
H,x,y = np.histogram2d(AllDthet,AllDval, bins = nbins, density = True)
ax.hist2d(AllDthet,AllDval, bins = nbins, density = True, cmap = "magma", vmin = np.percentile(H,10), vmax = np.percentile(H,90))
ax.plot(thetas,ALLDmed, lw = 1, ls = "-.", color = "#00F3FF")
ax.plot(thetas,ALLDfstQ, lw = 1, ls = "--", color = "#00F3FF")
ax.plot(thetas,ALLDthdQ, lw = 1, ls = "--", color = "#00F3FF")
# Finding the suggested best-fit
minimum_theta , flub = get_best_bit_and_err_from_Dstar(thetas, ALLDmed, ALLDfstQ, ALLDthdQ)
err_neg,err_pos = flub
res_dict["BF_normalised_disorder"] = minimum_theta
res_dict["err_normalised_disorder"] = [err_neg,err_pos]
print("Detected best-fit minimising normalised disorder is", minimum_theta, "tolerance between:", err_neg, "--",err_pos)
ax.scatter(minimum_theta, np.min(ALLDmed), facecolor = "orange", edgecolor = "grey", s = 30, zorder = 3)
ax.plot([err_neg,err_pos], [np.min(ALLDmed),np.min(ALLDmed)], color = "orange", lw = 2, zorder = 2)
ax.set_xlabel(r"$\theta$")
ax.set_ylabel(r"$D^{*}$")
ax.set_xticks(np.arange(0.05,1,0.05))
ax.set_yticks(np.arange(0.05,1,0.05))
ax.grid(alpha = 0.3)
ax.tick_params(labelsize = 8,)
if("return_mode" not in kwargs):
kwargs["return_mode"] = "save"
if(kwargs["return_mode"].lower() == "save"):
plt.savefig(name + "_D_star.png", dpi = 500)
plt.close(fig)
# Plotting the different figures for the basin
# First, normalising the disorder by range
AllDval = np.apply_along_axis(norm_by_row_by_range,1,all_disorders)
AllDthet = np.tile(thetas,(all_disorders.shape[0],1))
ALLDmed = np.apply_along_axis(np.median,0,AllDval)
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,AllDval)
ALLDthdQ = np.apply_along_axis(lambda z: np.percentile(z,75),0,AllDval)
AllDval = AllDval.ravel()
AllDthet = AllDthet.ravel()
# Then, plotting it:
###### First, plotting the 2D histogram of range-normalised disorder values
fig,ax = plt.subplots()
H,x,y = np.histogram2d(AllDthet,AllDval, bins = nbins, density = True)
ax.hist2d(AllDthet,AllDval, bins = nbins, density = True, cmap = "magma", vmin = np.percentile(H,10), vmax = np.percentile(H,90))
ax.plot(thetas,ALLDmed, lw = 1, ls = "-.", color = "#00F3FF")
ax.plot(thetas,ALLDfstQ, lw = 1, ls = "--", color = "#00F3FF")
ax.plot(thetas,ALLDthdQ, lw = 1, ls = "--", color = "#00F3FF")
minimum_theta , flub = get_best_bit_and_err_from_Dstar(thetas, ALLDmed, ALLDfstQ, ALLDthdQ)
err_neg,err_pos = flub
# Finding the suggested best-fit
res_dict["BF_normalised_disorder_range"] = minimum_theta
res_dict["err_normalised_disorder_range"] = [err_neg,err_pos]
print("Detected best-fit minimising normalised disorder is", minimum_theta, "tolerance between:", err_neg, "--",err_pos)
ax.scatter(minimum_theta, np.min(ALLDmed), facecolor = "orange", edgecolor = "grey", s = 30, zorder = 3)
ax.plot([err_neg,err_pos], [np.min(ALLDmed),np.min(ALLDmed)], color = "orange", lw = 2, zorder = 2)
ax.set_xlabel(r"$\theta$")
ax.set_ylabel(r"$D^{*}$")
ax.set_xticks(np.arange(0.05,1,0.05))
ax.set_yticks(np.arange(0.05,1,0.05))
ax.grid(alpha = 0.3)
ax.tick_params(labelsize = 8,)
if("return_mode" not in kwargs):
kwargs["return_mode"] = "save"
if(kwargs["return_mode"].lower() == "save"):
plt.savefig(name + "_D_star_norm_by_range.png", dpi = 500)
plt.close(fig)
#### Now plotting the rest
df_overall = pd.read_feather("%s_overall_test.feather"%(name))
res_dict["overall_best_fit"] = df_overall["tested_movern"][np.argmin(df_overall["overall_disorder"].values)]
res_dict["median_all_lowest_values"] = np.median(all_concavity_best_fits)
res_dict["IQ_all_lowest_values"] = [np.percentile(all_concavity_best_fits,25),np.percentile(all_concavity_best_fits,75)]
u,c = np.unique(all_concavity_best_fits,return_counts = True)
res_dict["max_combinations"] = u[np.argmax(c)]
fig,ax = plt.subplots()
# ax.hist(all_concavity_best_fits,bins = bins[0], edgecolor = "k", facecolor = "orange", zorder = 1, alpha = 0.3)
ax.plot([0,0],res_dict["IQ_all_lowest_values"], color = "purple", lw = 2,zorder = 1)
ax.scatter(0,res_dict["median_all_lowest_values"],edgecolor = "k", facecolor = "purple", s = 50, label = "Stats all values", zorder = 2)
ax.scatter(1,res_dict["overall_best_fit"],edgecolor = "k", facecolor = "green", s = 50, label = "All values", zorder = 2)
ax.scatter(2,res_dict["max_combinations"],edgecolor = "k", facecolor = "black", s = 50, label = "Max N tribs.", zorder = 2)
ax.plot([3,3],res_dict["err_normalised_disorder"], color = "orange", lw = 2,zorder = 1)
ax.scatter(3,res_dict["BF_normalised_disorder"],edgecolor = "k", facecolor = "orange", s = 50, label = r"$D^{*}$ norm. max.s", zorder = 2)
ax.plot([4,4],res_dict["err_normalised_disorder_range"], color = "red", lw = 2,zorder = 1)
ax.scatter(4,res_dict["BF_normalised_disorder_range"],edgecolor = "k", facecolor = "red", s = 50, label = r"$D^{*}$ norm. ranges", zorder = 2)
ax.violinplot(all_concavity_best_fits,[5], showmeans = False, showextrema =False,points = 100, bw_method= "silverman")
# ax.legend()
ax.set_xticks([0,1,2,3,4,5])
ax.set_xticklabels(["Stats all values","All data best fit","Max N tribs.",r"$D^{*}$ norm. max.", r"$D^{*}$ norm. ranges", "data"])
ax.set_yticks(np.round(np.arange(0.05,1,0.05), decimals = 2))
ax.set_facecolor("grey")
ax.grid(alpha = 0.5)
ax.tick_params(axis = "x",labelrotation =45)
ax.tick_params(axis = "both",labelsize = 8)
# ax.hist(min_theta,bins = 38, edgecolor = "green", facecolor = "none", alpha = 0.6)
ax.set_ylabel(r"$\theta$")
plt.tight_layout()
plt.savefig(name +"_all_best_fits_from_disorder_methods.png", dpi = 500)
plt.close(fig)
# elif(kwargs["return_mode"].lower() == "return"):
# return fig,ax
# elif(kwargs["return_mode"].lower() == "nothing"):
# return 0
def get_all_concavity_in_range_of_DA_from_baselevel(dem_name, dem_path = "./",already_preprocessed = False , X_outlet = 0, Y_outlet = 0,
min_DA = 1e7, max_DA = 2e8, area_threshold = 2000,area_threshold_main_basin = 25000 , n_proc = 4, prefix = "", n_tribs_by_combo = 4):
print("First, elt me extract all the basins")
# First, I need to extract the basin into separated rasters
X = X_outlet
Y = Y_outlet
# Loading the dem
mydem = lsd.LSDDEM(file_name = dem_name, path = dem_path, already_preprocessed = already_preprocessed)
# Preprocessing
if(already_preprocessed == False):
mydem.PreProcessing()
# Getting DA and other stuff
mydem.CommonFlowRoutines()
A = mydem.cppdem.get_DA_raster()
mydem.save_array_to_raster_extent( A, name = prefix + "drainage_area")
# This defines the river network; it is required to actually calculate other metrics
mydem.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = 800)
print("DEBUG::RIVEREXTRACTED")
# Extracting all the basins
coord_bas = mydem.cppdem.calculate_outlets_min_max_draining_to_baselevel(X, Y, min_DA, max_DA,500)
print("DEBUG::BASINEXTRACTED")
print(coord_bas)
# Getting the rasters
rasts = mydem.cppdem.get_individual_basin_raster()
coord_bas["ID"] = list(range(len(rasts[0])))
for i in range(len(rasts[0])):
lsd.raster_loader.save_raster(rasts[0][i],rasts[1][i]["x_min"],rasts[1][i]["x_max"],rasts[1][i]["y_max"],rasts[1][i]["y_min"],rasts[1][i]["res"],mydem.crs, prefix + "%s.tif"%(i), fmt = 'GTIFF')
df = pd.DataFrame(coord_bas)
df.to_csv(prefix + "basin_outlets.csv", index = False)
# Freeing memory
del mydem
del rasts
print("Done with basin extraction, I am now multiprocessing the concavity analysis, this can take a while if you have a high number of river!")
th = np.full(df["ID"].shape[0],area_threshold)
tprefix = np.full(df["ID"].shape[0],prefix)
N = list(zip(df["ID"].values,df["X"].values,df["Y"].values, th, tprefix)) # for single analysis
these_kwarges = []
for i in N:
these_kwarges.append({"n_tribs_by_combo":n_tribs_by_combo})
# print(N)
p = Pool(n_proc)
# results = p.map_async(process_basin, N)
# results.wait()
# p.close()
# p.join()
with Pool(n_proc) as p:
fprocesses = []
for i in range(len(N)):
fprocesses.append(p.apply_async(process_basin, args = (N[i],),kwds = these_kwarges[i]))
for gut in fprocesses:
gut.wait()
# A = p.get()
print("Done with all the sub basins, now I will process the main basin")
def process_multiple_basins(dem_name, dem_path = "./",already_preprocessed = False , prefix = "", X_outlets = [0], Y_outlets = [0], n_proc = 1, area_threshold = [5000],
area_thershold_basin_extraction = 500, n_tribs_by_combo = 5,use_precipitation_raster = False, name_precipitation_raster = "prec.tif"):
# IDs = np.array(IDs)
X_outlets = np.array(X_outlets)
Y_outlets = np.array(Y_outlets)
area_threshold = np.array(area_threshold)
mydem = lsd.LSDDEM(file_name = dem_name, path = dem_path, already_preprocessed = already_preprocessed)
# Preprocessing
if(already_preprocessed == False):
mydem.PreProcessing()
# Getting DA and other flow-routing products
mydem.CommonFlowRoutines()
A = mydem.cppdem.get_DA_raster()
mydem.save_array_to_raster_extent( A, name = prefix + "drainage_area")
# This defines the river network; it is required before calculating other metrics
mydem.ExtractRiverNetwork( method = "area_threshold", area_threshold_min = area_thershold_basin_extraction)
mydem.DefineCatchment( method="from_XY", X_coords = X_outlets, Y_coords = Y_outlets, coord_search_radius_nodes = 0)
# Getting the rasters
rasts = mydem.cppdem.get_individual_basin_raster()
# IDs = np.array(IDs)
IDs = np.array(range(len(rasts[0])))
out_names = {"raster_name": []}
for i in range(len(rasts[0])):
lsd.raster_loader.save_raster(rasts[0][i],rasts[1][i]["x_min"],rasts[1][i]["x_max"],rasts[1][i]["y_max"],rasts[1][i]["y_min"],rasts[1][i]["res"],mydem.crs, prefix + "%s.tif"%(i), fmt = 'GTIFF')
out_names["raster_name"].append(prefix + "%s"%(i))
pd.DataFrame(out_names).to_csv(prefix + "all_raster_names.csv", index = False)
del mydem
del rasts
th = np.full(IDs.shape[0],area_threshold)
tprefix = np.full(IDs.shape[0],prefix)
N = list(zip(IDs,X_outlets,Y_outlets, th, tprefix)) # for single analysis
these_kwarges = []
for i in N:
dico ={}
dico["n_tribs_by_combo"] = n_tribs_by_combo
if(use_precipitation_raster):
dico["precipitation_raster"]=dem_path+name_precipitation_raster
else:
dico["precipitation_raster"]=""
these_kwarges.append(dico)
# print(N)
# p = Pool(n_proc)  # unused here: the basins are processed sequentially below
# results = p.map_async(process_basin, N)
# results.wait()
# p.close()
# p.join()
for i in range(len(N)):
process_basin(N[i],**these_kwarges[i])
# with Pool(n_proc) as p:
# fprocesses = []
# for i in range(len(N)):
# fprocesses.append(p.apply_async(process_basin, args = (N[i],),kwds = these_kwarges[i]))
# for gut in fprocesses:
# gut.wait()
def plot_one_thetha(ls):
import matplotlib.ticker as tkr # has classes for tick-locating and -formatting
yfmt = tkr.FuncFormatter(numfmt)
xfmt = tkr.FuncFormatter(numfmt)
that_theta = ls[0]
print("plotting D* for theta", that_theta)
fig,ax = plt.subplots()
ax.yaxis.set_major_formatter(yfmt)
ax.xaxis.set_major_formatter(xfmt)
ax.imshow(ls[1],extent = ls[2], vmin = 0, vmax = 1, cmap = "gray", zorder = 1)
cb = ax.imshow(ls[3],extent = ls[2], vmin = 0.1, vmax = 0.9, cmap = "magma_r", zorder = 2, alpha = 0.8)
plt.colorbar(cb, orientation = "horizontal", label = r"$D^{*}$ for $\theta=%s$"%(round(that_theta,3)))
ax.set_xlabel("Easting (km)")
ax.set_xlabel("Northing (km)")
plt.savefig(ls[4], dpi = 500)
plt.close(fig)
print("Done plotting D* for theta", that_theta)
def plot_multiple_basins(dem_name, dem_path = "./",already_preprocessed = False , prefix = "", X_outlets = [0], Y_outlets = [0],
n_proc = 1, area_threshold = [5000], area_thershold_basin_extraction = 500, plot_Dstar = False):
"""
This function plots the data for a list of basins
"""
# reloading the df needed to process the output
df = pd.read_csv(prefix + "summary_results.csv")
# Outputs stored in lists
df_rivers = []
df_overall = []
all_concavity_best_fits = []
all_disorders = []
XY = []
thetas = []
for i in range(df["raster_name"].shape[0]):
name = df["raster_name"].iloc[i]
# Alright, loading the previous datasets
df_rivers.append(pd.read_feather("%s_rivers.feather"%(name)))
df_overall.append(pd.read_feather("%s_overall_test.feather"%(name)))
all_concavity_best_fits.append(np.load("%s_concavity_tot.npy"%(name)))
all_disorders.append(np.load("%s_disorder_tot.npy"%(name)))
XY.append(pd.read_feather("%s_XY.feather"%(name)))
thetas = (df_overall[-1]["tested_movern"].values)
size_of_stuff = len(df_rivers)
# First, a condensed summary of all the information
easting = []
northing = []
easting_err = []
northing_err = []
best_fits_Dstar = []
err_Dstar = []
min_Dstar = []
for i in range(size_of_stuff):
AllDval = np.apply_along_axis(norm_by_row,1,all_disorders[i])
AllDthet = np.tile(thetas,(all_disorders[i].shape[0],1))
ALLDmed = np.apply_along_axis(np.median,0,AllDval)
ALLDfstQ = np.apply_along_axis(lambda z: np.percentile(z,25),0,AllDval)
ALLDthdQ = np.apply_along_axis(lambda z: np.percentile(z,75),0,AllDval)
AllDval = AllDval.ravel()
AllDthet = AllDthet.ravel()
# Finding the suggested best-fit
minimum_theta , flub = get_best_bit_and_err_from_Dstar(thetas, ALLDmed, ALLDfstQ, ALLDthdQ)
err_neg,err_pos = flub
easting.append(np.median(XY[i]["X"]))
northing.append(np.median(XY[i]["Y"]))
easting_err.append([np.percentile(XY[i]["X"],25),np.percentile(XY[i]["X"],75)])
northing_err.append([np.percentile(XY[i]["Y"],25),np.percentile(XY[i]["Y"],75)])
best_fits_Dstar.append(minimum_theta)
min_Dstar.append(np.min(ALLDmed))
err_Dstar.append([err_neg,err_pos])
easting = np.array(easting)
northing = np.array(northing)
easting_err = np.array(easting_err)
northing_err = np.array(northing_err)
best_fits_Dstar = np.array(best_fits_Dstar)
err_Dstar = np.array(err_Dstar)
min_Dstar = np.array(min_Dstar)
fig,ax = plt.subplots()
ax.scatter(easting, best_fits_Dstar, edgecolor = "k", facecolor = "orange", s = 50, zorder = 5)
for i in range(size_of_stuff):
ax.plot([easting[i], easting[i]], err_Dstar[i], color = "orange", lw = 2, zorder = 1, alpha = 0.7)
ax.plot(easting_err[i], [best_fits_Dstar[i],best_fits_Dstar[i]], color = "orange", lw = 2, zorder = 1, alpha = 0.7)
xticks = np.arange(easting.min(), easting.max()+1, (easting.max() - easting.min())/5)
ax.set_xticks(xticks)
xticks = xticks / 1000
ax.set_xticklabels(np.round(xticks).astype(str))
ax.set_xlabel(r"Easting (km)")
ax.set_ylabel(r"$\theta$")
ax.set_facecolor("grey")
ax.grid(zorder = 1, ls = "--", alpha = 0.7)
plt.savefig(prefix + "_best_fit_by_easting", dpi=500)
plt.close()
fig,ax = plt.subplots()
ax.scatter(northing, best_fits_Dstar, edgecolor = "k", facecolor = "orange", s = 50, zorder = 5)
for i in range(size_of_stuff):
ax.plot([northing[i], northing[i]], err_Dstar[i], color = "orange", lw = 2, zorder = 1, alpha = 0.7)
ax.plot(northing_err[i], [best_fits_Dstar[i],best_fits_Dstar[i]], color = "orange", lw = 2, zorder = 1, alpha = 0.7)
xticks = np.arange(northing.min(), northing.max()+1, (northing.max() - northing.min())/5)
ax.set_xticks(xticks)
xticks = xticks / 1000
ax.set_xticklabels(
|
np.round(xticks)
|
numpy.round
|
import numpy as np
from numpy.ctypeslib import as_array
from numpy.testing import assert_array_equal
from meshkernel import (
Contacts,
GeometryList,
Mesh1d,
Mesh2d,
MeshRefinementParameters,
OrthogonalizationParameters,
)
from meshkernel.c_structures import (
CContacts,
CGeometryList,
CMesh1d,
CMesh2d,
CMeshRefinementParameters,
COrthogonalizationParameters,
)
def test_cmesh2d_from_mesh2d():
"""Tests `from_mesh2d` of the `CMesh2D` class with a simple mesh."""
# 2---3
# | |
# 0---1
node_x = np.array([0.0, 1.0, 1.0, 0.0], dtype=np.double)
node_y =
|
np.array([0.0, 0.0, 1.0, 1.0], dtype=np.double)
|
numpy.array
|
import json
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Generator, NamedTuple, Union, cast
import gym
import gym.spaces as spaces
import gym.utils
import gym.utils.seeding
import numpy as np
import PIL.Image
import pybullet as p
import torch
from pybullet_utils import bullet_client
from torch.nn.utils.rnn import pad_sequence
from transformers import GPT2Tokenizer
from torchbeast.lazy_frames import LazyFrames
CAMERA_DISTANCE = 3
CAMERA_PITCH = -45
CAMERA_YAW = 225
class ObservationSpace(NamedTuple):
mission: spaces.MultiDiscrete
image: spaces.Box
class Observation(NamedTuple):
mission: np.ndarray
image: Union[np.ndarray, LazyFrames]
class Action(NamedTuple):
turn: float = 0
forward: float = 0
done: bool = False
take_picture: bool = False
class Actions(Enum):
LEFT = Action(3, 0)
RIGHT = Action(-3, 0)
FORWARD = Action(0, 0.18)
BACKWARD = Action(0, -0.18)
DONE = Action(done=True)
PICTURE = Action(take_picture=True)
NO_OP = Action()
ACTIONS = [*Actions]
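# For example (a sketch): discrete agent actions index into ACTIONS, so action 0
# resolves to Actions.LEFT, whose value is Action(turn=3, forward=0).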
class URDF(NamedTuple):
name: str
path: Path
z: float
@contextmanager
def suppress_stdout():
"""from https://stackoverflow.com/a/17954769/4176597"""
fd = sys.stdout.fileno()
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, "w") # Python writes to fd
with os.fdopen(os.dup(fd), "w") as old_stdout:
with open(os.devnull, "w") as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
@dataclass
class PointMassEnv(gym.Env):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 60}
cameraYaw: float = 35
env_bounds: float = 5
image_height: float = 72
image_width: float = 96
is_render: bool = False
max_episode_steps: int = 200
model_name: str = "gpt2"
reindex_tokens: bool = False
def __post_init__(
self,
):
tokenizer = GPT2Tokenizer.from_pretrained(self.model_name)
self.action_space = spaces.Discrete(5)
with Path("model_ids.json").open() as f:
self.model_ids = set(json.load(f))
def urdfs():
for subdir in Path("dataset").iterdir():
urdf = Path(subdir, "mobility.urdf")
assert urdf.exists()
with Path(subdir, "meta.json").open() as f:
meta = json.load(f)
with Path(subdir, "bounding_box.json").open() as f:
box = json.load(f)
_, _, z_min = box["min"]
model_id = meta["model_id"]
if model_id in self.model_ids:
yield URDF(name=meta["model_cat"], path=urdf, z=-z_min)
self.urdfs = list(urdfs())
names, paths, zs = zip(*self.urdfs)  # reuse the list built above rather than re-scanning the dataset
def tokens() -> Generator[torch.Tensor, None, None]:
for k in names:
encoded = tokenizer.encode(k, return_tensors="pt")
tensor = cast(torch.Tensor, encoded)
yield tensor.squeeze(0)
padded = pad_sequence(
list(tokens()),
padding_value=tokenizer.eos_token_id,
).T.numpy()
if self.reindex_tokens:
_, indices = np.unique(padded, return_inverse=True)
padded = indices.reshape(padded.shape)
self.tokens = OrderedDict(zip(names, padded))
image_space = spaces.Box(
low=0,
high=255,
shape=[self.image_height, self.image_width, 3],
)
max_padded = padded.max()
nvec = np.ones_like(padded[0]) * (max_padded + 1)
mission_space = spaces.MultiDiscrete(nvec)
self.observation_space = spaces.Tuple(
ObservationSpace(
mission=mission_space,
image=image_space,
)
)
self._seed()
self.iterator = None
self.relativeChildPosition = [0, 0, 0]
self.relativeChildOrientation = [0, 0, 0, 1]
if self.is_render:
with suppress_stdout():
self._p = bullet_client.BulletClient(connection_mode=p.GUI)
self._p.configureDebugVisualizer(self._p.COV_ENABLE_SHADOWS, 0)
else:
with suppress_stdout():
self._p = bullet_client.BulletClient(connection_mode=p.DIRECT)
sphereRadius = 0.2
mass = 1
visualShapeId = 2
colSphereId = self._p.createCollisionShape(
self._p.GEOM_SPHERE, radius=sphereRadius
)
self.mass = self._p.createMultiBody(
mass, colSphereId, visualShapeId, [0, 0, 0.4]
)
self.mass_cid = self._p.createConstraint(
self.mass,
-1,
-1,
-1,
self._p.JOINT_FIXED,
[0, 0, 0],
[0, 0, 0],
self.relativeChildPosition,
self.relativeChildOrientation,
)
def get_observation(self) -> Observation:
pos, _ = self._p.getBasePositionAndOrientation(self.mass)
(_, _, rgbaPixels, _, _,) = self._p.getCameraImage(
self.image_width,
self.image_height,
viewMatrix=self._p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=pos,
distance=CAMERA_DISTANCE,
yaw=self.cameraYaw,
pitch=CAMERA_PITCH,
roll=0,
upAxisIndex=2,
),
shadow=0,
flags=self._p.ER_NO_SEGMENTATION_MASK,
renderer=self._p.ER_BULLET_HARDWARE_OPENGL,
)
rgbaPixels = rgbaPixels[..., :-1].astype(np.float32)
obs = Observation(
image=rgbaPixels,
mission=self.tokens[self.mission],
)
assert self.observation_space.contains(obs)
return obs
def generator(self):
missions = []
goals = []
urdfs = [
self.urdfs[i]
for i in self.np_random.choice(len(self.urdfs), size=2, replace=False)
]
for base_position, urdf in zip(
[
[self.env_bounds / 3, self.env_bounds / 3, 0],
[-self.env_bounds / 3, -self.env_bounds / 3, 0],
],
urdfs,
):
missions.append(urdf.name)
base_position[-1] = urdf.z
try:
with suppress_stdout():
goal = self._p.loadURDF(
str(urdf.path), basePosition=base_position, useFixedBase=True
)
except self._p.error as e:
print(e)
raise RuntimeError(f"Error while loading {urdf.path}")
goals.append(goal)
collisionFilterGroup = 0
collisionFilterMask = 0
self._p.setCollisionFilterGroupMask(
goal, -1, collisionFilterGroup, collisionFilterMask
)
self._p.createConstraint(
goal,
-1,
-1,
-1,
self._p.JOINT_FIXED,
[1, 1, 1.4],
[0, 0, 0],
self.relativeChildPosition,
self.relativeChildOrientation,
)
choice = self.np_random.choice(2)
self.goal = goals[choice]
self.mission = missions[choice]
i = dict(mission=self.mission, goals=goals)
self._p.configureDebugVisualizer(self._p.COV_ENABLE_GUI, False)
self._p.setGravity(0, 0, -10)
halfExtents = [1.5 * self.env_bounds, 1.5 * self.env_bounds, 0.1]
floor_collision = self._p.createCollisionShape(
self._p.GEOM_BOX, halfExtents=halfExtents
)
floor_visual = self._p.createVisualShape(
self._p.GEOM_BOX, halfExtents=halfExtents, rgbaColor=[1, 1, 1, 0.5]
)
self._p.createMultiBody(0, floor_collision, floor_visual, [0, 0, -0.2])
self._p.resetBasePositionAndOrientation(self.mass, [0, 0, 0.6], [0, 0, 0, 1])
action = yield self.get_observation()
for global_step in range(self.max_episode_steps):
a = ACTIONS[action].value
self.cameraYaw += a.turn
x, y, _, _ = self._p.getQuaternionFromEuler(
[np.pi, 0,
|
np.deg2rad(2 * self.cameraYaw)
|
numpy.deg2rad
|
import unittest
import numpy.testing as npt
from .testing import gradient_checking
from . import _nn_activations
import warnings
import numpy as np
class NNActivationsTestCase(unittest.TestCase):
####
# Activations
####
def testReluActivation_Forward(self):
activation = _nn_activations.activation("relu")
Z = np.array([[1., -2.], [0., 0.25]])
A = activation.forward(Z)
expected_A = np.array([[1., 0.], [0., 0.25]])
npt.assert_almost_equal(A, expected_A)
def testReluActivation_GradientCheck(self):
self._testActivation_GradientCheck("relu")
def testSigmoidActivation_GradientCheck(self):
self._testActivation_GradientCheck("sigmoid")
def testTanhActivation_GradientCheck(self):
self._testActivation_GradientCheck("tanh")
def testActivationError_UnknownFunction(self):
with self.assertRaises(ValueError):
_nn_activations.activation("dne")
def _testActivation_GradientCheck(self, fn):
self._activation = _nn_activations.activation(fn)
Z = self._createZ(3)
diff, _, _ = gradient_checking.check(
self._activationCost, self._activationGradients, Z)
self.assertLess(diff, 1e-7)
def _createZ(self, num_cols):
np.random.seed(195262)
Z_shape = (3, num_cols)
Z = np.random.normal(0, 1, Z_shape)
return Z
def _activationCost(self, Z):
activation = self._activation
A = activation.forward(Z)
cost = np.sum(A)
return cost
def _activationGradients(self, Z):
activation = self._activation
A = activation.forward(Z)
dA = np.ones(A.shape)
dZ = activation.backward(dA)
return (dZ,)
####
# Outputs
####
def testBinaryOutput_Properties(self):
output = _nn_activations.binary_output()
self.assertFalse(output.is_multiclass)
self.assertEqual(output.C, 2)
def testBinaryOutput_Predict(self):
output = _nn_activations.binary_output()
Z = np.log(np.array([[3.], [1.], [0.25]]))
expected_pred = np.array([1, 0, 0])
expected_prob = np.array([0.75, 0.5, 0.2])
self._testOutput_Predict(output, Z, expected_pred, expected_prob)
def testBinaryOutput_GradientCheck(self):
output = _nn_activations.binary_output()
output.Y = np.array([[0.], [1.], [0.]])
self._testOutput_GradientCheck(output)
def testMulticlassOutput_Properties(self):
output = _nn_activations.multiclass_output(3)
self.assertTrue(output.is_multiclass)
self.assertEqual(output.C, 3)
def testMulticlassOutput_Predict(self):
output = _nn_activations.multiclass_output(3)
Z = np.log(np.array([[1., 2., 1.], [5., 3., 2.], [1., 1., 3.]]))
expected_pred = np.array([1, 0, 2])
expected_prob = np.array(
[[0.25, 0.50, 0.25], [0.5, 0.3, 0.2], [0.2, 0.2, 0.6]])
self._testOutput_Predict(output, Z, expected_pred, expected_prob)
def testMulticlassOutput_GradientCheck(self):
output = _nn_activations.multiclass_output(3)
output.Y = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., 1.]])
self._testOutput_GradientCheck(output)
def testOutput_CostClipping(self):
output = _nn_activations.binary_output()
output.Y = np.array([[1.]])
Z = np.array([[-1000]])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
output.forward(Z)
self.assertTrue(np.isfinite(output.cost))
def testMulticlasOutputError_IllegalC(self):
with self.assertRaises(ValueError):
_nn_activations.multiclass_output(2)
def testOutputError_YNotSet(self):
output = _nn_activations.binary_output()
with self.assertRaises(RuntimeError):
output.Y
Z = np.zeros((3, 1))
output.forward(Z)
with self.assertRaises(RuntimeError):
output.cost
with self.assertRaises(RuntimeError):
output.backward(None)
def _testOutput_Predict(self, output, Z, expected_pred, expected_prob):
output.forward(Z)
pred = output.pred
prob = output.prob
self.assertEqual(pred.shape, expected_pred.shape)
npt.assert_almost_equal(pred, expected_pred)
self.assertEqual(prob.shape, expected_prob.shape)
npt.assert_almost_equal(prob, expected_prob)
def _testOutput_GradientCheck(self, output):
num_cols = output.C if output.is_multiclass else 1
Z = self._createZ(num_cols)
self._output = output
diff, _, _ = gradient_checking.check(
self._outputCost, self._outputGradients, Z)
self.assertLess(diff, 1e-7)
def _outputCost(self, Z):
output = self._output
output.forward(Z)
return output.cost
def _outputGradients(self, Z):
output = self._output
output.forward(Z)
dZ = output.backward(None)
return (dZ,)
####
# Activation functions
####
def testSigmoid(self):
log_odds = np.log(np.array([[3., 4.], [1., 0.25]]))
A = _nn_activations._sigmoid(log_odds)
expected_prob = np.array([[0.75, 0.80], [0.50, 0.20]])
npt.assert_almost_equal(A, expected_prob)
def testSoftmax(self):
log_odds = np.log(np.array([[1., 3., 1.], [2., 5., 3.]]))
A = _nn_activations._softmax(log_odds)
expected_prob = np.array([[0.2, 0.6, 0.2], [0.2, 0.5, 0.3]])
|
npt.assert_almost_equal(A, expected_prob)
|
numpy.testing.assert_almost_equal
|
import pandas as pd
import numpy as np
import wget
from zipfile import ZipFile
from DeepPurpose.utils import *
import json
import os
'''
Acknowledgement:
The BindingDB dataset is hosted in https://www.bindingdb.org/bind/index.jsp.
The Davis Dataset can be found in http://staff.cs.utu.fi/~aatapa/data/DrugTarget/.
The KIBA dataset can be found in https://jcheminf.biomedcentral.com/articles/10.1186/s13321-017-0209-z.
The Drug Target Common Dataset can be found in https://drugtargetcommons.fimm.fi/.
The COVID-19 Dataset including SARS-CoV, Broad Repurposing Hub can be found in https://www.aicures.mit.edu/data; and https://pubchem.ncbi.nlm.nih.gov/bioassay/1706.
We use some existing files from https://github.com/yangkevin2/coronavirus_data
We use the SMILES, protein sequence from DeepDTA github repo: https://github.com/hkmztrk/DeepDTA/tree/master/data.
'''
def read_file_training_dataset_bioassay(path):
# a line in the file is SMILES score, the first line is the target sequence
try:
file = open(path, "r")
except IOError:
print('Path Not Found, please double check!')
raise
target = file.readline()
if target[-1:] == '\n':
target = target[:-1]
X_drug = []
y = []
for aline in file:
values = aline.split()
X_drug.append(values[0])
y.append(float(values[1]))
file.close()
return
|
np.array(X_drug)
|
numpy.array
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
subregions_portrait_diagram.py
Use OCW to download, normalize, evaluate and plot (portrait diagram)
three local datasets against a reference dataset.
In this example:
1. Download three netCDF files from a local site.
AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc
AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc
AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc
2. Load the local files into OCW dataset objects.
3. Interface with the Regional Climate Model Evaluation Database (https://rcmes.jpl.nasa.gov/)
to load the CRU3.1 Daily Precipitation dataset (https://rcmes.jpl.nasa.gov/content/cru31).
4. Process each dataset to the same shape.
a.) Restrict the datasets re: geographic and time boundaries.
b.) Convert the dataset water flux to common units.
c.) Normalize the dataset date / times to monthly.
d.) Spatially regrid each dataset.
5. Calculate the mean annual value for each dataset.
6. Separate each dataset into 13 subregions.
7. Extract the metrics used for the evaluation and evaluate
against a reference dataset.
8. Create a portrait diagram of the results of the evaluation.
OCW modules demonstrated:
1. datasource/local
2. datasource/rcmed
3. dataset
4. dataset_processor
5. metrics
6. evaluation
7. plotter
8. utils
"""
from __future__ import print_function
import datetime
import ssl
import sys
from os import path
import numpy as np
import ocw.data_source.local as local
import ocw.data_source.rcmed as rcmed
import ocw.dataset_processor as dsp
import ocw.evaluation as evaluation
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
from ocw.dataset import Bounds
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
else:
# Not Python 3 - today, it is most likely to be Python 2
# But note that this might need an update when Python 4
# might be around one day
from urllib import urlretrieve
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# File URL leader
FILE_LEADER = 'http://zipper.jpl.nasa.gov/dist/'
# Three Local Model Files
FILE_1 = 'AFRICA_KNMI-RACMO2.2b_CTL_ERAINT_MM_50km_1989-2008_pr.nc'
FILE_2 = 'AFRICA_ICTP-REGCM3_CTL_ERAINT_MM_50km-rg_1989-2008_pr.nc'
FILE_3 = 'AFRICA_UCT-PRECIS_CTL_ERAINT_MM_50km_1989-2008_pr.nc'
# Filename for the output image/plot (without file extension)
OUTPUT_PLOT = 'portrait_diagram'
# Spatial and temporal configurations
LAT_MIN = -45.0
LAT_MAX = 42.24
LON_MIN = -24.0
LON_MAX = 60.0
START = datetime.datetime(2000, 1, 1)
END = datetime.datetime(2007, 12, 31)
EVAL_BOUNDS = Bounds(lat_min=LAT_MIN, lat_max=LAT_MAX, lon_min=LON_MIN,
lon_max=LON_MAX, start=START, end=END)
# variable that we are analyzing
varName = 'pr'
# regridding parameters
gridLonStep = 0.5
gridLatStep = 0.5
# some vars for this evaluation
target_datasets_ensemble = []
target_datasets = []
allNames = []
# Download necessary NetCDF file if not present
if not path.exists(FILE_1):
urlretrieve(FILE_LEADER + FILE_1, FILE_1)
if not path.exists(FILE_2):
urlretrieve(FILE_LEADER + FILE_2, FILE_2)
if not path.exists(FILE_3):
urlretrieve(FILE_LEADER + FILE_3, FILE_3)
# Step 1: Load Local NetCDF File into OCW Dataset Objects and store in list
target_datasets.append(local.load_file(FILE_1, varName, name='KNMI'))
target_datasets.append(local.load_file(FILE_2, varName, name='REGCM'))
target_datasets.append(local.load_file(FILE_3, varName, name='UCT'))
# Step 2: Fetch an OCW Dataset Object from the data_source.rcmed module
print('Working with the rcmed interface to get CRU3.1 Monthly Mean Precipitation')
# the dataset_id and the parameter id were determined from
# https://rcmes.jpl.nasa.gov/content/data-rcmes-database
CRU31 = rcmed.parameter_dataset(
10, 37, LAT_MIN, LAT_MAX, LON_MIN, LON_MAX, START, END)
# Step 3: Processing Datasets so they are the same shape
print('Processing datasets ...')
CRU31 = dsp.normalize_dataset_datetimes(CRU31, 'monthly')
print('... on units')
CRU31 = dsp.water_flux_unit_conversion(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.subset(target_datasets[member], EVAL_BOUNDS)
target_datasets[member] = \
dsp.water_flux_unit_conversion(target_datasets[member])
target_datasets[member] = dsp.normalize_dataset_datetimes(
target_datasets[member], 'monthly')
print('... spatial regridding')
new_lats = np.arange(LAT_MIN, LAT_MAX, gridLatStep)
new_lons = np.arange(LON_MIN, LON_MAX, gridLonStep)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
for member, each_target_dataset in enumerate(target_datasets):
target_datasets[member] = dsp.spatial_regrid(
target_datasets[member], new_lats, new_lons)
# find the total annual mean. Note the function exists in util.py as def
# calc_climatology_year(dataset):
_, CRU31.values = utils.calc_climatology_year(CRU31)
for member, each_target_dataset in enumerate(target_datasets):
_, target_datasets[member].values = \
utils.calc_climatology_year(target_datasets[member])
# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = 'ENS'
# append to the target_datasets for final analysis
target_datasets.append(target_datasets_ensemble)
for target in target_datasets:
allNames.append(target.name)
list_of_regions = [
Bounds(lat_min=-10.0, lat_max=0.0, lon_min=29.0, lon_max=36.5),
Bounds(lat_min=0.0, lat_max=10.0, lon_min=29.0, lon_max=37.5),
Bounds(lat_min=10.0, lat_max=20.0, lon_min=25.0, lon_max=32.5),
Bounds(lat_min=20.0, lat_max=33.0, lon_min=25.0, lon_max=32.5),
Bounds(lat_min=-19.3, lat_max=-10.2, lon_min=12.0, lon_max=20.0),
Bounds(lat_min=15.0, lat_max=30.0, lon_min=15.0, lon_max=25.0),
Bounds(lat_min=-10.0, lat_max=10.0, lon_min=7.3, lon_max=15.0),
Bounds(lat_min=-10.9, lat_max=10.0, lon_min=5.0, lon_max=7.3),
Bounds(lat_min=33.9, lat_max=40.0, lon_min=6.9, lon_max=15.0),
Bounds(lat_min=10.0, lat_max=25.0, lon_min=0.0, lon_max=10.0),
Bounds(lat_min=10.0, lat_max=25.0, lon_min=-10.0, lon_max=0.0),
Bounds(lat_min=30.0, lat_max=40.0, lon_min=-15.0, lon_max=0.0),
Bounds(lat_min=33.0, lat_max=40.0, lon_min=25.0, lon_max=35.00)]
region_list = ['R' + str(i + 1) for i in range(13)]
# metrics
pattern_correlation = metrics.PatternCorrelation()
# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
# 1 or more target datasets for
# the evaluation
target_datasets,
# 1 or more metrics to use in
# the evaluation
[pattern_correlation],
# list of subregion Bounds
# Objects
list_of_regions)
RCMs_to_CRU_evaluation.run()
new_patcor = np.squeeze(
|
np.array(RCMs_to_CRU_evaluation.results)
|
numpy.array
|
import os
import json
import numpy as np
from PIL import Image
#By default, assigned to the data augmentation directories
TRAIN_IMAGE_DIR = "Data/images_train_data_augmentation/"
VALIDATION_IMAGE_DIR = "Data/images_validation_data_augmentation/"
TEST_IMAGE_DIR = "Data/images_test/"
DESCR_DIR = "Data/descriptions/"
IMAGE_SIZE = 224
#Change the directories if data augmentation is not used
def set_directories(data_augmentation = False):
if(not(data_augmentation)):
global TRAIN_IMAGE_DIR
TRAIN_IMAGE_DIR = "Data/images_train/"
global VALIDATION_IMAGE_DIR
VALIDATION_IMAGE_DIR = "Data/images_validation/"
#Selects and returns the appropriate directory
def get_directory(image_or_description = "image", is_training = False, is_validation = False):
if(image_or_description == "image"):
if(is_training):
return TRAIN_IMAGE_DIR
elif(is_validation):
return VALIDATION_IMAGE_DIR
else:
return TEST_IMAGE_DIR
else:
if(is_training):
file_name = "descriptions_train.json"
elif(is_validation):
file_name = "descriptions_validation.json"
else:
file_name = "descriptions_test.json"
return DESCR_DIR, file_name
#Returns the selected image's pixels in the shape (batch, width, height, channels)
def get_image_pixels(image_number, is_training = False, is_validation = False):
#Gets the directory and image_name
directory = get_directory(is_training = is_training, is_validation = is_validation)
image_name = str(image_number) + ".jpg"
#Opens the image and resizes it
image = Image.open(directory + image_name)
image = image.resize((IMAGE_SIZE, IMAGE_SIZE), Image.BICUBIC)
#Converts the image to numpy array with shape (batch, width, height, channels)
#Makes sure all the values are between 0 and 1
image_pixels = np.array(image, dtype = "float32")
image_pixels /= 255.
image_pixels =
|
np.expand_dims(image_pixels, 0)
|
numpy.expand_dims
|
import numpy as np
# TODO add separate class for this functionality
def get_euler_angles_from_rot(R):
"""Compute Euler angles from rotation matrix.
yaw, pitch, roll: 3, 2, 1 rot sequence
Note frame relationship: e^b = e^v R^{vb}
"""
psi = np.arctan2(R[1, 0], R[0, 0]) # yaw angle
theta = np.arcsin(-R[2, 0]) # pitch angle
phi = np.arctan2(R[2, 1], R[2, 2]) # roll angle
return (psi, theta, phi)
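# Illustrative sketch (added, not part of the original module): a pure yaw
# rotation should come back as (psi, 0, 0) from get_euler_angles_from_rot.
def _example_yaw_only_rotation():
    psi = np.deg2rad(30.0)
    R_yaw = np.array([
        [np.cos(psi), -np.sin(psi), 0.0],
        [np.sin(psi),  np.cos(psi), 0.0],
        [0.0,          0.0,         1.0],
    ])
    return get_euler_angles_from_rot(R_yaw)  # approximately (0.5236, 0.0, 0.0)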
def skew(a):
"""Returns skew symmetric matrix, given a 3-vector"""
#a = np.flatten(a) # convert to 3-array
a = a.flatten()
return np.array([
[ 0, -a[2], a[1]],
[ a[2], 0, -a[0]],
[-a[1], a[0], 0]
])
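# Illustrative sketch (added): skew(a) @ b reproduces the cross product a x b,
# which is the property the quaternion code below relies on.
def _example_skew_matches_cross_product():
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([-1.0, 0.5, 2.0])
    return np.allclose(skew(a) @ b, np.cross(a, b))  # True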
def quat_prod(p, q):
p0 = p[0]; p = p[1:4]
P = np.zeros((4,4))
P[0, 0] = p0; P[0, 1:] = -p.T
P[1:, 0] = p.flatten()
P[1:, 1:] = -skew(p) + p0*np.eye(3)
return P @ q
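# Illustrative sketch (added): the identity quaternion [1, 0, 0, 0] leaves any
# quaternion unchanged under quat_prod (quaternions given as flat length-4 arrays).
def _example_quat_prod_identity():
    q = np.array([0.5, 0.5, 0.5, 0.5])
    identity = np.array([1.0, 0.0, 0.0, 0.0])
    return np.allclose(quat_prod(identity, q), q)  # True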
def Euler2Quaternion(y, p, r):
psi2 = y/2
theta2 = p/2
phi2 = r/2
return np.array([
[np.sin(phi2)*np.sin(psi2)*np.sin(theta2) + np.cos(phi2)*np.cos(psi2)*np.cos(theta2)],
[np.sin(phi2)*np.cos(psi2)*np.cos(theta2) - np.sin(psi2)*np.sin(theta2)*np.cos(phi2)],
[np.sin(phi2)*np.sin(psi2)*np.cos(theta2) + np.sin(theta2)*np.cos(phi2)*
|
np.cos(psi2)
|
numpy.cos
|
import sys
import Bio
import statsmodels
import math
import numpy as np
import pandas as pd
from statsmodels.compat.python import range
from statsmodels.compat.collections import OrderedDict
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from collections import defaultdict
from itertools import islice, chain, product, combinations_with_replacement
from time import process_time
########## main function ##############################################################################
if __name__ == "__main__":
fasta_file = sys.argv[1]
nullomers_file = sys.argv[2]
threshold = float(sys.argv[3])
level = sys.argv[4]
correction_method = sys.argv[5]
print_log = sys.argv[6]
else:
print("\n**An error occured**\n")
raise SystemExit()
if (correction_method != "FDR") and (correction_method != "TARONE") and (correction_method != "BONF"):
print("\n**Please choose one of the following attributes for statistical correction method: BONF or TARONE or FDR**\n")
raise SystemExit()
if (print_log != "TRUE") and (print_log != "FALSE"):
print("\n**The 'print_log' parameter should be either TRUE or FALSE**\n")
raise SystemExit()
if (level == "DNA"):
alphabet = "TCGA"
elif (level == "PROT"):
alphabet = "ACDEFGHIKLMNPQRSTVWY"
else:
print("\n**Please declare the type of sequences. Accepted attributes are DNA or PROT**\n")
raise SystemExit()
#################################################################################################################
######### secondary functions ###################################################################################
def return_indices(array, value):
return np.where(array==value)
def nth_order_probs(sequences, n):
num = defaultdict(int)
for seq in sequences:
for i in range(len(seq) - n):
j = i + n + 1
key = seq[i:j]
num[key] += 1
denom = defaultdict(int)
for key, value in (num.items()):
denom[key[0:n]] += value
return {key: value/denom[key[0:n]] for key, value in num.items()}
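# Worked example (a sketch): for a single sequence "AAAC" and n = 1 the 2-mers
# AA, AA, AC are counted and normalised by the occurrences of their leading
# letter, so nth_order_probs(["AAAC"], 1) == {"AA": 2/3, "AC": 1/3}.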
def _ecdf(x):
nobs = len(x)
return np.arange(1,nobs+1)/float(nobs)
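# Example (a sketch): _ecdf returns the empirical CDF positions used by the
# Benjamini-Hochberg step below, e.g. _ecdf([0.01, 0.02, 0.04]) gives
# array([1/3, 2/3, 1.0]).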
def fdrcorrection(pvals, thresh, is_sorted=False):
###FDR correction -- more info at: http://www.statsmodels.org/devel/_modules/statsmodels/stats/multitest.html#multipletests
pvals = np.asarray(pvals)
if not is_sorted:
pvals_sortind = np.argsort(pvals)
pvals_sorted = np.take(pvals, pvals_sortind)
else:
pvals_sorted = pvals # alias
ecdffactor = _ecdf(pvals_sorted)
reject = pvals_sorted <= ecdffactor*thresh
if reject.any():
rejectmax = max(
|
np.nonzero(reject)
|
numpy.nonzero
|
# -*- coding: utf-8 -*-
# Copyright 2020 PyePAL authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Use PAL with the neural tangent library.
This allows to perform
1. Exact Bayesian inference (NNGP)
2. Inference using gradient descent with MSE loss (NTK)
Note that the neural tangent code usually assumes mean-zero Gaussians
Reducing predict_fn_kwargs['diag_reg'] typically improves the interpolation
quality
"""
from typing import Sequence, Tuple
import numpy as np
from sklearn.preprocessing import StandardScaler
from ..models.nt import NTModel
from .pal_base import PALBase
from .validate_inputs import validate_nt_models
__all__ = ["PALNT", "NTModel"]
# We move those functions out of the class so that we can parallelize them
def _set_one_infinite_width_model( # pylint:disable=too-many-arguments
i: int,
models: Sequence[NTModel],
design_space: np.ndarray,
objectives: np.ndarray,
sampled: np.ndarray,
predict_fn_kwargs: dict = None,
) -> Tuple[callable, StandardScaler]:
from jax.config import config # pylint:disable=import-outside-toplevel
config.update("jax_enable_x64", True)
import neural_tangents as nt # pylint:disable=import-outside-toplevel
if predict_fn_kwargs is None:
predict_fn_kwargs = {"diag_reg": 1e-3}
model = models[i]
kernel_fn = model.kernel_fn
scaler = StandardScaler()
y = scaler.fit_transform( # pylint:disable=invalid-name
objectives[sampled[:, i], i].reshape(-1, 1)
)
predict_fn = nt.predict.gradient_descent_mse_ensemble(
kernel_fn,
design_space[sampled[:, i]],
y,
**predict_fn_kwargs,
)
return predict_fn, scaler
def _predict_one_infinite_width_model(
i: int, models: Sequence[NTModel], design_space: np.ndarray, kernel: str
):
predict_fn = models[i].predict_fn
mean, covariance = predict_fn( # type: ignore
x_test=design_space,
get=kernel,
compute_cov=True,
)
return mean.flatten(), np.sqrt(
|
np.diag(covariance)
|
numpy.diag
|
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.utils import indices_to_mask
def bootstrap_prediction(X, y, score_func, base_estimator=None, n_estimators=10,
max_samples=1.0, max_features=1.0, bootstrap=True,
bootstrap_features=False, n_jobs=None, random_state=None):
"""Bootstrap the scores from an `sklearn` estimator.
Args:
X (array-like, dtype=float64, , size=[n_samples, n_features]): Feature matrix.
y (array, dtype=float64, size=[n_samples]): Target vector.
score_func (callable): Score function (or loss function) with signature
score_func(y, y_pred, **kwargs).
base_estimator (object or None, optional): Defaults to None. The base estimator
to fit on random subsets of the dataset. If None, then the base estimator
is a decision tree.
n_estimators (int, optional): Defaults to 10. The number of base estimators in
the ensemble.
max_samples (int or float, optional): Defaults to 1.0. The number of samples
to draw from X to train each base estimator. If int, then draw max_samples
samples. If float, then draw max_samples * X.shape[0] samples.
max_features (int or float, optional): Defaults to 1.0. The number of features
to draw from X to train each base estimator. If int, then draw max_features
features. If float, then draw max_features * X.shape[1] features.
bootstrap (bool, optional): Defaults to True. Whether samples are drawn with
replacement.
bootstrap_features (bool, optional): Defaults to False. Whether features are
drawn with replacement.
n_jobs (int or None, optional): Defaults to None. The number of jobs to run in
parallel for both fit and predict. None means 1 unless in a
joblib.parallel_backend context.
random_state (int, RandomState instance or None, optional): Defaults to None.
If int, random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If None,
the random number generator is the RandomState instance used by np.random.
Returns:
numpy.ndarray: Distribution of score function statistic.
"""
bag = BaggingClassifier(
base_estimator=base_estimator, n_estimators=n_estimators, max_samples=max_samples,
max_features=max_features, bootstrap=bootstrap, bootstrap_features=bootstrap_features,
n_jobs=n_jobs, random_state=random_state)
bag.fit(X, y)
stats = []
for estimator, samples in zip(bag.estimators_, bag.estimators_samples_):
# Create mask for OOB samples
mask = ~indices_to_mask(samples, len(y))
# Compute predictions on out-of-bag samples
y_pred = estimator.predict(X[mask])
# Compute statistic
stat = score_func(y[mask], y_pred)
stats.append(stat)
stats = np.array(stats)
return stats
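# Hypothetical usage sketch (the dataset and score function are illustrative,
# not part of the original code):
#
# from sklearn.datasets import load_iris
# from sklearn.metrics import accuracy_score
# X, y = load_iris(return_X_y=True)
# scores = bootstrap_prediction(X, y, accuracy_score,
#                               n_estimators=50, random_state=0)
# print(scores.mean(), percentile_conf_int(scores))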
def percentile_conf_int(data, alpha=0.05):
"""Compute the percentile confidence interval from some data.
Args:
data (numpy.ndarray): Data.
alpha (float, optional): Defaults to 0.05. Significance level.
(0 < alpha < 1).
Returns:
np.ndarray: Lower and upper percentile confidence interval.
"""
lower = 100 * alpha / 2
upper = 100 * (1. - alpha / 2)
conf_int =
|
np.percentile(data, [lower, upper])
|
numpy.percentile
|
import numpy as np
import warnings
# from https://github.com/tinghuiz/SfMLearner
def dump_xyz(source_to_target_transformations):
xyzs = []
cam_to_world =
|
np.eye(4)
|
numpy.eye
|
import numpy as np
import gym
import gym.spaces
import tensorflow as tf
env = gym.make('NChain-v0')
N= 5 # No of physical states
Q=6 #no of memory states
A =2 #no_of_actions
intial_state=1
gamma= .995
def dirichlet_sample(alphas):
r = np.random.standard_gamma(alphas)
r /= r.sum(-1).reshape(-1, 1)
return r
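# Illustrative sketch (added): each row returned by dirichlet_sample is a
# probability vector, so every row sums to 1.
def _example_dirichlet_rows_sum_to_one():
    alphas = np.array([[1.0, 2.0, 3.0], [10.0, 10.0, 10.0]])
    sample = dirichlet_sample(alphas)
    return np.allclose(sample.sum(axis=1), 1.0)  # True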
if __name__ == "__main__":
alphas1 = np.array([[200,0,800,0,0],[200,0,800,0,0],[200,0,0,800,0],[200,0,0,0,800],[200,0,0,0,800]])
alphas2= np.array([[800,200,0,0,0],[800,0,200,0,0],[800,0,0,200,0],[800,0,0,0,200],[800,0,0,0,200]])
transition_probablity1 = dirichlet_sample(alphas1)
transition_probablity2 = dirichlet_sample(alphas2)
#print("dirichlet_sample1:",transition_probablity1)
#print()
#print("dirichlet_sample2:",transition_probablity2)
#print()
transitionMatrix=
|
np.dstack((transition_probablity2,transition_probablity1))
|
numpy.dstack
|
# Quantile utilities for processing MERRA/AIRS data
import numpy
import numpy.ma as ma
import calculate_VPD
import netCDF4
from netCDF4 import Dataset
from numpy import random, linalg
import datetime
import pandas
import os, sys
from scipy import stats
import h5py
def quantile_cloud_locmask(airsdr, mtdr, indr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, msk):
# Construct cloud variable quantiles and z-scores, with a possibly irregular location mask
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (airsdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f[msk][:,:]
latmet = f['plat'][:]
lonmet = f['plon'][:]
f.close()
mask[mask <= 0] = 0
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset a bit
lnsm = numpy.sum(mask,axis=0)
print(lnsq.shape)
print(lnsm.shape)
print(lnsm)
ltsm = numpy.sum(mask,axis=1)
print(ltsq.shape)
print(ltsm.shape)
print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
#latflt = latin.flatten()
#lonflt = lonin.flatten()
#mskflt = mask.flatten()
#lcsq = numpy.arange(mskflt.shape[0])
#lcsb = lcsq[mskflt > 0]
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mask[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
print(tmhld.shape)
print(numpy.amin(tmhld))
print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing_IncludesCloudParams.h5' % (indr,yrlst[k],hrchc)
f = h5py.File(fnm,'r')
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(mtnm,'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
cfrc1 = cfrc1.flatten()
cfrc2 = cfrc2.flatten()
cfrc12 = cfrc12.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
nslbtmp = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
if tsmp == 0:
nslabout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
nslabout[:] = nslbtmp[msksb]
else:
nslabout = numpy.append(nslabout,nslbtmp[msksb])
flsq = numpy.arange(ctyp1.shape[0])
# For two slabs, slab 1 must have highest cloud bottom pressure
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = flsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
print(numpy.mean(slabswap))
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
dpnsq = flsq[(nslbtmp == 2) & (dpslbtmp < 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 1.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((msksb.shape[0],)) - 9999.0
psfcout[:] = psfc[msksb]
prsbot1out = numpy.zeros((msksb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[msksb]
dpcld1out = numpy.zeros((msksb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[msksb]
dpslbout = numpy.zeros((msksb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[msksb]
dpcld2out = numpy.zeros((msksb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[msksb]
else:
psfcout = numpy.append(psfcout,psfc[msksb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[msksb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[msksb])
dpslbout = numpy.append(dpslbout,zdslbtmp[msksb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[msksb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[msksb]
slbtyp2out = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[msksb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[msksb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[msksb])
# Cloud Fraction Logit, still account for swapping
z1tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
z2tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
z12tmp = numpy.zeros((cfrc1.shape[0],)) - 9999.0
for t in range(z1tmp.shape[0]):
if ( (cfrc1[t] > 0.0) and (cfrc2[t] > 0.0) and (cfrc12[t] > 0.0) ):
# Must adjust amounts
if (slabswap[t] == 0):
prptmp = numpy.array( [cfrc1[t]-cfrc12[t], cfrc2[t]-cfrc12[t], cfrc12[t], 0.0] )
else:
prptmp = numpy.array( [cfrc2[t]-cfrc12[t], cfrc1[t]-cfrc12[t], cfrc12[t], 0.0] )
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = ztmp[1]
z12tmp[t] = ztmp[2]
elif ( (cfrc1[t] > 0.0) and (cfrc2[t] > 0.0) ):
if (slabswap[t] == 0):
prptmp = numpy.array( [cfrc1[t], cfrc2[t], 0.0] )
else:
prptmp = numpy.array( [cfrc2[t], cfrc1[t], 0.0] )
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = ztmp[1]
z12tmp[t] = -9999.0
elif ( cfrc1[t] > 0.0 ):
prptmp = numpy.array( [cfrc1[t], 1.0 - cfrc1[t] ] )
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t] = ztmp[0]
z2tmp[t] = -9999.0
z12tmp[t] = -9999.0
else:
z1tmp[t] = -9999.0
z2tmp[t] = -9999.0
z12tmp[t] = -9999.0
if tsmp == 0:
cfclgt1out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt1out[:] = z1tmp[msksb]
cfclgt2out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt2out[:] = z2tmp[msksb]
cfclgt12out = numpy.zeros((msksb.shape[0],)) - 9999.0
cfclgt12out[:] = z12tmp[msksb]
else:
cfclgt1out = numpy.append(cfclgt1out,z1tmp[msksb])
cfclgt2out = numpy.append(cfclgt2out,z2tmp[msksb])
cfclgt12out = numpy.append(cfclgt12out,z12tmp[msksb])
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((msksb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[msksb]
ngwt2out = numpy.zeros((msksb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[msksb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[msksb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[msksb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((msksb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[msksb]
cttp2out = numpy.zeros((msksb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[msksb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[msksb])
cttp2out = numpy.append(cttp2out,cttptmp2[msksb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((msksb.shape[0],)) - 9999.0
latout[:] = lthld[msksb]
lonout = numpy.zeros((msksb.shape[0],)) - 9999.0
lonout[:] = lnhld[msksb]
yrout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[msksb]
else:
latout = numpy.append(latout,lthld[msksb])
lonout = numpy.append(lonout,lnhld[msksb])
yrtmp = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[msksb])
tsmp = tsmp + msksb.shape[0]
# Process quantiles
nslbqs = calculate_VPD.quantile_msgdat_discrete(nslabout,prbs)
str1 = '%.2f Number Slab Quantile: %d' % (prbs[53],nslbqs[53])
print(str1)
print(nslbqs)
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f Surface Pressure Quantile: %.3f' % (prbs[53],psfcqs[53])
print(str1)
prsbt1qs = calculate_VPD.quantile_msgdat(prsbot1out,prbs)
str1 = '%.2f CldBot1 Pressure Quantile: %.3f' % (prbs[53],prsbt1qs[53])
print(str1)
dpcld1qs = calculate_VPD.quantile_msgdat(dpcld1out,prbs)
str1 = '%.2f DPCloud1 Quantile: %.3f' % (prbs[53],dpcld1qs[53])
print(str1)
dpslbqs = calculate_VPD.quantile_msgdat(dpslbout,prbs)
str1 = '%.2f DPSlab Quantile: %.3f' % (prbs[53],dpslbqs[53])
print(str1)
dpcld2qs = calculate_VPD.quantile_msgdat(dpcld2out,prbs)
str1 = '%.2f DPCloud2 Quantile: %.3f' % (prbs[53],dpcld2qs[53])
print(str1)
slb1qs = calculate_VPD.quantile_msgdat_discrete(slbtyp1out,prbs)
str1 = '%.2f Type1 Quantile: %d' % (prbs[53],slb1qs[53])
print(str1)
slb2qs = calculate_VPD.quantile_msgdat_discrete(slbtyp2out,prbs)
str1 = '%.2f Type2 Quantile: %d' % (prbs[53],slb2qs[53])
print(str1)
lgt1qs = calculate_VPD.quantile_msgdat(cfclgt1out,prbs)
str1 = '%.2f Logit 1 Quantile: %.3f' % (prbs[53],lgt1qs[53])
print(str1)
lgt2qs = calculate_VPD.quantile_msgdat(cfclgt2out,prbs)
str1 = '%.2f Logit 2 Quantile: %.3f' % (prbs[53],lgt2qs[53])
print(str1)
lgt12qs = calculate_VPD.quantile_msgdat(cfclgt12out,prbs)
str1 = '%.2f Logit 1/2 Quantile: %.3f' % (prbs[53],lgt12qs[53])
print(str1)
ngwt1qs = calculate_VPD.quantile_msgdat(ngwt1out,prbs)
str1 = '%.2f NGWater1 Quantile: %.3f' % (prbs[53],ngwt1qs[53])
print(str1)
ngwt2qs = calculate_VPD.quantile_msgdat(ngwt2out,prbs)
str1 = '%.2f NGWater2 Quantile: %.3f' % (prbs[53],ngwt2qs[53])
print(str1)
cttp1qs = calculate_VPD.quantile_msgdat(cttp1out,prbs)
str1 = '%.2f CTTemp1 Quantile: %.3f' % (prbs[53],cttp1qs[53])
print(str1)
cttp2qs = calculate_VPD.quantile_msgdat(cttp2out,prbs)
str1 = '%.2f CTTemp2 Quantile: %.3f' % (prbs[53],cttp2qs[53])
print(str1)
# Should be no missing for number of slabs
print('Slab summary')
print(numpy.amin(nslabout))
print(numpy.amax(nslabout))
print(tsmp)
# Output Quantiles
mstr = dyst.strftime('%b')
qfnm = '%s/%s_US_JJA_%02dUTC_%04d_Cloud_Quantile.nc' % (dtdr,rgchc,hrchc,yrlst[k])
qout = Dataset(qfnm,'w')
dimp = qout.createDimension('probability',nprb)
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
varnslb = qout.createVariable('NumberSlab_quantile','i2',['probability'], fill_value = -99)
varnslb[:] = nslbqs
varnslb.long_name = 'Number of cloud slabs quantiles'
varnslb.units = 'Count'
varnslb.missing_value = -99
varcbprs = qout.createVariable('CloudBot1Logit_quantile','f4',['probability'], fill_value = -9999)
varcbprs[:] = prsbt1qs
varcbprs.long_name = 'Slab 1 cloud bottom pressure logit quantiles'
varcbprs.units = 'hPa'
varcbprs.missing_value = -9999
vardpc1 = qout.createVariable('DPCloud1Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc1[:] = dpcld1qs
vardpc1.long_name = 'Slab 1 cloud pressure depth logit quantiles'
vardpc1.units = 'hPa'
vardpc1.missing_value = -9999
vardpslb = qout.createVariable('DPSlabLogit_quantile','f4',['probability'], fill_value = -9999)
vardpslb[:] = dpslbqs
vardpslb.long_name = 'Two-slab vertical separation logit quantiles'
vardpslb.units = 'hPa'
vardpslb.missing_value = -9999
vardpc2 = qout.createVariable('DPCloud2Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc2[:] = dpcld2qs
vardpc2.long_name = 'Slab 2 cloud pressure depth logit quantiles'
vardpc2.units = 'hPa'
vardpc2.missing_value = -9999
vartyp1 = qout.createVariable('CType1_quantile','i2',['probability'], fill_value = -99)
vartyp1[:] = slb1qs
vartyp1.long_name = 'Slab 1 cloud type quantiles'
vartyp1.units = 'None'
vartyp1.missing_value = -99
vartyp1.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
vartyp2 = qout.createVariable('CType2_quantile','i2',['probability'], fill_value = -99)
vartyp2[:] = slb2qs
vartyp2.long_name = 'Slab 2 cloud type quantiles'
vartyp2.units = 'None'
vartyp2.missing_value = -99
vartyp2.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
varlgt1 = qout.createVariable('CFrcLogit1_quantile','f4',['probability'], fill_value = -9999)
varlgt1[:] = lgt1qs
varlgt1.long_name = 'Slab 1 cloud fraction (cfrac1x) logit quantiles'
varlgt1.units = 'None'
varlgt1.missing_value = -9999
varlgt2 = qout.createVariable('CFrcLogit2_quantile','f4',['probability'], fill_value = -9999)
varlgt2[:] = lgt2qs
varlgt2.long_name = 'Slab 2 cloud fraction (cfrac2x) logit quantiles'
varlgt2.units = 'None'
varlgt2.missing_value = -9999
varlgt12 = qout.createVariable('CFrcLogit12_quantile','f4',['probability'], fill_value = -9999)
varlgt12[:] = lgt12qs
varlgt12.long_name = 'Slab 1/2 overlap fraction (cfrac12) logit quantiles'
varlgt12.units = 'None'
varlgt12.missing_value = -9999
varngwt1 = qout.createVariable('NGWater1_quantile','f4',['probability'], fill_value = -9999)
varngwt1[:] = ngwt1qs
varngwt1.long_name = 'Slab 1 cloud non-gas water quantiles'
varngwt1.units = 'g m^-2'
varngwt1.missing_value = -9999
varngwt2 = qout.createVariable('NGWater2_quantile','f4',['probability'], fill_value = -9999)
varngwt2[:] = ngwt2qs
varngwt2.long_name = 'Slab 2 cloud non-gas water quantiles'
varngwt2.units = 'g m^-2'
varngwt2.missing_value = -9999
varcttp1 = qout.createVariable('CTTemp1_quantile','f4',['probability'], fill_value = -9999)
varcttp1[:] = cttp1qs
varcttp1.long_name = 'Slab 1 cloud top temperature quantiles'
varcttp1.units = 'K'
varcttp1.missing_value = -9999
varcttp2 = qout.createVariable('CTTemp2_quantile','f4',['probability'], fill_value = -9999)
varcttp2[:] = cttp2qs
varcttp2.long_name = 'Slab 2 cloud top temperature quantiles'
varcttp2.units = 'K'
varcttp2.missing_value = -9999
qout.close()
# Set up transformations
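# The std_norm_quantile_from_obs* helpers appear to map each observation to a standard
# normal score: the value's position within its empirical quantile function (the *qs
# arrays above, at break points prbs) is converted through the inverse normal CDF,
# i.e. z = Phi^-1(p). The *_fill_msg variants additionally appear to fill missing
# values (msgval) using draws from the fitted quantile function.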
znslb = calculate_VPD.std_norm_quantile_from_obs(nslabout, nslbqs, prbs, msgval=-99)
zpsfc = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
zprsbt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(prsbot1out, prsbt1qs, prbs, msgval=-9999.)
zdpcld1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld1out, dpcld1qs, prbs, msgval=-9999.)
zdpslb = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpslbout, dpslbqs, prbs, msgval=-9999.)
zdpcld2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld2out, dpcld2qs, prbs, msgval=-9999.)
zctyp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp1out, slb1qs, prbs, msgval=-99)
zctyp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp2out, slb2qs, prbs, msgval=-99)
zlgt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt1out, lgt1qs, prbs, msgval=-9999.)
zlgt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt2out, lgt2qs, prbs, msgval=-9999.)
zlgt12 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt12out, lgt12qs, prbs, msgval=-9999.)
zngwt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt1out, ngwt1qs, prbs, msgval=-9999.)
zngwt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt2out, ngwt2qs, prbs, msgval=-9999.)
zcttp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp1out, cttp1qs, prbs, msgval=-9999.)
zcttp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp2out, cttp2qs, prbs, msgval=-9999.)
# Output transformed quantile samples
zfnm = '%s/%s_US_JJA_%02dUTC_%04d_Cloud_StdGausTrans.nc' % (dtdr,rgchc,hrchc,yrlst[k])
zout = Dataset(zfnm,'w')
dimsmp = zout.createDimension('sample',tsmp)
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varnslb = zout.createVariable('NumberSlab_StdGaus','f4',['sample'], fill_value = -9999)
varnslb[:] = znslb
varnslb.long_name = 'Quantile transformed number of cloud slabs'
varnslb.units = 'None'
varnslb.missing_value = -9999.
varcbprs = zout.createVariable('CloudBot1Logit_StdGaus','f4',['sample'], fill_value = -9999)
varcbprs[:] = zprsbt1
varcbprs.long_name = 'Quantile transformed slab 1 cloud bottom pressure logit'
varcbprs.units = 'None'
varcbprs.missing_value = -9999.
vardpc1 = zout.createVariable('DPCloud1Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc1[:] = zdpcld1
vardpc1.long_name = 'Quantile transformed slab 1 cloud pressure depth logit'
vardpc1.units = 'None'
vardpc1.missing_value = -9999.
vardpslb = zout.createVariable('DPSlabLogit_StdGaus','f4',['sample'], fill_value = -9999)
vardpslb[:] = zdpslb
vardpslb.long_name = 'Quantile transformed two-slab vertical separation logit'
vardpslb.units = 'None'
vardpslb.missing_value = -9999.
vardpc2 = zout.createVariable('DPCloud2Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc2[:] = zdpcld2
vardpc2.long_name = 'Quantile transformed slab 2 cloud pressure depth logit'
vardpc2.units = 'None'
vardpc2.missing_value = -9999.
vartyp1 = zout.createVariable('CType1_StdGaus','f4',['sample'], fill_value = -9999)
vartyp1[:] = zctyp1
vartyp1.long_name = 'Quantile transformed slab 1 cloud type'
vartyp1.units = 'None'
vartyp1.missing_value = -9999.
vartyp2 = zout.createVariable('CType2_StdGaus','f4',['sample'], fill_value = -9999)
vartyp2[:] = zctyp2
vartyp2.long_name = 'Quantile transformed slab 2 cloud type'
vartyp2.units = 'None'
vartyp2.missing_value = -9999.
varlgt1 = zout.createVariable('CFrcLogit1_StdGaus','f4',['sample'], fill_value = -9999)
varlgt1[:] = zlgt1
varlgt1.long_name = 'Quantile transformed slab 1 cloud fraction logit'
varlgt1.units = 'None'
varlgt1.missing_value = -9999.
varlgt2 = zout.createVariable('CFrcLogit2_StdGaus','f4',['sample'], fill_value = -9999)
varlgt2[:] = zlgt2
varlgt2.long_name = 'Quantile transformed slab 2 cloud fraction logit'
varlgt2.units = 'None'
varlgt2.missing_value = -9999.
varlgt12 = zout.createVariable('CFrcLogit12_StdGaus','f4',['sample'], fill_value = -9999)
varlgt12[:] = zlgt12
varlgt12.long_name = 'Quantile transformed slab 1/2 overlap fraction logit'
varlgt12.units = 'None'
varlgt12.missing_value = -9999.
varngwt1 = zout.createVariable('NGWater1_StdGaus','f4',['sample'], fill_value = -9999)
varngwt1[:] = zngwt1
varngwt1.long_name = 'Quantile transformed slab 1 non-gas water'
varngwt1.units = 'None'
varngwt1.missing_value = -9999.
varngwt2 = zout.createVariable('NGWater2_StdGaus','f4',['sample'], fill_value = -9999)
varngwt2[:] = zngwt2
varngwt2.long_name = 'Quantile transformed slab 2 non-gas water'
varngwt2.units = 'None'
varngwt2.missing_value = -9999.
varcttp1 = zout.createVariable('CTTemp1_StdGaus','f4',['sample'], fill_value = -9999)
varcttp1[:] = zcttp1
varcttp1.long_name = 'Quantile transformed slab 1 cloud top temperature'
varcttp1.units = 'None'
varcttp1.missing_value = -9999.
varcttp2 = zout.createVariable('CTTemp2_StdGaus','f4',['sample'], fill_value = -9999)
varcttp2[:] = zcttp2
varcttp2.long_name = 'Quantile transformed slab 2 cloud top temperature'
varcttp2.units = 'None'
varcttp2.missing_value = -9999.
zout.close()
return
# Temp/RH Quantiles
def quantile_profile_locmask(airsdr, mtdr, indr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, msk):
# Construct profile/sfc variable quantiles and z-scores, with a possibly irregular location mask
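# Arguments (following the conventions of the other quantile_* routines in this module):
# airsdr: Directory with the AIRS levels/quantiles reference file
# mtdr:   Directory for MERRA data
# indr:   Input directory (not referenced in the body shown here)
# dtdr:   Output directory
# yrlst:  List of years to process
# mnst, mnfn: Starting and ending month
# hrchc:  Template hour choice
# rgchc:  Template region choice
# msk:    Name of the region mask variable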
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (airsdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
nzout = 101
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f[msk][:,:]
latmet = f['plat'][:]
lonmet = f['plon'][:]
f.close()
mask[mask <= 0] = 0
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset to the bounding box of the masked region
lnsm = numpy.sum(mask,axis=0)
print(lnsq.shape)
print(lnsm.shape)
print(lnsm)
ltsm = numpy.sum(mask,axis=1)
print(ltsq.shape)
print(ltsm.shape)
print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mask[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = h5py.File(mtnm,'r')
stparr = f['/stemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
psfarr = f['/spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
salarr = f['/salti'][ltmn:ltmx,lnmn:lnmx]
f.close()
nt = psfarr.shape[0]
msksq1 = numpy.arange(mskflt.shape[0])
msksb1 = msksq1[mskflt > 0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
stparr = stparr.flatten()
psfarr = psfarr.flatten()
salarr = salarr.flatten()
if tsmp == 0:
sftmpout = numpy.zeros((msksb.shape[0],)) - 9999.0
sftmpout[:] = stparr[msksb]
psfcout = numpy.zeros((msksb.shape[0],)) - 9999.0
psfcout[:] = psfarr[msksb]
sfaltout = numpy.zeros((msksb.shape[0],)) - 9999.0
sfaltout[:] = numpy.tile(salarr[msksb1],nt)
else:
sftmpout = numpy.append(sftmpout,stparr[msksb])
psfcout = numpy.append(psfcout,psfarr[msksb])
sfaltout = numpy.append(sfaltout,numpy.tile(salarr[msksb1],nt))
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((msksb.shape[0],)) - 9999.0
latout[:] = lthld[msksb]
lonout = numpy.zeros((msksb.shape[0],)) - 9999.0
lonout[:] = lnhld[msksb]
yrout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[msksb]
else:
latout = numpy.append(latout,lthld[msksb])
lonout = numpy.append(lonout,lnhld[msksb])
yrtmp = numpy.zeros((msksb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[msksb])
tsmp = tsmp + msksb.shape[0]
# Vertical profiles
tmpmerout = numpy.zeros((tsmp,nzout)) - 9999.
h2omerout = numpy.zeros((tsmp,nzout)) - 9999.
altout = numpy.zeros((tsmp,nzout)) - 9999.
sidx = 0
for k in range(nyr):
dyinit = datetime.date(yrlst[k],6,1)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
tmhld = numpy.repeat(jdsq,nx*ny)
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_South_Southeast_US_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = h5py.File(mtnm,'r')
tmparr = f['/ptemp'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
h2oarr = f['/rh'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
altarr = f['/palts'][dystidx:dyfnidx,:,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = tmparr.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
lthld = numpy.tile(ltrp,nt)
lnhld = numpy.tile(lnrp,nt)
fidx = sidx + msksb.shape[0]
for j in range(nzout):
tmpvec = tmparr[:,j,:,:].flatten()
tmpvec[tmpvec > 1e30] = -9999.
tmpmerout[sidx:fidx,j] = tmpvec[msksb]
altvec = altarr[:,j,:,:].flatten()
altout[sidx:fidx,j] = altvec[msksb]
h2ovec = h2oarr[:,j,:,:].flatten()
h2ovec[h2ovec > 1e30] = -9999.
h2omerout[sidx:fidx,j] = h2ovec[msksb]
sidx = sidx + msksb.shape[0]
# Quantiles
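# For each of the nzout levels, compute temperature and RH quantiles across all samples
# and convert each sample to its standard-normal score; the surface fields follow below.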
ztmpout = numpy.zeros((tsmp,nzout)) - 9999.
zrhout = numpy.zeros((tsmp,nzout)) - 9999.
zsftmpout = numpy.zeros((tsmp,)) - 9999.
zsfaltout = numpy.zeros((tsmp,)) - 9999.
zpsfcout = numpy.zeros((tsmp,)) - 9999.
for j in range(nzout):
tmptmp = calculate_VPD.quantile_msgdat(tmpmerout[:,j],prbs)
tmpqout[j,:] = tmptmp[:]
str1 = 'Plev %.2f, %.2f Temp Quantile: %.3f' % (plev[j],prbs[103],tmptmp[103])
print(str1)
# Transform
ztmp = calculate_VPD.std_norm_quantile_from_obs(tmpmerout[:,j], tmptmp, prbs, msgval=-9999.)
ztmpout[:,j] = ztmp[:]
alttmp = calculate_VPD.quantile_msgdat(altout[:,j],prbs)
altmed[j] = alttmp[103]
str1 = 'Plev %.2f, %.2f Alt Quantile: %.3f' % (plev[j],prbs[103],alttmp[103])
print(str1)
# Adjust RH over 100
rhadj = h2omerout[:,j]
rhadj[rhadj > 1.0] = 1.0
rhqtmp = calculate_VPD.quantile_msgdat(rhadj,prbs)
rhqout[j,:] = rhqtmp[:]
str1 = 'Plev %.2f, %.2f RH Quantile: %.4f' % (plev[j],prbs[103],rhqtmp[103])
print(str1)
zrh = calculate_VPD.std_norm_quantile_from_obs(rhadj, rhqtmp, prbs, msgval=-9999.)
zrhout[:,j] = zrh[:]
psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
str1 = '%.2f PSfc Quantile: %.2f' % (prbs[103],psfcqs[103])
print(str1)
zpsfcout = calculate_VPD.std_norm_quantile_from_obs(psfcout, psfcqs, prbs, msgval=-9999.)
sftpqs = calculate_VPD.quantile_msgdat(sftmpout,prbs)
str1 = '%.2f SfcTmp Quantile: %.2f' % (prbs[103],sftpqs[103])
print(str1)
zsftmpout = calculate_VPD.std_norm_quantile_from_obs(sftmpout, sftpqs, prbs, msgval=-9999.)
sfalqs = calculate_VPD.quantile_msgdat(sfaltout,prbs)
str1 = '%.2f SfcAlt Quantile: %.2f' % (prbs[103],sfalqs[103])
print(str1)
zsfaltout = calculate_VPD.std_norm_quantile_from_obs(sfaltout, sfalqs, prbs, msgval=-9999.)
# Output Quantiles
mstr = dyst.strftime('%b')
qfnm = '%s/%s_US_JJA_%02dUTC_%04d_TempRHSfc_Quantile.nc' % (dtdr,rgchc,hrchc,yrlst[k])
qout = Dataset(qfnm,'w')
dimz = qout.createDimension('level',nzout)
dimp = qout.createDimension('probability',nprb)
varlvl = qout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
# Altitude grid
varalt = qout.createVariable('Altitude_median', 'f4', ['level'], fill_value = -9999)
varalt[:] = altmed
varalt.long_name = 'Altitude median value'
varalt.units = 'm'
varalt.missing_value = -9999
vartmp = qout.createVariable('Temperature_quantile', 'f4', ['level','probability'], fill_value = -9999)
vartmp[:] = tmpqout
vartmp.long_name = 'Temperature quantiles'
vartmp.units = 'K'
vartmp.missing_value = -9999.
varrh = qout.createVariable('RH_quantile', 'f4', ['level','probability'], fill_value = -9999)
varrh[:] = rhqout
varrh.long_name = 'Relative humidity quantiles'
varrh.units = 'Unitless'
varrh.missing_value = -9999.
varstmp = qout.createVariable('SfcTemp_quantile', 'f4', ['probability'], fill_value = -9999)
varstmp[:] = sftpqs
varstmp.long_name = 'Surface temperature quantiles'
varstmp.units = 'K'
varstmp.missing_value = -9999.
varpsfc = qout.createVariable('SfcPres_quantile', 'f4', ['probability'], fill_value = -9999)
varpsfc[:] = psfcqs
varpsfc.long_name = 'Surface pressure quantiles'
varpsfc.units = 'hPa'
varpsfc.missing_value = -9999.
varsalt = qout.createVariable('SfcAlt_quantile', 'f4', ['probability'], fill_value = -9999)
varsalt[:] = sfalqs
varsalt.long_name = 'Surface altitude quantiles'
varsalt.units = 'm'
varsalt.missing_value = -9999.
qout.close()
# Output transformed quantile samples
zfnm = '%s/%s_US_JJA_%02dUTC_%04d_TempRHSfc_StdGausTrans.nc' % (dtdr,rgchc,hrchc,yrlst[k])
zout = Dataset(zfnm,'w')
dimz = zout.createDimension('level',nzout)
dimsmp = zout.createDimension('sample',tsmp)
varlvl = zout.createVariable('level','f4',['level'], fill_value = -9999)
varlvl[:] = plev
varlvl.long_name = 'AIRS/SARTA pressure levels'
varlvl.units = 'hPa'
varlvl.missing_value = -9999
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varsrt3 = zout.createVariable('Temperature_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt3[:] = ztmpout
varsrt3.long_name = 'Quantile transformed temperature'
varsrt3.units = 'None'
varsrt3.missing_value = -9999.
varsrt4 = zout.createVariable('RH_StdGaus', 'f4', ['sample','level'], fill_value = -9999)
varsrt4[:] = zrhout
varsrt4.long_name = 'Quantile transformed relative humidity'
varsrt4.units = 'None'
varsrt4.missing_value = -9999.
varsrts1 = zout.createVariable('SfcTemp_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts1[:] = zsftmpout
varsrts1.long_name = 'Quantile transformed surface temperature'
varsrts1.units = 'None'
varsrts1.missing_value = -9999.
varsrts2 = zout.createVariable('SfcPres_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts2[:] = zpsfcout
varsrts2.long_name = 'Quantile transformed surface pressure'
varsrts2.units = 'None'
varsrts2.missing_value = -9999.
varsrts3 = zout.createVariable('SfcAlt_StdGaus', 'f4', ['sample'], fill_value = -9999)
varsrts3[:] = zsfaltout
varsrts3.long_name = 'Quantile transformed surface altitude'
varsrts3.units = 'None'
varsrts3.missing_value = -9999.
zout.close()
return
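# Example call (hypothetical directories and mask name, for illustration only):
# quantile_profile_locmask('/data/airs_ref', '/data/merra', '/data/input', '/data/output',
#                          [2016], 6, 8, 0, 'SEUS', 'region_mask')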
def expt_near_sfc_summary(inpdr, outdr, expfl, qclrfl, outfnm):
# Produce experiment near-surface summaries
# inpdr: Name of input directory
# outdr: Name of output directory
# expfl: Name of file with experiment results
# qclrfl: Input quantile file
# outfnm: Output file name
nzairs = 100
nzsrt = 101
# Read simulation results
f = h5py.File(expfl,'r')
tmprtr = f['airs_ptemp'][:,:]
h2ortr = f['airs_h2o'][:,:]
tqflg = f['airs_ptemp_qc'][:,:]
hqflg = f['airs_h2o_qc'][:,:]
tmpsrt = f['ptemp'][:,1:nzsrt]
h2osrt = f['gas_1'][:,1:nzsrt]
psfc = f['spres'][:]
lvs = f['level'][1:nzsrt]
f.close()
nszout = tmprtr.shape[0]
tqflg = tqflg.astype(numpy.int16)
hqflg = hqflg.astype(numpy.int16)
# Altitude info
qin = Dataset(qclrfl,'r')
alts = qin['Altitude_median'][:]
qin.close()
alth2o = numpy.zeros((nszout,nzsrt))
alth2o[:,nzsrt-4] = alts[nzsrt-4]
curdlt = 0.0
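# Build a per-sample altitude grid from the median altitudes: moving upward from
# level nzsrt-5, levels whose median altitude increases are used directly, while
# non-increasing (e.g. missing) entries are extrapolated above the level below by
# doubling the running layer thickness.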
for j in range(nzsrt-5,-1,-1):
#str1 = 'Level %d: %.4f' % (j,curdlt)
#print(str1)
if (alts[j] > alts[j+1]):
curdlt = alts[j] - alts[j+1]
alth2o[:,j] = alts[j]
else:
alth2o[:,j] = alts[j+1] + curdlt * 2.0
curdlt = curdlt * 2.0
alth2o[:,97] = 0.0
tsfcsrt = calculate_VPD.near_sfc_temp(tmpsrt, lvs, psfc, passqual = False, qual = None)
print(tsfcsrt[0:10])
tsfcrtr, tqflgsfc = calculate_VPD.near_sfc_temp(tmprtr, lvs, psfc, passqual = True, qual = tqflg)
print(tsfcrtr[0:10])
print(tqflgsfc[0:10])
qvsrt, rhsrt, vpdsrt = calculate_VPD.calculate_QV_and_VPD(h2osrt,tmpsrt,lvs,alth2o[:,1:nzsrt])
qvrtr, rhrtr, vpdrtr = calculate_VPD.calculate_QV_and_VPD(h2ortr,tmprtr,lvs,alth2o[:,1:nzsrt])
qsfsrt, rhsfsrt = calculate_VPD.near_sfc_qv_rh(qvsrt, tsfcsrt, lvs, psfc, passqual = False, qual = None)
qsfrtr, rhsfrtr, qflgsfc = calculate_VPD.near_sfc_qv_rh(qvrtr, tsfcrtr, lvs, psfc, passqual = True, qual = hqflg)
print(tqflgsfc.dtype)
print(qflgsfc.dtype)
# Output: Sfc Temp and qflg, Sfc QV, RH and qflg
fldbl = numpy.array([-9999.],dtype=numpy.float64)
flflt = numpy.array([-9999.],dtype=numpy.float32)
flshrt = numpy.array([-99],dtype=numpy.int16)
#outfnm = '%s/MAGIC_%s_%s_%02dUTC_SR%02d_Sfc_UQ_Output.h5' % (outdr,rgchc,mnchc,hrchc,scnrw)
f = h5py.File(outfnm,'w')
dft1 = f.create_dataset('TSfcAir_True',data=tsfcsrt)
dft1.attrs['missing_value'] = fldbl
dft1.attrs['_FillValue'] = fldbl
dft2 = f.create_dataset('TSfcAir_Retrieved',data=tsfcrtr)
dft2.attrs['missing_value'] = fldbl
dft2.attrs['_FillValue'] = fldbl
dft3 = f.create_dataset('TSfcAir_QC',data=tqflgsfc)
dfq1 = f.create_dataset('QVSfcAir_True',data=qsfsrt)
dfq1.attrs['missing_value'] = fldbl
dfq1.attrs['_FillValue'] = fldbl
dfq2 = f.create_dataset('QVSfcAir_Retrieved',data=qsfrtr)
dfq2.attrs['missing_value'] = fldbl
dfq2.attrs['_FillValue'] = fldbl
dfq3 = f.create_dataset('RHSfcAir_True',data=rhsfsrt)
dfq3.attrs['missing_value'] = fldbl
dfq3.attrs['_FillValue'] = fldbl
dfq4 = f.create_dataset('RHSfcAir_Retrieved',data=rhsfrtr)
dfq4.attrs['missing_value'] = fldbl
dfq4.attrs['_FillValue'] = fldbl
dfq5 = f.create_dataset('RHSfcAir_QC',data=qflgsfc)
dfp1 = f.create_dataset('SfcPres',data=psfc)
dfp1.attrs['missing_value'] = fldbl
dfp1.attrs['_FillValue'] = fldbl
f.close()
return
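# Example call (hypothetical file names, for illustration only):
# expt_near_sfc_summary('/data/input', '/data/output',
#                       '/data/input/retrieval_simulation.h5',
#                       '/data/output/SEUS_US_JJA_00UTC_2016_TempRHSfc_Quantile.nc',
#                       '/data/output/SEUS_US_JJA_00UTC_2016_Sfc_UQ_Output.h5')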
def quantile_cfrac_locmask_conus(rfdr, mtdr, csdr, airdr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, mskvr, mskvl):
# Construct cloud variable quantiles and z-scores, with a possibly irregular location mask
# rfdr: Directory for reference data (Levels/Quantiles)
# mtdr: Directory for MERRA data
# csdr: Directory for cloud slab data
# airdr: Directory for AIRS cloud fraction
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
# RN generator
sdchc = 542354 + yrlst[0] + hrchc
random.seed(sdchc)
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['plat'][:]
lonmet = f.variables['plon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset to the bounding box of the masked region
lnsm = numpy.sum(mskind,axis=0)
#print(lnsq.shape)
#print(lnsm.shape)
#print(lnsm)
ltsm = numpy.sum(mskind,axis=1)
#print(ltsq.shape)
#print(ltsm.shape)
#print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp = numpy.tile(lonmet[lnmn:lnmx],ny)
ltrp = numpy.repeat(latmet[ltmn:ltmx],nx)
mskblk = mskind[ltmn:ltmx,lnmn:lnmx]
mskflt = mskblk.flatten()
tsmp = 0
for k in range(nyr):
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(fnm,'r')
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
tmunit = tmunit.replace("days since ","")
dybs = datetime.datetime.strptime(tmunit,"%Y-%m-%d %H:%M:%S")
print(dybs)
dy0 = dybs + datetime.timedelta(days=tminf[0])
dyinit = datetime.date(dy0.year,dy0.month,dy0.day)
print(dyinit)
dyst = datetime.date(yrlst[k],mnst,1)
ttst = dyst.timetuple()
jst = ttst.tm_yday
if mnfn < 12:
dyfn = datetime.date(yrlst[k],mnfn+1,1)
ttfn = dyfn.timetuple()
jfn = ttfn.tm_yday
else:
dyfn = datetime.date(yrlst[k]+1,1,1)
dy31 = datetime.date(yrlst[k],12,31)
tt31 = dy31.timetuple()
jfn = tt31.tm_yday + 1
dystidx = abs((dyst-dyinit).days)
dyfnidx = abs((dyfn-dyinit).days)
jdsq = numpy.arange(jst,jfn)
print(jdsq)
tmhld = numpy.repeat(jdsq,nx*ny)
#print(tmhld.shape)
#print(numpy.amin(tmhld))
#print(numpy.amax(tmhld))
stridx = 'Day Range: %d, %d\n' % (dystidx,dyfnidx)
print(stridx)
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing_IncludesCloudParams.h5' % (csdr,yrlst[k],hrchc)
f = h5py.File(fnm,'r')
tms = f['/time'][:,dystidx:dyfnidx]
ctyp1 = f['/ctype'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
ctyp2 = f['/ctype2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt1 = f['/cprtop'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprt2 = f['/cprtop2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb1 = f['/cprbot'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cprb2 = f['/cprbot2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc1 = f['/cfrac'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc2 = f['/cfrac2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cfrc12 = f['/cfrac12'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt1 = f['/cngwat'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cngwt2 = f['/cngwat2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp1 = f['/cstemp'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
cttp2 = f['/cstemp2'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
tmflt = tms.flatten()
nt = tmflt.shape[0]
lnhld = numpy.tile(lnrp,nt)
lthld = numpy.tile(ltrp,nt)
mtnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[k],hrchc)
f = Dataset(mtnm,'r')
psfc = f.variables['spres'][dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
nt = ctyp1.shape[0]
mskall = numpy.tile(mskflt,nt)
msksq = numpy.arange(mskall.shape[0])
msksb = msksq[mskall > 0]
mskstr = 'Total Obs: %d, Within Mask: %d \n' % (msksq.shape[0],msksb.shape[0])
print(mskstr)
# lthld = numpy.tile(ltrp,nt)
# lnhld = numpy.tile(lnrp,nt)
nslbtmp = numpy.zeros((ctyp1.shape),dtype=numpy.int16)
nslbtmp[(ctyp1 > 100) & (ctyp2 > 100)] = 2
nslbtmp[(ctyp1 > 100) & (ctyp2 < 100)] = 1
# AIRS clouds
anm = '%s/CONUS_AIRS_CldFrc_Match_JJA_%d_%02d_UTC.nc' % (airdr,yrlst[k],hrchc)
f = Dataset(anm,'r')
arsfrc1 = f.variables['AIRS_CldFrac_1'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
arsfrc2 = f.variables['AIRS_CldFrac_2'][:,dystidx:dyfnidx,ltmn:ltmx,lnmn:lnmx]
f.close()
# Sum
frctot = arsfrc1 + arsfrc2
# Construct Clr/PC/Ovc indicator for AIRS total cloud frac
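# Indicator convention (matches the CCoverInd attributes written below):
# -1 = clear (total fraction exactly 0), 1 = overcast (exactly 1), 0 = partly cloudy.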
totclr = numpy.zeros(frctot.shape,dtype=numpy.int16)
totclr[frctot == 0.0] = -1
totclr[frctot == 1.0] = 1
totclr = ma.masked_array(totclr, mask = frctot.mask)
frc0 = frctot[0,:,:,:]
frc0 = frc0.flatten()
frcsq = numpy.arange(tmhld.shape[0])
# Subset by AIRS matchup and location masks
frcsb = frcsq[(numpy.logical_not(frc0.mask)) & (mskall > 0)]
nairs = frcsb.shape[0]
print(tmhld.shape)
print(frcsb.shape)
ctyp1 = ctyp1.flatten()
ctyp2 = ctyp2.flatten()
nslbtmp = nslbtmp.flatten()
cngwt1 = cngwt1.flatten()
cngwt2 = cngwt2.flatten()
cttp1 = cttp1.flatten()
cttp2 = cttp2.flatten()
psfc = psfc.flatten()
# Number of slabs
if tsmp == 0:
nslabout = numpy.zeros((nairs,),dtype=numpy.int16)
nslabout[:] = nslbtmp[frcsb]
else:
nslabout = numpy.append(nslabout,nslbtmp[frcsb])
# For two slabs, slab 1 must have the highest cloud bottom pressure
cprt1 = cprt1.flatten()
cprt2 = cprt2.flatten()
cprb1 = cprb1.flatten()
cprb2 = cprb2.flatten()
slabswap = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16)
swpsq = frcsq[(nslbtmp == 2) & (cprb1 < cprb2)]
slabswap[swpsq] = 1
# Cloud Pressure variables
pbttmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp1[nslbtmp >= 1] = cprb1[nslbtmp >= 1]
pbttmp1[swpsq] = cprb2[swpsq]
ptptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp1[nslbtmp >= 1] = cprt1[nslbtmp >= 1]
ptptmp1[swpsq] = cprt2[swpsq]
pbttmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
pbttmp2[nslbtmp == 2] = cprb2[nslbtmp == 2]
pbttmp2[swpsq] = cprb1[swpsq]
ptptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
ptptmp2[nslbtmp == 2] = cprt2[nslbtmp == 2]
ptptmp2[swpsq] = cprt1[swpsq]
# DP Cloud transformation
dptmp1 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp1[nslbtmp >= 1] = pbttmp1[nslbtmp >= 1] - ptptmp1[nslbtmp >= 1]
dpslbtmp = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dpslbtmp[nslbtmp == 2] = ptptmp1[nslbtmp == 2] - pbttmp2[nslbtmp == 2]
dptmp2 = numpy.zeros((ctyp1.shape[0],)) - 9999.0
dptmp2[nslbtmp == 2] = pbttmp2[nslbtmp == 2] - ptptmp2[nslbtmp == 2]
# Adjust negative DPSlab values
dpnsq = frcsq[(nslbtmp == 2) & (dpslbtmp < 0.0) & (dpslbtmp > -1000.0)]
dpadj = numpy.zeros((ctyp1.shape[0],))
dpadj[dpnsq] = numpy.absolute(dpslbtmp[dpnsq])
dpslbtmp[dpnsq] = 1.0
dptmp1[dpnsq] = dptmp1[dpnsq] / 2.0
dptmp2[dpnsq] = dptmp2[dpnsq] / 2.0
# Sigma / Logit Adjustments
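# The cloud pressure geometry is re-expressed as fractions of surface pressure
# (surface-to-cloud-bottom gap, slab 1 depth, inter-slab gap, slab 2 depth, and the
# remainder above), which sum to 1 and are then mapped to logit-style coordinates
# with calculate_VPD.lgtzs; fractions at or below zero are nudged to 0.01 first.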
zpbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp1tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdslbtmp = numpy.zeros((psfc.shape[0],)) - 9999.0
zdp2tmp = numpy.zeros((psfc.shape[0],)) - 9999.0
ncldct = 0
for t in range(psfc.shape[0]):
if ( (pbttmp1[t] >= 0.0) and (dpslbtmp[t] >= 0.0) ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], dpslbtmp[t] / psfc[t], \
dptmp2[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
prptmp[2] = prptmp[2] + prpadj*prptmp[2]
prptmp[3] = prptmp[3] + prpadj*prptmp[3]
ncldct = ncldct + 1
prptmp[4] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2] - prptmp[3]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = ztmp[2]
zdp2tmp[t] = ztmp[3]
elif ( pbttmp1[t] >= 0.0 ):
prptmp = numpy.array( [ (psfc[t] - pbttmp1[t]) / psfc[t], \
dptmp1[t] / psfc[t], 0.0 ] )
if (prptmp[0] < 0.0):
# Adjustment needed
prpadj = prptmp[0]
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
elif (prptmp[0] == 0.0):
# Adjustment needed
prpadj = -0.01
prptmp[0] = 0.01
prptmp[1] = prptmp[1] + prpadj*prptmp[1]
ncldct = ncldct + 1
prptmp[2] = 1.0 - prptmp[0] - prptmp[1]
ztmp = calculate_VPD.lgtzs(prptmp)
zpbtmp[t] = ztmp[0]
zdp1tmp[t] = ztmp[1]
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
else:
zpbtmp[t] = -9999.0
zdp1tmp[t] = -9999.0
zdslbtmp[t] = -9999.0
zdp2tmp[t] = -9999.0
str1 = 'Cloud Bot Pres Below Sfc: %d ' % (ncldct)
print(str1)
if tsmp == 0:
psfcout = numpy.zeros((frcsb.shape[0],)) - 9999.0
psfcout[:] = psfc[frcsb]
prsbot1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
prsbot1out[:] = zpbtmp[frcsb]
dpcld1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld1out[:] = zdp1tmp[frcsb]
dpslbout = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpslbout[:] = zdslbtmp[frcsb]
dpcld2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
dpcld2out[:] = zdp2tmp[frcsb]
else:
psfcout = numpy.append(psfcout,psfc[frcsb])
prsbot1out = numpy.append(prsbot1out,zpbtmp[frcsb])
dpcld1out = numpy.append(dpcld1out,zdp1tmp[frcsb])
dpslbout = numpy.append(dpslbout,zdslbtmp[frcsb])
dpcld2out = numpy.append(dpcld2out,zdp2tmp[frcsb])
# Slab Types: 101.0 = Liquid, 201.0 = Ice, None else
# Output: 0 = Liquid, 1 = Ice
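# e.g. ctype = 101.0 -> (101 - 1)/100 - 1 = 0 (liquid); ctype = 201.0 -> 1 (ice)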
typtmp1 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp1[nslbtmp >= 1] = (ctyp1[nslbtmp >= 1] - 1.0) / 100.0 - 1.0
typtmp1[swpsq] = (ctyp2[swpsq] - 1.0) / 100.0 - 1.0
typtmp2 = numpy.zeros((ctyp1.shape[0],),dtype=numpy.int16) - 99
typtmp2[nslbtmp == 2] = (ctyp2[nslbtmp == 2] - 1.0) / 100.0 - 1.0
typtmp2[swpsq] = (ctyp1[swpsq] - 1.0) / 100.0 - 1.0
if tsmp == 0:
slbtyp1out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp1out[:] = typtmp1[frcsb]
slbtyp2out = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
slbtyp2out[:] = typtmp2[frcsb]
else:
slbtyp1out = numpy.append(slbtyp1out,typtmp1[frcsb])
slbtyp2out = numpy.append(slbtyp2out,typtmp2[frcsb])
# Cloud Cover Indicators
totclrtmp = numpy.zeros((frcsb.shape[0],3,3),dtype=numpy.int16)
cctr = 0
for frw in range(3):
for fcl in range(3):
clrvec = totclr[cctr,:,:,:].flatten()
totclrtmp[:,frw,fcl] = clrvec[frcsb]
cctr = cctr + 1
if tsmp == 0:
totclrout = numpy.zeros(totclrtmp.shape,dtype=numpy.int16)
totclrout[:,:,:] = totclrtmp
else:
totclrout = numpy.append(totclrout,totclrtmp,axis=0)
# Cloud Fraction Logit, still account for swapping
z1tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z2tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
z12tmp = numpy.zeros((frcsb.shape[0],3,3)) - 9999.0
# Cloud Fraction
cctr = 0
for frw in range(3):
for fcl in range(3):
frcvect = frctot[cctr,:,:,:].flatten()
frcvec1 = arsfrc1[cctr,:,:,:].flatten()
frcvec2 = arsfrc2[cctr,:,:,:].flatten()
# Quick fix for totals over 1.0: rescale both layers so the combined fraction is 1.0
fvsq = numpy.arange(frcvect.shape[0])
fvsq2 = fvsq[frcvect > 1.0]
frcvec1[fvsq2] = frcvec1[fvsq2] / frcvect[fvsq2]
frcvec2[fvsq2] = frcvec2[fvsq2] / frcvect[fvsq2]
frcvect[fvsq2] = 1.0
for t in range(nairs):
crslb = nslbtmp[frcsb[t]]
crclr = totclrtmp[t,frw,fcl]
if ( (crslb == 0) or (crclr == -1) ):
z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 1) ):
z1tmp[t,frw,fcl] = -9999.0
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
elif ( (crslb == 1) and (crclr == 0) ):
prptmp = numpy.array( [frcvect[frcsb[t]], 1.0 - frcvect[frcsb[t]] ] )
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = -9999.0
z12tmp[t,frw,fcl] = -9999.0
# For 2 slabs, recall AIRS cloud layers go upper/lower, ours is opposite
# Also apply a random-overlap assumption and adjust near-zero AIRS fractions
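# A coefficient c ~ Uniform(0, min(f1, f2)) is drawn; the partition passed to the
# logit transform is [f1 - c*f2, f2 - c*f1, c, remainder], with f1/f2 the AIRS layer
# fractions reordered to this code's slab 1/slab 2 convention.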
elif ( (crslb == 2) and (crclr == 0) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if (numpy.sum(frcs) < 0.01):
frcs[0] = 0.005
frcs[1] = 0.005
elif frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [frcs[0] - c12tmp[0]*frcs[1], \
frcs[1] - c12tmp[0]*frcs[0], c12tmp[0], 0.0])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[1]
z12tmp[t,frw,fcl] = ztmp[2]
elif ( (crslb == 2) and (crclr == 1) ):
frcs = numpy.array([frcvec2[frcsb[t]],frcvec1[frcsb[t]]])
if frcs[0] < 0.005:
frcs[0] = 0.005
frcs[1] = frcs[1] - 0.005
elif frcs[1] < 0.005:
frcs[1] = 0.005
frcs[0] = frcs[0] - 0.005
mnfrc = numpy.amin(frcs)
c12tmp = random.uniform(0.0,mnfrc,size=1)
prptmp = numpy.array( [0.999 * (frcs[0] - c12tmp[0]*frcs[1]), \
0.999 * (frcs[1] - c12tmp[0]*frcs[0]), 0.999 * c12tmp[0], 0.001])
prptmp[3] = 1.0 - prptmp[0] - prptmp[1] - prptmp[2]
ztmp = calculate_VPD.lgtzs(prptmp)
z1tmp[t,frw,fcl] = ztmp[0]
z2tmp[t,frw,fcl] = ztmp[1]
z12tmp[t,frw,fcl] = ztmp[2]
cctr = cctr + 1
if tsmp == 0:
cfclgt1out = numpy.zeros(z1tmp.shape)
cfclgt1out[:,:,:] = z1tmp
cfclgt2out = numpy.zeros(z2tmp.shape)
cfclgt2out[:,:,:] = z2tmp
cfclgt12out = numpy.zeros(z12tmp.shape)
cfclgt12out[:,:,:] = z12tmp
else:
cfclgt1out = numpy.append(cfclgt1out,z1tmp,axis=0)
cfclgt2out = numpy.append(cfclgt2out,z2tmp,axis=0)
cfclgt12out = numpy.append(cfclgt12out,z12tmp,axis=0)
# Cloud Non-Gas Water
ngwttmp1 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp1[nslbtmp >= 1] = cngwt1[nslbtmp >= 1]
ngwttmp1[swpsq] = cngwt2[swpsq]
ngwttmp2 = numpy.zeros(cngwt1.shape[0]) - 9999.0
ngwttmp2[nslbtmp == 2] = cngwt2[nslbtmp == 2]
ngwttmp2[swpsq] = cngwt1[swpsq]
if tsmp == 0:
ngwt1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt1out[:] = ngwttmp1[frcsb]
ngwt2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
ngwt2out[:] = ngwttmp2[frcsb]
else:
ngwt1out = numpy.append(ngwt1out,ngwttmp1[frcsb])
ngwt2out = numpy.append(ngwt2out,ngwttmp2[frcsb])
# Cloud Top Temperature
cttptmp1 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp1[nslbtmp >= 1] = cttp1[nslbtmp >= 1]
cttptmp1[swpsq] = cttp2[swpsq]
cttptmp2 = numpy.zeros(cttp1.shape[0]) - 9999.0
cttptmp2[nslbtmp == 2] = cttp2[nslbtmp == 2]
cttptmp2[swpsq] = cttp1[swpsq]
if tsmp == 0:
cttp1out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp1out[:] = cttptmp1[frcsb]
cttp2out = numpy.zeros((frcsb.shape[0],)) - 9999.0
cttp2out[:] = cttptmp2[frcsb]
else:
cttp1out = numpy.append(cttp1out,cttptmp1[frcsb])
cttp2out = numpy.append(cttp2out,cttptmp2[frcsb])
# Loc/Time
if tsmp == 0:
latout = numpy.zeros((frcsb.shape[0],)) - 9999.0
latout[:] = lthld[frcsb]
lonout = numpy.zeros((frcsb.shape[0],)) - 9999.0
lonout[:] = lnhld[frcsb]
yrout = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
yrout[:] = yrlst[k]
jdyout = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
jdyout[:] = tmhld[frcsb]
else:
latout = numpy.append(latout,lthld[frcsb])
lonout = numpy.append(lonout,lnhld[frcsb])
yrtmp = numpy.zeros((frcsb.shape[0],),dtype=numpy.int16)
yrtmp[:] = yrlst[k]
yrout = numpy.append(yrout,yrtmp)
jdyout = numpy.append(jdyout,tmhld[frcsb])
tsmp = tsmp + nairs
# Process quantiles
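# quantile_msgdat / quantile_msgdat_discrete presumably compute empirical quantiles at
# the prbs break points while skipping the missing-value codes (-99 / -9999) used here.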
nslbqs = calculate_VPD.quantile_msgdat_discrete(nslabout,prbs)
str1 = '%.2f Number Slab Quantile: %d' % (prbs[103],nslbqs[103])
print(str1)
print(nslbqs)
# psfcqs = calculate_VPD.quantile_msgdat(psfcout,prbs)
# str1 = '%.2f Surface Pressure Quantile: %.3f' % (prbs[53],psfcqs[53])
# print(str1)
prsbt1qs = calculate_VPD.quantile_msgdat(prsbot1out,prbs)
str1 = '%.2f CldBot1 Pressure Quantile: %.3f' % (prbs[103],prsbt1qs[103])
print(str1)
dpcld1qs = calculate_VPD.quantile_msgdat(dpcld1out,prbs)
str1 = '%.2f DPCloud1 Quantile: %.3f' % (prbs[103],dpcld1qs[103])
print(str1)
dpslbqs = calculate_VPD.quantile_msgdat(dpslbout,prbs)
str1 = '%.2f DPSlab Quantile: %.3f' % (prbs[103],dpslbqs[103])
print(str1)
dpcld2qs = calculate_VPD.quantile_msgdat(dpcld2out,prbs)
str1 = '%.2f DPCloud2 Quantile: %.3f' % (prbs[103],dpcld2qs[103])
print(str1)
slb1qs = calculate_VPD.quantile_msgdat_discrete(slbtyp1out,prbs)
str1 = '%.2f Type1 Quantile: %d' % (prbs[103],slb1qs[103])
print(str1)
slb2qs = calculate_VPD.quantile_msgdat_discrete(slbtyp2out,prbs)
str1 = '%.2f Type2 Quantile: %d' % (prbs[103],slb2qs[103])
print(str1)
# Indicators
totclrqout = numpy.zeros((3,3,nprb)) - 99
lgt1qs = numpy.zeros((3,3,nprb)) - 9999.0
lgt2qs = numpy.zeros((3,3,nprb)) - 9999.0
lgt12qs = numpy.zeros((3,3,nprb)) - 9999.0
for frw in range(3):
for fcl in range(3):
tmpclr = calculate_VPD.quantile_msgdat_discrete(totclrout[:,frw,fcl],prbs)
totclrqout[frw,fcl,:] = tmpclr[:]
str1 = 'Clr/Ovc Indicator %d, %d %.2f Quantile: %d' % (frw,fcl,prbs[103],tmpclr[103])
print(str1)
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt1out[:,frw,fcl],prbs)
lgt1qs[frw,fcl,:] = tmplgtq[:]
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt2out[:,frw,fcl],prbs)
lgt2qs[frw,fcl,:] = tmplgtq[:]
tmplgtq = calculate_VPD.quantile_msgdat(cfclgt12out[:,frw,fcl],prbs)
lgt12qs[frw,fcl,:] = tmplgtq[:]
str1 = 'CFrac Logit %d, %d %.2f Quantile: %.3f, %.3f, %.3f' % (frw,fcl,prbs[103], \
lgt1qs[frw,fcl,103],lgt2qs[frw,fcl,103],lgt12qs[frw,fcl,103])
print(str1)
ngwt1qs = calculate_VPD.quantile_msgdat(ngwt1out,prbs)
str1 = '%.2f NGWater1 Quantile: %.3f' % (prbs[103],ngwt1qs[103])
print(str1)
ngwt2qs = calculate_VPD.quantile_msgdat(ngwt2out,prbs)
str1 = '%.2f NGWater2 Quantile: %.3f' % (prbs[103],ngwt2qs[103])
print(str1)
cttp1qs = calculate_VPD.quantile_msgdat(cttp1out,prbs)
str1 = '%.2f CTTemp1 Quantile: %.3f' % (prbs[103],cttp1qs[103])
print(str1)
cttp2qs = calculate_VPD.quantile_msgdat(cttp2out,prbs)
str1 = '%.2f CTTemp2 Quantile: %.3f' % (prbs[103],cttp2qs[103])
print(str1)
# Output Quantiles
qfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_Quantile.nc' % (dtdr,yrlst[k],hrchc,rgchc)
qout = Dataset(qfnm,'w')
dimp = qout.createDimension('probability',nprb)
dimfov1 = qout.createDimension('fovrow',3)
dimfov2 = qout.createDimension('fovcol',3)
varprb = qout.createVariable('probability','f4',['probability'], fill_value = -9999)
varprb[:] = prbs
varprb.long_name = 'Probability break points'
varprb.units = 'none'
varprb.missing_value = -9999
varnslb = qout.createVariable('NumberSlab_quantile','i2',['probability'], fill_value = -99)
varnslb[:] = nslbqs
varnslb.long_name = 'Number of cloud slabs quantiles'
varnslb.units = 'Count'
varnslb.missing_value = -99
varcbprs = qout.createVariable('CloudBot1Logit_quantile','f4',['probability'], fill_value = -9999)
varcbprs[:] = prsbt1qs
varcbprs.long_name = 'Slab 1 cloud bottom pressure logit quantiles'
varcbprs.units = 'hPa'
varcbprs.missing_value = -9999
vardpc1 = qout.createVariable('DPCloud1Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc1[:] = dpcld1qs
vardpc1.long_name = 'Slab 1 cloud pressure depth logit quantiles'
vardpc1.units = 'hPa'
vardpc1.missing_value = -9999
vardpslb = qout.createVariable('DPSlabLogit_quantile','f4',['probability'], fill_value = -9999)
vardpslb[:] = dpslbqs
vardpslb.long_name = 'Two-slab vertical separation logit quantiles'
vardpslb.units = 'hPa'
vardpslb.missing_value = -9999
vardpc2 = qout.createVariable('DPCloud2Logit_quantile','f4',['probability'], fill_value = -9999)
vardpc2[:] = dpcld2qs
vardpc2.long_name = 'Slab 2 cloud pressure depth logit quantiles'
vardpc2.units = 'hPa'
vardpc2.missing_value = -9999
vartyp1 = qout.createVariable('CType1_quantile','i2',['probability'], fill_value = -99)
vartyp1[:] = slb1qs
vartyp1.long_name = 'Slab 1 cloud type quantiles'
vartyp1.units = 'None'
vartyp1.missing_value = -99
vartyp1.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
vartyp2 = qout.createVariable('CType2_quantile','i2',['probability'], fill_value = -99)
vartyp2[:] = slb2qs
vartyp2.long_name = 'Slab 2 cloud type quantiles'
vartyp2.units = 'None'
vartyp2.missing_value = -99
vartyp2.comment = 'Cloud slab type: 0=Liquid, 1=Ice'
varcvr = qout.createVariable('CCoverInd_quantile','i2',['fovrow','fovcol','probability'], fill_value = -99)
varcvr[:] = totclrqout
varcvr.long_name = 'Cloud cover indicator quantiles'
varcvr.units = 'None'
varcvr.missing_value = -99
varcvr.comment = 'Cloud cover indicators: -1=Clear, 0=Partly cloudy, 1=Overcast'
varlgt1 = qout.createVariable('CFrcLogit1_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt1[:] = lgt1qs
varlgt1.long_name = 'Slab 1 cloud fraction (cfrac1x) logit quantiles'
varlgt1.units = 'None'
varlgt1.missing_value = -9999
varlgt2 = qout.createVariable('CFrcLogit2_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt2[:] = lgt2qs
varlgt2.long_name = 'Slab 2 cloud fraction (cfrac2x) logit quantiles'
varlgt2.units = 'None'
varlgt2.missing_value = -9999
varlgt12 = qout.createVariable('CFrcLogit12_quantile','f4',['fovrow','fovcol','probability'], fill_value = -9999)
varlgt12[:] = lgt12qs
varlgt12.long_name = 'Slab 1/2 overlap fraction (cfrac12) logit quantiles'
varlgt12.units = 'None'
varlgt12.missing_value = -9999
varngwt1 = qout.createVariable('NGWater1_quantile','f4',['probability'], fill_value = -9999)
varngwt1[:] = ngwt1qs
varngwt1.long_name = 'Slab 1 cloud non-gas water quantiles'
varngwt1.units = 'g m^-2'
varngwt1.missing_value = -9999
varngwt2 = qout.createVariable('NGWater2_quantile','f4',['probability'], fill_value = -9999)
varngwt2[:] = ngwt2qs
varngwt2.long_name = 'Slab 2 cloud non-gas water quantiles'
varngwt2.units = 'g m^-2'
varngwt2.missing_value = -9999
varcttp1 = qout.createVariable('CTTemp1_quantile','f4',['probability'], fill_value = -9999)
varcttp1[:] = cttp1qs
varcttp1.long_name = 'Slab 1 cloud top temperature quantiles'
varcttp1.units = 'K'
varcttp1.missing_value = -9999
varcttp2 = qout.createVariable('CTTemp2_quantile','f4',['probability'], fill_value = -9999)
varcttp2[:] = cttp2qs
varcttp2.long_name = 'Slab 2 cloud top temperature quantiles'
varcttp2.units = 'K'
varcttp2.missing_value = -9999
qout.close()
# Set up transformations
zccvout = numpy.zeros((tsmp,3,3,)) - 9999.
zlgt1 = numpy.zeros((tsmp,3,3)) - 9999.
zlgt2 = numpy.zeros((tsmp,3,3)) - 9999.
zlgt12 = numpy.zeros((tsmp,3,3)) - 9999.
znslb = calculate_VPD.std_norm_quantile_from_obs(nslabout, nslbqs, prbs, msgval=-99)
zprsbt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(prsbot1out, prsbt1qs, prbs, msgval=-9999.)
zdpcld1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld1out, dpcld1qs, prbs, msgval=-9999.)
zdpslb = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpslbout, dpslbqs, prbs, msgval=-9999.)
zdpcld2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(dpcld2out, dpcld2qs, prbs, msgval=-9999.)
zctyp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp1out, slb1qs, prbs, msgval=-99)
zctyp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(slbtyp2out, slb2qs, prbs, msgval=-99)
for frw in range(3):
for fcl in range(3):
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(totclrout[:,frw,fcl], totclrqout[frw,fcl,:], \
prbs, msgval=-99)
zccvout[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt1out[:,frw,fcl], lgt1qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt1[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt2out[:,frw,fcl], lgt2qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt2[:,frw,fcl] = ztmp[:]
ztmp = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cfclgt12out[:,frw,fcl], lgt12qs[frw,fcl,:], \
prbs, msgval=-9999.)
zlgt12[:,frw,fcl] = ztmp[:]
zngwt1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt1out, ngwt1qs, prbs, msgval=-9999.)
zngwt2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(ngwt2out, ngwt2qs, prbs, msgval=-9999.)
zcttp1 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp1out, cttp1qs, prbs, msgval=-9999.)
zcttp2 = calculate_VPD.std_norm_quantile_from_obs_fill_msg(cttp2out, cttp2qs, prbs, msgval=-9999.)
# Output transformed quantile samples
zfnm = '%s/CONUS_AIRS_JJA_%04d_%02dUTC_%s_Cloud_StdGausTrans.nc' % (dtdr,yrlst[k],hrchc,rgchc)
zout = Dataset(zfnm,'w')
dimsmp = zout.createDimension('sample',tsmp)
dimfov1 = zout.createDimension('fovrow',3)
dimfov2 = zout.createDimension('fovcol',3)
varlon = zout.createVariable('Longitude','f4',['sample'])
varlon[:] = lonout
varlon.long_name = 'Longitude'
varlon.units = 'degrees_east'
varlat = zout.createVariable('Latitude','f4',['sample'])
varlat[:] = latout
varlat.long_name = 'Latitude'
varlat.units = 'degrees_north'
varjdy = zout.createVariable('JulianDay','i2',['sample'])
varjdy[:] = jdyout
varjdy.long_name = 'JulianDay'
varjdy.units = 'day'
varyr = zout.createVariable('Year','i2',['sample'])
varyr[:] = yrout
varyr.long_name = 'Year'
varyr.units = 'year'
varnslb = zout.createVariable('NumberSlab_StdGaus','f4',['sample'], fill_value = -9999)
varnslb[:] = znslb
varnslb.long_name = 'Quantile transformed number of cloud slabs'
varnslb.units = 'None'
varnslb.missing_value = -9999.
varcbprs = zout.createVariable('CloudBot1Logit_StdGaus','f4',['sample'], fill_value = -9999)
varcbprs[:] = zprsbt1
varcbprs.long_name = 'Quantile transformed slab 1 cloud bottom pressure logit'
varcbprs.units = 'None'
varcbprs.missing_value = -9999.
vardpc1 = zout.createVariable('DPCloud1Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc1[:] = zdpcld1
vardpc1.long_name = 'Quantile transformed slab 1 cloud pressure depth logit'
vardpc1.units = 'None'
vardpc1.missing_value = -9999.
vardpslb = zout.createVariable('DPSlabLogit_StdGaus','f4',['sample'], fill_value = -9999)
vardpslb[:] = zdpslb
vardpslb.long_name = 'Quantile transformed two-slab vertical separation logit'
vardpslb.units = 'None'
vardpslb.missing_value = -9999.
vardpc2 = zout.createVariable('DPCloud2Logit_StdGaus','f4',['sample'], fill_value = -9999)
vardpc2[:] = zdpcld2
vardpc2.long_name = 'Quantile transformed slab 2 cloud pressure depth logit'
vardpc2.units = 'None'
vardpc2.missing_value = -9999.
vartyp1 = zout.createVariable('CType1_StdGaus','f4',['sample'], fill_value = -9999)
vartyp1[:] = zctyp1
vartyp1.long_name = 'Quantile transformed slab 1 cloud type'
vartyp1.units = 'None'
vartyp1.missing_value = -9999.
vartyp2 = zout.createVariable('CType2_StdGaus','f4',['sample'], fill_value = -9999)
vartyp2[:] = zctyp2
vartyp2.long_name = 'Quantile transformed slab 2 cloud type'
vartyp2.units = 'None'
vartyp2.missing_value = -9999.
varcov = zout.createVariable('CCoverInd_StdGaus','f4',['sample','fovrow','fovcol'], fill_value= -9999)
varcov[:] = zccvout
varcov.long_name = 'Quantile transformed cloud cover indicator'
varcov.units = 'None'
varcov.missing_value = -9999.
varlgt1 = zout.createVariable('CFrcLogit1_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt1[:] = zlgt1
varlgt1.long_name = 'Quantile transformed slab 1 cloud fraction logit'
varlgt1.units = 'None'
varlgt1.missing_value = -9999.
varlgt2 = zout.createVariable('CFrcLogit2_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt2[:] = zlgt2
varlgt2.long_name = 'Quantile transformed slab 2 cloud fraction logit'
varlgt2.units = 'None'
varlgt2.missing_value = -9999.
varlgt12 = zout.createVariable('CFrcLogit12_StdGaus','f4',['sample','fovrow','fovcol'], fill_value = -9999)
varlgt12[:] = zlgt12
varlgt12.long_name = 'Quantile transformed slab 1/2 overlap fraction logit'
varlgt12.units = 'None'
varlgt12.missing_value = -9999.
varngwt1 = zout.createVariable('NGWater1_StdGaus','f4',['sample'], fill_value = -9999)
varngwt1[:] = zngwt1
varngwt1.long_name = 'Quantile transformed slab 1 non-gas water'
varngwt1.units = 'None'
varngwt1.missing_value = -9999.
varngwt2 = zout.createVariable('NGWater2_StdGaus','f4',['sample'], fill_value = -9999)
varngwt2[:] = zngwt2
varngwt2.long_name = 'Quantile transformed slab 2 non-gas water'
varngwt2.units = 'None'
varngwt2.missing_value = -9999.
varcttp1 = zout.createVariable('CTTemp1_StdGaus','f4',['sample'], fill_value = -9999)
varcttp1[:] = zcttp1
varcttp1.long_name = 'Quantile transformed slab 1 cloud top temperature'
varcttp1.units = 'None'
varcttp1.missing_value = -9999.
varcttp2 = zout.createVariable('CTTemp2_StdGaus','f4',['sample'], fill_value = -9999)
varcttp2[:] = zcttp2
varcttp2.long_name = 'Quantile transformed slab 2 cloud top temperature'
varcttp2.units = 'None'
varcttp2.missing_value = -9999.
zout.close()
return
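# Example call (hypothetical directories, region code, and mask settings):
# quantile_cfrac_locmask_conus('/data/airs_ref', '/data/merra', '/data/cloud_slab',
#                              '/data/airs_cfrac', '/data/output',
#                              [2016], 6, 8, 0, 'SGP', 'NCA_mask', 4)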
def quantile_profile_locmask_conus(rfdr, mtdr, csdr, airdr, dtdr, yrlst, mnst, mnfn, hrchc, rgchc, mskvr, mskvl):
# Construct profile/sfc variable quantiles and z-scores, with a possibly irregular location mask
# rfdr: Directory for reference data (Levels/Quantiles)
# mtdr: Directory for MERRA data
# csdr: Directory for cloud slab data
# airdr: Directory for AIRS cloud fraction
# dtdr: Output directory
# yrlst: List of years to process
# mnst: Starting Month
# mnfn: Ending Month
# hrchc: Template Hour Choice
# rgchc: Template Region Choice
# mskvr: Name of region mask variable
# mskvl: Value of region mask for Region Choice
# Read probs and pressure levels
rnm = '%s/AIRS_Levels_Quantiles.nc' % (rfdr)
f = Dataset(rnm,'r')
plev = f['level'][:]
prbs = f['probability'][:]
alts = f['altitude'][:]
f.close()
nyr = len(yrlst)
nprb = prbs.shape[0]
nzout = 101
tmpqout = numpy.zeros((nzout,nprb)) - 9999.
rhqout = numpy.zeros((nzout,nprb)) - 9999.
sftmpqs = numpy.zeros((nprb,)) - 9999.
sfaltqs = numpy.zeros((nprb,)) - 9999.
psfcqs = numpy.zeros((nprb,)) - 9999.
altmed = numpy.zeros((nzout,)) - 9999.
# Mask, lat, lon
fnm = '%s/interpolated_merra2_for_SARTA_two_slab_%d_JJA_CONUS_with_NCA_regions_%02dUTC_no_vertical_variation_for_missing.nc' % (mtdr,yrlst[0],hrchc)
f = Dataset(fnm,'r')
mask = f.variables[mskvr][:,:]
latmet = f.variables['plat'][:]
lonmet = f.variables['plon'][:]
tminf = f.variables['time'][:]
tmunit = f.variables['time'].units[:]
f.close()
mskind = numpy.zeros((mask.shape),dtype=mask.dtype)
print(mskvl)
mskind[mask == mskvl] = 1
lnsq = numpy.arange(lonmet.shape[0])
ltsq = numpy.arange(latmet.shape[0])
# Subset to the bounding box of the masked region
lnsm = numpy.sum(mskind,axis=0)
#print(lnsq.shape)
#print(lnsm.shape)
#print(lnsm)
ltsm = numpy.sum(mskind,axis=1)
#print(ltsq.shape)
#print(ltsm.shape)
#print(ltsm)
lnmn = numpy.amin(lnsq[lnsm > 0])
lnmx = numpy.amax(lnsq[lnsm > 0]) + 1
ltmn = numpy.amin(ltsq[ltsm > 0])
ltmx = numpy.amax(ltsq[ltsm > 0]) + 1
stridx = 'Lon Range: %d, %d\nLat Range: %d, %d \n' % (lnmn,lnmx,ltmn,ltmx)
print(stridx)
nx = lnmx - lnmn
ny = ltmx - ltmn
lnrp =
|
numpy.tile(lonmet[lnmn:lnmx],ny)
|
numpy.tile
|
#!/bin/env python
"""
Cascade decomposition
=====================
This example script shows how to compute and plot the cascade decomposition of
a single radar precipitation field in pysteps.
"""
from matplotlib import cm, pyplot as plt
import numpy as np
import os
from pprint import pprint
from pysteps.cascade.bandpass_filters import filter_gaussian
from pysteps import io, rcparams
from pysteps.cascade.decomposition import decomposition_fft
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read precipitation field
# ------------------------
#
# First, the radar composite is imported and transformed into units
# of dB.
# Import the example radar composite
root_path = rcparams.data_sources["fmi"]["root_path"]
filename = os.path.join(
root_path, "20160928", "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz"
)
R, _, metadata = io.import_fmi_pgm(filename, gzipped=True)
# Convert to rain rate
R, metadata = conversion.to_rainrate(R, metadata)
# Nicely print the metadata
pprint(metadata)
# Plot the rainfall field
plot_precip_field(R, geodata=metadata)
plt.show()
# Log-transform the data
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
###############################################################################
# 2D Fourier spectrum
# --------------------
#
# Compute and plot the 2D Fourier power spectrum of the precipitation field.
# Set non-finite values (NaNs) to the fill value (zerovalue)
R[~np.isfinite(R)] = metadata["zerovalue"]
# Compute the Fourier transform of the input field
F = abs(np.fft.fftshift(
|
np.fft.fft2(R)
|
numpy.fft.fft2
|
"""
Test Tabular Surrogate Explainer Builder
========================================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
import numpy as np
import tabular_surrogate_builder
RANDOM_SEED = 42
iris = datasets.load_iris()
x_name, y_name = 'petal length (cm)', 'petal width (cm)'
x_ind = iris.feature_names.index(x_name)
y_ind = iris.feature_names.index(y_name)
X = iris.data[:, [x_ind, y_ind]]  # Use only the petal length and petal width features
Y = iris.target
tree_clf = DecisionTreeClassifier(
max_depth=5, min_samples_leaf=15, random_state=RANDOM_SEED)
tree_clf.fit(X, Y)
logreg_clf = LogisticRegression(random_state=RANDOM_SEED)
logreg_clf.fit(X, Y)
def test_tabular_blimey():
"""Tests bLIMEy explanations."""
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
class_map = {cls: i for i, cls in enumerate(iris.target_names)}
instances = {
'setosa': np.array([1.5, 0.25]),
'versicolor': np.array([4.5, 1.25]),
'virginica': np.array([5.5, 2.25])
}
models = {
'tree-intercept': (tree_clf.predict, True),
'tree-no-intercept': (tree_clf.predict, False),
'logreg-intercept': (logreg_clf.predict, True),
'logreg-no-intercept': (logreg_clf.predict, False)
}
samples = [
[0, 3, 6, 9, 12, 15, 18, 21, 24],
[12, 6, 24, 0, 15, 9, 3, 21, 18]
]
x_bins, y_bins = [1, 2.5, 3.3, 6], [.5, 1.5, 2]
discs = []
for i, ix in enumerate(x_bins):
for iix in x_bins[i + 1:]: # X-axis
for j, jy in enumerate(y_bins): # Y-axis
for jjy in y_bins[j + 1:]:
discs.append({
0: [ix, iix],
1: [jy, jjy]
})
for inst_i, inst in instances.items():
for samples_no_i, samples_no in enumerate(samples):
for cls, cls_i in class_map.items():
for disc_i, disc in enumerate(discs):
for model_i, (pred_fn, intercept) in models.items():
disc_x = [x_min] + disc[0] + [x_max]
disc_y = [y_min] + disc[1] + [y_max]
data = tabular_surrogate_builder._generate_data(
samples_no, disc_x, disc_y, RANDOM_SEED)
exp = tabular_surrogate_builder.build_tabular_blimey(
inst, cls_i, data, pred_fn, disc, intercept, RANDOM_SEED)
key = '{}&{}&{}&{}&{}'.format(
inst_i, samples_no_i, cls, disc_i, model_i)
assert np.allclose(
exp,
EXP[key],
atol=.001,
equal_nan=True
)
EXP = {
'setosa&0&setosa&0&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&0&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&0&logreg-intercept': np.array([1.5013252279635236, 0.16661398176291822]),
'setosa&0&setosa&0&logreg-no-intercept': np.array([1.144736842105263, -0.2754934210526315]),
'setosa&0&setosa&1&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&1&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&1&logreg-intercept': np.array([1.5013252279635236, 0.16661398176291822]),
'setosa&0&setosa&1&logreg-no-intercept': np.array([1.144736842105263, -0.2754934210526315]),
'setosa&0&setosa&2&tree-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&2&tree-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&2&logreg-intercept': np.array([1.5739331306990878, 0.10256534954407359]),
'setosa&0&setosa&2&logreg-no-intercept': np.array([1.1973684210526314, -0.36430921052631576]),
'setosa&0&setosa&3&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&3&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&3&logreg-intercept': np.array([1.4272097264437666, 0.1328632218844987]),
'setosa&0&setosa&3&logreg-no-intercept': np.array([1.0394736842105263, -0.3478618421052631]),
'setosa&0&setosa&4&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&4&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&4&logreg-intercept': np.array([1.4272097264437666, 0.1328632218844987]),
'setosa&0&setosa&4&logreg-no-intercept': np.array([1.0394736842105263, -0.3478618421052631]),
'setosa&0&setosa&5&tree-intercept': np.array([1.3185896656534941, 0.11883282674772026]),
'setosa&0&setosa&5&tree-no-intercept': np.array([0.9342105263157893, -0.35773026315789463]),
'setosa&0&setosa&5&logreg-intercept': np.array([1.3891063829787214, 0.17719148936170231]),
'setosa&0&setosa&5&logreg-no-intercept': np.array([0.9868421052631579, -0.32154605263157887]),
'setosa&0&setosa&6&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&6&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&6&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&6&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&7&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&7&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&7&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&7&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&8&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'setosa&0&setosa&8&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'setosa&0&setosa&8&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'setosa&0&setosa&8&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'setosa&0&setosa&9&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&9&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&9&logreg-intercept': np.array([1.6393728222996513, 0.07259001161440241]),
'setosa&0&setosa&9&logreg-no-intercept': np.array([1.3365122615803813, -0.5790190735694822]),
'setosa&0&setosa&10&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&10&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&10&logreg-intercept': np.array([1.6585365853658536, 0.11382113821138252]),
'setosa&0&setosa&10&logreg-no-intercept': np.array([1.3365122615803813, -0.5790190735694822]),
'setosa&0&setosa&11&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&11&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&11&logreg-intercept': np.array([1.7238675958188177, 0.1634727061556331]),
'setosa&0&setosa&11&logreg-no-intercept': np.array([1.3610354223433239, -0.6171662125340599]),
'setosa&0&setosa&12&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&12&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&12&logreg-intercept': np.array([1.8780487804878052, 0.04065040650406589]),
'setosa&0&setosa&12&logreg-no-intercept': np.array([1.483651226158038, -0.8079019073569482]),
'setosa&0&setosa&13&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&13&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&13&logreg-intercept': np.array([1.8780487804878052, 0.04065040650406589]),
'setosa&0&setosa&13&logreg-no-intercept': np.array([1.483651226158038, -0.8079019073569482]),
'setosa&0&setosa&14&tree-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&14&tree-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&14&logreg-intercept': np.array([1.905052264808362, 0.007839721254356067]),
'setosa&0&setosa&14&logreg-no-intercept': np.array([1.5081743869209807, -0.8460490463215258]),
'setosa&0&setosa&15&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&15&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&15&logreg-intercept': np.array([1.6933797909407649, 0.006968641114983181]),
'setosa&0&setosa&15&logreg-no-intercept': np.array([1.2956403269754766, -0.8487738419618529]),
'setosa&0&setosa&16&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&16&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&16&logreg-intercept': np.array([1.6933797909407649, 0.006968641114983181]),
'setosa&0&setosa&16&logreg-no-intercept': np.array([1.2956403269754766, -0.8487738419618529]),
'setosa&0&setosa&17&tree-intercept': np.array([1.54965156794425, 0.03106852497096429]),
'setosa&0&setosa&17&tree-no-intercept': np.array([1.145776566757493, -0.8378746594005451]),
'setosa&0&setosa&17&logreg-intercept': np.array([1.61759581881533, 0.05603948896631865]),
'setosa&0&setosa&17&logreg-no-intercept': np.array([1.2084468664850134, -0.8242506811989099]),
'setosa&0&versicolor&0&tree-intercept': np.array([-1.4066382978723382, 0.10485106382978716]),
'setosa&0&versicolor&0&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&0&logreg-intercept': np.array([-1.2977264437689953, 0.008778115501519752]),
'setosa&0&versicolor&0&logreg-no-intercept': np.array([-1.118421052631579, 0.23108552631578946]),
'setosa&0&versicolor&1&tree-intercept': np.array([-1.3062613981762905, 0.22930091185410326]),
'setosa&0&versicolor&1&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&1&logreg-intercept': np.array([-1.2642674772036455, 0.0502613981762915]),
'setosa&0&versicolor&1&logreg-no-intercept': np.array([-1.118421052631579, 0.23108552631578946]),
'setosa&0&versicolor&2&tree-intercept': np.array([-1.105507598784193, 0.47820060790273566]),
'setosa&0&versicolor&2&tree-no-intercept': np.array([-1.1973684210526314, 0.36430921052631576]),
'setosa&0&versicolor&2&logreg-intercept': np.array([-1.2699574468085086, 0.19727659574468057]),
'setosa&0&versicolor&2&logreg-no-intercept': np.array([-1.1710526315789473, 0.3199013157894736]),
'setosa&0&versicolor&3&tree-intercept': np.array([-1.189398176291792, 0.13291185410334339]),
'setosa&0&versicolor&3&tree-no-intercept': np.array([-0.9868421052631579, 0.38404605263157887]),
'setosa&0&versicolor&3&logreg-intercept': np.array([-1.223610942249238, 0.04252887537993902]),
'setosa&0&versicolor&3&logreg-no-intercept': np.array([-1.013157894736842, 0.303453947368421]),
'setosa&0&versicolor&4&tree-intercept': np.array([-1.0890212765957425, 0.25736170212765935]),
'setosa&0&versicolor&4&tree-no-intercept': np.array([-0.9868421052631579, 0.38404605263157887]),
'setosa&0&versicolor&4&logreg-intercept': np.array([-1.15669300911854, 0.12549544072948324]),
'setosa&0&versicolor&4&logreg-no-intercept': np.array([-1.013157894736842, 0.303453947368421]),
'setosa&0&versicolor&5&tree-intercept': np.array([-0.9263708206686919, 0.5505896656534954]),
'setosa&0&versicolor&5&tree-no-intercept': np.array([-1.0394736842105263, 0.4103618421052631]),
'setosa&0&versicolor&5&logreg-intercept': np.array([-0.9484498480243149, 0.2150759878419453]),
'setosa&0&versicolor&5&logreg-no-intercept': np.array([-0.9342105263157895, 0.2327302631578947]),
'setosa&0&versicolor&6&tree-intercept': np.array([-0.6081945288753788, 0.15873556231003033]),
'setosa&0&versicolor&6&tree-no-intercept': np.array([-0.4078947368421052, 0.40707236842105254]),
'setosa&0&versicolor&6&logreg-intercept': np.array([0.037398176291793324, 0.5550881458966567]),
'setosa&0&versicolor&6&logreg-no-intercept': np.array([-0.3552631578947368, 0.06825657894736842]),
'setosa&0&versicolor&7&tree-intercept': np.array([-0.5459209726443761, 0.32751367781155016]),
'setosa&0&versicolor&7&tree-no-intercept': np.array([-0.4605263157894736, 0.4333881578947368]),
'setosa&0&versicolor&7&logreg-intercept': np.array([0.037398176291793324, 0.5550881458966567]),
'setosa&0&versicolor&7&logreg-no-intercept': np.array([-0.3552631578947368, 0.06825657894736842]),
'setosa&0&versicolor&8&tree-intercept': np.array([-0.4594772036474156, 0.7093981762917936]),
'setosa&0&versicolor&8&tree-no-intercept': np.array([-0.618421052631579, 0.5123355263157895]),
'setosa&0&versicolor&8&logreg-intercept': np.array([0.4359878419452881, 0.15392097264437712]),
'setosa&0&versicolor&8&logreg-no-intercept': np.array([-0.0921052631578948, -0.5008223684210525]),
'setosa&0&versicolor&9&tree-intercept': np.array([-1.7709059233449487, 0.28077816492450636]),
'setosa&0&versicolor&9&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&9&logreg-intercept': np.array([-1.5165505226480849, 0.10075493612078955]),
'setosa&0&versicolor&9&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&10&tree-intercept': np.array([-1.6559233449477355, 0.5281649245063876]),
'setosa&0&versicolor&10&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&10&logreg-intercept': np.array([-1.4973867595818822, 0.1419860627177698]),
'setosa&0&versicolor&10&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&11&tree-intercept': np.array([-1.4642857142857129, 0.9404761904761902]),
'setosa&0&versicolor&11&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&11&logreg-intercept': np.array([-1.4590592334494785, 0.22444831591173042]),
'setosa&0&versicolor&11&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&12&tree-intercept': np.array([-1.7709059233449487, 0.28077816492450636]),
'setosa&0&versicolor&12&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&12&logreg-intercept': np.array([-1.2674216027874567, 0.636759581881533]),
'setosa&0&versicolor&12&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&13&tree-intercept': np.array([-1.6559233449477355, 0.5281649245063876]),
'setosa&0&versicolor&13&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&13&logreg-intercept': np.array([-1.2674216027874567, 0.636759581881533]),
'setosa&0&versicolor&13&logreg-no-intercept': np.array([-1.3119891008174385, 0.5408719346049046]),
'setosa&0&versicolor&14&tree-intercept': np.array([-1.4642857142857129, 0.9404761904761902]),
'setosa&0&versicolor&14&tree-no-intercept': np.array([-1.5081743869209807, 0.8460490463215258]),
'setosa&0&versicolor&14&logreg-intercept': np.array([-0.8780487804878059, 0.2926829268292682]),
'setosa&0&versicolor&14&logreg-no-intercept': np.array([-0.9931880108991824, 0.04495912806539504]),
'setosa&0&versicolor&15&tree-intercept': np.array([-1.4155052264808363, 0.2575493612078976]),
'setosa&0&versicolor&15&tree-no-intercept': np.array([-1.145776566757493, 0.8378746594005451]),
'setosa&0&versicolor&15&logreg-intercept': np.array([-1.0635888501742157, 0.7116724738675959]),
'setosa&0&versicolor&15&logreg-no-intercept': np.array([-1.1239782016348774, 0.5817438692098093]),
'setosa&0&versicolor&16&tree-intercept': np.array([-1.300522648083622, 0.5049361207897793]),
'setosa&0&versicolor&16&tree-no-intercept': np.array([-1.145776566757493, 0.8378746594005451]),
'setosa&0&versicolor&16&logreg-intercept': np.array([-1.0444250871080138, 0.7529036004645763]),
'setosa&0&versicolor&16&logreg-no-intercept': np.array([-1.1239782016348774, 0.5817438692098093]),
'setosa&0&versicolor&17&tree-intercept': np.array([-1.184668989547039, 0.9663182346109179]),
'setosa&0&versicolor&17&tree-no-intercept': np.array([-1.2329700272479562, 0.8623978201634876]),
'setosa&0&versicolor&17&logreg-intercept': np.array([-0.5331010452961679, 0.36817653890824636]),
'setosa&0&versicolor&17&logreg-no-intercept': np.array([-0.6934604904632151, 0.02316076294277924]),
'setosa&0&virginica&0&tree-intercept': np.array([-0.16729483282674798, -0.20741641337386058]),
'setosa&0&virginica&0&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&0&logreg-intercept': np.array([-0.20359878419452845, -0.1753920972644377]),
'setosa&0&virginica&0&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&1&tree-intercept': np.array([-0.26767173252279597, -0.3318662613981765]),
'setosa&0&virginica&1&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&1&logreg-intercept': np.array([-0.23705775075987823, -0.2168753799392096]),
'setosa&0&virginica&1&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&2&tree-intercept': np.array([-0.4684255319148932, -0.5807659574468084]),
'setosa&0&virginica&2&tree-no-intercept': np.array([-0.5657894736842106, -0.7014802631578947]),
'setosa&0&virginica&2&logreg-intercept': np.array([-0.30397568389057705, -0.2998419452887544]),
'setosa&0&virginica&2&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&3&tree-intercept': np.array([-0.12919148936170174, -0.2517446808510638]),
'setosa&0&virginica&3&tree-no-intercept': np.array([-0.5131578947368421, -0.7277960526315789]),
'setosa&0&virginica&3&logreg-intercept': np.array([-0.20359878419452845, -0.1753920972644377]),
'setosa&0&virginica&3&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&4&tree-intercept': np.array([-0.2295683890577505, -0.37619452887538046]),
'setosa&0&virginica&4&tree-no-intercept': np.array([-0.5131578947368421, -0.7277960526315789]),
'setosa&0&virginica&4&logreg-intercept': np.array([-0.27051671732522753, -0.2583586626139816]),
'setosa&0&virginica&4&logreg-no-intercept': np.array([-0.5921052631578947, -0.6570723684210525]),
'setosa&0&virginica&5&tree-intercept': np.array([-0.39221884498480175, -0.6694224924012161]),
'setosa&0&virginica&5&tree-no-intercept': np.array([-0.4605263157894739, -0.7541118421052629]),
'setosa&0&virginica&5&logreg-intercept': np.array([-0.4406565349544069, -0.3922674772036473]),
'setosa&0&virginica&5&logreg-no-intercept': np.array([-0.618421052631579, -0.6126644736842104]),
'setosa&0&virginica&6&tree-intercept': np.array([-0.09108814589665652, -0.296072948328267]),
'setosa&0&virginica&6&tree-no-intercept': np.array([-0.4605263157894739, -0.7541118421052629]),
'setosa&0&virginica&6&logreg-intercept': np.array([-0.9186626139817617, -0.6913069908814593]),
'setosa&0&virginica&6&logreg-no-intercept': np.array([-0.6973684210526315, -0.4169407894736842]),
'setosa&0&virginica&7&tree-intercept': np.array([-0.15336170212765957, -0.46485106382978736]),
'setosa&0&virginica&7&tree-no-intercept': np.array([-0.4078947368421052, -0.7804276315789473]),
'setosa&0&virginica&7&logreg-intercept': np.array([-0.9186626139817617, -0.6913069908814593]),
'setosa&0&virginica&7&logreg-no-intercept': np.array([-0.6973684210526315, -0.4169407894736842]),
'setosa&0&virginica&8&tree-intercept': np.array([-0.23980547112461933, -0.8467355623100313]),
'setosa&0&virginica&8&tree-no-intercept': np.array([-0.2500000000000001, -0.8593749999999999]),
'setosa&0&virginica&8&logreg-intercept': np.array([-1.3172522796352566, -0.29013981762917995]),
'setosa&0&virginica&8&logreg-no-intercept': np.array([-0.9605263157894737, 0.15213815789473678]),
'setosa&0&virginica&9&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&9&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&9&logreg-intercept': np.array([-0.12282229965156827, -0.17334494773519163]),
'setosa&0&virginica&9&logreg-no-intercept': np.array([-0.42915531335149854, -0.832425068119891]),
'setosa&0&virginica&10&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&10&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&10&logreg-intercept': np.array([-0.1611498257839721, -0.25580720092915205]),
'setosa&0&virginica&10&logreg-no-intercept': np.array([-0.42915531335149854, -0.832425068119891]),
'setosa&0&virginica&11&tree-intercept': np.array([-0.4407665505226477, -0.9483159117305445]),
'setosa&0&virginica&11&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&11&logreg-intercept': np.array([-0.2648083623693382, -0.3879210220673635]),
'setosa&0&virginica&11&logreg-no-intercept': np.array([-0.45367847411444123, -0.7942779291553135]),
'setosa&0&virginica&12&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&12&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&12&logreg-intercept': np.array([-0.6106271777003482, -0.6774099883855986]),
'setosa&0&virginica&12&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&13&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&13&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&13&logreg-intercept': np.array([-0.6106271777003482, -0.6774099883855986]),
'setosa&0&virginica&13&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&14&tree-intercept': np.array([-0.4407665505226477, -0.9483159117305445]),
'setosa&0&virginica&14&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&14&logreg-intercept': np.array([-1.0270034843205562, -0.30052264808362433]),
'setosa&0&virginica&14&logreg-no-intercept': np.array([-0.9196185286103541, -0.06948228882833794]),
'setosa&0&virginica&15&tree-intercept': np.array([-0.13414634146341475, -0.28861788617886147]),
'setosa&0&virginica&15&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&15&logreg-intercept': np.array([-0.6297909407665501, -0.7186411149825784]),
'setosa&0&virginica&15&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&16&tree-intercept': np.array([-0.24912891986062732, -0.5360046457607435]),
'setosa&0&virginica&16&tree-no-intercept': np.array([-0.40463215258855567, -0.8705722070844688]),
'setosa&0&virginica&16&logreg-intercept': np.array([-0.6489547038327523, -0.7598722415795597]),
'setosa&0&virginica&16&logreg-no-intercept': np.array([-0.5762942779291551, -0.6035422343324252]),
'setosa&0&virginica&17&tree-intercept': np.array([-0.36498257839721343, -0.9973867595818819]),
'setosa&0&virginica&17&tree-no-intercept': np.array([-0.3174386920980926, -0.8950953678474115]),
'setosa&0&virginica&17&logreg-intercept': np.array([-1.0844947735191648, -0.4242160278745647]),
'setosa&0&virginica&17&logreg-no-intercept': np.array([-0.9196185286103541, -0.06948228882833794]),
'setosa&1&setosa&0&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&0&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&0&logreg-intercept': np.array([1.7214554579673789, 0.004015056461731351]),
'setosa&1&setosa&0&logreg-no-intercept': np.array([1.1725714285714288, -0.44114285714285734]),
'setosa&1&setosa&1&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&1&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&1&logreg-intercept': np.array([1.7214554579673789, 0.004015056461731351]),
'setosa&1&setosa&1&logreg-no-intercept': np.array([1.1725714285714288, -0.44114285714285734]),
'setosa&1&setosa&2&tree-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&2&tree-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&2&logreg-intercept': np.array([1.7608531994981194, -0.04592220828105419]),
'setosa&1&setosa&2&logreg-no-intercept': np.array([1.1988571428571433, -0.5017142857142859]),
'setosa&1&setosa&3&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&3&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&3&logreg-intercept': np.array([1.614303638644919, -0.06398996235884592]),
'setosa&1&setosa&3&logreg-no-intercept': np.array([1.054857142857143, -0.5177142857142859]),
'setosa&1&setosa&4&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&4&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&4&logreg-intercept': np.array([1.614303638644919, -0.06398996235884592]),
'setosa&1&setosa&4&logreg-no-intercept': np.array([1.054857142857143, -0.5177142857142859]),
'setosa&1&setosa&5&tree-intercept': np.array([1.5081555834378932, -0.11543287327478088]),
'setosa&1&setosa&5&tree-no-intercept': np.array([0.9600000000000003, -0.5600000000000002]),
'setosa&1&setosa&5&logreg-intercept': np.array([1.573902132998746, -0.03061480552070302]),
'setosa&1&setosa&5&logreg-no-intercept': np.array([1.005714285714286, -0.49142857142857155]),
'setosa&1&setosa&6&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&6&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&6&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&6&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&7&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&7&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&7&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&7&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&8&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'setosa&1&setosa&8&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'setosa&1&setosa&8&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'setosa&1&setosa&8&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'setosa&1&setosa&9&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&9&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&9&logreg-intercept': np.array([1.6711804758626017, 0.16922959222706882]),
'setosa&1&setosa&9&logreg-no-intercept': np.array([1.0471281296023565, -0.5846833578792343]),
'setosa&1&setosa&10&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&10&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&10&logreg-intercept': np.array([1.6976512891133129, 0.2012087828016237]),
'setosa&1&setosa&10&logreg-no-intercept': np.array([1.0471281296023565, -0.5846833578792343]),
'setosa&1&setosa&11&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&11&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&11&logreg-intercept': np.array([1.7614566597812, 0.2204881034350868]),
'setosa&1&setosa&11&logreg-no-intercept': np.array([1.0559646539027983, -0.6318114874815908]),
'setosa&1&setosa&12&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&12&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&12&logreg-intercept': np.array([1.8422461938642845, 0.02907199143141387]),
'setosa&1&setosa&12&logreg-no-intercept': np.array([1.1001472754050075, -0.8674521354933727]),
'setosa&1&setosa&13&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&13&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&13&logreg-intercept': np.array([1.8422461938642845, 0.02907199143141387]),
'setosa&1&setosa&13&logreg-no-intercept': np.array([1.1001472754050075, -0.8674521354933727]),
'setosa&1&setosa&14&tree-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&14&tree-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&14&logreg-intercept': np.array([1.8531099380307665, -0.015607069084231005]),
'setosa&1&setosa&14&logreg-no-intercept': np.array([1.1089837997054492, -0.9145802650957291]),
'setosa&1&setosa&15&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&15&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&15&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&15&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&setosa&16&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&16&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&16&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&16&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&setosa&17&tree-intercept': np.array([1.476245122790915, -0.04314895570346513]),
'setosa&1&setosa&17&tree-no-intercept': np.array([0.7378497790868925, -0.9351988217967601]),
'setosa&1&setosa&17&logreg-intercept': np.array([1.6122714405936887, 0.017137173896413307]),
'setosa&1&setosa&17&logreg-no-intercept': np.array([0.8556701030927836, -0.8969072164948455]),
'setosa&1&versicolor&0&tree-intercept': np.array([-1.1872020075282315, 0.5111668757841914]),
'setosa&1&versicolor&0&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&0&logreg-intercept': np.array([-1.2996235884567129, 0.25621079046424133]),
'setosa&1&versicolor&0&logreg-no-intercept': np.array([-1.1462857142857146, 0.3805714285714287]),
'setosa&1&versicolor&1&tree-intercept': np.array([-0.9322459222082816, 0.7179422835633638]),
'setosa&1&versicolor&1&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&1&logreg-intercept': np.array([-1.2358845671267256, 0.30790464240903437]),
'setosa&1&versicolor&1&logreg-no-intercept': np.array([-1.1462857142857146, 0.3805714285714287]),
'setosa&1&versicolor&2&tree-intercept': np.array([-0.8047678795483063, 0.8213299874529498]),
'setosa&1&versicolor&2&tree-no-intercept': np.array([-1.1988571428571433, 0.5017142857142859]),
'setosa&1&versicolor&2&logreg-intercept': np.array([-1.2752823086574652, 0.35784190715181996]),
'setosa&1&versicolor&2&logreg-no-intercept': np.array([-1.1725714285714288, 0.44114285714285734]),
'setosa&1&versicolor&3&tree-intercept': np.array([-0.9749058971141784, 0.6140526976160612]),
'setosa&1&versicolor&3&tree-no-intercept': np.array([-1.0091428571428573, 0.5862857142857144]),
'setosa&1&versicolor&3&logreg-intercept': np.array([-1.16060225846926, 0.35006273525721504]),
'setosa&1&versicolor&3&logreg-no-intercept': np.array([-1.0285714285714287, 0.4571428571428573]),
'setosa&1&versicolor&4&tree-intercept': np.array([-0.7199498117942287, 0.8208281053952332]),
'setosa&1&versicolor&4&tree-no-intercept': np.array([-1.0091428571428573, 0.5862857142857144]),
'setosa&1&versicolor&4&logreg-intercept': np.array([-0.9693851944792976, 0.5051442910915944]),
'setosa&1&versicolor&4&logreg-no-intercept': np.array([-1.0285714285714287, 0.4571428571428573]),
'setosa&1&versicolor&5&tree-intercept': np.array([-0.6328732747804271, 0.9575909661229628]),
'setosa&1&versicolor&5&tree-no-intercept': np.array([-1.0582857142857145, 0.6125714285714288]),
'setosa&1&versicolor&5&logreg-intercept': np.array([-0.8577164366373908, 0.44767879548306233]),
'setosa&1&versicolor&5&logreg-no-intercept': np.array([-0.9531428571428574, 0.3702857142857144]),
'setosa&1&versicolor&6&tree-intercept': np.array([-0.3229611041405272, 0.7711417816813058]),
'setosa&1&versicolor&6&tree-no-intercept': np.array([-0.3874285714285715, 0.7188571428571429]),
'setosa&1&versicolor&6&logreg-intercept': np.array([0.2928481806775409, 0.7319949811794241]),
'setosa&1&versicolor&6&logreg-no-intercept': np.array([-0.2982857142857143, 0.2525714285714286]),
'setosa&1&versicolor&7&tree-intercept': np.array([-0.10840652446675068, 1.0112923462986212]),
'setosa&1&versicolor&7&tree-no-intercept': np.array([-0.43657142857142867, 0.7451428571428572]),
'setosa&1&versicolor&7&logreg-intercept': np.array([0.2928481806775409, 0.7319949811794241]),
'setosa&1&versicolor&7&logreg-no-intercept': np.array([-0.2982857142857143, 0.2525714285714286]),
'setosa&1&versicolor&8&tree-intercept': np.array([-0.14253450439146828, 1.2481806775407795]),
'setosa&1&versicolor&8&tree-no-intercept': np.array([-0.6331428571428572, 0.8502857142857143]),
'setosa&1&versicolor&8&logreg-intercept': np.array([0.5741530740276033, 0.2735257214554584]),
'setosa&1&versicolor&8&logreg-no-intercept': np.array([-0.13371428571428576, -0.30057142857142854]),
'setosa&1&versicolor&9&tree-intercept': np.array([-1.2442812332644855, 0.7511284522989824]),
'setosa&1&versicolor&9&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&9&logreg-intercept': np.array([-1.316196159436929, 0.20182082472649332]),
'setosa&1&versicolor&9&logreg-no-intercept': np.array([-1.0382916053019147, 0.5375552282768779]),
'setosa&1&versicolor&10&tree-intercept': np.array([-0.9531022875066961, 1.1028995486190813]),
'setosa&1&versicolor&10&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&10&logreg-intercept': np.array([-1.1838420931833873, 0.3617167775992656]),
'setosa&1&versicolor&10&logreg-no-intercept': np.array([-1.0382916053019147, 0.5375552282768779]),
'setosa&1&versicolor&11&tree-intercept': np.array([-0.6883941549996199, 1.4226914543646265]),
'setosa&1&versicolor&11&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&11&logreg-intercept': np.array([-1.2103129064340965, 0.3297375870247106]),
'setosa&1&versicolor&11&logreg-no-intercept': np.array([-1.0382916053019147, 0.5375552282768779]),
'setosa&1&versicolor&12&tree-intercept': np.array([-1.2442812332644855, 0.7511284522989824]),
'setosa&1&versicolor&12&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&12&logreg-intercept': np.array([-0.8505852650906629, 0.8221253155841185]),
'setosa&1&versicolor&12&logreg-no-intercept': np.array([-1.0471281296023565, 0.5846833578792343]),
'setosa&1&versicolor&13&tree-intercept': np.array([-0.9531022875066961, 1.1028995486190813]),
'setosa&1&versicolor&13&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&13&logreg-intercept': np.array([-0.8505852650906629, 0.8221253155841185]),
'setosa&1&versicolor&13&logreg-no-intercept': np.array([-1.0471281296023565, 0.5846833578792343]),
'setosa&1&versicolor&14&tree-intercept': np.array([-0.6883941549996199, 1.4226914543646265]),
'setosa&1&versicolor&14&tree-no-intercept': np.array([-1.1089837997054492, 0.9145802650957291]),
'setosa&1&versicolor&14&logreg-intercept': np.array([-0.6998699410909677, 0.4839721520924183]),
'setosa&1&versicolor&14&logreg-no-intercept': np.array([-0.96759941089838, 0.16053019145802655]),
'setosa&1&versicolor&15&tree-intercept': np.array([-1.1082549154617132, 0.8114145818988611]),
'setosa&1&versicolor&15&tree-no-intercept': np.array([-0.9911634756995582, 0.9528718703976438]),
'setosa&1&versicolor&15&logreg-intercept': np.array([-0.5941396985693541, 0.8660393236936744]),
'setosa&1&versicolor&15&logreg-no-intercept': np.array([-0.8026509572901326, 0.6141384388807071]),
'setosa&1&versicolor&16&tree-intercept': np.array([-0.8170759697039257, 1.16318567821896]),
'setosa&1&versicolor&16&tree-no-intercept': np.array([-0.9911634756995582, 0.9528718703976438]),
'setosa&1&versicolor&16&logreg-intercept': np.array([-0.5676688853186463, 0.8980185142682289]),
'setosa&1&versicolor&16&logreg-no-intercept': np.array([-0.8026509572901326, 0.6141384388807071]),
'setosa&1&versicolor&17&tree-intercept': np.array([-0.5523678371968499, 1.4829775839645034]),
'setosa&1&versicolor&17&tree-no-intercept': np.array([-0.9911634756995582, 0.9528718703976438]),
'setosa&1&versicolor&17&logreg-intercept': np.array([-0.3796190039017688, 0.547165480835438]),
'setosa&1&versicolor&17&logreg-no-intercept': np.array([-0.7142857142857143, 0.14285714285714288]),
'setosa&1&virginica&0&tree-intercept': np.array([-0.5736511919698881, -0.4652446675031376]),
'setosa&1&virginica&0&tree-no-intercept': np.array([-0.7257142857142859, -0.5885714285714285]),
'setosa&1&virginica&0&logreg-intercept': np.array([-0.42183186951066537, -0.2602258469259727]),
'setosa&1&virginica&0&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&1&tree-intercept': np.array([-0.8286072772898374, -0.6720200752823098]),
'setosa&1&virginica&1&tree-no-intercept': np.array([-0.7257142857142859, -0.5885714285714285]),
'setosa&1&virginica&1&logreg-intercept': np.array([-0.48557089084065264, -0.3119196988707659]),
'setosa&1&virginica&1&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&2&tree-intercept': np.array([-0.9560853199498119, -0.775407779171896]),
'setosa&1&virginica&2&tree-no-intercept': np.array([-0.7257142857142859, -0.5885714285714285]),
'setosa&1&virginica&2&logreg-intercept': np.array([-0.48557089084065264, -0.3119196988707659]),
'setosa&1&virginica&2&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&3&tree-intercept': np.array([-0.5332496863237133, -0.4986198243412803]),
'setosa&1&virginica&3&tree-no-intercept': np.array([-0.6765714285714287, -0.6148571428571429]),
'setosa&1&virginica&3&logreg-intercept': np.array([-0.45370138017565914, -0.2860727728983692]),
'setosa&1&virginica&3&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&4&tree-intercept': np.array([-0.7882057716436645, -0.7053952321204522]),
'setosa&1&virginica&4&tree-no-intercept': np.array([-0.6765714285714287, -0.6148571428571429]),
'setosa&1&virginica&4&logreg-intercept': np.array([-0.6449184441656205, -0.44115432873274896]),
'setosa&1&virginica&4&logreg-no-intercept': np.array([-0.7520000000000002, -0.5279999999999999]),
'setosa&1&virginica&5&tree-intercept': np.array([-0.8752823086574644, -0.8421580928481814]),
'setosa&1&virginica&5&tree-no-intercept': np.array([-0.6274285714285718, -0.641142857142857]),
'setosa&1&virginica&5&logreg-intercept': np.array([-0.7161856963613555, -0.41706398996235955]),
'setosa&1&virginica&5&logreg-no-intercept': np.array([-0.7782857142857146, -0.46742857142857136]),
'setosa&1&virginica&6&tree-intercept': np.array([-0.45244667503136793, -0.5653701380175667]),
'setosa&1&virginica&6&tree-no-intercept': np.array([-0.5782857142857144, -0.6674285714285714]),
'setosa&1&virginica&6&logreg-intercept': np.array([-1.2476787954830602, -0.5867001254705156]),
'setosa&1&virginica&6&logreg-no-intercept': np.array([-0.8342857142857144, -0.25142857142857133]),
'setosa&1&virginica&7&tree-intercept': np.array([-0.6670012547051445, -0.8055207026348825]),
'setosa&1&virginica&7&tree-no-intercept': np.array([-0.5291428571428572, -0.6937142857142856]),
'setosa&1&virginica&7&logreg-intercept': np.array([-1.2476787954830602, -0.5867001254705156]),
'setosa&1&virginica&7&logreg-no-intercept': np.array([-0.8342857142857144, -0.25142857142857133]),
'setosa&1&virginica&8&tree-intercept': np.array([-0.6328732747804267, -1.0424090338770402]),
'setosa&1&virginica&8&tree-no-intercept': np.array([-0.3325714285714286, -0.7988571428571429]),
'setosa&1&virginica&8&logreg-intercept': np.array([-1.5289836888331256, -0.12823086574654946]),
'setosa&1&virginica&8&logreg-no-intercept': np.array([-0.9988571428571431, 0.3017142857142859]),
'setosa&1&virginica&9&tree-intercept': np.array([-0.6088287047662795, -0.7355213832147518]),
'setosa&1&virginica&9&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&9&logreg-intercept': np.array([-0.3549843164256765, -0.37105041695356206]),
'setosa&1&virginica&9&logreg-no-intercept': np.array([-0.7731958762886597, -0.8762886597938145]),
'setosa&1&virginica&10&tree-intercept': np.array([-0.9000076505240656, -1.0872924795348506]),
'setosa&1&virginica&10&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&10&logreg-intercept': np.array([-0.5138091959299242, -0.5629255604008893]),
'setosa&1&virginica&10&logreg-no-intercept': np.array([-0.7731958762886597, -0.8762886597938145]),
'setosa&1&virginica&11&tree-intercept': np.array([-1.1647157830311445, -1.4070843852803956]),
'setosa&1&virginica&11&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&11&logreg-intercept': np.array([-0.5511437533471077, -0.5502256904597981]),
'setosa&1&virginica&11&logreg-no-intercept': np.array([-0.7820324005891016, -0.8291605301914582]),
'setosa&1&virginica&12&tree-intercept': np.array([-0.6088287047662795, -0.7355213832147518]),
'setosa&1&virginica&12&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&12&logreg-intercept': np.array([-0.9916609287736264, -0.8511973070155333]),
'setosa&1&virginica&12&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&13&tree-intercept': np.array([-0.9000076505240656, -1.0872924795348506]),
'setosa&1&virginica&13&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&13&logreg-intercept': np.array([-0.9916609287736264, -0.8511973070155333]),
'setosa&1&virginica&13&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&14&tree-intercept': np.array([-1.1647157830311445, -1.4070843852803956]),
'setosa&1&virginica&14&tree-no-intercept': np.array([-0.7643593519882179, -0.923416789396171]),
'setosa&1&virginica&14&logreg-intercept': np.array([-1.1532399969397957, -0.46836508300818763]),
'setosa&1&virginica&14&logreg-no-intercept': np.array([-0.9057437407952872, -0.16936671575846834]),
'setosa&1&virginica&15&tree-intercept': np.array([-0.3679902073292049, -0.7682656261953961]),
'setosa&1&virginica&15&tree-no-intercept': np.array([-0.5110456553755522, -0.9410898379970547]),
'setosa&1&virginica&15&logreg-intercept': np.array([-1.0181317420243343, -0.8831764975900871]),
'setosa&1&virginica&15&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&16&tree-intercept': np.array([-0.6591691530869906, -1.1200367225154955]),
'setosa&1&virginica&16&tree-no-intercept': np.array([-0.5110456553755522, -0.9410898379970547]),
'setosa&1&virginica&16&logreg-intercept': np.array([-1.044602555275042, -0.9151556881646411]),
'setosa&1&virginica&16&logreg-no-intercept': np.array([-0.8173784977908689, -0.6406480117820326]),
'setosa&1&virginica&17&tree-intercept': np.array([-0.9238772855940695, -1.4398286282610386]),
'setosa&1&virginica&17&tree-no-intercept': np.array([-0.5110456553755522, -0.9410898379970547]),
'setosa&1&virginica&17&logreg-intercept': np.array([-1.2326524366919196, -0.5643026547318511]),
'setosa&1&virginica&17&logreg-no-intercept': np.array([-0.9057437407952872, -0.16936671575846834]),
'versicolor&0&setosa&0&tree-intercept': np.array([-1.9266055045871562, 5.783107597965657e-16]),
'versicolor&0&setosa&0&tree-no-intercept': np.array([-1.1882193635748137, 0.6438727149627623]),
'versicolor&0&setosa&0&logreg-intercept': np.array([-1.8409785932721714, -0.05333333333333287]),
'versicolor&0&setosa&0&logreg-no-intercept': np.array([-1.08801624915369, 0.6032498307379824]),
'versicolor&0&setosa&1&tree-intercept': np.array([-1.9266055045871562, 5.783107597965657e-16]),
'versicolor&0&setosa&1&tree-no-intercept': np.array([-1.1882193635748137, 0.6438727149627623]),
'versicolor&0&setosa&1&logreg-intercept': np.array([-1.8409785932721714, -0.05333333333333287]),
'versicolor&0&setosa&1&logreg-no-intercept': np.array([-1.08801624915369, 0.6032498307379824]),
'versicolor&0&setosa&2&tree-intercept': np.array([-1.9261016949152545, 0.0061016949152551386]),
'versicolor&0&setosa&2&tree-no-intercept': np.array([-1.3682432432432434, 0.7474662162162162]),
'versicolor&0&setosa&2&logreg-intercept': np.array([-1.9261016949152545, 0.0061016949152551386]),
'versicolor&0&setosa&2&logreg-no-intercept': np.array([-1.3682432432432434, 0.7474662162162162]),
'versicolor&0&setosa&3&tree-intercept': np.array([-1.7125382262996922, 0.02666666666666699]),
'versicolor&0&setosa&3&tree-no-intercept': np.array([-1.147596479350034, 0.5192958700067705]),
'versicolor&0&setosa&3&logreg-intercept': np.array([-1.8042813455657487, 0.026666666666667067]),
'versicolor&0&setosa&3&logreg-no-intercept': np.array([-1.1679079214624237, 0.5815842924847664]),
'versicolor&0&setosa&4&tree-intercept': np.array([-1.7125382262996922, 0.02666666666666699]),
'versicolor&0&setosa&4&tree-no-intercept': np.array([-1.147596479350034, 0.5192958700067705]),
'versicolor&0&setosa&4&logreg-intercept': np.array([-1.8042813455657487, 0.026666666666667067]),
'versicolor&0&setosa&4&logreg-no-intercept': np.array([-1.1679079214624237, 0.5815842924847664]),
'versicolor&0&setosa&5&tree-intercept': np.array([-1.710056497175139, 0.030056497175141528]),
'versicolor&0&setosa&5&tree-no-intercept': np.array([-1.287162162162162, 0.5920608108108109]),
'versicolor&0&setosa&5&logreg-intercept': np.array([-1.7665536723163846, 0.08655367231638454]),
'versicolor&0&setosa&5&logreg-no-intercept': np.array([-1.3277027027027026, 0.6697635135135135]),
'versicolor&0&setosa&6&tree-intercept': np.array([0.7047099771051965, -0.009396133028069199]),
'versicolor&0&setosa&6&tree-no-intercept': np.array([0.2180406212664277, -0.3763440860215054]),
'versicolor&0&setosa&6&logreg-intercept': np.array([0.8863597263210817, 0.014848536982385543]),
'versicolor&0&setosa&6&logreg-no-intercept': np.array([0.3960573476702509, -0.35483870967741943]),
'versicolor&0&setosa&7&tree-intercept': np.array([0.7047099771051965, -0.009396133028069199]),
'versicolor&0&setosa&7&tree-no-intercept': np.array([0.2180406212664277, -0.3763440860215054]),
'versicolor&0&setosa&7&logreg-intercept': np.array([0.8863597263210817, 0.014848536982385543]),
'versicolor&0&setosa&7&logreg-no-intercept': np.array([0.3960573476702509, -0.35483870967741943]),
'versicolor&0&setosa&8&tree-intercept': np.array([0.699282674772036, 0.13733738601823722]),
'versicolor&0&setosa&8&tree-no-intercept': np.array([0.3026315789473684, -0.3544407894736842]),
'versicolor&0&setosa&8&logreg-intercept': np.array([0.8812644376899683, 0.13621884498480247]),
'versicolor&0&setosa&8&logreg-no-intercept': np.array([0.4868421052631579, -0.3527960526315789]),
'versicolor&0&setosa&9&tree-intercept': np.array([-0.8256880733944945, 7.845915558428959e-17]),
'versicolor&0&setosa&9&tree-no-intercept': np.array([-0.9444820582261341, -0.10358835477318894]),
'versicolor&0&setosa&9&logreg-intercept': np.array([-1.1620795107033637, 0.02666666666666693]),
'versicolor&0&setosa&9&logreg-no-intercept': np.array([-1.025727826675694, 0.14556533513879485]),
'versicolor&0&setosa&10&tree-intercept': np.array([-0.8256880733944945, 7.845915558428959e-17]),
'versicolor&0&setosa&10&tree-no-intercept': np.array([-0.9444820582261341, -0.10358835477318894]),
'versicolor&0&setosa&10&logreg-intercept': np.array([-1.1314984709480123, -0.026666666666666502]),
'versicolor&0&setosa&10&logreg-no-intercept': np.array([-1.005416384563304, 0.0832769126607989]),
'versicolor&0&setosa&11&tree-intercept': np.array([-0.8176271186440669, 0.09762711864406831]),
'versicolor&0&setosa&11&tree-no-intercept': np.array([-0.9425675675675675, -0.06841216216216213]),
'versicolor&0&setosa&11&logreg-intercept': np.array([-1.0214689265536716, 0.22146892655367253]),
'versicolor&0&setosa&11&logreg-no-intercept': np.array([-1.0641891891891893, 0.16469594594594594]),
'versicolor&0&setosa&12&tree-intercept': np.array([-0.7200000000000022, -1.280753281207582e-16]),
'versicolor&0&setosa&12&tree-no-intercept': np.array([-0.9110204081632652, -0.19102040816326532]),
'versicolor&0&setosa&12&logreg-intercept': np.array([-0.6666666666666671, -0.026666666666666783]),
'versicolor&0&setosa&12&logreg-no-intercept': np.array([-0.8506122448979593, -0.2106122448979592]),
'versicolor&0&setosa&13&tree-intercept': np.array([-0.7200000000000022, -1.280753281207582e-16]),
'versicolor&0&setosa&13&tree-no-intercept': np.array([-0.9110204081632652, -0.19102040816326532]),
'versicolor&0&setosa&13&logreg-intercept': np.array([-0.6666666666666671, -0.026666666666666783]),
'versicolor&0&setosa&13&logreg-no-intercept': np.array([-0.8506122448979593, -0.2106122448979592]),
'versicolor&0&setosa&14&tree-intercept': np.array([-0.7200000000000022, 0.16513761467889929]),
'versicolor&0&setosa&14&tree-no-intercept': np.array([-0.9014011416709913, -0.12610275038920604]),
'versicolor&0&setosa&14&logreg-intercept': np.array([-0.7200000000000022, 0.16513761467889929]),
'versicolor&0&setosa&14&logreg-no-intercept': np.array([-0.9014011416709913, -0.12610275038920604]),
'versicolor&0&setosa&15&tree-intercept': np.array([-0.5866666666666676, -0.026666666666666575]),
'versicolor&0&setosa&15&tree-no-intercept': np.array([-0.8718367346938776, -0.3118367346938776]),
'versicolor&0&setosa&15&logreg-intercept': np.array([-0.6400000000000003, -1.848990096833507e-16]),
'versicolor&0&setosa&15&logreg-no-intercept': np.array([-0.8914285714285715, -0.2514285714285715]),
'versicolor&0&setosa&16&tree-intercept': np.array([-0.5866666666666676, -0.026666666666666575]),
'versicolor&0&setosa&16&tree-no-intercept': np.array([-0.8718367346938776, -0.3118367346938776]),
'versicolor&0&setosa&16&logreg-intercept': np.array([-0.6400000000000003, -1.848990096833507e-16]),
'versicolor&0&setosa&16&logreg-no-intercept': np.array([-0.8914285714285715, -0.2514285714285715]),
'versicolor&0&setosa&17&tree-intercept': np.array([-0.5866666666666676, 0.1590214067278286]),
'versicolor&0&setosa&17&tree-no-intercept': np.array([-0.8360145303580695, -0.24130773222625848]),
'versicolor&0&setosa&17&logreg-intercept': np.array([-0.6133333333333348, 0.1896024464831804]),
'versicolor&0&setosa&17&logreg-no-intercept': np.array([-0.8578100674623768, -0.2029060716139077]),
'versicolor&0&versicolor&0&tree-intercept': np.array([1.712538226299694, 0.13333333333333336]),
'versicolor&0&versicolor&0&tree-no-intercept': np.array([0.9377115775220041, -0.5423155044008124]),
'versicolor&0&versicolor&0&logreg-intercept': np.array([1.5840978593272164, 0.0533333333333329]),
'versicolor&0&versicolor&0&logreg-no-intercept': np.array([0.8280297901150983, -0.6059580230196344]),
'versicolor&0&versicolor&1&tree-intercept': np.array([1.5840978593272161, 0.05333333333333288]),
'versicolor&0&versicolor&1&tree-no-intercept': np.array([0.8280297901150983, -0.6059580230196344]),
'versicolor&0&versicolor&1&logreg-intercept': np.array([1.5412844036697262, -4.102460810749454e-16]),
'versicolor&0&versicolor&1&logreg-no-intercept': np.array([0.7982396750169264, -0.6479350033852405]),
'versicolor&0&versicolor&2&tree-intercept': np.array([1.3724293785310735, 0.5475706214689265]),
'versicolor&0&versicolor&2&tree-no-intercept': np.array([0.6114864864864865, -0.4636824324324324]),
'versicolor&0&versicolor&2&logreg-intercept': np.array([1.5640677966101708, 0.27593220338983027]),
'versicolor&0&versicolor&2&logreg-no-intercept': np.array([0.9020270270270271, -0.6038851351351351]),
'versicolor&0&versicolor&3&tree-intercept': np.array([1.5290519877675843, 0.13333333333333325]),
'versicolor&0&versicolor&3&tree-no-intercept': np.array([0.8970886932972242, -0.41773865944482064]),
'versicolor&0&versicolor&3&logreg-intercept': np.array([1.5474006116207948, -0.026666666666667234]),
'versicolor&0&versicolor&3&logreg-no-intercept': np.array([0.9079214624238322, -0.5842924847664185]),
'versicolor&0&versicolor&4&tree-intercept': np.array([1.4006116207951067, 0.053333333333332955]),
'versicolor&0&versicolor&4&tree-no-intercept': np.array([0.7874069058903183, -0.48138117806364256]),
'versicolor&0&versicolor&4&logreg-intercept': np.array([1.4617737003058104, -0.133333333333334]),
'versicolor&0&versicolor&4&logreg-no-intercept': np.array([0.8483412322274881, -0.6682464454976303]),
'versicolor&0&versicolor&5&tree-intercept': np.array([1.225084745762712, 0.6149152542372882]),
'versicolor&0&versicolor&5&tree-no-intercept': np.array([0.5304054054054054, -0.30827702702702703]),
'versicolor&0&versicolor&5&logreg-intercept': np.array([1.2402259887005642, 0.27977401129943485]),
'versicolor&0&versicolor&5&logreg-no-intercept': np.array([0.6655405405405407, -0.4839527027027027]),
'versicolor&0&versicolor&6&tree-intercept': np.array([-0.6043698635575601, 0.1947249315141009]),
'versicolor&0&versicolor&6&tree-no-intercept': np.array([-0.3064516129032258, 0.4193548387096775]),
'versicolor&0&versicolor&6&logreg-intercept': np.array([0.06429072429628248, -0.4541905429906176]),
'versicolor&0&versicolor&6&logreg-no-intercept': np.array([-0.11768219832735957, -0.5913978494623657]),
'versicolor&0&versicolor&7&tree-intercept': np.array([-0.5339650357979427, 0.06045286714397276]),
'versicolor&0&versicolor&7&tree-no-intercept': np.array([-0.2861409796893668, 0.24731182795698925]),
'versicolor&0&versicolor&7&logreg-intercept': np.array([0.06429072429628248, -0.4541905429906176]),
'versicolor&0&versicolor&7&logreg-no-intercept': np.array([-0.11768219832735957, -0.5913978494623657]),
'versicolor&0&versicolor&8&tree-intercept': np.array([-0.4594772036474156, 0.7093981762917936]),
'versicolor&0&versicolor&8&tree-no-intercept': np.array([-0.618421052631579, 0.5123355263157895]),
'versicolor&0&versicolor&8&logreg-intercept': np.array([0.4359878419452881, 0.15392097264437712]),
'versicolor&0&versicolor&8&logreg-no-intercept': np.array([-0.0921052631578948, -0.5008223684210525]),
'versicolor&0&versicolor&9&tree-intercept': np.array([0.6727828746177367, 0.1866666666666667]),
'versicolor&0&versicolor&9&tree-no-intercept': np.array([0.6939742721733244, 0.20514556533513884]),
'versicolor&0&versicolor&9&logreg-intercept': np.array([0.9051987767584099, -0.026666666666666818]),
'versicolor&0&versicolor&9&logreg-no-intercept': np.array([0.7657413676371023, -0.14827352742044686]),
'versicolor&0&versicolor&10&tree-intercept': np.array([0.6360856269113152, 0.026666666666666283]),
'versicolor&0&versicolor&10&tree-no-intercept': np.array([0.6249153689911983, 0.01692620176032502]),
'versicolor&0&versicolor&10&logreg-intercept': np.array([0.788990825688073, -0.08000000000000025]),
'versicolor&0&versicolor&10&logreg-no-intercept': np.array([0.6858496953283683, -0.16993906567366282]),
'versicolor&0&versicolor&11&tree-intercept': np.array([0.573107344632768, 0.8668926553672316]),
'versicolor&0&versicolor&11&tree-no-intercept': np.array([0.18581081081081083, 0.35219594594594594]),
'versicolor&0&versicolor&11&logreg-intercept': np.array([0.49514124293785294, 0.14485875706214688]),
'versicolor&0&versicolor&11&logreg-no-intercept': np.array([0.40202702702702703, 0.021114864864864837]),
'versicolor&0&versicolor&12&tree-intercept': np.array([0.7466666666666678, 0.1866666666666669]),
'versicolor&0&versicolor&12&tree-no-intercept': np.array([0.7902040816326531, 0.23020408163265307]),
'versicolor&0&versicolor&12&logreg-intercept': np.array([1.360000000000002, -0.40000000000000047]),
'versicolor&0&versicolor&12&logreg-no-intercept': np.array([1.0840816326530613, -0.6759183673469389]),
'versicolor&0&versicolor&13&tree-intercept': np.array([0.6666666666666685, 0.026666666666666446]),
'versicolor&0&versicolor&13&tree-no-intercept': np.array([0.6873469387755102, 0.04734693877551018]),
'versicolor&0&versicolor&13&logreg-intercept': np.array([1.360000000000002, -0.40000000000000047]),
'versicolor&0&versicolor&13&logreg-no-intercept': np.array([1.0840816326530613, -0.6759183673469389]),
'versicolor&0&versicolor&14&tree-intercept': np.array([0.6133333333333342, 0.8195718654434251]),
'versicolor&0&versicolor&14&tree-no-intercept': np.array([0.3035806953814219, 0.32226258432797095]),
'versicolor&0&versicolor&14&logreg-intercept': np.array([1.6000000000000019, 0.22018348623853212]),
'versicolor&0&versicolor&14&logreg-no-intercept': np.array([1.0934094447327454, -0.5931499740529322]),
'versicolor&0&versicolor&15&tree-intercept': np.array([0.6133333333333342, 0.21333333333333307]),
'versicolor&0&versicolor&15&tree-no-intercept': np.array([0.7510204081632653, 0.35102040816326535]),
'versicolor&0&versicolor&15&logreg-intercept': np.array([1.2800000000000025, -0.40000000000000036]),
'versicolor&0&versicolor&15&logreg-no-intercept': np.array([1.0644897959183675, -0.6155102040816327]),
'versicolor&0&versicolor&16&tree-intercept': np.array([0.5333333333333344, 0.053333333333333434]),
'versicolor&0&versicolor&16&tree-no-intercept': np.array([0.6481632653061226, 0.16816326530612244]),
'versicolor&0&versicolor&16&logreg-intercept': np.array([1.2266666666666681, -0.4533333333333338]),
'versicolor&0&versicolor&16&logreg-no-intercept': np.array([1.0236734693877552, -0.656326530612245]),
'versicolor&0&versicolor&17&tree-intercept': np.array([0.5066666666666672, 0.8685015290519882]),
'versicolor&0&versicolor&17&tree-no-intercept': np.array([0.23819408406850026, 0.43746756616502336]),
'versicolor&0&versicolor&17&logreg-intercept': np.array([1.3333333333333346, 0.324159021406728]),
'versicolor&0&versicolor&17&logreg-no-intercept': np.array([0.8505448884276077, -0.4509600415153089]),
'versicolor&0&virginica&0&tree-intercept': np.array([0.21406727828746208, -0.13333333333333322]),
'versicolor&0&virginica&0&tree-no-intercept': np.array([-0.5111712931618145, -0.7657413676371024]),
'versicolor&0&virginica&0&logreg-intercept': np.array([0.2568807339449539, -8.946858431370215e-17]),
'versicolor&0&virginica&0&logreg-no-intercept': np.array([-0.5016926201760326, -0.6614759647935005]),
'versicolor&0&virginica&1&tree-intercept': np.array([0.3425076452599386, -0.05333333333333347]),
'versicolor&0&virginica&1&tree-no-intercept': np.array([-0.40148950575490855, -0.7020988490182805]),
'versicolor&0&virginica&1&logreg-intercept': np.array([0.2996941896024467, 0.05333333333333336]),
'versicolor&0&virginica&1&logreg-no-intercept': np.array([-0.47190250507786047, -0.6194989844278944]),
'versicolor&0&virginica&2&tree-intercept': np.array([0.553672316384182, -0.5536723163841808]),
'versicolor&0&virginica&2&tree-no-intercept': np.array([0.179054054054054, -1.0515202702702702]),
'versicolor&0&virginica&2&logreg-intercept': np.array([0.3620338983050849, -0.28203389830508535]),
'versicolor&0&virginica&2&logreg-no-intercept': np.array([-0.11148648648648658, -0.9113175675675674]),
'versicolor&0&virginica&3&tree-intercept': np.array([0.18348623853210996, -0.15999999999999964]),
'versicolor&0&virginica&3&tree-no-intercept': np.array([-0.5111712931618145, -0.7657413676371024]),
'versicolor&0&virginica&3&logreg-intercept': np.array([0.2568807339449539, -8.946858431370215e-17]),
'versicolor&0&virginica&3&logreg-no-intercept': np.array([-0.5016926201760326, -0.6614759647935005]),
'versicolor&0&virginica&4&tree-intercept': np.array([0.31192660550458734, -0.08000000000000036]),
'versicolor&0&virginica&4&tree-no-intercept': np.array([-0.40148950575490855, -0.7020988490182805]),
'versicolor&0&virginica&4&logreg-intercept': np.array([0.3425076452599389, 0.10666666666666669]),
'versicolor&0&virginica&4&logreg-no-intercept': np.array([-0.4421123899796886, -0.5775220040622885]),
'versicolor&0&virginica&5&tree-intercept': np.array([0.48497175141242926, -0.6449717514124298]),
'versicolor&0&virginica&5&tree-no-intercept': np.array([0.179054054054054, -1.0515202702702702]),
'versicolor&0&virginica&5&logreg-intercept': np.array([0.5263276836158192, -0.3663276836158193]),
'versicolor&0&virginica&5&logreg-no-intercept': np.array([0.08445945945945933, -0.9535472972972971]),
'versicolor&0&virginica&6&tree-intercept': np.array([-0.10034011354763567, -0.18532879848603112]),
'versicolor&0&virginica&6&tree-no-intercept': np.array([-0.6959378733572282, -0.6344086021505377]),
'versicolor&0&virginica&6&logreg-intercept': np.array([-0.9506504506173643, 0.4393420060082324]),
'versicolor&0&virginica&6&logreg-no-intercept': np.array([-1.0627240143369177, 0.35483870967741943]),
'versicolor&0&virginica&7&tree-intercept': np.array([-0.17074494130725346, -0.05105673411590355]),
'versicolor&0&virginica&7&tree-no-intercept': np.array([-0.7162485065710872, -0.4623655913978496]),
'versicolor&0&virginica&7&logreg-intercept': np.array([-0.9506504506173643, 0.4393420060082324]),
'versicolor&0&virginica&7&logreg-no-intercept': np.array([-1.0627240143369177, 0.35483870967741943]),
'versicolor&0&virginica&8&tree-intercept': np.array([-0.23980547112461933, -0.8467355623100313]),
'versicolor&0&virginica&8&tree-no-intercept': np.array([-0.2500000000000001, -0.8593749999999999]),
'versicolor&0&virginica&8&logreg-intercept': np.array([-1.3172522796352566, -0.29013981762917995]),
'versicolor&0&virginica&8&logreg-no-intercept': np.array([-0.9605263157894737, 0.15213815789473678]),
'versicolor&0&virginica&9&tree-intercept': np.array([0.1529051987767585, -0.1866666666666663]),
'versicolor&0&virginica&9&tree-no-intercept': np.array([-0.5111712931618145, -0.7657413676371024]),
'versicolor&0&virginica&9&logreg-intercept': np.array([0.2568807339449539, -8.946858431370215e-17]),
'versicolor&0&virginica&9&logreg-no-intercept': np.array([-0.5016926201760326, -0.6614759647935005]),
'versicolor&0&virginica&10&tree-intercept': np.array([0.18960244648318034, -0.02666666666666668]),
'versicolor&0&virginica&10&tree-no-intercept': np.array([-0.4421123899796886, -0.5775220040622885]),
'versicolor&0&virginica&10&logreg-intercept': np.array([0.3425076452599389, 0.10666666666666669]),
'versicolor&0&virginica&10&logreg-no-intercept': np.array([-0.4421123899796886, -0.5775220040622885]),
'versicolor&0&virginica&11&tree-intercept': np.array([0.2445197740113, -0.9645197740112978]),
'versicolor&0&virginica&11&tree-no-intercept': np.array([0.179054054054054, -1.0515202702702702]),
'versicolor&0&virginica&11&logreg-intercept': np.array([0.5263276836158192, -0.3663276836158193]),
'versicolor&0&virginica&11&logreg-no-intercept': np.array([0.08445945945945933, -0.9535472972972971]),
'versicolor&0&virginica&12&tree-intercept': np.array([-0.026666666666666748, -0.18666666666666626]),
'versicolor&0&virginica&12&tree-no-intercept': np.array([-0.6138775510204082, -0.7738775510204082]),
'versicolor&0&virginica&12&logreg-intercept': np.array([-0.6933333333333341, 0.42666666666666747]),
'versicolor&0&virginica&12&logreg-no-intercept': np.array([-0.9681632653061225, 0.1518367346938776]),
'versicolor&0&virginica&13&tree-intercept': np.array([0.053333333333333365, -0.026666666666666634]),
'versicolor&0&virginica&13&tree-no-intercept': np.array([-0.5110204081632653, -0.5910204081632653]),
'versicolor&0&virginica&13&logreg-intercept': np.array([-0.6933333333333341, 0.42666666666666747]),
'versicolor&0&virginica&13&logreg-no-intercept': np.array([-0.9681632653061225, 0.1518367346938776]),
'versicolor&0&virginica&14&tree-intercept': np.array([0.10666666666666684, -0.9847094801223223]),
'versicolor&0&virginica&14&tree-no-intercept': np.array([0.08873897249610807, -1.0134924753502856]),
'versicolor&0&virginica&14&logreg-intercept': np.array([-0.8800000000000017, -0.38532110091743144]),
'versicolor&0&virginica&14&logreg-no-intercept': np.array([-0.7010897768552155, -0.09807991696938245]),
'versicolor&0&virginica&15&tree-intercept': np.array([-0.026666666666666748, -0.18666666666666626]),
'versicolor&0&virginica&15&tree-no-intercept': np.array([-0.6138775510204082, -0.7738775510204082]),
'versicolor&0&virginica&15&logreg-intercept': np.array([-0.6400000000000012, 0.40000000000000047]),
'versicolor&0&virginica&15&logreg-no-intercept': np.array([-0.9077551020408163, 0.1322448979591837]),
'versicolor&0&virginica&16&tree-intercept': np.array([0.053333333333333365, -0.026666666666666634]),
'versicolor&0&virginica&16&tree-no-intercept': np.array([-0.5110204081632653, -0.5910204081632653]),
'versicolor&0&virginica&16&logreg-intercept': np.array([-0.5866666666666672, 0.45333333333333425]),
'versicolor&0&virginica&16&logreg-no-intercept': np.array([-0.8669387755102042, 0.17306122448979594]),
'versicolor&0&virginica&17&tree-intercept': np.array([0.0800000000000002, -1.0275229357798168]),
'versicolor&0&virginica&17&tree-no-intercept': np.array([0.08873897249610807, -1.0134924753502856]),
'versicolor&0&virginica&17&logreg-intercept': np.array([-0.7200000000000009, -0.5137614678899083]),
'versicolor&0&virginica&17&logreg-no-intercept': np.array([-0.5018162947586923, -0.1634665282823041]),
'versicolor&1&setosa&0&tree-intercept': np.array([-1.9276406561398112, 0.008585006898666207]),
'versicolor&1&setosa&0&tree-no-intercept': np.array([-1.0902378999179656, 0.6324856439704676]),
'versicolor&1&setosa&0&logreg-intercept': np.array([-1.8913587817466402, -0.009709233992539434]),
'versicolor&1&setosa&0&logreg-no-intercept': np.array([-1.0492206726825268, 0.6177194421657095]),
'versicolor&1&setosa&1&tree-intercept': np.array([-1.9276406561398112, 0.008585006898666207]),
'versicolor&1&setosa&1&tree-no-intercept': np.array([-1.0902378999179656, 0.6324856439704676]),
'versicolor&1&setosa&1&logreg-intercept': np.array([-1.8913587817466402, -0.009709233992539434]),
'versicolor&1&setosa&1&logreg-no-intercept': np.array([-1.0492206726825268, 0.6177194421657095]),
'versicolor&1&setosa&2&tree-intercept': np.array([-1.9280332846058676, 0.004947711683347047]),
'versicolor&1&setosa&2&tree-no-intercept': np.array([-1.2034518828451883, 0.6433054393305441]),
'versicolor&1&setosa&2&logreg-intercept': np.array([-1.9280332846058676, 0.004947711683347047]),
'versicolor&1&setosa&2&logreg-no-intercept': np.array([-1.2034518828451883, 0.6433054393305441]),
'versicolor&1&setosa&3&tree-intercept': np.array([-1.6922683836680463, -0.008687209361745997]),
'versicolor&1&setosa&3&tree-no-intercept': np.array([-1.0607054963084497, 0.4618539786710419]),
'versicolor&1&setosa&3&logreg-intercept': np.array([-1.7919157851704224, 0.01338852266339612]),
'versicolor&1&setosa&3&logreg-no-intercept': np.array([-1.0754716981132078, 0.5471698113207547]),
'versicolor&1&setosa&4&tree-intercept': np.array([-1.6922683836680463, -0.008687209361745997]),
'versicolor&1&setosa&4&tree-no-intercept': np.array([-1.0607054963084497, 0.4618539786710419]),
'versicolor&1&setosa&4&logreg-intercept': np.array([-1.7919157851704224, 0.01338852266339612]),
'versicolor&1&setosa&4&logreg-no-intercept': np.array([-1.0754716981132078, 0.5471698113207547]),
'versicolor&1&setosa&5&tree-intercept': np.array([-1.6964653847595488, -0.07496532853555218]),
'versicolor&1&setosa&5&tree-no-intercept': np.array([-1.1281380753138077, 0.4257322175732219]),
'versicolor&1&setosa&5&logreg-intercept': np.array([-1.7583867461299152, 0.012444244536901855]),
'versicolor&1&setosa&5&logreg-no-intercept': np.array([-1.1657949790794981, 0.5345188284518829]),
'versicolor&1&setosa&6&tree-intercept': np.array([0.792951541850221, -0.3083700440528633]),
'versicolor&1&setosa&6&tree-no-intercept': np.array([0.14545454545454553, -0.6472727272727274]),
'versicolor&1&setosa&6&logreg-intercept': np.array([0.9710903083700437, -0.2491740088105726]),
'versicolor&1&setosa&6&logreg-no-intercept': np.array([0.31818181818181823, -0.590909090909091]),
'versicolor&1&setosa&7&tree-intercept': np.array([0.792951541850221, -0.3083700440528633]),
'versicolor&1&setosa&7&tree-no-intercept': np.array([0.14545454545454553, -0.6472727272727274]),
'versicolor&1&setosa&7&logreg-intercept': np.array([0.9710903083700437, -0.2491740088105726]),
'versicolor&1&setosa&7&logreg-no-intercept': np.array([0.31818181818181823, -0.590909090909091]),
'versicolor&1&setosa&8&tree-intercept': np.array([0.7754077791718955, -0.2057716436637395]),
'versicolor&1&setosa&8&tree-no-intercept': np.array([0.24000000000000005, -0.64]),
'versicolor&1&setosa&8&logreg-intercept': np.array([0.9548306148055205, -0.14529485570890852]),
'versicolor&1&setosa&8&logreg-no-intercept': np.array([0.406857142857143, -0.5897142857142859]),
'versicolor&1&setosa&9&tree-intercept': np.array([-0.5411620420052133, -0.4031887168480761]),
'versicolor&1&setosa&9&tree-no-intercept': np.array([-0.8687448728465956, -0.6472518457752255]),
'versicolor&1&setosa&9&logreg-intercept': np.array([-0.9036741785477033, -0.28800654095763695]),
'versicolor&1&setosa&9&logreg-no-intercept': np.array([-0.9278096800656276, -0.305988515176374]),
'versicolor&1&setosa&10&tree-intercept': np.array([-0.5411620420052133, -0.4031887168480761]),
'versicolor&1&setosa&10&tree-no-intercept': np.array([-0.8687448728465956, -0.6472518457752255]),
'versicolor&1&setosa&10&logreg-intercept': np.array([-0.8761817159793557, -0.36384076856252284]),
'versicolor&1&setosa&10&logreg-no-intercept': np.array([-0.9130434782608696, -0.39130434782608686]),
'versicolor&1&setosa&11&tree-intercept': np.array([-0.5237827504779049, -0.24851006409535661]),
'versicolor&1&setosa&11&tree-no-intercept': np.array([-0.8080543933054394, -0.49895397489539745]),
'versicolor&1&setosa&11&logreg-intercept': np.array([-0.7454552269575321, -0.017916713519996647]),
'versicolor&1&setosa&11&logreg-no-intercept': np.array([-0.9210251046025106, -0.17259414225941416]),
'versicolor&1&setosa&12&tree-intercept': np.array([-0.38930264048747576, -0.22681110358835407]),
'versicolor&1&setosa&12&tree-no-intercept': np.array([-0.8117647058823529, -0.47294117647058825]),
'versicolor&1&setosa&12&logreg-intercept': np.array([-0.33536447754457255, -0.26495147822162013]),
'versicolor&1&setosa&12&logreg-no-intercept': np.array([-0.7529411764705882, -0.5082352941176471]),
'versicolor&1&setosa&13&tree-intercept': np.array([-0.38930264048747576, -0.22681110358835407]),
'versicolor&1&setosa&13&tree-no-intercept': np.array([-0.8117647058823529, -0.47294117647058825]),
'versicolor&1&setosa&13&logreg-intercept': np.array([-0.33536447754457255, -0.26495147822162013]),
'versicolor&1&setosa&13&logreg-no-intercept': np.array([-0.7529411764705882, -0.5082352941176471]),
'versicolor&1&setosa&14&tree-intercept': np.array([-0.41295938104448854, -0.1402321083172151]),
'versicolor&1&setosa&14&tree-no-intercept': np.array([-0.7457386363636362, -0.47301136363636365]),
'versicolor&1&setosa&14&logreg-intercept': np.array([-0.41295938104448854, -0.1402321083172151]),
'versicolor&1&setosa&14&logreg-no-intercept': np.array([-0.7457386363636362, -0.47301136363636365]),
'versicolor&1&setosa&15&tree-intercept': np.array([-0.31144211238997993, -0.1814488828706828]),
'versicolor&1&setosa&15&tree-no-intercept': np.array([-0.8117647058823529, -0.47294117647058825]),
'versicolor&1&setosa&15&logreg-intercept': np.array([-0.3373956217558123, -0.19656962310990667]),
'versicolor&1&setosa&15&logreg-no-intercept': np.array([-0.8117647058823529, -0.47294117647058825]),
'versicolor&1&setosa&16&tree-intercept': np.array([-0.31144211238997993, -0.1814488828706828]),
'versicolor&1&setosa&16&tree-no-intercept': np.array([-0.8117647058823529, -0.47294117647058825]),
'versicolor&1&setosa&16&logreg-intercept': np.array([-0.3373956217558123, -0.19656962310990667]),
'versicolor&1&setosa&16&logreg-no-intercept': np.array([-0.8117647058823529, -0.47294117647058825]),
'versicolor&1&setosa&17&tree-intercept': np.array([-0.3249516441005804, -0.14313346228239884]),
'versicolor&1&setosa&17&tree-no-intercept': np.array([-0.7159090909090909, -0.5340909090909092]),
'versicolor&1&setosa&17&logreg-intercept': np.array([-0.36331399097356637, -0.0905867182462926]),
'versicolor&1&setosa&17&logreg-no-intercept': np.array([-0.7457386363636362, -0.47301136363636365]),
'versicolor&1&versicolor&0&tree-intercept': np.array([1.2745669170627014, 0.32071132914303224]),
'versicolor&1&versicolor&0&tree-no-intercept': np.array([0.3519278096800657, -0.36669401148482367]),
'versicolor&1&versicolor&0&logreg-intercept': np.array([1.4025244008380604, 0.04210741478869665]),
'versicolor&1&versicolor&0&logreg-no-intercept': np.array([0.5455291222313372, -0.5963904840032814]),
'versicolor&1&versicolor&1&tree-intercept': np.array([0.9671419081199849, 0.2616383054831618]),
'versicolor&1&versicolor&1&tree-no-intercept': np.array([0.05332239540607054, -0.41919606234618534]),
'versicolor&1&versicolor&1&logreg-intercept': np.array([1.3213756451530474, -0.024017578823649452]),
'versicolor&1&versicolor&1&logreg-no-intercept': np.array([0.47826086956521746, -0.6521739130434783]),
'versicolor&1&versicolor&2&tree-intercept': np.array([0.8462835938378496, 0.7956819970763532]),
'versicolor&1&versicolor&2&tree-no-intercept': np.array([-0.14592050209205024, -0.07845188284518828]),
'versicolor&1&versicolor&2&logreg-intercept': np.array([1.3822107275385127, 0.3200269875182732]),
'versicolor&1&versicolor&2&logreg-no-intercept': np.array([0.5475941422594143, -0.41527196652719667]),
'versicolor&1&versicolor&3&tree-intercept': np.array([1.0752721140579484, 0.3648627931933156]),
'versicolor&1&versicolor&3&tree-no-intercept': np.array([0.3223954060705497, -0.19606234618539786]),
'versicolor&1&versicolor&3&logreg-intercept': np.array([1.2667995298686694, 0.03730389902396669]),
'versicolor&1&versicolor&3&logreg-no-intercept': np.array([0.5307629204265792, -0.5110746513535686]),
'versicolor&1&versicolor&4&tree-intercept': np.array([0.7678471051152332, 0.30578976953344555]),
'versicolor&1&versicolor&4&tree-no-intercept': np.array([0.023789991796554544, -0.2485643970467596]),
'versicolor&1&versicolor&4&logreg-intercept': np.array([1.0405232766109662, 0.04435586897644245]),
'versicolor&1&versicolor&4&logreg-no-intercept': np.array([0.29942575881870387, -0.5077932731747333]),
'versicolor&1&versicolor&5&tree-intercept': np.array([0.6865324787285877, 0.9388657745792586]),
'versicolor&1&versicolor&5&tree-no-intercept': np.array([-0.221234309623431, 0.1391213389121339]),
'versicolor&1&versicolor&5&logreg-intercept': np.array([0.9552082162000072, 0.42400389819708445]),
'versicolor&1&versicolor&5&logreg-no-intercept': np.array([0.21391213389121339, -0.2290794979079498]),
'versicolor&1&versicolor&6&tree-intercept': np.array([-0.33287444933920707, 0.7023678414096908]),
'versicolor&1&versicolor&6&tree-no-intercept': np.array([-0.28181818181818186, 0.7290909090909092]),
'versicolor&1&versicolor&6&logreg-intercept': np.array([0.36508810572687256, 0.003854625550660802]),
'versicolor&1&versicolor&6&logreg-no-intercept': np.array([-0.1181818181818182, -0.24909090909090909]),
'versicolor&1&versicolor&7&tree-intercept': np.array([-0.07640418502202659, 0.5557544052863433]),
'versicolor&1&versicolor&7&tree-no-intercept': np.array([-0.2454545454545455, 0.4672727272727273]),
'versicolor&1&versicolor&7&logreg-intercept': np.array([0.36508810572687256, 0.003854625550660802]),
'versicolor&1&versicolor&7&logreg-no-intercept': np.array([-0.1181818181818182, -0.24909090909090909]),
'versicolor&1&versicolor&8&tree-intercept': np.array([-0.14253450439146828, 1.2481806775407795]),
'versicolor&1&versicolor&8&tree-no-intercept': np.array([-0.6331428571428572, 0.8502857142857143]),
'versicolor&1&versicolor&8&logreg-intercept': np.array([0.5741530740276033, 0.2735257214554584]),
'versicolor&1&versicolor&8&logreg-no-intercept': np.array([-0.13371428571428576, -0.30057142857142854]),
'versicolor&1&versicolor&9&tree-intercept': np.array([0.0684756502631714, 0.8668812918391341]),
'versicolor&1&versicolor&9&tree-no-intercept': np.array([0.1304347826086957, 0.9130434782608694]),
'versicolor&1&versicolor&9&logreg-intercept': np.array([0.37855792324595006, 0.3386989626449994]),
'versicolor&1&versicolor&9&logreg-no-intercept': np.array([0.38310090237899924, 0.34208367514356025]),
'versicolor&1&versicolor&10&tree-intercept': np.array([-0.1478869640758337, 0.6830190607593644]),
'versicolor&1&versicolor&10&tree-no-intercept': np.array([-0.13863822805578344, 0.689909762100082]),
'versicolor&1&versicolor&10&logreg-intercept': np.array([0.12478920741989878, 0.42158516020236075]),
'versicolor&1&versicolor&10&logreg-no-intercept': np.array([0.13699753896636588, 0.4306808859721083]),
'versicolor&1&versicolor&11&tree-intercept': np.array([-0.0552494471307019, 1.4920349338431007]),
'versicolor&1&versicolor&11&tree-no-intercept': np.array([-0.5413179916317992, 1.0638075313807531]),
'versicolor&1&versicolor&11&logreg-intercept': np.array([-0.05772330297237527, 0.4543648562539831]),
'versicolor&1&versicolor&11&logreg-no-intercept': np.array([-0.03085774058577407, 0.4780334728033473]),
'versicolor&1&versicolor&12&tree-intercept': np.array([0.5867749943579327, 0.6896863010607092]),
'versicolor&1&versicolor&12&tree-no-intercept': np.array([0.5176470588235295, 0.6494117647058824]),
'versicolor&1&versicolor&12&logreg-intercept': np.array([1.4468517264725833, -0.04400812457684755]),
'versicolor&1&versicolor&12&logreg-no-intercept': np.array([0.9529411764705884, -0.331764705882353]),
'versicolor&1&versicolor&13&tree-intercept': np.array([0.7246671180320474, 0.4917625818099747]),
'versicolor&1&versicolor&13&tree-no-intercept': np.array([0.48235294117647065, 0.35058823529411764]),
'versicolor&1&versicolor&13&logreg-intercept': np.array([1.4468517264725833, -0.04400812457684755]),
'versicolor&1&versicolor&13&logreg-no-intercept': np.array([0.9529411764705884, -0.331764705882353]),
'versicolor&1&versicolor&14&tree-intercept': np.array([0.42198581560283754, 1.42198581560284]),
'versicolor&1&versicolor&14&tree-no-intercept': np.array([-0.10937500000000007, 0.8906250000000001]),
'versicolor&1&versicolor&14&logreg-intercept': np.array([1.5583494519664765, 0.2856221792392003]),
'versicolor&1&versicolor&14&logreg-no-intercept': np.array([0.9176136363636364, -0.35511363636363635]),
'versicolor&1&versicolor&15&tree-intercept': np.array([0.5608214849921023, 0.6745655608214859]),
'versicolor&1&versicolor&15&tree-no-intercept': np.array([0.5176470588235295, 0.6494117647058824]),
'versicolor&1&versicolor&15&logreg-intercept': np.array([1.394944707740918, -0.07424960505529424]),
'versicolor&1&versicolor&15&logreg-no-intercept': np.array([0.9529411764705884, -0.331764705882353]),
'versicolor&1&versicolor&16&tree-intercept': np.array([0.6987136086662175, 0.476641841570749]),
'versicolor&1&versicolor&16&tree-no-intercept': np.array([0.48235294117647065, 0.35058823529411764]),
'versicolor&1&versicolor&16&logreg-intercept': np.array([1.3640261791920592, -0.1444369216881082]),
'versicolor&1&versicolor&16&logreg-no-intercept': np.array([0.9294117647058823, -0.3976470588235294]),
'versicolor&1&versicolor&17&tree-intercept': np.array([0.383623468729851, 1.4745325596389454]),
'versicolor&1&versicolor&17&tree-no-intercept': np.array([-0.13920454545454547, 0.9517045454545455]),
'versicolor&1&versicolor&17&logreg-intercept': np.array([1.3510638297872353, 0.35106382978723477]),
'versicolor&1&versicolor&17&logreg-no-intercept': np.array([0.734375, -0.26562499999999994]),
'versicolor&1&virginica&0&tree-intercept': np.array([0.6530737390771106, -0.3292963360416981]),
'versicolor&1&virginica&0&tree-no-intercept': np.array([-0.1304347826086957, -0.9130434782608694]),
'versicolor&1&virginica&0&logreg-intercept': np.array([0.48883438090857895, -0.03239818079615714]),
'versicolor&1&virginica&0&logreg-no-intercept': np.array([-0.3650533223954061, -0.6685808039376537]),
'versicolor&1&virginica&1&tree-intercept': np.array([0.9604987480198254, -0.27022331238182823]),
'versicolor&1&virginica&1&tree-no-intercept': np.array([0.16817063166529944, -0.8605414273995078]),
'versicolor&1&virginica&1&logreg-intercept': np.array([0.5699831365935911, 0.03372681281618891]),
'versicolor&1&virginica&1&logreg-no-intercept': np.array([-0.2977850697292863, -0.6127973748974569]),
'versicolor&1&virginica&2&tree-intercept': np.array([1.0817496907680186, -0.8006297087597005]),
'versicolor&1&virginica&2&tree-no-intercept': np.array([0.5978033472803348, -1.2269874476987448]),
'versicolor&1&virginica&2&logreg-intercept': np.array([0.5458225570673556, -0.3249746992016201]),
'versicolor&1&virginica&2&logreg-no-intercept': np.array([-0.09571129707112974, -0.8901673640167364]),
'versicolor&1&virginica&3&tree-intercept': np.array([0.6169962696100971, -0.35617558383157105]),
'versicolor&1&virginica&3&tree-no-intercept': np.array([-0.1304347826086957, -0.9130434782608694]),
'versicolor&1&virginica&3&logreg-intercept': np.array([0.5251162553017522, -0.05069242168736262]),
'versicolor&1&virginica&3&logreg-no-intercept': np.array([-0.3240360951599673, -0.6833470057424116]),
'versicolor&1&virginica&4&tree-intercept': np.array([0.924421278552812, -0.2971025601717011]),
'versicolor&1&virginica&4&tree-no-intercept': np.array([0.16817063166529944, -0.8605414273995078]),
'versicolor&1&virginica&4&logreg-intercept': np.array([0.7513925085594562, -0.057744391639838154]),
'versicolor&1&virginica&4&logreg-no-intercept': np.array([-0.09269893355209187, -0.6866283839212469]),
'versicolor&1&virginica&5&tree-intercept': np.array([1.0099329060309608, -0.8639004460437059]),
'versicolor&1&virginica&5&tree-no-intercept': np.array([0.5978033472803348, -1.2269874476987448]),
'versicolor&1&virginica&5&logreg-intercept': np.array([0.8031785299299063, -0.4364481427339867]),
'versicolor&1&virginica&5&logreg-no-intercept': np.array([0.20031380753138087, -0.9675732217573223]),
'versicolor&1&virginica&6&tree-intercept': np.array([-0.4600770925110137, -0.39399779735682705]),
'versicolor&1&virginica&6&tree-no-intercept': np.array([-0.7181818181818183, -0.5290909090909091]),
'versicolor&1&virginica&6&logreg-intercept': np.array([-1.3361784140969153, 0.24531938325991165]),
'versicolor&1&virginica&6&logreg-no-intercept': np.array([-1.0545454545454547, 0.39272727272727276]),
'versicolor&1&virginica&7&tree-intercept': np.array([-0.7165473568281941, -0.2473843612334808]),
'versicolor&1&virginica&7&tree-no-intercept': np.array([-0.7545454545454546, -0.26727272727272733]),
'versicolor&1&virginica&7&logreg-intercept': np.array([-1.3361784140969153, 0.24531938325991165]),
'versicolor&1&virginica&7&logreg-no-intercept': np.array([-1.0545454545454547, 0.39272727272727276]),
'versicolor&1&virginica&8&tree-intercept': np.array([-0.6328732747804267, -1.0424090338770402]),
'versicolor&1&virginica&8&tree-no-intercept': np.array([-0.3325714285714286, -0.7988571428571429]),
'versicolor&1&virginica&8&logreg-intercept': np.array([-1.5289836888331256, -0.12823086574654946]),
'versicolor&1&virginica&8&logreg-no-intercept': np.array([-0.9988571428571431, 0.3017142857142859]),
'versicolor&1&virginica&9&tree-intercept': np.array([0.4726863917420401, -0.46369257499105654]),
'versicolor&1&virginica&9&tree-no-intercept': np.array([-0.1304347826086957, -0.9130434782608694]),
'versicolor&1&virginica&9&logreg-intercept': np.array([0.5251162553017522, -0.05069242168736262]),
'versicolor&1&virginica&9&logreg-no-intercept': np.array([-0.3240360951599673, -0.6833470057424116]),
'versicolor&1&virginica&10&tree-intercept': np.array([0.6890490060810458, -0.2798303439112883]),
'versicolor&1&virginica&10&tree-no-intercept': np.array([0.13863822805578344, -0.689909762100082]),
'versicolor&1&virginica&10&logreg-intercept': np.array([0.7513925085594562, -0.057744391639838154]),
'versicolor&1&virginica&10&logreg-no-intercept': np.array([-0.09269893355209187, -0.6866283839212469]),
'versicolor&1&virginica&11&tree-intercept': np.array([0.579032197608606, -1.2435248697477443]),
'versicolor&1&virginica&11&tree-no-intercept': np.array([0.5978033472803348, -1.2269874476987448]),
'versicolor&1&virginica&11&logreg-intercept': np.array([0.8031785299299063, -0.4364481427339867]),
'versicolor&1&virginica&11&logreg-no-intercept': np.array([0.20031380753138087, -0.9675732217573223]),
'versicolor&1&virginica&12&tree-intercept': np.array([-0.19747235387045856, -0.4628751974723528]),
'versicolor&1&virginica&12&tree-no-intercept': np.array([-0.5176470588235295, -0.6494117647058824]),
'versicolor&1&virginica&12&logreg-intercept': np.array([-1.1114872489280097, 0.3089596027984669]),
'versicolor&1&virginica&12&logreg-no-intercept': np.array([-1.011764705882353, 0.3670588235294118]),
'versicolor&1&virginica&13&tree-intercept': np.array([-0.3353644775445726, -0.2649514782216201]),
'versicolor&1&virginica&13&tree-no-intercept': np.array([-0.48235294117647065, -0.35058823529411764]),
'versicolor&1&virginica&13&logreg-intercept': np.array([-1.1114872489280097, 0.3089596027984669]),
'versicolor&1&virginica&13&logreg-no-intercept': np.array([-1.011764705882353, 0.3670588235294118]),
'versicolor&1&virginica&14&tree-intercept': np.array([-0.00902643455834887, -1.281753707285625]),
'versicolor&1&virginica&14&tree-no-intercept': np.array([0.19886363636363633, -1.0738636363636362]),
'versicolor&1&virginica&14&logreg-intercept': np.array([-1.145390070921987, -0.145390070921986]),
'versicolor&1&virginica&14&logreg-no-intercept': np.array([-0.828125, 0.17187499999999997]),
'versicolor&1&virginica&15&tree-intercept': np.array([-0.24937937260212142, -0.493116677950801]),
'versicolor&1&virginica&15&tree-no-intercept': np.array([-0.5176470588235295, -0.6494117647058824]),
'versicolor&1&virginica&15&logreg-intercept': np.array([-1.0575490859851067, 0.27081922816520076]),
'versicolor&1&virginica&15&logreg-no-intercept': np.array([-0.9529411764705884, 0.331764705882353]),
'versicolor&1&virginica&16&tree-intercept': np.array([-0.3872714962762358, -0.295192958700068]),
'versicolor&1&virginica&16&tree-no-intercept': np.array([-0.48235294117647065, -0.35058823529411764]),
'versicolor&1&virginica&16&logreg-intercept': np.array([-1.0266305574362464, 0.341006544798015]),
'versicolor&1&virginica&16&logreg-no-intercept': np.array([-0.9294117647058823, 0.3976470588235294]),
'versicolor&1&virginica&17&tree-intercept': np.array([-0.0586718246292713, -1.3313990973565462]),
'versicolor&1&virginica&17&tree-no-intercept': np.array([0.19886363636363633, -1.0738636363636362]),
'versicolor&1&virginica&17&logreg-intercept': np.array([-0.9877498388136704, -0.2604771115409417]),
'versicolor&1&virginica&17&logreg-no-intercept': np.array([-0.6448863636363635, 0.08238636363636362]),
'virginica&0&setosa&0&tree-intercept': np.array([-1.9251012145748991, -0.018218623481781476]),
'virginica&0&setosa&0&tree-no-intercept': np.array([-1.0188679245283019, 0.3113207547169811]),
'virginica&0&setosa&0&logreg-intercept': np.array([-1.8346828609986505, -0.07624831309041748]),
'virginica&0&setosa&0&logreg-no-intercept': np.array([-0.9245283018867925, 0.2547169811320754]),
'virginica&0&setosa&1&tree-intercept': np.array([-1.9251012145748991, -0.018218623481781476]),
'virginica&0&setosa&1&tree-no-intercept': np.array([-1.0188679245283019, 0.3113207547169811]),
'virginica&0&setosa&1&logreg-intercept': np.array([-1.8346828609986505, -0.07624831309041748]),
'virginica&0&setosa&1&logreg-no-intercept': np.array([-0.9245283018867925, 0.2547169811320754]),
'virginica&0&setosa&2&tree-intercept': np.array([-1.9251012145748991, -0.018218623481781476]),
'virginica&0&setosa&2&tree-no-intercept': np.array([-1.0188679245283019, 0.3113207547169811]),
'virginica&0&setosa&2&logreg-intercept': np.array([-1.9251012145748991, -0.018218623481781476]),
'virginica&0&setosa&2&logreg-no-intercept': np.array([-1.0188679245283019, 0.3113207547169811]),
'virginica&0&setosa&3&tree-intercept': np.array([-1.699055330634276, -0.16329284750337328]),
'virginica&0&setosa&3&tree-no-intercept': np.array([-0.9905660377358491, 0.09433962264150945]),
'virginica&0&setosa&3&logreg-intercept': np.array([-1.7881241565452086, -0.1956815114709829]),
'virginica&0&setosa&3&logreg-no-intercept': np.array([-0.9905660377358491, 0.09433962264150945]),
'virginica&0&setosa&4&tree-intercept': np.array([-1.699055330634276, -0.16329284750337328]),
'virginica&0&setosa&4&tree-no-intercept': np.array([-0.9905660377358491, 0.09433962264150945]),
'virginica&0&setosa&4&logreg-intercept': np.array([-1.7881241565452086, -0.1956815114709829]),
'virginica&0&setosa&4&logreg-no-intercept': np.array([-0.9905660377358491, 0.09433962264150945]),
'virginica&0&setosa&5&tree-intercept': np.array([-1.699055330634276, -0.16329284750337328]),
'virginica&0&setosa&5&tree-no-intercept': np.array([-0.9905660377358491, 0.09433962264150945]),
'virginica&0&setosa&5&logreg-intercept': np.array([-1.7584345479082326, -0.1848852901484467]),
'virginica&0&setosa&5&logreg-no-intercept': np.array([-0.9905660377358491, 0.09433962264150945]),
'virginica&0&setosa&6&tree-intercept': np.array([0.6852879694370354, -0.38153247314928257]),
'virginica&0&setosa&6&tree-no-intercept': np.array([0.1337099811676083, -0.7401129943502825]),
'virginica&0&setosa&6&logreg-intercept': np.array([0.8640524760325801, -0.44424421538239667]),
'virginica&0&setosa&6&logreg-no-intercept': np.array([0.3220338983050848, -0.7966101694915255]),
'virginica&0&setosa&7&tree-intercept': np.array([0.6852879694370354, -0.38153247314928257]),
'virginica&0&setosa&7&tree-no-intercept': np.array([0.1337099811676083, -0.7401129943502825]),
'virginica&0&setosa&7&logreg-intercept': np.array([0.8640524760325801, -0.44424421538239667]),
'virginica&0&setosa&7&logreg-no-intercept': np.array([0.3220338983050848, -0.7966101694915255]),
'virginica&0&setosa&8&tree-intercept': np.array([0.6852879694370354, -0.38153247314928257]),
'virginica&0&setosa&8&tree-no-intercept': np.array([0.1337099811676083, -0.7401129943502825]),
'virginica&0&setosa&8&logreg-intercept': np.array([0.8640524760325801, -0.44424421538239667]),
'virginica&0&setosa&8&logreg-no-intercept': np.array([0.3220338983050848, -0.7966101694915255]),
'virginica&0&setosa&9&tree-intercept': np.array([-0.8016194331983796, -0.29149797570850244]),
'virginica&0&setosa&9&tree-no-intercept': np.array([-0.9339622641509434, -0.3396226415094339]),
'virginica&0&setosa&9&logreg-intercept': np.array([-1.1282051282051282, -0.4102564102564085]),
'virginica&0&setosa&9&logreg-no-intercept': np.array([-0.9339622641509434, -0.3396226415094339]),
'virginica&0&setosa&10&tree-intercept': np.array([-0.8016194331983796, -0.29149797570850244]),
'virginica&0&setosa&10&tree-no-intercept': np.array([-0.9339622641509434, -0.3396226415094339]),
'virginica&0&setosa&10&logreg-intercept': np.array([-1.0985155195681513, -0.3994601889338714]),
'virginica&0&setosa&10&logreg-no-intercept': np.array([-0.9339622641509434, -0.3396226415094339]),
'virginica&0&setosa&11&tree-intercept': np.array([-0.8016194331983796, -0.29149797570850244]),
'virginica&0&setosa&11&tree-no-intercept': np.array([-0.9339622641509434, -0.3396226415094339]),
'virginica&0&setosa&11&logreg-intercept': np.array([-1.0094466936572193, -0.3670715249662607]),
'virginica&0&setosa&11&logreg-no-intercept': np.array([-0.9339622641509434, -0.3396226415094339]),
'virginica&0&setosa&12&tree-intercept': np.array([-0.7200000000000022, -0.48648648648648657]),
'virginica&0&setosa&12&tree-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&12&logreg-intercept': np.array([-0.6666666666666671, -0.5045045045045023]),
'virginica&0&setosa&12&logreg-no-intercept': np.array([-0.8670360110803325, -0.6398891966759003]),
'virginica&0&setosa&13&tree-intercept': np.array([-0.7200000000000022, -0.48648648648648657]),
'virginica&0&setosa&13&tree-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&13&logreg-intercept': np.array([-0.6666666666666671, -0.5045045045045023]),
'virginica&0&setosa&13&logreg-no-intercept': np.array([-0.8670360110803325, -0.6398891966759003]),
'virginica&0&setosa&14&tree-intercept': np.array([-0.7200000000000022, -0.48648648648648657]),
'virginica&0&setosa&14&tree-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&14&logreg-intercept': np.array([-0.7200000000000022, -0.48648648648648657]),
'virginica&0&setosa&14&logreg-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&15&tree-intercept': np.array([-0.5866666666666676, -0.3963963963963961]),
'virginica&0&setosa&15&tree-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&15&logreg-intercept': np.array([-0.6400000000000003, -0.4324324324324308]),
'virginica&0&setosa&15&logreg-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&16&tree-intercept': np.array([-0.5866666666666676, -0.3963963963963961]),
'virginica&0&setosa&16&tree-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&16&logreg-intercept': np.array([-0.6400000000000003, -0.4324324324324308]),
'virginica&0&setosa&16&logreg-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&17&tree-intercept': np.array([-0.5866666666666676, -0.3963963963963961]),
'virginica&0&setosa&17&tree-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&setosa&17&logreg-intercept': np.array([-0.6133333333333348, -0.4144144144144144]),
'virginica&0&setosa&17&logreg-no-intercept': np.array([-0.9224376731301939, -0.6232686980609419]),
'virginica&0&versicolor&0&tree-intercept': np.array([1.7901484480431848, -0.939946018893386]),
'virginica&0&versicolor&0&tree-no-intercept': np.array([0.9245283018867925, -1.2547169811320753]),
'virginica&0&versicolor&0&logreg-intercept': np.array([1.6180836707152488, -0.41160593792172706]),
'virginica&0&versicolor&0&logreg-no-intercept': np.array([0.7264150943396226, -0.7358490566037734]),
'virginica&0&versicolor&1&tree-intercept': np.array([1.6727395411605928, -1.073549257759782]),
'virginica&0&versicolor&1&tree-no-intercept': np.array([0.8113207547169812, -1.3867924528301885]),
'virginica&0&versicolor&1&logreg-intercept': np.array([1.5728744939271269, -0.38259109311740913]),
'virginica&0&versicolor&1&logreg-no-intercept': np.array([0.679245283018868, -0.7075471698113207]),
'virginica&0&versicolor&2&tree-intercept': np.array([1.401484480431849, -0.8994601889338717]),
'virginica&0&versicolor&2&tree-no-intercept': np.array([0.5283018867924528, -1.2169811320754715]),
'virginica&0&versicolor&2&logreg-intercept': np.array([1.5728744939271269, -0.38259109311740913]),
'virginica&0&versicolor&2&logreg-no-intercept': np.array([0.679245283018868, -0.7075471698113207]),
'virginica&0&versicolor&3&tree-intercept': np.array([1.6120107962213228, -1.0047233468286092]),
'virginica&0&versicolor&3&tree-no-intercept': np.array([0.9245283018867925, -1.2547169811320753]),
'virginica&0&versicolor&3&logreg-intercept': np.array([1.571524966261808, -0.2921727395411606]),
'virginica&0&versicolor&3&logreg-no-intercept': np.array([0.7924528301886793, -0.5754716981132075]),
'virginica&0&versicolor&4&tree-intercept': np.array([1.4946018893387312, -1.138326585695005]),
'virginica&0&versicolor&4&tree-no-intercept': np.array([0.8113207547169812, -1.3867924528301885]),
'virginica&0&versicolor&4&logreg-intercept': np.array([1.48110661268556, -0.23414304993252388]),
'virginica&0&versicolor&4&logreg-no-intercept': np.array([0.6981132075471699, -0.5188679245283018]),
'virginica&0&versicolor&5&tree-intercept': np.array([1.253036437246964, -0.9534412955465583]),
'virginica&0&versicolor&5&tree-no-intercept': np.array([0.5283018867924528, -1.2169811320754715]),
'virginica&0&versicolor&5&logreg-intercept': np.array([1.2253711201079616, -0.09986504723346863]),
'virginica&0&versicolor&5&logreg-no-intercept': np.array([0.46226415094339623, -0.3773584905660377]),
'virginica&0&versicolor&6&tree-intercept': np.array([-0.65270669646075, -1.000864989548041]),
'virginica&0&versicolor&6&tree-no-intercept': np.array([-0.12994350282485873, -0.6610169491525425]),
'virginica&0&versicolor&6&logreg-intercept': np.array([0.03820370503856402, -0.408563396525625]),
'virginica&0&versicolor&6&logreg-no-intercept': np.array([-0.2806026365348399, -0.615819209039548]),
'virginica&0&versicolor&7&tree-intercept': np.array([-0.5907157788510049, -1.1355150291933958]),
'virginica&0&versicolor&7&tree-no-intercept': np.array([-0.15630885122410548, -0.8531073446327684]),
'virginica&0&versicolor&7&logreg-intercept': np.array([0.03820370503856402, -0.408563396525625]),
'virginica&0&versicolor&7&logreg-no-intercept': np.array([-0.2806026365348399, -0.615819209039548]),
'virginica&0&versicolor&8&tree-intercept': np.array([-0.4804296114755274, -0.9564621927485036]),
'virginica&0&versicolor&8&tree-no-intercept': np.array([-0.2693032015065913, -0.8192090395480226]),
'virginica&0&versicolor&8&logreg-intercept': np.array([0.4380451236214222, -0.07705615223816006]),
'virginica&0&versicolor&8&logreg-no-intercept': np.array([-0.3559322033898305, -0.5932203389830509]),
'virginica&0&versicolor&9&tree-intercept': np.array([0.7624831309041832, -1.0863697705802955]),
'virginica&0&versicolor&9&tree-no-intercept': np.array([0.8962264150943396, -1.0377358490566035]),
'virginica&0&versicolor&9&logreg-intercept': np.array([0.9116059379217275, -0.07759784075573567]),
'virginica&0&versicolor&9&logreg-no-intercept': np.array([0.7358490566037736, -0.14150943396226418]),
'virginica&0&versicolor&10&tree-intercept': np.array([0.7523616734143053, -1.4082321187584337]),
'virginica&0&versicolor&10&tree-no-intercept': np.array([0.8113207547169812, -1.3867924528301885]),
'virginica&0&versicolor&10&logreg-intercept': np.array([0.7914979757085017, -0.03036437246963573]),
'virginica&0&versicolor&10&logreg-no-intercept': np.array([0.6415094339622641, -0.08490566037735847]),
'virginica&0&versicolor&11&tree-intercept': np.array([0.5998650472334681, -1.1909581646423728]),
'virginica&0&versicolor&11&tree-no-intercept': np.array([0.5283018867924528, -1.2169811320754715]),
'virginica&0&versicolor&11&logreg-intercept': np.array([0.47638326585694996, 0.0823211875843454]),
'virginica&0&versicolor&11&logreg-no-intercept': np.array([0.4056603773584906, 0.05660377358490561]),
'virginica&0&versicolor&12&tree-intercept': np.array([0.7466666666666678, -0.9009009009008994]),
'virginica&0&versicolor&12&tree-no-intercept': np.array([0.9279778393351802, -0.7783933518005541]),
'virginica&0&versicolor&12&logreg-intercept': np.array([1.360000000000002, -0.4864864864864854]),
'virginica&0&versicolor&12&logreg-no-intercept': np.array([0.9279778393351802, -0.7783933518005541]),
'virginica&0&versicolor&13&tree-intercept': np.array([0.6666666666666685, -1.225225225225224]),
'virginica&0&versicolor&13&tree-no-intercept': np.array([0.7950138504155125, -1.1385041551246537]),
'virginica&0&versicolor&13&logreg-intercept': np.array([1.360000000000002, -0.4864864864864854]),
'virginica&0&versicolor&13&logreg-no-intercept': np.array([0.9279778393351802, -0.7783933518005541]),
'virginica&0&versicolor&14&tree-intercept': np.array([0.6133333333333342, -1.0450450450450428]),
'virginica&0&versicolor&14&tree-no-intercept': np.array([0.5734072022160666, -1.0720221606648201]),
'virginica&0&versicolor&14&logreg-intercept': np.array([1.6000000000000019, -0.21621621621621567]),
'virginica&0&versicolor&14&logreg-no-intercept': np.array([0.817174515235457, -0.7451523545706371]),
'virginica&0&versicolor&15&tree-intercept': np.array([0.6133333333333342, -0.9909909909909898]),
'virginica&0&versicolor&15&tree-no-intercept': np.array([0.9279778393351802, -0.7783933518005541]),
'virginica&0&versicolor&15&logreg-intercept': np.array([1.2800000000000025, -0.7567567567567552]),
'virginica&0&versicolor&15&logreg-no-intercept': np.array([0.9445983379501386, -0.9833795013850416]),
'virginica&0&versicolor&16&tree-intercept': np.array([0.5333333333333344, -1.3153153153153148]),
'virginica&0&versicolor&16&tree-no-intercept': np.array([0.7950138504155125, -1.1385041551246537]),
'virginica&0&versicolor&16&logreg-intercept': np.array([1.2266666666666681, -0.7387387387387381]),
'virginica&0&versicolor&16&logreg-no-intercept': np.array([0.8891966759002771, -0.9667590027700831]),
'virginica&0&versicolor&17&tree-intercept': np.array([0.5066666666666672, -1.1171171171171137]),
'virginica&0&versicolor&17&tree-no-intercept': np.array([0.5734072022160666, -1.0720221606648201]),
'virginica&0&versicolor&17&logreg-intercept': np.array([1.3333333333333346, -0.45045045045044946]),
'virginica&0&versicolor&17&logreg-no-intercept': np.array([0.667590027700831, -0.9002770083102495]),
'virginica&0&virginica&0&tree-intercept': np.array([0.13495276653171426, 0.9581646423751661]),
'virginica&0&virginica&0&tree-no-intercept': np.array([-0.839622641509434, 0.6037735849056604]),
'virginica&0&virginica&0&logreg-intercept': np.array([0.2165991902834008, 0.487854251012143]),
'virginica&0&virginica&0&logreg-no-intercept': np.array([-0.7358490566037736, 0.14150943396226418]),
'virginica&0&virginica&1&tree-intercept': np.array([0.2523616734143047, 1.091767881241564]),
'virginica&0&virginica&1&tree-no-intercept': np.array([-0.7264150943396226, 0.7358490566037734]),
'virginica&0&virginica&1&logreg-intercept': np.array([0.2618083670715251, 0.45883940620782776]),
'virginica&0&virginica&1&logreg-no-intercept': np.array([-0.6886792452830189, 0.11320754716981131]),
'virginica&0&virginica&2&tree-intercept': np.array([0.5236167341430509, 0.9176788124156552]),
'virginica&0&virginica&2&tree-no-intercept': np.array([-0.44339622641509435, 0.5660377358490565]),
'virginica&0&virginica&2&logreg-intercept': np.array([0.3522267206477735, 0.40080971659918874]),
'virginica&0&virginica&2&logreg-no-intercept': np.array([-0.5943396226415094, 0.056603773584905655]),
'virginica&0&virginica&3&tree-intercept': np.array([0.0870445344129551, 1.1680161943319844]),
'virginica&0&virginica&3&tree-no-intercept': np.array([-0.8679245283018869, 0.820754716981132]),
'virginica&0&virginica&3&logreg-intercept': np.array([0.2165991902834008, 0.487854251012143]),
'virginica&0&virginica&3&logreg-no-intercept': np.array([-0.7358490566037736, 0.14150943396226418]),
'virginica&0&virginica&4&tree-intercept': np.array([0.2044534412955465, 1.3016194331983821]),
'virginica&0&virginica&4&tree-no-intercept': np.array([-0.7547169811320754, 0.9528301886792452]),
'virginica&0&virginica&4&logreg-intercept': np.array([0.3070175438596492, 0.42982456140350833]),
'virginica&0&virginica&4&logreg-no-intercept': np.array([-0.6415094339622641, 0.08490566037735847]),
'virginica&0&virginica&5&tree-intercept': np.array([0.4460188933873141, 1.1167341430499316]),
'virginica&0&virginica&5&tree-no-intercept': np.array([-0.4716981132075471, 0.7830188679245281]),
'virginica&0&virginica&5&logreg-intercept': np.array([0.5330634278002698, 0.2847503373819156]),
'virginica&0&virginica&5&logreg-no-intercept': np.array([-0.4056603773584906, -0.05660377358490561]),
'virginica&0&virginica&6&tree-intercept': np.array([-0.03258127297628498, 1.3823974626973203]),
'virginica&0&virginica&6&tree-no-intercept': np.array([-0.9510357815442562, 0.7853107344632769]),
'virginica&0&virginica&6&logreg-intercept': np.array([-0.9022561810711441, 0.852807611908022]),
'virginica&0&virginica&6&logreg-no-intercept': np.array([-0.9887005649717514, 0.7966101694915255]),
'virginica&0&virginica&7&tree-intercept': np.array([-0.09457219058603027, 1.5170475023426795]),
'virginica&0&virginica&7&tree-no-intercept': np.array([-0.9246704331450093, 0.9774011299435027]),
'virginica&0&virginica&7&logreg-intercept': np.array([-0.9022561810711441, 0.852807611908022]),
'virginica&0&virginica&7&logreg-no-intercept': np.array([-0.9887005649717514, 0.7966101694915255]),
'virginica&0&virginica&8&tree-intercept': np.array([-0.20485835796150728, 1.3379946658977826]),
'virginica&0&virginica&8&tree-no-intercept': np.array([-0.8116760828625236, 0.9435028248587572]),
'virginica&0&virginica&8&logreg-intercept': np.array([-1.3020975996540025, 0.5213003676205564]),
'virginica&0&virginica&8&logreg-no-intercept': np.array([-0.9133709981167608, 0.7740112994350282]),
'virginica&0&virginica&9&tree-intercept': np.array([0.039136302294197386, 1.377867746288793]),
'virginica&0&virginica&9&tree-no-intercept': np.array([-0.8962264150943396, 1.0377358490566035]),
'virginica&0&virginica&9&logreg-intercept': np.array([0.2165991902834008, 0.487854251012143]),
'virginica&0&virginica&9&logreg-no-intercept': np.array([-0.7358490566037736, 0.14150943396226418]),
'virginica&0&virginica&10&tree-intercept': np.array([0.04925775978407555, 1.6997300944669331]),
'virginica&0&virginica&10&tree-no-intercept': np.array([-0.8113207547169812, 1.3867924528301885]),
'virginica&0&virginica&10&logreg-intercept': np.array([0.3070175438596492, 0.42982456140350833]),
'virginica&0&virginica&10&logreg-no-intercept': np.array([-0.6415094339622641, 0.08490566037735847]),
'virginica&0&virginica&11&tree-intercept': np.array([0.20175438596491266, 1.4824561403508731]),
'virginica&0&virginica&11&tree-no-intercept': np.array([-0.5283018867924528, 1.2169811320754715]),
'virginica&0&virginica&11&logreg-intercept': np.array([0.5330634278002698, 0.2847503373819156]),
'virginica&0&virginica&11&logreg-no-intercept': np.array([-0.4056603773584906, -0.05660377358490561]),
'virginica&0&virginica&12&tree-intercept': np.array([-0.026666666666666738, 1.3873873873873812]),
'virginica&0&virginica&12&tree-no-intercept': np.array([-0.9279778393351802, 0.7783933518005541]),
'virginica&0&virginica&12&logreg-intercept': np.array([-0.6933333333333341, 0.9909909909909898]),
'virginica&0&virginica&12&logreg-no-intercept': np.array([-0.9833795013850415, 0.7950138504155126]),
'virginica&0&virginica&13&tree-intercept': np.array([0.05333333333333337, 1.7117117117117082]),
'virginica&0&virginica&13&tree-no-intercept': np.array([-0.7950138504155125, 1.1385041551246537]),
'virginica&0&virginica&13&logreg-intercept': np.array([-0.6933333333333341, 0.9909909909909898]),
'virginica&0&virginica&13&logreg-no-intercept': np.array([-0.9833795013850415, 0.7950138504155126]),
'virginica&0&virginica&14&tree-intercept': np.array([0.10666666666666685, 1.5315315315315274]),
'virginica&0&virginica&14&tree-no-intercept': np.array([-0.5734072022160666, 1.0720221606648201]),
'virginica&0&virginica&14&logreg-intercept': np.array([-0.8800000000000017, 0.702702702702702]),
'virginica&0&virginica&14&logreg-no-intercept': np.array([-0.817174515235457, 0.7451523545706371]),
'virginica&0&virginica&15&tree-intercept': np.array([-0.026666666666666738, 1.3873873873873812]),
'virginica&0&virginica&15&tree-no-intercept': np.array([-0.9279778393351802, 0.7783933518005541]),
'virginica&0&virginica&15&logreg-intercept': np.array([-0.6400000000000012, 1.1891891891891901]),
'virginica&0&virginica&15&logreg-no-intercept': np.array([-0.9445983379501386, 0.9833795013850416]),
'virginica&0&virginica&16&tree-intercept': np.array([0.05333333333333337, 1.7117117117117082]),
'virginica&0&virginica&16&tree-no-intercept': np.array([-0.7950138504155125, 1.1385041551246537]),
'virginica&0&virginica&16&logreg-intercept': np.array([-0.5866666666666672, 1.1711711711711674]),
'virginica&0&virginica&16&logreg-no-intercept': np.array([-0.8891966759002771, 0.9667590027700831]),
'virginica&0&virginica&17&tree-intercept': np.array([0.08000000000000022, 1.5135135135135112]),
'virginica&0&virginica&17&tree-no-intercept': np.array([-0.5734072022160666, 1.0720221606648201]),
'virginica&0&virginica&17&logreg-intercept': np.array([-0.7200000000000009, 0.8648648648648636]),
'virginica&0&virginica&17&logreg-no-intercept': np.array([-0.667590027700831, 0.9002770083102495]),
'virginica&1&setosa&0&tree-intercept': np.array([-1.926647564469912, -0.011461318051577027]),
'virginica&1&setosa&0&tree-no-intercept': np.array([-1.2343373493975904, 0.5493975903614458]),
'virginica&1&setosa&0&logreg-intercept': np.array([-1.8834765998089784, -0.04737344794651441]),
'virginica&1&setosa&0&logreg-no-intercept': np.array([-1.1825301204819278, 0.5204819277108433]),
'virginica&1&setosa&1&tree-intercept': np.array([-1.926647564469912, -0.011461318051577027]),
'virginica&1&setosa&1&tree-no-intercept': np.array([-1.2343373493975904, 0.5493975903614458]),
'virginica&1&setosa&1&logreg-intercept': np.array([-1.8834765998089784, -0.04737344794651441]),
'virginica&1&setosa&1&logreg-no-intercept': np.array([-1.1825301204819278, 0.5204819277108433]),
'virginica&1&setosa&2&tree-intercept': np.array([-1.926647564469912, -0.011461318051577027]),
'virginica&1&setosa&2&tree-no-intercept': np.array([-1.2343373493975904, 0.5493975903614458]),
'virginica&1&setosa&2&logreg-intercept': np.array([-1.926647564469912, -0.011461318051577027]),
'virginica&1&setosa&2&logreg-no-intercept': np.array([-1.2343373493975904, 0.5493975903614458]),
'virginica&1&setosa&3&tree-intercept': np.array([-1.7039159503342882, 0.08290353390639874]),
'virginica&1&setosa&3&tree-no-intercept': np.array([-1.205421686746988, 0.48674698795180715]),
'virginica&1&setosa&3&logreg-intercept': np.array([-1.7944603629417377, 0.00955109837631287]),
'virginica&1&setosa&3&logreg-no-intercept': np.array([-1.205421686746988, 0.48674698795180715]),
'virginica&1&setosa&4&tree-intercept': np.array([-1.7039159503342882, 0.08290353390639874]),
'virginica&1&setosa&4&tree-no-intercept': np.array([-1.205421686746988, 0.48674698795180715]),
'virginica&1&setosa&4&logreg-intercept': np.array([-1.7944603629417377, 0.00955109837631287]),
'virginica&1&setosa&4&logreg-no-intercept': np.array([-1.205421686746988, 0.48674698795180715]),
'virginica&1&setosa&5&tree-intercept': np.array([-1.7039159503342882, 0.08290353390639874]),
'virginica&1&setosa&5&tree-no-intercept': np.array([-1.205421686746988, 0.48674698795180715]),
'virginica&1&setosa&5&logreg-intercept': np.array([-1.7642788920725883, 0.03400191021967489]),
'virginica&1&setosa&5&logreg-no-intercept': np.array([-1.205421686746988, 0.48674698795180715]),
'virginica&1&setosa&6&tree-intercept': np.array([0.8549874266554914, 0.44949706621961516]),
'virginica&1&setosa&6&tree-no-intercept': np.array([0.03936629860777728, -0.29188670187229965]),
'virginica&1&setosa&6&logreg-intercept': np.array([1.0163453478625313, 0.3413139145012578]),
'virginica&1&setosa&6&logreg-no-intercept': np.array([0.24579932789246284, -0.35909745559289497]),
'virginica&1&setosa&7&tree-intercept': np.array([0.8549874266554914, 0.44949706621961516]),
'virginica&1&setosa&7&tree-no-intercept': np.array([0.03936629860777728, -0.29188670187229965]),
'virginica&1&setosa&7&logreg-intercept': np.array([1.0163453478625313, 0.3413139145012578]),
'virginica&1&setosa&7&logreg-no-intercept': np.array([0.24579932789246284, -0.35909745559289497]),
'virginica&1&setosa&8&tree-intercept': np.array([0.8549874266554914, 0.44949706621961516]),
'virginica&1&setosa&8&tree-no-intercept': np.array([0.03936629860777728, -0.29188670187229965]),
'virginica&1&setosa&8&logreg-intercept': np.array([1.0163453478625313, 0.3413139145012578]),
'virginica&1&setosa&8&logreg-no-intercept': np.array([0.24579932789246284, -0.35909745559289497]),
'virginica&1&setosa&9&tree-intercept': np.array([-0.5902578796561612, 0.5547277936962749]),
'virginica&1&setosa&9&tree-no-intercept': np.array([-1.060843373493976, 0.17349397590361448]),
'virginica&1&setosa&9&logreg-intercept': np.array([-0.9222540592168101, 0.285768863419293]),
'virginica&1&setosa&9&logreg-no-intercept': np.array([-1.060843373493976, 0.17349397590361448]),
'virginica&1&setosa&10&tree-intercept': np.array([-0.5902578796561612, 0.5547277936962749]),
'virginica&1&setosa&10&tree-no-intercept': np.array([-1.060843373493976, 0.17349397590361448]),
'virginica&1&setosa&10&logreg-intercept': np.array([-0.8920725883476605, 0.3102196752626554]),
'virginica&1&setosa&10&logreg-no-intercept': np.array([-1.060843373493976, 0.17349397590361448]),
'virginica&1&setosa&11&tree-intercept': np.array([-0.5902578796561612, 0.5547277936962749]),
'virginica&1&setosa&11&tree-no-intercept': np.array([-1.060843373493976, 0.17349397590361448]),
'virginica&1&setosa&11&logreg-intercept': np.array([-0.80152817574021, 0.3835721107927413]),
'virginica&1&setosa&11&logreg-no-intercept': np.array([-1.060843373493976, 0.17349397590361448]),
'virginica&1&setosa&12&tree-intercept': np.array([-0.3039161610590191, 0.3447324875896295]),
'virginica&1&setosa&12&tree-no-intercept': np.array([-0.9365692222835079, -0.2879205736348593]),
'virginica&1&setosa&12&logreg-intercept': np.array([-0.26328369185512007, 0.33131090273947433]),
'virginica&1&setosa&12&logreg-no-intercept': np.array([-0.889134031991175, -0.29453943739658023]),
'virginica&1&setosa&13&tree-intercept': np.array([-0.3039161610590191, 0.3447324875896295]),
'virginica&1&setosa&13&tree-no-intercept': np.array([-0.9365692222835079, -0.2879205736348593]),
'virginica&1&setosa&13&logreg-intercept': np.array([-0.26328369185512007, 0.33131090273947433]),
'virginica&1&setosa&13&logreg-no-intercept': np.array([-0.889134031991175, -0.29453943739658023]),
'virginica&1&setosa&14&tree-intercept': np.array([-0.3039161610590191, 0.3447324875896295]),
'virginica&1&setosa&14&tree-no-intercept': np.array([-0.9365692222835079, -0.2879205736348593]),
'virginica&1&setosa&14&logreg-intercept': np.array([-0.3039161610590191, 0.3447324875896295]),
'virginica&1&setosa&14&logreg-no-intercept': np.array([-0.9365692222835079, -0.2879205736348593]),
'virginica&1&setosa&15&tree-intercept': np.array([-0.22945394373965736, 0.31108659680088324]),
'virginica&1&setosa&15&tree-no-intercept': np.array([-0.9233314947600662, -0.3827909542195256]),
'virginica&1&setosa&15&logreg-intercept': np.array([-0.27707299135870644, 0.26346754918183396]),
'virginica&1&setosa&15&logreg-no-intercept': np.array([-0.9233314947600662, -0.3827909542195256]),
'virginica&1&setosa&16&tree-intercept': np.array([-0.22945394373965736, 0.31108659680088324]),
'virginica&1&setosa&16&tree-no-intercept': np.array([-0.9233314947600662, -0.3827909542195256]),
'virginica&1&setosa&16&logreg-intercept': np.array([-0.27707299135870644, 0.26346754918183396]),
'virginica&1&setosa&16&logreg-no-intercept': np.array([-0.9233314947600662, -0.3827909542195256]),
'virginica&1&setosa&17&tree-intercept': np.array([-0.22945394373965736, 0.31108659680088324]),
'virginica&1&setosa&17&tree-no-intercept': np.array([-0.9233314947600662, -0.3827909542195256]),
'virginica&1&setosa&17&logreg-intercept': np.array([-0.27707299135870644, 0.26346754918183396]),
'virginica&1&setosa&17&logreg-no-intercept': np.array([-0.9233314947600662, -0.3827909542195256]),
'virginica&1&versicolor&0&tree-intercept': np.array([1.3558739255014327, -0.7243553008595989]),
'virginica&1&versicolor&0&tree-no-intercept': np.array([0.8222891566265061, -1.1566265060240963]),
'virginica&1&versicolor&0&logreg-intercept': np.array([1.4368672397325692, -0.25367717287488045]),
'virginica&1&versicolor&0&logreg-no-intercept': np.array([0.7981927710843375, -0.7710843373493975]),
'virginica&1&versicolor&1&tree-intercept': np.array([1.0792741165233992, -0.8978032473734482]),
'virginica&1&versicolor&1&tree-no-intercept': np.array([0.5813253012048193, -1.3012048192771084]),
'virginica&1&versicolor&1&logreg-intercept': np.array([1.3619866284622721, -0.25864374403056306]),
'virginica&1&versicolor&1&logreg-no-intercept': np.array([0.7234939759036145, -0.7759036144578312]),
'virginica&1&versicolor&2&tree-intercept': np.array([0.9065902578796557, -0.7541547277936966]),
'virginica&1&versicolor&2&tree-no-intercept': np.array([0.3740963855421687, -1.1855421686746987]),
'virginica&1&versicolor&2&logreg-intercept': np.array([1.4051575931232085, -0.29455587392550103]),
'virginica&1&versicolor&2&logreg-no-intercept': np.array([0.7753012048192772, -0.8048192771084337]),
'virginica&1&versicolor&3&tree-intercept': np.array([1.1747851002865326, -0.8710601719197715]),
'virginica&1&versicolor&3&tree-no-intercept': np.array([0.8222891566265061, -1.1566265060240963]),
'virginica&1&versicolor&3&logreg-intercept': np.array([1.3161413562559687, -0.35148042024832815]),
'virginica&1&versicolor&3&logreg-no-intercept': np.array([0.7981927710843375, -0.7710843373493975]),
'virginica&1&versicolor&4&tree-intercept': np.array([0.8981852913085002, -1.0445081184336202]),
'virginica&1&versicolor&4&tree-no-intercept': np.array([0.5813253012048193, -1.3012048192771084]),
'virginica&1&versicolor&4&logreg-intercept': np.array([1.1144221585482328, -0.5199617956064946]),
'virginica&1&versicolor&4&logreg-no-intercept': np.array([0.6319277108433735, -0.9108433734939758]),
'virginica&1&versicolor&5&tree-intercept': np.array([0.7556829035339061, -0.8764087870105066]),
'virginica&1&versicolor&5&tree-no-intercept': np.array([0.3740963855421687, -1.1855421686746987]),
'virginica&1&versicolor&5&logreg-intercept': np.array([0.9978987583572108, -0.47258834765998076]),
'virginica&1&versicolor&5&logreg-no-intercept': np.array([0.5283132530120482, -0.853012048192771]),
'virginica&1&versicolor&6&tree-intercept': np.array([-0.5444258172673938, -1.339532690695726]),
'virginica&1&versicolor&6&tree-no-intercept': np.array([0.11905904944791174, -0.7364378300528086]),
'virginica&1&versicolor&6&logreg-intercept': np.array([0.19488683989941338, -0.76702640402347]),
'virginica&1&versicolor&6&logreg-no-intercept': np.array([0.04896783485357669, -0.8996639462313973]),
'virginica&1&versicolor&7&tree-intercept': np.array([-0.3369656328583412, -1.478625314333614]),
'virginica&1&versicolor&7&tree-no-intercept': np.array([0.15842534805568897, -1.0283245319251082]),
'virginica&1&versicolor&7&logreg-intercept': np.array([0.19488683989941338, -0.76702640402347]),
'virginica&1&versicolor&7&logreg-no-intercept': np.array([0.04896783485357669, -0.8996639462313973]),
'virginica&1&versicolor&8&tree-intercept': np.array([-0.29337803855825706, -1.2351215423302608]),
'virginica&1&versicolor&8&tree-no-intercept': np.array([-0.00672107537205949, -0.974555928948632]),
'virginica&1&versicolor&8&logreg-intercept': np.array([0.518860016764459, -0.37059932942162654]),
'virginica&1&versicolor&8&logreg-no-intercept': np.array([-0.0336053768602976, -0.872779644743159]),
'virginica&1&versicolor&9&tree-intercept': np.array([0.2276981852913078, -1.552244508118433]),
'virginica&1&versicolor&9&tree-no-intercept': np.array([0.7933734939759037, -1.0939759036144578]),
'virginica&1&versicolor&9&logreg-intercept': np.array([0.4439350525310407, -0.6276981852913086]),
'virginica&1&versicolor&9&logreg-no-intercept': np.array([0.6536144578313254, -0.4578313253012048]),
'virginica&1&versicolor&10&tree-intercept': np.array([0.0531041069723017, -1.7291308500477556]),
'virginica&1&versicolor&10&tree-no-intercept': np.array([0.5813253012048193, -1.3012048192771084]),
'virginica&1&versicolor&10&logreg-intercept': np.array([0.2120343839541545, -0.8206303724928373]),
'virginica&1&versicolor&10&logreg-no-intercept': np.array([0.48734939759036144, -0.597590361445783]),
'virginica&1&versicolor&11&tree-intercept': np.array([0.061509073543457256, -1.4387774594078315]),
'virginica&1&versicolor&11&tree-no-intercept': np.array([0.3740963855421687, -1.1855421686746987]),
'virginica&1&versicolor&11&logreg-intercept': np.array([0.035148042024832724, -0.8221585482330472]),
'virginica&1&versicolor&11&logreg-no-intercept': np.array([0.3837349397590361, -0.5397590361445782]),
'virginica&1&versicolor&12&tree-intercept': np.array([0.17135502849788356, -1.450266593123736]),
'virginica&1&versicolor&12&tree-no-intercept': np.array([0.8516271373414229, -0.7699944842801983]),
'virginica&1&versicolor&12&logreg-intercept': np.array([1.1500275785990097, -0.7418643132928826]),
'virginica&1&versicolor&12&logreg-no-intercept': np.array([0.966354109211252, -0.9255377826806398]),
'virginica&1&versicolor&13&tree-intercept': np.array([0.19102776245633107, -1.6468100753815054]),
'virginica&1&versicolor&13&tree-no-intercept': np.array([0.7556536127964699, -1.0821842250413676]),
'virginica&1&versicolor&13&logreg-intercept': np.array([1.1500275785990097, -0.7418643132928826]),
'virginica&1&versicolor&13&logreg-no-intercept': np.array([0.966354109211252, -0.9255377826806398]),
'virginica&1&versicolor&14&tree-intercept': np.array([0.13770913770913668, -1.3758043758043756]),
'virginica&1&versicolor&14&tree-no-intercept': np.array([0.471042471042471, -1.0424710424710422]),
'virginica&1&versicolor&14&logreg-intercept': np.array([1.4973340687626444, -0.28644971502113953]),
'virginica&1&versicolor&14&logreg-no-intercept': np.array([0.8714837286265857, -0.9123000551571979]),
'virginica&1&versicolor&15&tree-intercept': np.array([0.12373598087883601, -1.4978856407427865]),
'virginica&1&versicolor&15&tree-no-intercept': np.array([0.8516271373414229, -0.7699944842801983]),
'virginica&1&versicolor&15&logreg-intercept': np.array([1.0889869461298038, -0.7488508917080328]),
'virginica&1&versicolor&15&logreg-no-intercept':
|
np.array([0.9597352454495311, -0.8781025923883066])
|
numpy.array
|
# -*- coding: utf-8 -*-
'''
example usage:
%matplotlib inline
import matplotlib.pyplot as plt
from pymr.heart import ahaseg
heart_mask = (LVbmask, LVwmask, RVbmask)
label_mask = ahaseg.get_seg(heart_mask, nseg=4)
plt.imshow(label_mask)
'''
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
def get_heartmask(heart_mask):
if isinstance(heart_mask, tuple):
#backward compatibility
LVbmask, LVwmask, RVbmask = heart_mask
else:
LVbmask = (heart_mask==1)
LVwmask = (heart_mask==2)
RVbmask = (heart_mask==3)
return LVbmask, LVwmask, RVbmask
def degree_calcu(UP, DN, seg_num):
anglelist = np.zeros(seg_num)
if seg_num == 4:
anglelist[0] = DN - 180.
anglelist[1] = UP
anglelist[2] = DN
anglelist[3] = UP + 180.
if seg_num == 6:
anglelist[0] = DN - 180.
anglelist[1] = UP
anglelist[2] = (UP + DN)/2.
anglelist[3] = DN
anglelist[4] = UP + 180.
anglelist[5] = anglelist[2] + 180.
anglelist = (anglelist + 360) % 360
return anglelist.astype(int)
def degree_calcu_new(mid, seg_num):
anglelist = np.zeros(seg_num)
if seg_num == 4:
anglelist[0] = mid - 45 - 90
anglelist[1] = mid - 45
anglelist[2] = mid + 45
anglelist[3] = mid + 45 + 90
if seg_num == 6:
anglelist[0] = mid - 120
anglelist[1] = mid - 60
anglelist[2] = mid
anglelist[3] = mid + 60
anglelist[4] = mid + 120
anglelist[5] = mid + 180
anglelist = (anglelist + 360) % 360
return anglelist.astype(int)
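# e.g. degree_calcu_new(mid=90, seg_num=4) returns [315, 45, 135, 225] (degrees).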
def circular_sector(r_range, theta_range, LV_center):
cx, cy = LV_center
theta = theta_range/180*np.pi
z = r_range.reshape(-1, 1).dot(np.exp(1.0j*theta).reshape(1, -1))
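# z holds every (r, theta) pair as r * exp(1j*theta); its imaginary/real parts are
# then offset by the LV center to give image coordinates of the circular sector.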
xall = -np.imag(z) + cx
yall = np.real(z) + cy
return xall, yall
def get_theta(sweep360):
from scipy.optimize import curve_fit
from scipy.signal import medfilt
y = sweep360.copy()
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
def fit(x, y):
#print(y)
p0 = [np.max(y), np.argmax(y)+x[0], 1.]
try:
coeff, var_matrix = curve_fit(gauss, x, y, p0=p0)
A, mu, sigma = coeff
except:
mu = 0
sigma = 0
return mu, sigma
y = medfilt(y)
y2 = np.hstack([y, y, y])
#y2 = medfilt(y2)
maxv = y2.argsort()[::-1][:10]
maxv = maxv[np.argmin(np.abs(maxv-360*3//2))]
#print('maxv:%d' % maxv, y2.argsort()[::-1][:10])
y2[:(maxv-90)] = 0
y2[(maxv+90):] = 0
#print(y2[(maxv-90):maxv])
x = np.arange(y2.size)
mu, sigma = fit(x[(maxv-150):maxv], y2[(maxv-150):maxv])
uprank1 = mu - sigma*2.5
mu, sigma = fit(x[maxv:(maxv+150)], y2[maxv:(maxv+150)])
downrank1 = mu + sigma*2.5
uprank2 = np.nonzero(y2 >= min(
|
np.max(y2)
|
numpy.max
|
import numpy as np
from gym import utils
from gym.envs.dart import dart_env
from .simple_water_world import BaseFluidSimulator
from .simple_water_world import BaseFluidEnhancedAllDirSimulator
from keras.models import load_model
class DartCubePathFindingDataCollectEnv(dart_env.DartEnv, utils.EzPickle):
def __init__(self):
control_bounds =
|
np.array([[1.0] * 2, [-1.0] * 2])
|
numpy.array
|
import numpy as np
import torch as torch
class ConvexConjugateFunction:
def __call__(self, *args, **kwargs):
pass
def grad(self, x):
pass
def convex_conjugate(self, x):
pass
def grad_convex_conjugate(self, x):
pass
class QuadraticFunction(ConvexConjugateFunction):
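# For symmetric positive-definite A this implements f(x) = x^T A x, whose convex
# conjugate is f*(y) = (1/4) y^T A^{-1} y, with gradients 2 A x and (1/2) A^{-1} y.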
def __init__(self, A, cuda=False, domain=(-1., +1.)):
self.domain = domain
self.convex_domain = domain
self.n = A.shape[1]
self.A = A
self.invA = np.linalg.inv(A)
self.A_torch = torch.from_numpy(self.A).float().view(-1, self.n, self.n)
self.invA_torch = torch.from_numpy(self.invA).float()
self.device = None
self.cuda() if cuda else self.cpu()
def __call__(self, x):
if isinstance(x, np.ndarray):
x = x.reshape((-1, self.n, 1))
quad = np.matmul(x.transpose(0, 2, 1), np.matmul(self.A.reshape((1, self.n, self.n)), x)).reshape(x.shape[0], 1, 1)
elif isinstance(x, torch.Tensor):
# shape = x.shape
x = x.view(-1, self.n, 1)
quad = torch.matmul(torch.matmul(x.transpose(dim0=1, dim1=2), self.A_torch), x).view(x.shape[0], 1, 1)
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return quad
def grad(self, x):
if isinstance(x, np.ndarray):
shape = x.shape
x = x.reshape((-1, self.n, 1))
Ax = np.matmul(self.A.reshape((1, self.n, self.n)), x).reshape(shape)
elif isinstance(x, torch.Tensor):
x = x.view(-1, self.n, 1)
Ax = torch.matmul(x.transpose(dim0=1, dim1=2), self.A_torch).squeeze()
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return 2. * Ax
def convex_conjugate(self, x):
if isinstance(x, np.ndarray):
shape = x.shape
x = x.reshape((-1, self.n, 1))
xTinvAx = np.matmul(x.transpose(0, 2, 1), np.matmul(self.invA.reshape((1, self.n, self.n)), x)).reshape(shape)
elif isinstance(x, torch.Tensor):
shape = x.shape
x = x.view(-1, self.n, 1)
xTinvAx = torch.matmul(torch.matmul(x.transpose(dim0=1, dim1=2), self.invA_torch), x).view(*shape)
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return 1./4. * xTinvAx
def grad_convex_conjugate(self, x):
if isinstance(x, np.ndarray):
x = x.reshape((-1, self.n, 1))
invAx = np.matmul(self.invA.reshape((1, self.n, self.n)), x)
elif isinstance(x, torch.Tensor):
shape = x.shape
x = x.view(-1, self.n, 1)
invAx = torch.matmul(x.transpose(dim0=1, dim1=2), self.invA_torch).view(*shape)
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return 1./2. * invAx
def cuda(self, device=None):
self.A_torch = self.A_torch.cuda()
self.invA_torch = self.invA_torch.cuda()
self.device = self.A_torch.device
return self
def cpu(self):
self.A_torch = self.A_torch.cpu()
self.invA_torch = self.invA_torch.cpu()
self.device = self.A_torch.device
return self
class HyperbolicTangent(ConvexConjugateFunction):
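# Barrier-style potential on (-a, a): g(x) = (b/2)[(a-x)log(a-x) + (a+x)log(a+x)] + const,
# so grad g(x) = b*arctanh(x/a); the conjugate (up to the constant offset) is
# a*b*log(cosh(y/b)) and grad g*(y) = a*tanh(y/b), which maps the real line onto (-a, a).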
def __init__(self, alpha=+1., beta=+1.0, cuda=False):
self.n = 1
self.a = alpha
self.b = beta
self.domain = (-np.abs(alpha), +np.abs(alpha))
self.convex_domain = (-10.0, +10.0)
assert self.a >= self.domain[0] and self.a <= self.domain[1]
# Compute Offset:
self.off = 0.0
self.off = -self(np.zeros(1))
def __call__(self, x):
if isinstance(x, np.ndarray):
assert np.all((self.a - np.abs(x)) >= 0.0)
g = np.ones(x.size) * self.b * self.a * np.log(2. * self.a) + self.off
mask = self.a - np.abs(x) > 0
g[mask] = 0.5 * self.b * ((self.a - x[mask]) * np.log(self.a - x[mask]) +
(self.a + x[mask]) * np.log(self.a + x[mask])) + self.off
elif isinstance(x, torch.Tensor):
x = x.view(-1, self.n, 1)
g = 0.5 * self.b * (torch.matmul((self.a - x).transpose(dim0=1, dim1=2), torch.log(self.a - x)) +
torch.matmul((self.a + x).transpose(dim0=1, dim1=2), torch.log(self.a + x))) + self.off
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return g.squeeze()
def grad(self, x):
if isinstance(x, np.ndarray):
g_grad = self.b * np.arctanh(x/self.a)
elif isinstance(x, torch.Tensor):
g_grad = self.b * 0.5 * torch.log((1+x/self.a)/(1-x/self.a))
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return g_grad
def convex_conjugate(self, x):
if isinstance(x, np.ndarray):
# Naive implementation:
# g_star = self.a * self.b * np.log(np.cosh(x / self.b))
# Numerically stable implementation to prevent overflows of exp(|x|):
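# cosh(z) = (e^z + e^-z)/2 = e^|z| (1 + e^{-2|z|}) / 2, hence
# log(cosh(z)) = log(0.5) + |z| + log(1 + exp(-2|z|)).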
g_star = self.a * self.b * (np.log(0.5) + np.abs(x / self.b) +
np.log(np.exp(-2. * np.abs(x / self.b)) + 1.0))
elif isinstance(x, torch.Tensor):
# Naive implementation:
# g_star = self.a * self.b * torch.log(torch.cosh(x / self.b))
# Numerically stable implementation to prevent overflows of exp(|x|):
g_star = self.a * self.b * (torch.log(torch.tensor(0.5)) + torch.abs(x/self.b) +
torch.log(torch.exp(-2. * torch.abs(x / self.b)) + 1.0))
else:
raise ValueError("x must be either an numpy.ndarray or torch.Tensor, but is type {0}.".format(type(x)))
return g_star
def grad_convex_conjugate(self, x):
if isinstance(x, np.ndarray):
g_star_grad = self.a *
|
np.tanh(x / self.b)
|
numpy.tanh
|
# Copyright (C) 2018, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
"""Dynamics model."""
import six
import abc
import theano
import numpy as np
import theano.tensor as T
from scipy.optimize import approx_fprime
from .autodiff import (as_function, batch_jacobian, hessian_vector,
jacobian_vector)
@six.add_metaclass(abc.ABCMeta)
class Dynamics():
"""Dynamics Model."""
@property
@abc.abstractmethod
def state_size(self):
"""State size."""
raise NotImplementedError
@property
@abc.abstractmethod
def action_size(self):
"""Action size."""
raise NotImplementedError
@property
@abc.abstractmethod
def has_hessians(self):
"""Whether the second order derivatives are available."""
raise NotImplementedError
@abc.abstractmethod
def f(self, x, u, i):
"""Dynamics model.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
Next state [state_size].
"""
raise NotImplementedError
@abc.abstractmethod
def f_x(self, x, u, i):
"""Partial derivative of dynamics model with respect to x.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
df/dx [state_size, state_size].
"""
raise NotImplementedError
@abc.abstractmethod
def f_u(self, x, u, i):
"""Partial derivative of dynamics model with respect to u.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
df/du [state_size, action_size].
"""
raise NotImplementedError
@abc.abstractmethod
def f_xx(self, x, u, i):
"""Second partial derivative of dynamics model with respect to x.
Note:
This is not necessary to implement if you're planning on skipping
Hessian evaluation as the iLQR implementation does by default.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
d^2f/dx^2 [state_size, state_size, state_size].
"""
raise NotImplementedError
@abc.abstractmethod
def f_ux(self, x, u, i):
"""Second partial derivative of dynamics model with respect to u and x.
Note:
This is not necessary to implement if you're planning on skipping
Hessian evaluation as the iLQR implementation does by default.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
d^2f/dudx [state_size, action_size, state_size].
"""
raise NotImplementedError
@abc.abstractmethod
def f_uu(self, x, u, i):
"""Second partial derivative of dynamics model with respect to u.
Note:
This is not necessary to implement if you're planning on skipping
Hessian evaluation as the iLQR implementation does by default.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
d^2f/du^2 [state_size, action_size, action_size].
"""
raise NotImplementedError
class AutoDiffDynamics(Dynamics):
"""Auto-differentiated Dynamics Model."""
def __init__(self, f, x_inputs, u_inputs, i=None, hessians=False, **kwargs):
"""Constructs an AutoDiffDynamics model.
Args:
f: Vector Theano tensor expression.
x_inputs: Theano state input variables.
u_inputs: Theano action input variables.
i: Theano tensor time step variable.
hessians: Evaluate the dynamic model's second order derivatives.
Default: only use first order derivatives. (i.e. iLQR instead
of DDP).
**kwargs: Additional keyword-arguments to pass to
`theano.function()`.
"""
self._tensor = f
self._i = T.dscalar("i") if i is None else i
non_t_inputs = np.hstack([x_inputs, u_inputs]).tolist()
inputs = np.hstack([x_inputs, u_inputs, self._i]).tolist()
self._x_inputs = x_inputs
self._u_inputs = u_inputs
self._inputs = inputs
self._non_t_inputs = non_t_inputs
x_dim = len(x_inputs)
u_dim = len(u_inputs)
self._state_size = x_dim
self._action_size = u_dim
self._J = jacobian_vector(f, non_t_inputs, x_dim)
self._f = as_function(f, inputs, name="f", **kwargs)
self._f_x = as_function(
self._J[:, :x_dim], inputs, name="f_x", **kwargs)
self._f_u = as_function(
self._J[:, x_dim:], inputs, name="f_u", **kwargs)
self._has_hessians = hessians
if hessians:
self._Q = hessian_vector(f, non_t_inputs, x_dim)
self._f_xx = as_function(
self._Q[:, :x_dim, :x_dim], inputs, name="f_xx", **kwargs)
self._f_ux = as_function(
self._Q[:, x_dim:, :x_dim], inputs, name="f_ux", **kwargs)
self._f_uu = as_function(
self._Q[:, x_dim:, x_dim:], inputs, name="f_uu", **kwargs)
super(AutoDiffDynamics, self).__init__()
@property
def state_size(self):
"""State size."""
return self._state_size
@property
def action_size(self):
"""Action size."""
return self._action_size
@property
def has_hessians(self):
"""Whether the second order derivatives are available."""
return self._has_hessians
@property
def tensor(self):
"""The dynamics model variable."""
return self._tensor
@property
def x(self):
"""The state variables."""
return self._x_inputs
@property
def u(self):
"""The control variables."""
return self._u_inputs
@property
def i(self):
"""The time step variable."""
return self._i
def f(self, x, u, i):
"""Dynamics model.
Args:
x: Current state [state_size].
u: Current control [action_size].
i: Current time step.
Returns:
Next state [state_size].
"""
z =
|
np.hstack([x, u, i])
|
numpy.hstack
|
# coding: utf-8  (turn the exception-handling parts into conditionals)
import time
import os
import heapq
import copy
import json
import sys
from math import pi, pow, sqrt, log, log10
import numpy as np
import tensorly as tl
from tensorly.tenalg import khatri_rao
from tensorly.random import random_kruskal
from scipy.special import digamma, gamma
from scipy.stats import zscore
from imageio import imread, imsave
import scipy.signal
from numpy.linalg import norm, svd, inv, det
import matplotlib
matplotlib.use('Agg')
from utils.utils import *
def update(Y, O, params, N, maxRank, maxiters, tol=1e-5, verbose=0):
Z0 = params['Z']
ZSigma0 = params['ZSigma']
coefficient = params['coefficient']
EZZT = params['EZZT']
EZZT_t_pre = params['EZZT_t_pre']
Z_pre = params['Z_pre']
O_pre = params['O_pre']
a_tau0 = params['a_tau0']
b_tau0 = params['b_tau0']
tau = params['tau']
L_pre = params['L_pre']
# Model learning
R = maxRank
nObs = np.sum(O)
LB = []
dimY = Y.shape
C = np.expand_dims(Y, 2)
Z = []
ZSigma = []
for n in range(N - 1):
Z.append(np.random.randn(dimY[n], R)) # E[A ^ (n)]
ZSigma.append(np.zeros([R, R, dimY[n]])) # covariance of A ^ (n)
for i in range(dimY[n]):
ZSigma[n][:, :, i] = np.eye(R)
#if init == 'rand':
Z0_t = np.random.randn(1, R)
Z_t = np.random.randn(1, R)
# elif init == 'ml':
# Z0_t = np.expand_dims(z_tmp[t, :], axis=0)
# Z_t = np.expand_dims(z_tmp[t, :], axis=0)
ZSigma0_t = np.expand_dims(np.eye(R), 2)
ZSigma_t = np.eye(R)
EZZT_t = np.reshape(ZSigma0_t, [R * R, 1], 'F').T
sigma_E0 = np.ones([dimY[0], dimY[1], 1])
E0 = np.zeros([dimY[0], dimY[1], 1])
E = np.zeros_like(E0)
sigma_E = np.ones_like(sigma_E0)
a_tauN = 1e-6
b_tauN = 1e-6
for it in range(maxiters):
for n in range(N-1):
ENZZT = np.reshape(np.dot(khatri_rao([EZZT[0], EZZT[1], EZZT_t], skip_matrix=n, reverse=bool).T,
unfold(O, n).T), [R, R, dimY[n]], 'F')
ENZZT_prev = []
for i in range(len(coefficient)):
tmp = np.reshape(np.dot(khatri_rao([EZZT[0], EZZT[1], EZZT_t_pre[i]], skip_matrix=n, reverse=bool).T,
unfold(O, n).T), [R, R, dimY[n]], 'F')
ENZZT_prev.append(tmp)
FslashY = np.dot(khatri_rao([Z[0], Z[1], Z_t], skip_matrix=n, reverse=bool).T,
unfold((C - E) * O, n).T)
for i in range(len(coefficient)):
FslashY += coefficient[i] * np.dot(khatri_rao([Z[0], Z[1], Z_pre[i]], skip_matrix=n, reverse=bool).T,
unfold(L_pre[i] * O_pre[i], n).T)
for i in range(dimY[n]):
ENZZT_sum = ENZZT[:, :, i]
for j in range(len(coefficient)):
ENZZT_sum += coefficient[j] * ENZZT_prev[j][:, :, i]
ZSigma[n][:, :, i] = inv(tau * ENZZT_sum + inv(ZSigma0[n][:, :, i]))
Z[n][i, :] = np.squeeze(
(np.dot(ZSigma[n][:, :, i], (inv(ZSigma0[n][:, :, i]).dot(np.expand_dims(Z0[n][i, :], 1))
+ tau * np.expand_dims(FslashY[:, i], 1)))).T)
EZZT[n] = (np.reshape(ZSigma[n], [R * R, dimY[n]], 'F') + khatri_rao([Z[n].T, Z[n].T])).T
ENZZT = np.reshape(np.dot(khatri_rao([EZZT[0], EZZT[1], EZZT_t], skip_matrix=2, reverse=bool).T, unfold(O, 2).T),
[R, R, 1], 'F')
# compute E(Z_{\n})
FslashY = np.dot(khatri_rao([Z[0], Z[1], Z_t], skip_matrix=2, reverse=bool).T,
unfold((C-E) * O, 2).T)
ZSigma_t = inv(tau * ENZZT[:, :, 0] + inv(ZSigma0_t[:, :, 0]))
Z_t = (np.dot(ZSigma_t, (inv(ZSigma0_t[:, :, 0]).dot(np.reshape(Z0_t, [R, 1])) +
tau*
|
np.expand_dims(FslashY[:, 0], 1)
|
numpy.expand_dims
|
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
import numpy as np
import matplotlib.pyplot as plt
import lqr_control as control
# temp fix for OpenMP issue
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
def simulate(A, B, policy, x0, T):
"""
simulate trajectory based on policy learned by agent
"""
x_data = []
u_data = []
x = x0
u = policy(torch.FloatTensor(x.reshape(1, -1)).to(device)).detach()
for t in range(T):
u_data.append(u.item())
x_data.append(x.item())
u = policy(torch.FloatTensor(x.reshape(1, -1)).to(device)).detach()
x = A @ x + B @ u.numpy()
return x_data, u_data
def compare_paths(x_sim, x_star, ylabel):
fig, ax = plt.subplots()
colors = ['#2D328F', '#F15C19'] # blue, orange
t = np.arange(0, x_star.shape[0])
ax.plot(t, x_star, color=colors[1], label='True')
ax.plot(t, x_sim, color=colors[0], label='Agent')
ax.set_xlabel('Time', fontsize=18)
ax.set_ylabel(ylabel, fontsize=18)
plt.legend(fontsize=18)
plt.grid(True)
plt.show()
return
def compare_P(actor, K, low=-10, high=10, actor_label='Approx. Policy'):
fig, ax = plt.subplots()
colors = ['#2D328F', '#F15C19'] # blue, orange
label_fontsize = 18
states = torch.linspace(low, high).detach().reshape(100, 1)
actions = actor(states).squeeze().detach().numpy()
optimal = -K * states.numpy()
ax.plot(states.numpy(), optimal, color=colors[1], label='Optimal Policy')
ax.plot(states.numpy(), actions, color=colors[0], label=actor_label)
ax.set_xlabel('x (state)', fontsize=label_fontsize)
ax.set_ylabel('u (action)', fontsize=label_fontsize)
plt.legend()
plt.grid(True)
plt.show()
return
# "custom" activation functions for pytorch - compatible with autograd
class PLU(nn.Module):
def __init__(self):
super(PLU, self).__init__()
self.w1 = torch.nn.Parameter(torch.ones(1))
self.w2 = torch.nn.Parameter(torch.ones(1))
def forward(self, x):
return self.w1 * torch.max(x, torch.zeros_like(x)) + self.w2 * torch.min(x, torch.zeros_like(x))
class Spike(nn.Module):
def __init__(self, center=1, width=1):
super(Spike, self).__init__()
self.c = center
self.w = width
self.alpha = torch.nn.Parameter(torch.ones(1))
self.beta = torch.nn.Parameter(torch.ones(1))
# self.alpha = torch.nn.Parameter(torch.normal(0,1,size=(1,)))
# self.beta = torch.nn.Parameter(torch.normal(0,1,size=(1,)))
def forward(self, x):
return self.alpha * x + self.beta * (
torch.min(torch.max((x - (self.c - self.w)), torch.zeros_like(x)),torch.max((-x + (self.c + self.w)), torch.zeros_like(x)))
- 2*torch.min(torch.max((x - (self.c - self.w+1)), torch.zeros_like(x)),torch.max((-x + (self.c + self.w+1)), torch.zeros_like(x)))
)
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.costs = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.costs[:]
del self.is_terminals[:]
class PRELU(nn.Module):
def __init__(self, state_dim, action_dim, n_latent_var, sigma):
super(PRELU, self).__init__()
self.agent = nn.Sequential(
PLU(),
nn.Linear(state_dim, action_dim, bias=True)
)
self.state_dim = state_dim
self.action_dim = action_dim
self.sigma = sigma
def forward(self):
raise NotImplementedError
def act(self, state, memory):
action_mean = self.agent(state)
action_var = torch.full((self.action_dim,), self.sigma)
action_var = action_var.expand_as(action_mean)
cov_mat = torch.diag_embed(action_var).to(device)
dist = MultivariateNormal(action_mean, cov_mat)
action = dist.sample()
action_logprob = dist.log_prob(action)
if memory is not None:
memory.states.append(state)
memory.actions.append(action)
memory.logprobs.append(action_logprob)
return action.detach()
def evaluate(self, states, actions):
action_means = self.agent(states)
action_var = torch.full((self.action_dim,), self.sigma)
action_var = action_var.expand_as(action_means)
cov_mat = torch.diag_embed(action_var).to(device)
dist = MultivariateNormal(action_means, cov_mat)
action_logprobs = dist.log_prob(actions)
dist_entropy = dist.entropy()
return action_logprobs, dist_entropy
class CHAOS(nn.Module):
def __init__(self, state_dim, action_dim, n_latent_var, sigma):
super(CHAOS, self).__init__()
self.agent = nn.Sequential(
# nn.Linear(state_dim, action_dim, bias=False),
Spike(),
# nn.Linear(n_latent_var, action_dim, bias=False)
)
self.state_dim = state_dim
self.action_dim = action_dim
self.sigma = sigma
def forward(self):
raise NotImplementedError
def act(self, state, memory):
action_mean = self.agent(state)
action_var = torch.full((self.action_dim,), self.sigma)
action_var = action_var.expand_as(action_mean)
cov_mat = torch.diag_embed(action_var).to(device)
dist = MultivariateNormal(action_mean, cov_mat)
action = dist.sample()
action_logprob = dist.log_prob(action)
if memory is not None:
memory.states.append(state)
memory.actions.append(action)
memory.logprobs.append(action_logprob)
return action.detach()
def evaluate(self, states, actions):
action_means = self.agent(states)
action_var = torch.full((self.action_dim,), self.sigma)
action_var = action_var.expand_as(action_means)
cov_mat = torch.diag_embed(action_var).to(device)
dist = MultivariateNormal(action_means, cov_mat)
action_logprobs = dist.log_prob(actions)
dist_entropy = dist.entropy()
return action_logprobs, dist_entropy
class PG:
def __init__(self, state_dim, action_dim, n_latent_var, sigma, lr, betas, gamma, K_epochs):
self.betas = betas
self.gamma = gamma
self.K_epochs = K_epochs
self.sigma = sigma
# self.policy = PRELU(state_dim, action_dim, n_latent_var, sigma).to(device)
self.policy = CHAOS(state_dim, action_dim, n_latent_var, sigma).to(device)
self.optimizer = torch.optim.Adam(self.policy.agent.parameters(), lr=lr, betas=betas)
def select_action(self, state, memory):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.policy.act(state, memory).cpu().data.numpy().flatten()
def update(self, memory):
# Monte Carlo estimate of state costs:
costs = []
discounted_cost = 0
for cost, is_terminal in zip(reversed(memory.costs), reversed(memory.is_terminals)):
if is_terminal:
discounted_cost = 0
discounted_cost = cost + (self.gamma * discounted_cost)
costs.insert(0, discounted_cost)
# Normalizing the costs:
costs = torch.tensor(costs).to(device)
# costs = (costs - costs.mean()) / (costs.std() + 1e-8)
# convert list to tensor
old_states = torch.stack(memory.states).to(device).detach()
old_actions = torch.stack(memory.actions).to(device).detach()
# Optimize policy for K epochs:
for _ in range(self.K_epochs):
# Evaluating old actions and values :
logprobs, dist_entropy = self.policy.evaluate(old_states, old_actions)
# Finding Loss:
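# Score-function (REINFORCE-style) objective written in terms of costs: minimizing
# E[G_t * log pi(a_t|s_t)] pushes probability away from high-cost actions, and the
# subtracted entropy term keeps the policy from collapsing too early.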
actor_loss = costs * logprobs
loss = actor_loss - 0.01 * dist_entropy
# take gradient step
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
############## Hyperparameters ##############
A = np.array(1).reshape(1, 1)
B = np.array(1).reshape(1, 1)
Q =
|
np.array(1)
|
numpy.array
|
import os
import numpy as np
import pandas as pd
from ctypes import CDLL, cast, c_int, c_float, POINTER, ARRAY
LIBRARY_NAME = "libChanlunX"
LIBRARY_PATH = os.path.split(__file__)[0]
class ChanLibrary(object):
"""缠论DLL抽象类
此处使用NumPy的Ctypes接口来加载缠论DLL
并且封装了Windows和Linux加载不同DLL文件的差异
"""
def __init__(self, bi_style=1, bi_frac_range=2, duan_style=0, debug=False):
self._bi_style = bi_style
self._bi_frac_range = bi_frac_range
self._duan_style = duan_style
self._debug = debug
@classmethod
def _get_library(cls):
"""全局唯一的DLL对象
"""
lib = getattr(cls, "_lib", None)
if lib is None:
lib = cls._load_library()
cls._lib = lib
return lib
@classmethod
def _load_library(cls):
## Load the DLL via NumPy's ctypes convenience interface
lib = np.ctypeslib.load_library(LIBRARY_NAME, LIBRARY_PATH)
## Declare the argument types of the exported functions
lib.ChanK.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
c_int,
]
lib.ChanBi.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
c_int,
c_int,
c_int,
]
lib.ChanDuan.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
c_int,
c_int,
]
lib.ChanZhongShu.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
c_int,
]
lib.ChanZhongShu2.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(
dtype=np.float32, ndim=1, flags="C_CONTIGUOUS"),
c_int,
]
return lib
@classmethod
def _free_library(cls):
lib = getattr(cls, "_lib", None)
if lib:
try:
import win32api
win32api.FreeLibrary(lib._handle)
except:
pass
# The methods below are thin wrappers around the DLL. They exist because the
# DLL exposes C-style functions whose parameters are pointers to arrays, so the
# type conversions and output-buffer allocations are handled here.
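# Usage sketch (hypothetical data frame df; the DLL expects C-contiguous float32 arrays):
#   lib = ChanLibrary(debug=True)
#   high = df['high'].to_numpy(dtype=np.float32)
#   low = df['low'].to_numpy(dtype=np.float32)
#   bi = lib.chanBi(high, low)
#   duan = lib.chanDuan(bi, high, low)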
def chanK(self, high_list, low_list):
"""导入K线"""
if self._debug:
print(f'导入K线')
count = len(high_list)
arr_direction = np.zeros(count).astype(np.float32)
arr_include = np.zeros(count).astype(np.float32)
arr_high = np.zeros(count).astype(np.float32)
arr_low = np.zeros(count).astype(np.float32)
lib = self._get_library()
lib.ChanK(arr_direction, arr_include, arr_high, arr_low, high_list, low_list,
count)
if self._debug:
print(f'finished importing K-lines')
return arr_direction, arr_include, arr_high, arr_low
def chanBi(self, high_list, low_list):
"""计算笔"""
if self._debug:
print(f'计算分笔')
count = len(high_list)
arr_bi = np.zeros(count).astype(np.float32)
lib = self._get_library()
lib.ChanBi(arr_bi, high_list, low_list, count, self._bi_style,
self._bi_frac_range)
if self._debug:
print(f'finished computing Bi')
return arr_bi
def chanDuan(self, bi, high_list, low_list):
"""计算段"""
if self._debug:
print(f'计算线段开始')
count = len(high_list)
arr_duan = np.zeros(count).astype(np.float32)
lib = self._get_library()
lib.ChanDuan(arr_duan, bi, high_list, low_list, count, self._duan_style)
if self._debug:
print(f'finished computing Duan')
return arr_duan
def chanZhongShu(self, duan, high_list, low_list):
"""计算中枢"""
if self._debug:
print(f'计算中枢开始')
count = len(high_list)
arr_direction = np.zeros(count).astype(np.float32)
arr_range = np.zeros(count).astype(np.float32)
arr_high =
|
np.zeros(count)
|
numpy.zeros
|
import random
from collections import deque, namedtuple
import numpy as np
import torch
## Default Values
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 64
MIN_NUM_BATCHES = 5
class ReplayMemoryBuffer:
def __init__(self,
buffer_size: int = BUFFER_SIZE,
batch_size: int = BATCH_SIZE,
device: str = DEVICE,
seed: int = 0):
self.device = device
self.memory_buffer = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple('Experience', field_names=['state', 'action', 'reward', 'next_state', 'done'])
self.seed = random.seed(seed)
def __len__(self):
return len(self.memory_buffer)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
experience_sample = self.experience(state, action, reward, next_state, done)
self.memory_buffer.append(experience_sample)
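# Typical usage (illustrative):
#   buffer = ReplayMemoryBuffer()
#   buffer.add(state, action, reward, next_state, done)
#   if len(buffer) >= BATCH_SIZE:
#       batch = buffer.sample()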
def sample(self, batch_size: int = None, device: str = None):
if not batch_size:
batch_size = self.batch_size
if not device:
device = self.device
experiences = random.sample(self.memory_buffer, k=batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(
|
np.vstack([e.reward for e in experiences if e is not None])
|
numpy.vstack
|
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg') # for saving figures
import matplotlib.pyplot as plt
benign_traffic = pd.read_csv('benign_traffic.csv.zip')
gafgyt_traffic = pd.read_csv('gafgyt_traffic.csv.zip', nrows=2000)
print("Benign traffic:")
print(benign_traffic.head())
print("Gafgyt traffic:")
print(gafgyt_traffic.head())
# Compute distances from every benign data point to every other
from sklearn.metrics.pairwise import cosine_distances
benign_dists = cosine_distances(benign_traffic)
print("Benign self-distances:")
print(benign_dists)
print(np.shape(benign_dists))
print("Min, max:", np.min(benign_dists),
|
np.max(benign_dists)
|
numpy.max
|
import numpy as np
import math
from .distribution import Distribution
from .helpers import create_distance_kernel
from .gamma_evaluation_c import gamma_evaluation_c
def difference_between(dist1, dist2, kind="relative"):
"""Compute the difference between two distributions.
Calculate either the absolute or relative difference between the values
in the two distributions. Both distributions must have matching grid sizes
and positions.
Parameters
----------
dist1 : Distribution
The first distribution to calculate the difference between.
dist2 : Distribution
The second distribution to calculate the difference between.
kind : str
The type of comparison to be made. Either "relative" or "absolute".
Returns
-------
Distribution
A new distribution with data values corresponding to the difference
between the two input distributions.
"""
if kind == "absolute":
new_data = dist2.data - dist1.data
elif kind == "relative":
new_data =
|
np.copy(dist2.data)
|
numpy.copy
|
# Functions needed to run Excalibur
# Data munging in separate file
from os.path import basename
import numpy as np
from astropy.time import Time
from scipy import interpolate, optimize
from tqdm.auto import trange
import warnings
warnings.simplefilter('ignore', np.RankWarning)
###########################################################
# PCA Patching
###########################################################
def pcaPatch(x_values, mask, K=2, num_iters=50):
"""
Iterative PCA patching, where bad values are replaced
with denoised values.
Parameters
----------
x_values : 2D array
List of values we want to denoise
mask : 2D array
Mask for x_values that is true for values that
we would like to denoise
K : int, optional (default: 2)
Number of principal components used for denoising
num_iters : int, optional (default: 50)
Number of iterations to run iterative PCA patching
Returns
-------
x_values : 2D ndarray
x_values with bad values replaced by denoised values
mean_x_values : 2D ndarray
Mean of x_values over all exposures.
Used as fiducial model of line positions
PCA is done over deviations from this fiducial model
denoised_xs : 2D ndarray
Denoised x values from PCA reconstruction
uu, ss, vv : ndarrays
Arrays from single value decomposition. Used to
reconstruct principal components and their corresponding
coefficients
"""
K = int(K)
for i in range(num_iters):
# There should be no more NaN values in x_values
assert np.sum(np.isnan(x_values)) == 0
# Redefine mean
mean_x_values = np.mean(x_values,axis=0)
# Run PCA
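# x_values - mean = U S V^T; by Eckart-Young, keeping only the first K singular
# vectors gives the best rank-K approximation, which serves as the denoised estimate.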
uu,ss,vv = np.linalg.svd(x_values-mean_x_values, full_matrices=False)
# Repatch bad data with K PCA reconstruction
denoised_xs = mean_x_values + np.dot((uu*ss)[:,:K],vv[:K])
x_values[mask] = denoised_xs[mask]
return x_values, mean_x_values, denoised_xs, uu, ss, vv
def patchAndDenoise(x_values, orders, waves,
x_errors=None, times=None, file_list=None,
K=2, num_iters=50, running_window=9,
line_cutoff=0.5, file_cutoff=0.5,
outlier_cut=0, verbose=False):
"""
- Vet for bad lines/exposures
- Initial patch of bad data with running mean
- Iterative patch with PCA for specified number of iterations
- Optional second round of iterative PCA patching to catch outliers
Parameters
----------
x_values, x_errors : 2D ndarray
Array of line positions for all lines for each exposure and errors
orders : 1D ndarray
Array of orders for each line
waves : 1D ndarray
Array of wavelengths for each line
times : 1D ndarray, optional
Time stamps for each exposure
Just written into the returned patch dictionary
(not explicitely used for this code, but helps with evalWaveSol)
K : int, optional (default: 2)
Number of principal components used for denoising
num_iters : int, optional (default: 50)
Number of iterations to run iterative PCA patching
running_window : int, optional (default: 9)
Window size of running mean used to initialize pixel values
for lines missing measured pixel values
line_cutoff, file_cutoff : float [0,1], optional (default: 0.5)
Cutoff for bad lines or files, respectively.
i.e. the defaults cut lines that show up in less than 50% of exposures
and files that contain less than 50% of all lines
outlier_cut : float, optional (default: 0)
Sigma cut used to identify outliers following first round of
iterative PCA.
Note: 0 means this process isn't done.
Returns
-------
patch : dict
Dictionary containing all the useful information from this process
(along with a good deal of less useful information!)
Needed for evalWaveSol function
"""
# Arrays that aren't needed, but are helpful to have in returned dictionary
if times is None:
times = np.zeros_like(file_list)
if x_errors is None:
x_errors = np.zeros_like(x_values)
if file_list is None:
file_list = np.zeros_like(times)
### Vetting
# Find where there is no line information
x_values[np.nan_to_num(x_values) < 1] = np.nan
# Mask out of order lines
out_of_order =
|
np.zeros_like(x_values,dtype=bool)
|
numpy.zeros_like
|
# Copyright 2021 [name of copyright owner]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2, h5py
import os, glob, random
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
import torch
import gc
SEED = 32
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
os.environ['PYTHONHASHSEED']=str(SEED)
class NRDataset(Dataset):
def __init__(self, data_path, grayScale = True, batch_size = 8, chunk_size = 10, passes=10, max_dim = 720, geo = False):
self.cnt = 0
self.chunk_cnt = 0
self.done = 0
self.batch_size = batch_size
self.chunk_size = chunk_size
self.passes = passes
self.chunk_data = None
self.grayScale = grayScale
self.max_dim = max_dim
self.data = h5py.File(data_path, "r")
self.geo = True if 'geopatch' in self.data.keys() and geo else False
self.names = list(self.data['imgs'].keys())
self.names = list(set(['{:s}__{:s}'.format(*n.split('__')[:2]) for n in self.names]))
random.shuffle(self.names)
self.regularize_names()
round_sz = len(self.names) // (batch_size * chunk_size)
self.names = self.names[:round_sz * batch_size * chunk_size] # trim dataset size to a multiple of batch&chunk
self.toTensor = transforms.ToTensor()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.load_chunk()
def __len__(self):
return len(self.names) // (self.chunk_size * self.batch_size)
def load_chunk(self):
print('loading chunk [{:d}]...'.format(self.chunk_cnt), end='', flush=True)
self.chunk_data = [] ; gc.collect()
N = len(self.names) // self.chunk_size
for i in range(0, N , self.batch_size):
batch = []
for b in range(self.batch_size):
#Read the data from disk
idx = N * self.chunk_cnt + i + b
key = self.names[idx]
X =self.data['imgs/'+key+'__1'][...]
Y =self.data['imgs/'+key+'__2'][...]
x_kps = self.data['kps/'+key+'__1'][...]
y_kps = self.data['kps/'+key+'__2'][...]
if self.geo: #grab GeoPatches
x_geo = self.data['geopatch/'+key+'__1'][...]
y_geo = self.data['geopatch/'+key+'__2'][...]
#permute axis to (N,1,H,W)
x_geo = np.transpose(x_geo, (2,0,1))[:,np.newaxis,:,:].astype(np.float32)/255.
y_geo = np.transpose(y_geo, (2,0,1))[:,np.newaxis,:,:].astype(np.float32)/255.
# Resize image and keypoint attributes if the image is too large
r_x = max(X.shape[:2])/self.max_dim
r_y = max(Y.shape[:2])/self.max_dim
if r_x > 1.0:
X = cv2.resize(X, None, fx = 1/r_x, fy = 1/r_x)
x_kps[:,:2]/= r_x ; x_kps[:,3]/= r_x
if r_y > 1.0:
Y = cv2.resize(Y, None, fx = 1/r_y, fy = 1/r_y)
y_kps[:,:2]/= r_y ; y_kps[:,3]/= r_y
X = self.toTensor(X) ; Y = self.toTensor(Y)
if self.grayScale:
X = torch.mean(X,0,True) ; Y = torch.mean(Y,0,True)
batch.append((X, x_kps, Y, y_kps) if not self.geo else (X, x_kps,x_geo, Y, y_kps, y_geo))
self.chunk_data.append(batch)
self.chunk_cnt+=1
self.done=0
if self.chunk_cnt == self.chunk_size:
self.chunk_cnt = 0
print('done.')
def get_raw_batch(self, idx):
batch = list(zip(*self.chunk_data[idx]))
return batch
def regularize_names(self):
dk = {}
new_names = []
from collections import deque
for n in self.names:
key = n.split('__')[0]
if key in dk: dk[key].append(n)
else: dk[key] = deque([n])
#for v in dk.values():
# print(len(v))
done = False
while not done:
cnt=0
for k,v in dk.items():
if len(dk[k])==0:
cnt+=1
else:
new_names.append(dk[k].pop())
if cnt==len(dk):
done = True
self.names = new_names
def __getitem__(self, idx):
if self.done == self.passes:
self.load_chunk()
batch = list(zip(*self.chunk_data[idx]))
batch = self.prepare_batch(batch)
return batch
def __iter__(self):
self.cnt = 0
return self
def __next__(self):
if self.cnt == self.__len__():
self.done+=1
self.cnt=0
raise StopIteration
else:
self.cnt+=1
return self.__getitem__(self.cnt-1)
def prepare_batch(self, batch, max_kps = 128):
dev = self.device
if not self.geo:
X, x_kps, Y, y_kps = batch
else:
X, x_kps, x_geo, Y, y_kps, y_geo = batch
sampled_x_kps, sampled_y_kps = [], []
sampled_x_geo, sampled_y_geo = [], []
for b in range(len(x_kps)):
rnd_idx = np.arange(len(x_kps[b]))
np.random.shuffle(rnd_idx)
rnd_idx = rnd_idx[:max_kps]
sampled_x_kps.append(x_kps[b][rnd_idx])
sampled_y_kps.append(y_kps[b][rnd_idx])
if self.geo:
sampled_x_geo.append(x_geo[b][rnd_idx])
sampled_y_geo.append(y_geo[b][rnd_idx])
x_kps = sampled_x_kps
y_kps = sampled_y_kps
if self.geo:
x_geo = sampled_x_geo
y_geo = sampled_y_geo
nkps_x, nkps_y = [], []
nori_x, nori_y, nscale_x, nscale_y = [], [], [], []
ngeo_x, ngeo_y = [], []
idx_keys = []
Hs_x, Ws_x = [], []
Hs_y, Ws_y = [], []
X_dict, Y_dict = {}, {}
for b in range(len(x_kps)): #for each image in the batch, prepare keypoints
H, W = X[b].shape[-2:]
imgSize = np.array([W-1,H-1], dtype = np.float32)
nkp_x = x_kps[b][:,:2] / imgSize * 2 - 1
Hs_x += [H] * len(x_kps[b])
Ws_x += [W] * len(x_kps[b])
H, W = Y[b].shape[-2:]
imgSize =
|
np.array([W-1,H-1], dtype = np.float32)
|
numpy.array
|
import json
import random
from os import path as osp
import pandas
import h5py
import numpy as np
import quaternion
from scipy.ndimage import gaussian_filter1d
from scipy.spatial.transform import Rotation, Slerp
from scipy.interpolate import interp1d
from torch.utils.data import Dataset
from data_utils import CompiledSequence, select_orientation_source, load_cached_sequences
import logging
logging.getLogger().setLevel(logging.INFO)
class GlobSpeedSequence(CompiledSequence):
"""
Dataset :- RoNIN (can be downloaded from http://ronin.cs.sfu.ca/)
Features :- raw angular rate and acceleration (includes gravity).
"""
feature_dim = 6
# target_dim = 2
target_dim = 3
aux_dim = 8
def __init__(self, data_path=None, **kwargs):
super().__init__(**kwargs)
self.ts, self.features, self.targets, self.orientations, self.gt_pos = None, None, None, None, None
self.info = {}
self.grv_only = kwargs.get('grv_only', False)
self.max_ori_error = kwargs.get('max_ori_error', 20.0)
self.w = kwargs.get('interval', 1)
if data_path is not None:
self.load(data_path)
def load(self, data_path):
if data_path[-1] == '/':
data_path = data_path[:-1]
with open(osp.join(data_path, 'info.json')) as f:
self.info = json.load(f)
self.info['path'] = osp.split(data_path)[-1]
self.info['ori_source'], ori, self.info['source_ori_error'] = select_orientation_source(
data_path, self.max_ori_error, self.grv_only)
with h5py.File(osp.join(data_path, 'data.hdf5')) as f:
gyro_uncalib = f['synced/gyro_uncalib']
acce_uncalib = f['synced/acce']
gyro = gyro_uncalib - np.array(self.info['imu_init_gyro_bias'])
acce = np.array(self.info['imu_acce_scale']) * (acce_uncalib - np.array(self.info['imu_acce_bias']))
ts = np.copy(f['synced/time'])
tango_pos = np.copy(f['pose/tango_pos'])
init_tango_ori = quaternion.quaternion(*f['pose/tango_ori'][0])
# Compute the IMU orientation in the Tango coordinate frame.
ori_q = quaternion.from_float_array(ori)
rot_imu_to_tango = quaternion.quaternion(*self.info['start_calibration'])
init_rotor = init_tango_ori * rot_imu_to_tango * ori_q[0].conj()
ori_q = init_rotor * ori_q
dt = (ts[self.w:] - ts[:-self.w])[:, None]
glob_v = (tango_pos[self.w:] - tango_pos[:-self.w]) / dt
gyro_q = quaternion.from_float_array(np.concatenate([np.zeros([gyro.shape[0], 1]), gyro], axis=1)) # quaternion is of w, x, y, z format
acce_q = quaternion.from_float_array(np.concatenate([np.zeros([acce.shape[0], 1]), acce], axis=1))
glob_gyro = quaternion.as_float_array(ori_q * gyro_q * ori_q.conj())[:, 1:]
glob_acce = quaternion.as_float_array(ori_q * acce_q * ori_q.conj())[:, 1:]
start_frame = self.info.get('start_frame', 0)
self.ts = ts[start_frame:]
self.features = np.concatenate([glob_gyro, glob_acce], axis=1)[start_frame:]
# self.targets = glob_v[start_frame:, :2]
self.targets = glob_v[start_frame:, :3]
self.orientations = quaternion.as_float_array(ori_q)[start_frame:]
self.gt_pos = tango_pos[start_frame:]
pass
def get_feature(self):
return self.features
def get_target(self):
return self.targets
def get_aux(self):
return np.concatenate([self.ts[:, None], self.orientations, self.gt_pos], axis=1)
def get_meta(self):
return '{}: device: {}, ori_error ({}): {:.3f}'.format(
self.info['path'], self.info['device'], self.info['ori_source'], self.info['source_ori_error'])
class SenseINSSequence(CompiledSequence):
"""
Dataset :- RoNIN (can be downloaded from http://ronin.cs.sfu.ca/)
Features :- raw angular rate and acceleration (includes gravity).
"""
feature_dim = 6
# target_dim = 2
target_dim = 3
aux_dim = 8
def __init__(self, data_path=None, **kwargs):
super().__init__(**kwargs)
self.ts, self.features, self.targets, self.orientations, self.gt_pos = None, None, None, None, None
self.info = {}
args = kwargs['args']
self.imu_freq = args.imu_freq
self.sum_dur = 0
self.interval = args.window_size
self.w = kwargs.get('interval', 1)
if data_path is not None:
self.load(data_path)
def load(self, data_path):
if data_path[-1] == '/':
data_path = data_path[:-1]
# info_file = osp.join(data_path, 'SenseINS.json')
# if osp.exists(info_file):
# with open(info_file) as f:
# self.info = json.load(f)
# else:
# logging.info(f"data_glob_speed.py: info_file does not exist. {info_file}")
data_file = osp.join(data_path, 'SenseINS.csv')
if osp.exists(data_file):
imu_all = pandas.read_csv(data_file)
else:
logging.info(f"data_glob_speed.py: data_file does not exist. {data_file}")
return
self.info['path'] = osp.split(data_path)[-1]
# ---ts---
if 'times' in imu_all:
tmp_ts = np.copy(imu_all[['times']].values)
else:
tmp_ts = np.copy(imu_all[['time']].values)
tmp_ts = np.squeeze(tmp_ts)
start_ts = tmp_ts[1]
end_ts = tmp_ts[1] + int((tmp_ts[-4] - tmp_ts[1]) * self.imu_freq) / self.imu_freq # if tmp_ts[-2] were used, it
# could go out of bounds when interpolating
ts_interval = 1.0 / self.imu_freq
ts = np.arange(start_ts, end_ts, ts_interval)
self.sum_dur = end_ts - start_ts
self.info['time_duration'] = self.sum_dur
# ---vio_q and vio_p---
tmp_vio_q = np.copy(imu_all[['gt_q_w', 'gt_q_x', 'gt_q_y', 'gt_q_z']].values) # gt orientation
get_gt = True
# this is to check whether gt exists, if not, use VIO as gt
if tmp_vio_q[0][0] == 1.0 and tmp_vio_q[100][0] == 1.0 or tmp_vio_q[0][0] == tmp_vio_q[-1][0]:
tmp_vio_q = np.copy(imu_all[['vio_q_w', 'vio_q_x', 'vio_q_y', 'vio_q_z']].values)
tmp_vio_p = np.copy(imu_all[['vio_p_x', 'vio_p_y', 'vio_p_z']].values)
get_gt = False
else:
tmp_vio_p = np.copy(imu_all[['gt_p_x', 'gt_p_y', 'gt_p_z']].values)
# ---acce and gyro---
tmp_gyro =
|
np.copy(imu_all[['gyro_x', 'gyro_y', 'gyro_z']].values)
|
numpy.copy
|
"""
@package ion_functions.test.adcp_functions
@file ion_functions/test/test_adcp_functions.py
@author <NAME>, <NAME>, <NAME>
@brief Unit tests for adcp_functions module
"""
from nose.plugins.attrib import attr
from ion_functions.test.base_test import BaseUnitTestCase
import numpy as np
from ion_functions.data import adcp_functions as af
from ion_functions.data.adcp_functions import ADCP_FILLVALUE
from ion_functions.data.generic_functions import SYSTEM_FILLVALUE
@attr('UNIT', group='func')
class TestADCPFunctionsUnit(BaseUnitTestCase):
def setUp(self):
"""
Implemented by:
2014-02-06: <NAME>. Initial Code.
2015-06-12: <NAME>. Changed raw beam data to type int. This
change did not affect any previously written unit tests.
"""
# set test inputs -- values from DPS
self.b1 = np.array([[-0.0300, -0.2950, -0.5140, -0.2340, -0.1880,
0.2030, -0.3250, 0.3050, -0.2040, -0.2940]]) * 1000
self.b2 = np.array([[0.1800, -0.1320, 0.2130, 0.3090, 0.2910,
0.0490, 0.1880, 0.3730, -0.0020, 0.1720]]) * 1000
self.b3 = np.array([[-0.3980, -0.4360, -0.1310, -0.4730, -0.4430,
0.1880, -0.1680, 0.2910, -0.1790, 0.0080]]) * 1000
self.b4 = np.array([[-0.2160, -0.6050, -0.0920, -0.0580, 0.4840,
-0.0050, 0.3380, 0.1750, -0.0800, -0.5490]]) * 1000
# the data type of the raw beam velocities is int;
# set b1-b4 to int so that fill replacement can be tested.
self.b1 = self.b1.astype(int)
self.b2 = self.b2.astype(int)
self.b3 = self.b3.astype(int)
self.b4 = self.b4.astype(int)
#
self.echo = np.array([[0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250]])
self.sfactor = 0.45
# units of compass data are in centidegrees.
self.heading = 9841
self.pitch = 69
self.roll = -254
self.orient = 1
self.lat = 50.0000
self.lon = -145.0000
self.depth = 0.0
self.ntp = 3545769600.0 # May 12, 2012
# set expected results -- velocity profiles in earth coordinates
# (values in DPS)
self.uu = np.array([[0.2175, -0.2814, -0.1002, 0.4831, 1.2380,
-0.2455, 0.6218, -0.1807, 0.0992, -0.9063]])
self.vv = np.array([[-0.3367, -0.1815, -1.0522, -0.8676, -0.8919,
0.2585, -0.8497, -0.0873, -0.3073, -0.5461]])
self.ww = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
# set expected results -- magnetic variation correction applied
# (computed in Matlab using above values and mag_var.m)
self.uu_cor = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
self.vv_cor = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
# set the expected results -- error velocity
self.ee = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# set the expected results -- echo intensity conversion from counts to dB
self.dB = np.array([[0.00, 11.25, 22.50, 33.75, 45.00, 56.25, 67.50,
78.75, 90.00, 101.25, 112.50]])
def test_adcp_beam(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error.
Tests adcp_beam2ins, adcp_ins2earth and magnetic_correction functions
for ADCPs that output data in beam coordinates. All three functions
must return the correct output for the final test cases to work.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2013-04-10: <NAME>. Initial code.
2014-02-06: <NAME>. Added tests to confirm arrays of
arrays can be processed (in other words, vectorized the
code).
2015-06-23: <NAME>. Revised documentation. Added unit test
for the function adcp_beam_error.
Notes:
The original suite of tests within this function did not provide a
test for adcp_beam_error. However, adcp_beam_error and vadcp_beam_error
are identical functions, and vadcp_beam_error is implicitly tested in the
test_vadcp_beam function when the 4th output argument of adcp_beam2inst
is tested. Therefore values to directly test adcp_beam_error were
then derived from the function itself and included as part of the unit
test within this code (test_adcp_beam).
"""
# single record case
got_uu_cor = af.adcp_beam_eastward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_vv_cor = af.adcp_beam_northward(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient,
self.lat, self.lon, self.depth, self.ntp)
got_ww = af.adcp_beam_vertical(self.b1, self.b2, self.b3, self.b4,
self.heading, self.pitch, self.roll, self.orient)
got_ee = af.adcp_beam_error(self.b1, self.b2, self.b3, self.b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, self.ww, 4)
np.testing.assert_array_almost_equal(got_ee, self.ee, 4)
# reset the test inputs for multiple records
b1 = np.tile(self.b1, (24, 1))
b2 = np.tile(self.b2, (24, 1))
b3 = np.tile(self.b3, (24, 1))
b4 = np.tile(self.b4, (24, 1))
heading = np.ones(24, dtype=int) * self.heading
pitch = np.ones(24, dtype=int) * self.pitch
roll = np.ones(24, dtype=int) * self.roll
orient = np.ones(24, dtype=int) * self.orient
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
depth = np.ones(24) * self.depth
ntp = np.ones(24) * self.ntp
# reset outputs for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
ww = np.tile(self.ww, (24, 1))
ee = np.tile(self.ee, (24, 1))
# multiple record case
got_uu_cor = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_vv_cor = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
got_ww = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
got_ee = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww, ww, 4)
np.testing.assert_array_almost_equal(got_ee, ee, 4)
def test_adcp_beam_with_fill(self):
"""
Directly tests DPA functions adcp_beam_eastward, adcp_beam_northward,
adcp_beam_vertical, and adcp_beam_error when system fill values and
ADCP fill values (bad value sentinels) are present in the data stream.
Non-fill values are based on those used in test_adcp_beam in this module.
Implemented by:
2013-06-24: <NAME>. Initial code.
Notes:
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### set input data
# units of compass data are in centidegrees.
heading = np.array([9841])
pitch = np.array([69])
roll = np.array([-254])
missingroll = np.array([sfill])
orient = np.array([1])
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
###
# for positional clarity, input beam and expected velocities will be explicitly
# enumerated for each single time record test case.
###
### single time record case; missing roll data
## the ADCP does not use its bad flag sentinel for compass data, only beam data.
## however, it is possible that CI could supply the system fillvalue for missing compass data.
# input data
# beam velocity units are mm/s
b1_x1 = np.array([[-30, -295, -514, -234, -188, 203, -325, 305, -204, -294]])
b2_x1 = np.array([[180, -132, 213, 309, 291, 49, 188, 373, -2, 172]])
b3_x1 = np.array([[-398, -436, -131, -473, -443, 188, -168, 291, -179, 8]])
b4_x1 = np.array([[-216, -605, -92, -58, 484, -5, 338, 175, -80, -549]])
# expected results if all good beam and compass data
# these will be used later in the multiple time record test
uu_x0 = np.array([[0.1099, -0.3221, -0.4025, 0.2092, 0.9243,
-0.1595, 0.3471, -0.1983, 0.0053, -1.0261]])
vv_x0 = np.array([[-0.3855, -0.0916, -0.9773, -0.9707, -1.2140,
0.3188, -0.9940, -0.0308, -0.3229, -0.2582]])
ww_x0 = np.array([[0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966]])
ee_x0 = np.array([[0.789762, 0.634704, -0.080630, 0.626434, 0.064090,
0.071326, -0.317352, 0.219148, 0.054787, 0.433129]])
# expected results for all good beam data, missing roll data;
# nans for all results except for the error velocity, which does not depend on the compass
uu_x1 = uu_x0 * np.nan
vv_x1 = vv_x0 * np.nan
ww_x1 = ww_x0 * np.nan
ee_x1 = np.copy(ee_x0)
uu_calc = af.adcp_beam_eastward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient, lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x1, b2_x1, b3_x1, b4_x1, heading, pitch,
missingroll,
orient)
ee_calc = af.adcp_beam_error(b1_x1, b2_x1, b3_x1, b4_x1)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x1, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x1, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x1, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x1, 4)
### single time record case; missing and bad-flagged beam data, good compass data
# input data
b1_x2 = np.array([[sfill, -295, -514, -234, -188, 203, -325, afill, -204, -294]])
b2_x2 = np.array([[sfill, -132, 213, 309, 291, 49, 188, afill, -2, sfill]])
b3_x2 = np.array([[sfill, -436, -131, -473, -443, 188, -168, afill, -179, 8]])
b4_x2 = np.array([[sfill, -605, -92, -58, afill, -5, 338, afill, -80, -549]])
# expected
uu_x2 = np.array([[np.nan, -0.3221, -0.4025, 0.2092, np.nan,
-0.1595, 0.3471, np.nan, 0.0053, np.nan]])
vv_x2 = np.array([[np.nan, -0.0916, -0.9773, -0.9707, np.nan,
0.3188, -0.9940, np.nan, -0.3229, np.nan]])
ww_x2 = np.array([[np.nan, 0.3977, 0.1870, 0.1637, np.nan,
-0.1290, 0.0334, np.nan, 0.1384, np.nan]])
ee_x2 = np.array([[np.nan, 0.634704, -0.080630, 0.626434, np.nan,
0.071326, -0.317352, np.nan, 0.054787, np.nan]])
# calculated
uu_calc = af.adcp_beam_eastward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1_x2, b2_x2, b3_x2, b4_x2,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1_x2, b2_x2, b3_x2, b4_x2)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_x2, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x2, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x2, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x2, 4)
### multiple (5) record case
## reset the test inputs for 5 time records
# 1st time record is the bad/missing beam data case above
# 2nd time record is a missing heading data case
# 3rd time record is all good data
# 4th time record is bad/missing beam and missing pitch data.
# 5th time record is missing orientation data
b1 = np.vstack((b1_x2, b1_x1, b1_x1, b1_x2, b1_x1))
b2 = np.vstack((b2_x2, b2_x1, b2_x1, b2_x2, b2_x1))
b3 = np.vstack((b3_x2, b3_x1, b3_x1, b3_x2, b3_x1))
b4 = np.vstack((b4_x2, b4_x1, b4_x1, b4_x2, b4_x1))
heading = np.hstack((heading, sfill, heading, heading, heading))
pitch = np.hstack((pitch, pitch, pitch, sfill, pitch))
roll = np.tile(roll, 5)
orient = np.hstack((orient, orient, orient, orient, sfill))
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
# set expected outputs for these 5 records
# notes:
# (1) heading is not used in the calculation of vertical velocity,
# therefore the second entry to ww_xpctd is good data out (ww_x0),
# not nans as resulted from the missingroll test.
# (2) pitch is not used in the calculation of error velocity, so that
# in the mixed case (time record 4) the error velocity should be
# the same as that for the pure bad/missing beam case (ee_x2, 1st
# and 4th entries in ee_xpctd).
# (3) the orientation argument affects the roll calculation, so that
# when its value is missing (5th time record) the expected result
# would be the same as if the roll value were missing. therefore
# the 5th column entries are all x1 results.
uu_xpctd = np.vstack((uu_x2, uu_x1, uu_x0, uu_x1, uu_x1))
vv_xpctd = np.vstack((vv_x2, vv_x1, vv_x0, vv_x1, vv_x1))
ww_xpctd = np.vstack((ww_x2, ww_x0, ww_x0, ww_x1, ww_x1))
ee_xpctd = np.vstack((ee_x2, ee_x1, ee_x0, ee_x2, ee_x1))
# calculated
uu_calc = af.adcp_beam_eastward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
vv_calc = af.adcp_beam_northward(b1, b2, b3, b4,
heading, pitch, roll, orient,
lat, lon, depth, ntp)
ww_calc = af.adcp_beam_vertical(b1, b2, b3, b4,
heading, pitch, roll, orient)
ee_calc = af.adcp_beam_error(b1, b2, b3, b4)
# test results
np.testing.assert_array_almost_equal(uu_calc, uu_xpctd, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_xpctd, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_xpctd, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_xpctd, 4)
def test_adcp_earth(self):
"""
Tests magnetic_correction function for ADCPs set to output data in the
Earth Coordinate system.
Values were not defined in the DPS; they were recreated using the test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by:
2014-02-06: <NAME>. Initial code.
2015-06-10: <NAME>.
Changed adcp_ins2earth to require the units of the compass
data to be in centidegrees.
"""
# set the test data
u, v, w, e = af.adcp_beam2ins(self.b1, self.b2, self.b3, self.b4)
### old adcp_ins2earth returned 3 variables (CEW)
# adcp_ins2earth now requires compass data in units of centidegrees (RAD)
uu, vv, ww = af.adcp_ins2earth(u, v, w, self.heading, self.pitch,
self.roll, self.orient)
# test the magnetic variation correction
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
np.testing.assert_array_almost_equal(got_uu_cor, self.uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, self.vv_cor, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(self.uu_cor, (24, 1))
vv_cor = np.tile(self.vv_cor, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
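    # --- illustrative sketch (not from the original ion_functions tests) ---
    # The magnetic variation correction exercised above amounts to rotating
    # the (east, north) velocity pair by the local declination theta obtained
    # from a geomagnetic model, roughly:
    #   u_true =  u_mag * cos(theta) + v_mag * sin(theta)
    #   v_true = -u_mag * sin(theta) + v_mag * cos(theta)
    # A stand-alone stand-in (theta in degrees), for orientation only:
    @staticmethod
    def _magnetic_rotation_sketch(u_mag, v_mag, theta_deg):
        theta = np.radians(theta_deg)
        u_true = u_mag * np.cos(theta) + v_mag * np.sin(theta)
        v_true = -u_mag * np.sin(theta) + v_mag * np.cos(theta)
        return u_true, v_true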
def test_adcp_earth_int_input_velocity_data(self):
"""
Tests adcp_earth_eastward and adcp_earth_northward using int type raw velocity data,
as will be supplied by CI. Also tests the almost trivial functions adcp_earth_vertical
and adcp_earth_error (unit change).
Input raw velocity values were derived from the float unit test in test_adcp_earth
by rounding the uu and vv float output from adcp_ins2earth. These int inputs failed
the assert_array_almost_equal unit tests (decimals=4) in test_adcp_earth because of
round-off error but passed when the agreement precision was relaxed to decimals=3.
This is taken as justification to more precisely calculate the expected values for
unit tests in the current module from adcp_earth_eastward and adcp_earth_northward
themselves (the very modules being tested), using as input the type int raw velocity
data. Because these DPA functions were used to derive their own check data, the
original (float type input velocity data) unit tests are retained in the
test_adcp_earth function.
The tests in this module will be used to derive unit tests checking the replacement
of ADCP int bad value sentinels (-32768) with Nans; these tests require that the
raw velocity data be of type int.
Implemented by:
2014-06-16: <NAME>. Initial code.
"""
# set the input test data [mm/sec]
uu = np.array([[218, -281, -100, 483, 1238, -245, 622, -181, 99, -906]])
vv = np.array([[-337, -182, -1052, -868, -892, 258, -850, -87, -307, -546]])
ww = np.array([[140, 398, 187, 164, 9, -129, 33, -302, 138, 197]])
ee = np.array([[790, 635, 81, 626, 64, 71, -317, 219, 55, 433]])
# expected values, calculated using adcp_earth_eastward and adcp_earth_northward
uu_cor = np.array([[0.11031103, -0.32184604, -0.40227939, 0.20903718, 0.92426103,
-0.15916447, 0.34724837, -0.19849871, 0.00522179, -1.02580274]])
vv_cor = np.array([[-0.38590734, -0.09219615, -0.97717720, -0.97109035, -1.21410442,
0.31820696, -0.99438552, -0.03046741, -0.32252555, -0.25822614]])
# expected values, calculated by changing units from mm/s to m/s
ww_vel = ww / 1000.0
ee_vel = ee / 1000.0
# test the magnetic variation correction using type integer inputs for the velocities.
got_uu_cor = af.adcp_earth_eastward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, self.depth, self.lat, self.lon, self.ntp)
# and the unit change functions
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
# reset the test inputs for multiple records using the integer inputs.
uu = np.tile(uu, (24, 1))
vv = np.tile(vv, (24, 1))
ww = np.tile(ww, (24, 1))
ee = np.tile(ee, (24, 1))
depth = np.ones(24) * self.depth
lat = np.ones(24) * self.lat
lon = np.ones(24) * self.lon
ntp = np.ones(24) * self.ntp
# reset expected results for multiple records
uu_cor = np.tile(uu_cor, (24, 1))
vv_cor = np.tile(vv_cor, (24, 1))
ww_vel = np.tile(ww_vel, (24, 1))
ee_vel = np.tile(ee_vel, (24, 1))
# compute the results for multiple records
got_uu_cor = af.adcp_earth_eastward(uu, vv, depth, lat, lon, ntp)
got_vv_cor = af.adcp_earth_northward(uu, vv, depth, lat, lon, ntp)
got_ww_vel = af.adcp_earth_vertical(ww)
got_ee_vel = af.adcp_earth_error(ee)
# test the magnetic variation correction
np.testing.assert_array_almost_equal(got_uu_cor, uu_cor, 4)
np.testing.assert_array_almost_equal(got_vv_cor, vv_cor, 4)
# and the unit change functions
np.testing.assert_array_almost_equal(got_ww_vel, ww_vel, 4)
np.testing.assert_array_almost_equal(got_ee_vel, ee_vel, 4)
def test_adcp_earth_with_fill(self):
"""
Tests adcp_earth_eastward, adcp_earth_northward, adcp_earth_vertical and
adcp_earth_error when system fill values and ADCP fill values (bad value
sentinels) are present in the data stream.
Non-fill test values come from the function test_adcp_earth_int_input_velocity_data
in this module.
Implemented by:
2014-06-25: <NAME>. Initial code.
"""
# for convenience
sfill = SYSTEM_FILLVALUE
afill = ADCP_FILLVALUE
### scalar time case
# set the input test data
lat = np.array([50.0000])
lon = np.array([-145.0000])
depth = np.array([0.0])
ntp = np.array([3545769600.0]) # May 12, 2012
# input velocities [mm/sec]
uu_in0 = np.array([[218, sfill, -100, 483, afill, -245]])
vv_in0 = np.array([[sfill, -182, -1052, -868, -892, afill]])
ww_in0 = np.array([[sfill, 398, afill, 164, 9, -129]])
ee_in0 = np.array([[afill, 635, 81, 626, sfill, 71]])
# expected values [m/sec]
uu_x0 = np.array([[np.nan, np.nan, -0.40227, 0.20903, np.nan, np.nan]])
vv_x0 = np.array([[np.nan, np.nan, -0.97717, -0.97109, np.nan, np.nan]])
ww_x0 = np.array([[np.nan, 0.398, np.nan, 0.164, 0.009, -0.129]])
ee_x0 = np.array([[np.nan, 0.635, 0.081, 0.626, np.nan, 0.071]])
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
### multiple time record case
# set the input test data
lat = np.tile(lat, 5)
lon = np.tile(lon, 5)
depth = np.tile(depth, 5)
ntp = np.tile(ntp, 5)
uu_in0 = np.tile(uu_in0, (5, 1))
vv_in0 = np.tile(vv_in0, (5, 1))
ww_in0 = np.tile(ww_in0, (5, 1))
ee_in0 = np.tile(ee_in0, (5, 1))
# expected
uu_x0 = np.tile(uu_x0, (5, 1))
vv_x0 = np.tile(vv_x0, (5, 1))
ww_x0 = np.tile(ww_x0, (5, 1))
ee_x0 = np.tile(ee_x0, (5, 1))
# calculated
uu_calc = af.adcp_earth_eastward(uu_in0, vv_in0, depth, lat, lon, ntp)
vv_calc = af.adcp_earth_northward(uu_in0, vv_in0, depth, lat, lon, ntp)
ww_calc = af.adcp_earth_vertical(ww_in0)
ee_calc = af.adcp_earth_error(ee_in0)
# test
np.testing.assert_array_almost_equal(uu_calc, uu_x0, 4)
np.testing.assert_array_almost_equal(vv_calc, vv_x0, 4)
np.testing.assert_array_almost_equal(ww_calc, ww_x0, 4)
np.testing.assert_array_almost_equal(ee_calc, ee_x0, 4)
def test_adcp_backscatter(self):
"""
Tests echo intensity scaling function (adcp_backscatter) for ADCPs
in order to convert from echo intensity in counts to dB.
Values were not defined in the DPS; they were created using the test values above:
OOI (2012). Data Product Specification for Velocity Profile and Echo
Intensity. Document Control Number 1341-00750.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00750_Data_Product_SPEC_VELPROF_OOI.pdf)
Implemented by <NAME>, 2014-02-06
<NAME>, 2015-06-25. Added tests for fill values.
"""
# the single record case
got = af.adcp_backscatter(self.echo, self.sfactor)
np.testing.assert_array_almost_equal(got, self.dB, 4)
# the multi-record case -- inputs
raw = np.tile(self.echo, (24, 1))
sf = np.ones(24) * self.sfactor
# the multi-record case -- outputs
dB = np.tile(self.dB, (24, 1))
got = af.adcp_backscatter(raw, sf)
np.testing.assert_array_almost_equal(got, dB, 4)
### test fill value replacement with nan
# for convenience
sfill = SYSTEM_FILLVALUE
# the adcp bad sentinel fillvalue (requires 2 bytes) is not used for echo
# intensity, which is stored in 1 byte.
# the single time record case
echo_with_fill, xpctd = np.copy(self.echo), np.copy(self.dB)
echo_with_fill[0, 3], xpctd[0, 3] = sfill, np.nan
echo_with_fill[0, 6], xpctd[0, 6] = sfill, np.nan
echo_with_fill[0, 7], xpctd[0, 7] = sfill, np.nan
got = af.adcp_backscatter(echo_with_fill, self.sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
# the multiple time record case
echo_with_fill = np.vstack((echo_with_fill, self.echo, echo_with_fill))
xpctd = np.vstack((xpctd, self.dB, xpctd))
sfactor = np.tile(self.sfactor, (3, 1))
got = af.adcp_backscatter(echo_with_fill, sfactor)
np.testing.assert_array_almost_equal(got, xpctd, 4)
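    # --- illustrative sketch (not from the original ion_functions tests) ---
    # The scaling exercised above is, to first order, a linear conversion from
    # counts to dB: backscatter = counts * sfactor, with system fill values
    # replaced by NaN. A minimal stand-in assuming that behaviour and a scalar
    # scale factor:
    @staticmethod
    def _backscatter_sketch(counts, sfactor, fill=SYSTEM_FILLVALUE):
        counts = np.array(counts, dtype=float)
        counts[counts == fill] = np.nan
        return counts * sfactor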
def test_vadcp_beam(self):
"""
Indirectly tests vadcp_beam_eastward, vadcp_beam_northward,
vadcp_beam_vertical_est, and vadcp_beam_vertical_true functions (which
call adcp_beam2ins and adcp_ins2earth) and vadcp_beam_error (which only
calls adcp_beam2ins) for the specialized 5-beam ADCP. The magnetic
correction and the conversion from mm/s to m/s are not applied.
Values based on those defined in DPS:
OOI (2012). Data Product Specification for Turbulent Velocity Profile
and Echo Intensity. Document Control Number 1341-00760.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-00760_Data_Product_SPEC_VELTURB_OOI.pdf)
Implemented by:
2014-07-24: <NAME>. Initial code.
2015-06-10: <NAME>.
adcp_ins2earth now requires the units of the compass
data to be in centidegrees.
"""
# test inputs
b1 = np.ones((10, 10)).astype(int) * -325
b2 = np.ones((10, 10)).astype(int) * 188
b3 = np.ones((10, 10)).astype(int) * 168
b4 = np.ones((10, 10)).astype(int) * -338
b5 = np.ones((10, 10)).astype(int) * -70
# units of centidegrees
heading = np.array([30, 30, 30, 30, 30,
32, 32, 32, 32, 32]) * 100
pitch = np.array([0, 2, 3, 3, 1, 2, 2, 3, 3, 1]) * 100
roll = np.array([0, 4, 3, 4, 3, 3, 4, 3, 4, 3]) * 100
orient =
|
np.ones(10, dtype=np.int)
|
numpy.ones
|
import pickle
import os
import glob
import random
import pathlib
import time
import collections
import itertools
import numpy as np
import ray
from lux_ai import scraper, collector, evaluator, imitator, trainer_ac, trainer_pg, trainer_ac_mc, tools
from lux_gym.envs.lux.action_vectors_new import empty_worker_action_vectors
from run_configuration import CONF_Scrape, CONF_Collect, CONF_RL, CONF_Main, CONF_Imitate, CONF_Evaluate
def scrape():
config = {**CONF_Main, **CONF_Scrape}
if config["scrape_type"] == "single":
scraper_agent = scraper.Agent(config)
scraper_agent.scrape_all()
elif config["scrape_type"] == "multi":
parallel_calls = config["parallel_calls"]
actions_shape = [item.shape for item in empty_worker_action_vectors]
env_name = config["environment"]
feature_maps_shape = tools.get_feature_maps_shape(config["environment"])
lux_version = config["lux_version"]
team_name = config["team_name"]
only_wins = config["only_wins"]
is_for_rl = config["is_for_rl"]
is_pg_rl = config["is_pg_rl"]
files_pool = set(glob.glob("./data/jsons/*.json"))
if is_for_rl:
data_path = "./data/tfrecords/rl/storage/"
else:
data_path = "./data/tfrecords/imitator/train/"
already_saved_files = glob.glob(data_path + "*.tfrec")
saved_submissions = set()
for file_name in already_saved_files:
raw_name = pathlib.Path(file_name).stem
saved_submissions.add(raw_name.split("_")[0])
file_names_saved = []
for i, file_name in enumerate(files_pool):
raw_name = pathlib.Path(file_name).stem
if raw_name in saved_submissions:
print(f"File {file_name} for {team_name}; is already saved.")
file_names_saved.append(file_name)
files_pool -= set(file_names_saved)
set_size = int(len(files_pool) / parallel_calls)
sets = []
for _ in range(parallel_calls):
new_set = set(random.sample(files_pool, set_size))
files_pool -= new_set
sets.append(new_set)
for i, file_names in enumerate(zip(*sets)):
print(f"Iteration {i} starts")
ray.init(num_cpus=parallel_calls, include_dashboard=False)
scraper_object = ray.remote(scraper.scrape_file)
futures = [scraper_object.remote(env_name, file_names[j], team_name, lux_version, only_wins,
feature_maps_shape, actions_shape, i, is_for_rl, is_pg_rl)
for j in range(len(file_names))]
_ = ray.get(futures)
ray.shutdown()
else:
raise ValueError
return 0
def collect(input_data):
config = {**CONF_Main, **CONF_Collect}
if config["is_for_imitator"]:
data_path = "data/tfrecords/imitator/storage_0/"
elif config["is_for_rl"]:
data_path = "data/tfrecords/rl/storage_0/"
else:
raise NotImplementedError
# collector.collect(config, input_data, data_path, 9)
ray.init(include_dashboard=False)
collector_object = ray.remote(collector.collect)
futures = [collector_object.remote(config, input_data, data_path, j, steps=10) for j in range(2)]
_ = ray.get(futures)
ray.shutdown()
def evaluate(input_data):
config = {**CONF_Main, **CONF_Evaluate}
eval_agent = evaluator.Agent(config, input_data)
eval_agent.evaluate()
def imitate(input_data):
config = {**CONF_Main, **CONF_Imitate, **CONF_Evaluate, **CONF_Collect}
if config["self_imitation"]:
prev_n = 2
for i in range(10):
print(f"Self imitation, cycle {i}.")
current_n = i % 3 # current and prev to use
next_n = (i + 1) % 3 # next to collect
data_path = f"data/tfrecords/imitator/storage_{next_n}/"
fnames_train = glob.glob("data/tfrecords/imitator/train/*.tfrec")
fnames_curr = glob.glob(f"data/tfrecords/imitator/storage_{current_n}/*.tfrec")
fnames_prev = glob.glob(f"data/tfrecords/imitator/storage_{prev_n}/*.tfrec")
self_exp_n = len(fnames_curr) + len(fnames_prev)
fnames_train = random.choices(fnames_train, k=self_exp_n)
filenames = fnames_train + fnames_prev + fnames_curr
files = glob.glob("./data/weights/*.pickle")
if len(files) > 0:
with open(files[-1], 'rb') as datafile:
input_data = pickle.load(datafile)
raw_name = pathlib.Path(files[-1]).stem
print(f"Training and collecting from {raw_name}.pickle weights.")
# trainer_agent = imitator.Agent(config, input_data, filenames=filenames, current_cycle=i)
# trainer_agent.self_imitate()
ray.init(num_gpus=1, include_dashboard=False)
# remote objects creation
trainer_object = ray.remote(num_gpus=1)(imitator.Agent)
eval_object = ray.remote(evaluator.Agent)
collector_object = ray.remote(collector.collect)
# initialization
workers_info = tools.GlobalVarActor.remote()
imitator_agent = trainer_object.remote(config, input_data, workers_info, filenames, i)
eval_agent = eval_object.remote(config, input_data, workers_info)
# remote call
trainer_future = imitator_agent.self_imitate.remote()
eval_future = eval_agent.evaluate.remote()
col_futures = [collector_object.remote(config, input_data, data_path, j, global_var_actor_out=workers_info)
for j in range(2)]
# getting results from remote functions
_ = ray.get(trainer_future)
_ = ray.get(eval_future)
_ = ray.get(col_futures)
time.sleep(1)
ray.shutdown()
prev_n = current_n
time.sleep(5)
elif config["with_evaluation"]:
ray.init(num_gpus=1, include_dashboard=False)
# remote objects creation
trainer_object = ray.remote(num_gpus=1)(imitator.Agent)
eval_object = ray.remote(evaluator.Agent)
# initialization
workers_info = tools.GlobalVarActor.remote()
trainer_agent = trainer_object.remote(config, input_data, workers_info)
eval_agent = eval_object.remote(config, input_data, workers_info)
# remote call
trainer_future = trainer_agent.imitate.remote()
eval_future = eval_agent.evaluate.remote()
# getting results from remote functions
_ = ray.get(trainer_future)
_ = ray.get(eval_future)
time.sleep(1)
ray.shutdown()
else:
trainer_agent = imitator.Agent(config, input_data)
trainer_agent.imitate()
def rl_train(input_data): # , checkpoint):
config = {**CONF_Main, **CONF_RL, **CONF_Evaluate, **CONF_Collect}
# if checkpoint is not None:
# path = str(Path(checkpoint).parent) # due to https://github.com/deepmind/reverb/issues/12
# checkpointer = reverb.checkpointers.DefaultCheckpointer(path=path)
# else:
# checkpointer = None
# feature_maps_shape = tools.get_feature_maps_shape(config["environment"])
# buffer = storage.UniformBuffer(feature_maps_shape,
# num_tables=1, min_size=config["batch_size"], max_size=config["buffer_size"],
# n_points=config["n_points"], checkpointer=checkpointer)
if config["rl_type"] == "single":
trainer_ac.ac_agent_run(config, input_data)
elif config["rl_type"] == "single_pg":
trainer_pg.pg_agent_run(config, input_data)
elif config["rl_type"] == "single_ac_mc":
# data_list = glob.glob(f"data/tfrecords/rl/storage/*.tfrec")
data_list = [glob.glob(f"data/tfrecords/rl/storage_{i}/*.tfrec") for i in [j for j in range(10)]]
data_list = list(itertools.chain.from_iterable(data_list))
trainer_ac_mc.ac_mc_agent_run(config, input_data, filenames_in=data_list)
elif config["rl_type"] == "with_evaluation":
for i in range(10):
print(f"RL learning, cycle {i}.")
ray.init(num_gpus=1, include_dashboard=False)
# remote objects creation
trainer_object = ray.remote(num_gpus=1)(trainer_ac.ac_agent_run)
eval_object = ray.remote(evaluator.Agent)
# initialization
workers_info = tools.GlobalVarActor.remote()
eval_agent = eval_object.remote(config, input_data, workers_info)
# remote call
trainer_future = trainer_object.remote(config, input_data, i, workers_info)
eval_future = eval_agent.evaluate.remote()
# getting results from remote functions
_ = ray.get(trainer_future)
_ = ray.get(eval_future)
time.sleep(1)
ray.shutdown()
time.sleep(5)
elif config["rl_type"] == "continuous_pg":
prev_n = 4
prev_prev_n = 3
prev_prev_prev_n = 2
for i in range(10):
print(f"PG learning, cycle {i}.")
current_n = i % 5 # current and prev to use
next_n = (i + 1) % 5 # next to collect
data_path = f"data/tfrecords/rl/storage_{next_n}/" # path to save in
fnames_fixed = glob.glob("data/tfrecords/rl/storage/*.tfrec")
fnames_curr = glob.glob(f"data/tfrecords/rl/storage_{current_n}/*.tfrec")
fnames_prev = glob.glob(f"data/tfrecords/rl/storage_{prev_n}/*.tfrec")
fnames_prev_prev = glob.glob(f"data/tfrecords/rl/storage_{prev_prev_n}/*.tfrec")
fnames_prev_prev_prev = glob.glob(f"data/tfrecords/rl/storage_{prev_prev_prev_n}/*.tfrec")
self_exp_n = len(fnames_curr) + len(fnames_prev) + len(fnames_prev_prev) + len(fnames_prev_prev_prev)
fnames_fixed = random.choices(fnames_fixed, k=self_exp_n)
filenames = fnames_fixed + fnames_prev + fnames_prev_prev + fnames_prev_prev_prev + fnames_curr
files = glob.glob("./data/weights/*.pickle")
if len(files) > 0:
with open(files[-1], 'rb') as datafile:
input_data = pickle.load(datafile)
raw_name = pathlib.Path(files[-1]).stem
print(f"Training and collecting from {raw_name}.pickle weights.")
# trainer_pg.pg_agent_run(config, input_data, None, filenames, 0)
ray.init(num_gpus=1, include_dashboard=False)
# remote objects creation
trainer_object = ray.remote(num_gpus=1)(trainer_pg.pg_agent_run)
eval_object = ray.remote(evaluator.Agent)
collector_object = ray.remote(collector.collect)
# initialization
workers_info = tools.GlobalVarActor.remote()
eval_agent = eval_object.remote(config, input_data, workers_info)
# remote call
trainer_future = trainer_object.remote(config, input_data, workers_info, filenames, i)
eval_future = eval_agent.evaluate.remote()
col_futures = [collector_object.remote(config, input_data, data_path, j, global_var_actor_out=workers_info)
for j in range(2)]
# getting results from remote functions
_ = ray.get(trainer_future)
_ = ray.get(eval_future)
_ = ray.get(col_futures)
time.sleep(1)
ray.shutdown()
prev_prev_prev_n = prev_prev_n
prev_prev_n = prev_n
prev_n = current_n
time.sleep(5)
elif config["rl_type"] == "from_scratch_pg":
amount_of_pieces = 20
previous_pieces = collections.deque([i + 2 for i in range(amount_of_pieces - 2)])
for i in range(100):
print(f"PG learning, cycle {i}.")
current_n = i % amount_of_pieces # current and prev to use
next_n = (i + 1) % amount_of_pieces # next to collect
data_path = f"data/tfrecords/rl/storage_{next_n}/" # path to save in
fnames_curr = glob.glob(f"data/tfrecords/rl/storage_{current_n}/*.tfrec")
fnames_prev_list = [glob.glob(f"data/tfrecords/rl/storage_{i}/*.tfrec") for i in previous_pieces]
fnames_prev_list = list(itertools.chain.from_iterable(fnames_prev_list))
filenames = fnames_prev_list + fnames_curr
files = glob.glob("./data/weights/*.pickle")
if len(files) > 0:
with open(files[-1], 'rb') as datafile:
input_data = pickle.load(datafile)
raw_name = pathlib.Path(files[-1]).stem
print(f"Training and collecting from {raw_name}.pickle weights.")
# trainer_pg.pg_agent_run(config, input_data, None, filenames, 0)
ray.init(num_gpus=1, include_dashboard=False)
# remote objects creation
trainer_object = ray.remote(num_gpus=1)(trainer_pg.pg_agent_run)
collector_object = ray.remote(collector.collect)
# initialization
workers_info = tools.GlobalVarActor.remote()
# remote call
trainer_future = trainer_object.remote(config, input_data, workers_info, filenames, i)
col_futures = [
collector_object.remote(config, input_data, data_path, j, global_var_actor_out=workers_info)
for j in range(2)]
# getting results from remote functions
_ = ray.get(trainer_future)
_ = ray.get(col_futures)
time.sleep(1)
ray.shutdown()
previous_pieces.rotate(-1)
previous_pieces[-1] = current_n
time.sleep(5)
elif config["rl_type"] == "continuous_ac_mc":
amount_of_pieces = 10
previous_pieces = collections.deque([i + 2 for i in range(amount_of_pieces - 2)])
for i in range(100):
print(f"PG learning, cycle {i}.")
current_n = i % amount_of_pieces # current and prev to use
next_n = (i + 1) % amount_of_pieces # next to collect
data_path = f"data/tfrecords/rl/storage_{next_n}/" # path to save in
files_to_delete = glob.glob(data_path + "*")
for f in files_to_delete:
os.remove(f)
fnames_fixed = glob.glob("data/tfrecords/rl/storage/*.tfrec")
fnames_curr = glob.glob(f"data/tfrecords/rl/storage_{current_n}/*.tfrec")
fnames_prev_list = [glob.glob(f"data/tfrecords/rl/storage_{i}/*.tfrec") for i in previous_pieces]
fnames_prev_list = list(itertools.chain.from_iterable(fnames_prev_list))
self_exp_n = len(fnames_curr) + len(fnames_prev_list)
n_fixed = min(int(self_exp_n / 2), len(fnames_fixed))
fnames_fixed = random.choices(fnames_fixed, k=n_fixed)
filenames = fnames_fixed + fnames_prev_list + fnames_curr
files = glob.glob("./data/weights/*.pickle")
if len(files) > 0:
raw_names = [int(pathlib.Path(file_name).stem) for file_name in files]
np_names = np.array(raw_names)
last_file_arg_number =
|
np.argmax(np_names)
|
numpy.argmax
|
import numpy as np
from SimPEG import Utils
def line(a, t, l):
"""
Linear interpolation from a to a + l
0 <= t <= 1
"""
return a + t * l
def weight(t, a1, l1, h1, a2, l2, h2):
"""
Edge basis functions
"""
x1 = line(a1, t, l1)
x2 = line(a2, t, l2)
w0 = (1. - x1 / h1) * (1. - x2 / h2)
w1 = (x1 / h1) * (1. - x2 / h2)
w2 = (1. - x1 / h1) * (x2 / h2)
w3 = (x1 / h1) * (x2 / h2)
return np.r_[w0, w1, w2, w3]
# TODO: Extend this when current is defined on cell-face
def getStraightLineCurrentIntegral(hx, hy, hz, ax, ay, az, bx, by, bz):
"""
Compute integral int(W . J dx^3) in brick of size hx x hy x hz
where W denotes the 12 local bilinear edge basis functions
and where J prescribes a unit line current
between points (ax,ay,az) and (bx,by,bz).
"""
# length of line segment
lx = bx - ax
ly = by - ay
lz = bz - az
l = np.sqrt(lx**2+ly**2+lz**2)
if l == 0:
# zero-length segment: no current path, so the source terms vanish
sx = np.zeros(4)
sy = np.zeros(4)
sz = np.zeros(4)
return sx, sy, sz
# integration using Simpson's rule
wx0 = weight(0., ay, ly, hy, az, lz, hz)
wx0_5 = weight(0.5, ay, ly, hy, az, lz, hz)
wx1 = weight(1., ay, ly, hy, az, lz, hz)
wy0 = weight(0., ax, lx, hx, az, lz, hz)
wy0_5 = weight(0.5, ax, lx, hx, az, lz, hz)
wy1 = weight(1., ax, lx, hx, az, lz, hz)
wz0 = weight(0., ax, lx, hx, ay, ly, hy)
wz0_5 = weight(0.5, ax, lx, hx, ay, ly, hy)
wz1 = weight(1., ax, lx, hx, ay, ly, hy)
sx = (wx0 + 4. * wx0_5 + wx1) * (lx / 6.)
sy = (wy0 + 4. * wy0_5 + wy1) * (ly / 6.)
sz = (wz0 + 4. * wz0_5 + wz1) * (lz / 6.)
return sx, sy, sz
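# --- illustrative sketch (not part of the original SimPEG helpers) ----------
# Minimal check of getStraightLineCurrentIntegral, assuming a 1 m brick and a
# unit current along its main diagonal. Because the four bilinear edge weights
# form a partition of unity, each of sx, sy, sz should sum to the matching
# component of the segment (1.0 here).
def _check_line_current_integral():
    sx, sy, sz = getStraightLineCurrentIntegral(1., 1., 1., 0., 0., 0., 1., 1., 1.)
    assert np.allclose([sx.sum(), sy.sum(), sz.sum()], [1., 1., 1.])
    return sx, sy, sz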
def findlast(x):
if x.sum() == 0:
return -1
else:
return np.arange(x.size)[x][-1]
def getSourceTermLineCurrentPolygon(xorig, hx, hy, hz, px, py, pz):
"""
Given a tensor product mesh with origin at (x0,y0,z0) and cell sizes
hx, hy, hz, compute the source vector for a unit current flowing along
the polygon with vertices px, py, pz.
The 3-D arrays sx, sy, sz contain the source terms for all x/y/z-edges
of the tensor product mesh.
Modified from matlab code:
getSourceTermLineCurrentPolygon(x0,y0,z0,hx,hy,hz,px,py,pz)
<NAME>, February 2014
"""
import numpy as np
# number of cells
nx = len(hx)
ny = len(hy)
nz = len(hz)
x0, y0, z0 = xorig[0], xorig[1], xorig[2]
# nodal grid
x = np.r_[x0, x0+np.cumsum(hx)]
y = np.r_[y0, y0+
|
np.cumsum(hy)
|
numpy.cumsum
|
import numpy as np
import pandas as pd
def calc_score_slow(predictions, targets):
max_submission = len(predictions.loc[predictions.index[0]])
scores = []
idcgs = {i: idcg(i) for i in range(1, max_submission+1)}
for id in targets.index:
target = targets.loc[id]['country']
preds = predictions.loc[id]
dcg = 0
for k, pred in enumerate(preds['country']):
if k > max_submission:
raise Exception('The entry %s has more destinations than required.' % id)
rel = 1 if pred == target else 0
dcg += float((pow(2, rel) - 1)) / np.log2((k+1)+1)
scores.append(dcg / idcgs[k])
return np.mean(scores)
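# --- illustrative sketch (not part of the original scoring module) ----------
# With binary relevance and one true destination per user, the DCG computed
# above reduces to 1 / log2(rank + 2), where rank is the 0-based position of
# the correct country among the (up to 5) predictions; the ideal DCG is 1, so
# NDCG equals DCG. A correct guess in third place scores 1 / log2(4) = 0.5.
def _ndcg_single_entry(preds, target):
    for k, pred in enumerate(preds):
        if pred == target:
            return 1.0 / np.log2(k + 2)
    return 0.0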
def calc_score_med(predictions, targets):
""" Merging predictions and targets for faster processing. """
aggregated_data = pd.DataFrame(index=predictions.index)
aggregated_data['predictions'] = predictions['country']
aggregated_data = aggregated_data.join(targets)
aggregated_data = aggregated_data.rename(columns={'country':'targets'})
""" Applying transformations. """
# Matching the predictions with the target.
groups = aggregated_data.groupby(aggregated_data.index)
match = lambda row: 1 if row['predictions'] == row['targets'] else 0
group_match = lambda group: group.apply(match, axis=1)
match_countries = groups.apply(group_match)
match_countries.index = aggregated_data.index
aggregated_data['match'] = match_countries
# Computing the coefficients it implies.
aggregated_data['rank'] = list(range(5)) * len(targets.index) # Repeats 0, 1, 2, 3, 4 for each entry
calc_coef = lambda row: float((pow(2, row['match']) - 1)) /
|
np.log2((row['rank']+1)+1)
|
numpy.log2
|
"""
Plot the kinetic reactions of biomass pyrolysis for the Ranzi 2014
kinetic scheme.
Reference:
<NAME>, 2014. Chemical Engineering Science, 110, pp 2-12.
"""
import numpy as np
import pandas as pd
# Parameters
# ------------------------------------------------------------------------------
# T = 773 # temperature for rate constants, K
# weight percent (%) cellulose, hemicellulose, lignin for beech wood
# wtcell = 48
# wthemi = 28
# wtlig = 24
# dt = 0.001 # time step, delta t
# tmax = 4 # max time, s
# t = np.linspace(0, tmax, num=int(tmax/dt)) # time vector
# nt = len(t) # total number of time steps
# Functions for Ranzi 2014 Kinetic Scheme
# ------------------------------------------------------------------------------
def ranzicell(wood, wt, T, dt, nt):
"""
Cellulose reactions CELL from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood concentration, kg/m^3
wt = weight percent wood as cellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main = mass fraction of main group, (-)
prod = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
cell = pw*(wt/100) # initial cellulose conc. in wood
g1 = np.zeros(nt) # G1
cella = np.zeros(nt) # CELLA
lvg = np.zeros(nt) # LVG
g4 = np.zeros(nt) # G4
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 4e7 * np.exp(-31000 / (R * T)) # CELL -> G1
K2 = 4e13 * np.exp(-45000 / (R * T)) # CELL -> CELLA
K3 = 1.8 * T * np.exp(-10000 / (R * T)) # CELLA -> LVG
K4 = 0.5e9 * np.exp(-29000 / (R * T)) # CELLA -> G4
# sum of moles in each group, mol
sumg1 = 11 # sum of G1
sumg4 = 4.08 # sum of G4
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * cell[i-1] # CELL -> G1
r2 = K2 * cell[i-1] # CELL -> CELLA
r3 = K3 * cella[i-1] # CELLA -> LVG
r4 = K4 * cella[i-1] # CELLA -> G4
cell[i] = cell[i-1] - (r1+r2)*dt # CELL
g1[i] = g1[i-1] + r1*dt # G1
cella[i] = cella[i-1] + r2*dt - (r3+r4)*dt # CELLA
lvg[i] = lvg[i-1] + r3*dt # LVG
g4[i] = g4[i-1] + r4*dt # G4
# store main groups in array
main = np.array([cell, g1, cella, lvg, g4])
# total group concentration per total moles in that group, (kg/m^3) / mol
fg1 = g1/sumg1 # fraction of G1
fg4 = g4/sumg4 # fraction of G4
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.16*fg4 # CO
prod[1] = 0.21*fg4 # CO2
prod[2] = 0.4*fg4 # CH2O
prod[3] = 0.02*fg4 # HCOOH
prod[5] = 0.1*fg4 # CH4
prod[6] = 0.2*fg4 # Glyox
prod[8] = 0.1*fg4 # C2H4O
prod[9] = 0.8*fg4 # HAA
prod[11] = 0.3*fg4 # C3H6O
prod[14] = 0.25*fg4 # HMFU
prod[15] = lvg # LVG
prod[18] = 0.1*fg4 # H2
prod[19] = 5*fg1 + 0.83*fg4 # H2O
prod[20] = 6*fg1 + 0.61*fg4 # Char
# return arrays of main groups and products as mass fraction, (-)
return main/wood, prod/wood
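# --- illustrative sketch (not part of the original script) ------------------
# Minimal call of ranzicell using the commented-out parameters near the top of
# this file (T = 773 K, 48 wt% cellulose, dt = 0.001 s, tmax = 4 s). The wood
# density of 700 kg/m^3 is an assumed value; main and prod come back divided
# by the wood density, i.e. as mass fractions.
def _run_cell_example():
    dt, tmax = 0.001, 4
    nt = int(tmax / dt)
    main, prod = ranzicell(wood=700, wt=48, T=773, dt=dt, nt=nt)
    return main, prod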
def ranzihemi(wood, wt, T, dt, nt):
"""
Hemicellulose reactions HCE from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of hemicellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
hce = pw*(wt/100) # initial hemicellulose conc. in wood
g1 =
|
np.zeros(nt)
|
numpy.zeros
|
# -*- coding: utf-8 -*-
from aiida_quantumespresso.calculations.cp import CpCalculation
from aiida_quantumespresso.parsers.raw_parser_cp import (
QEOutputParsingError, parse_cp_traj_stanzas, parse_cp_raw_output)
from aiida_quantumespresso.parsers.constants import (bohr_to_ang,
timeau_to_sec, hartree_to_ev)
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
from aiida.orm.data.folder import FolderData
from aiida.parsers.parser import Parser
from aiida.common.datastructures import calc_states
from aiida.orm.data.array.trajectory import TrajectoryData
import numpy
class CpParser(Parser):
"""
This class is the implementation of the Parser class for Cp.
"""
def __init__(self, calc):
"""
Initialize the instance of CpParser
:param calculation: calculation object.
"""
# check for valid input
if not isinstance(calc, CpCalculation):
raise QEOutputParsingError("Input calc must be a CpCalculation")
super(CpParser, self).__init__(calc)
def parse_with_retrieved(self, retrieved):
"""
Receives in input a dictionary of retrieved nodes.
Does all the logic here.
"""
from aiida.common.exceptions import InvalidOperation
import os, numpy
from distutils.version import LooseVersion
successful = True
# get the input structure
input_structure = self._calc.inp.structure
# load the input dictionary
# TODO: pass this input_dict to the parser. It might need it.
input_dict = self._calc.inp.parameters.get_dict()
# Check that the retrieved folder is there
try:
out_folder = retrieved[self._calc._get_linkname_retrieved()]
except KeyError:
self.logger.error("No retrieved folder found")
return False, ()
# check what is inside the folder
list_of_files = out_folder.get_folder_list()
# at least the stdout should exist
if not self._calc._OUTPUT_FILE_NAME in list_of_files:
successful = False
new_nodes_tuple = ()
self.logger.error("Standard output not found")
return successful, new_nodes_tuple
# if there is something more, I note it down, so as to call the raw parser
# with the right options
# look for xml
out_file = out_folder.get_abs_path(self._calc._OUTPUT_FILE_NAME)
xml_file = None
if self._calc._DATAFILE_XML_BASENAME in list_of_files:
xml_file = out_folder.get_abs_path(self._calc._DATAFILE_XML_BASENAME)
xml_counter_file = None
if self._calc._FILE_XML_PRINT_COUNTER in list_of_files:
xml_counter_file = out_folder.get_abs_path(
self._calc._FILE_XML_PRINT_COUNTER)
parsing_args = [out_file, xml_file, xml_counter_file]
# call the raw parsing function
out_dict, raw_successful = parse_cp_raw_output(*parsing_args)
successful = True if raw_successful else False
# parse the trajectory. Units in Angstrom, picoseconds and eV.
# append everything to the temporary dictionary raw_trajectory
expected_configs = None
raw_trajectory = {}
evp_keys = ['electronic_kinetic_energy', 'cell_temperature', 'ionic_temperature',
'scf_total_energy', 'enthalpy', 'enthalpy_plus_kinetic',
'energy_constant_motion', 'volume', 'pressure']
pos_vel_keys = ['cells', 'positions', 'times', 'velocities']
# set default null values
# Now prepare the reordering, as files in the xml are ordered
reordering = self._generate_sites_ordering(out_dict['species'],
out_dict['atoms'])
# =============== POSITIONS trajectory ============================
try:
with open(out_folder.get_abs_path(
'{}.pos'.format(self._calc._PREFIX))) as posfile:
pos_data = [l.split() for l in posfile]
# POSITIONS stored in angstrom
traj_data = parse_cp_traj_stanzas(num_elements=out_dict['number_of_atoms'],
splitlines=pos_data,
prepend_name='positions_traj',
rescale=bohr_to_ang)
# here initialize the dictionary. If the parsing of positions fails, though, I don't have anything
# out of the CP dynamics. Therefore, the calculation status is set to FAILED.
raw_trajectory['positions_ordered'] = self._get_reordered_array(traj_data['positions_traj_data'],
reordering)
raw_trajectory['times'] =
|
numpy.array(traj_data['positions_traj_times'])
|
numpy.array
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Imports
from numpy import array, array_equal, allclose, zeros
from nisqai.data._cdata import CData, LabeledCData, random_data, get_iris_setosa_data, get_mnist_data
import unittest
class DataTest(unittest.TestCase):
"""Unit tests for CData and LabeledCData."""
def test_basic_cdata(self):
"""Creates a CData object and makes sure the dimensions are correct."""
data =
|
array([[1, 0, 0], [0, 1, 0]])
|
numpy.array
|
import time
from torch.utils import data
import glob
import datetime
import pandas as pd
import scipy.misc as m
from bs4 import BeautifulSoup
import numpy as np
import torch
from torch.nn import functional as F
import pickle
import os
from skimage.io import imread
from scipy.ndimage.filters import gaussian_filter
import json
import glob
from torch.utils import data
import glob
import datetime
import pandas as pd
import scipy.misc as m
from bs4 import BeautifulSoup
import numpy as np
import torch
import imageio
from scipy.io import loadmat
# import utils_commons as ut
import cv2
from skimage.segmentation import mark_boundaries
from tqdm import tqdm
from skimage.segmentation import slic
from torchvision import transforms
from PIL import Image
# import visualize as vis
import torchvision
import torchvision.transforms.functional as FT
import glob
import h5py
from scipy.misc import imsave
import misc as ms
def save_images_labels(path):
img_folder = h5py.File(path +
"CVPPP2017_training_images.h5",
'r')["A1"]
gt_folder = h5py.File(path+"CVPPP2017_training_truth.h5",
'r')["A1"]
img_names_train = list(img_folder.keys())
# for name in img_names:
# image = np.array(img_folder[name]["rgb"])[:,:,:3]
# points = np.array(img_folder[name]["centers"])
# img_path = path + "/images/{}.png".format(name)
# points_path = path + "/labels/{}_points.png".format(name)
# ms.imsave(img_path, image)
# ms.imsave(points_path, points)
# assert (ms.imread(img_path) == image).mean()==1
# assert (ms.imread(points_path) == points).mean()==1
# maskObjects_path = path + "/labels/{}_maskObjects.png".format(name)
# maskObjects = np.array(gt_folder[name]["label"])
# ms.imsave(maskObjects_path, maskObjects)
# assert (ms.imread(maskObjects_path) == maskObjects).mean()==1
print("| DONE TRAIN", len(img_names_train))
# Test
img_folder = h5py.File(path+
"CVPPP2017_testing_images.h5",
'r')["A1"]
img_names = list(img_folder.keys())
assert np.in1d(img_names_train, img_names).mean() == 0
for name in img_names:
image = np.array(img_folder[name]["rgb"])[:,:,:3]
img_path = path + "/images/{}.png".format(name)
ms.imsave(img_path, image)
assert (ms.imread(img_path) == image).mean()==1
print("| DONE TEST", len(img_names))
from datasets import base_dataset
class Plants(base_dataset.BaseDataset):
def __init__(self,root,split=None,
transform_function=None):
super().__init__()
self.split = split
self.path = "/mnt/datasets/public/issam/sbd/"
self.proposals_path = self.path + "/ProposalsSharp/"
img_folder = h5py.File(self.path +
"CVPPP2017_training_images.h5",
'r')["A1"]
self.img_names = list(img_folder.keys())
self.img_names.sort()
self.annList_path = "/mnt/datasets/public/issam/sbd/annotations/val_gt_annList.json"
# save_images_labels(self.path)
self.transform_function = transform_function()
# save_images_labels(self.path)
if split == "train":
self.img_indices = np.arange(28,len(self.img_names))
elif split == "val":
self.img_indices = np.arange(28)
elif split == "test":
self.img_folder = h5py.File(self.path+
"CVPPP2017_testing_images.h5",
'r')["A1"]
self.img_names = list(self.img_folder.keys())
self.img_indices = np.arange(len(self.img_names))
else:
raise ValueError("split does not exist...")
self.n_images = len(self.img_indices)
self.n_classes = 2
self.categories = [{'supercategory': 'none',
"id":1,
"name":"plant"}]
# base = "/mnt/projects/counting/Saves/main/"
# self.lcfcn_path = base + "dataset:Plants_model:Res50FCN_metric:mRMSE_loss:water_loss_B_config:basic/"
# import ipdb; ipdb.set_trace() # breakpoint 20711b9f //
# self.pointDict = ms.load_pkl(self.lcfcn_path+"lcfcn_points/Pascal2012.pkl")
def __len__(self):
return self.n_images
def __getitem__(self, index):
name = self.img_names[self.img_indices[index]]
image = ms.imread(self.path + "/images/{}.png".format(name))
h, w = image.shape[:2]
if self.split in ["test"]:
points =
|
np.zeros((h, w), "uint8")
|
numpy.zeros
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Extract from notebook for Serving Optimization """
from __future__ import print_function
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import numpy as np
import tensorflow as tf
from datetime import datetime
import requests
import sys
import json
BATCH_SIZE = 100
DISCOVERY_URL = 'https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json'
PROJECT = 'lramsey-goog-com-csa-ml'
MODEL_NAME = 'mnist_classifier'
credentials = GoogleCredentials.get_application_default()
api = discovery.build(
'ml', 'v1',
credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL
)
def load_mnist_data():
mnist = tf.contrib.learn.datasets.load_dataset('mnist')
train_data = mnist.train.images
train_labels =
|
np.asarray(mnist.train.labels, dtype=np.int32)
|
numpy.asarray
|
import os
import random
import numpy as np
import torch
import math
from scipy.spatial.transform import Rotation as sciR
# from vgtk.pc.transform import *
'''
Point cloud augmentation
Only numpy functions are included for now
'''
def R_from_euler_np(angles):
'''
angles: [(b, )3]
'''
R_x = np.array([[1, 0, 0 ],
[0, math.cos(angles[0]), -math.sin(angles[0]) ],
[0, math.sin(angles[0]), math.cos(angles[0]) ]
])
R_y = np.array([[math.cos(angles[1]), 0, math.sin(angles[1]) ],
[0, 1, 0 ],
[-math.sin(angles[1]), 0, math.cos(angles[1]) ]
])
R_z = np.array([[math.cos(angles[2]), -math.sin(angles[2]), 0],
[math.sin(angles[2]), math.cos(angles[2]), 0],
[0, 0, 1]
])
return np.dot(R_z, np.dot( R_y, R_x ))
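# --- illustrative sketch (not part of the original vgtk module) -------------
# Quick sanity check of R_from_euler_np: a 90-degree rotation about z maps the
# x axis onto the y axis (up to floating point error).
def _check_euler_rotation():
    R = R_from_euler_np([0.0, 0.0, np.pi / 2])
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
    return R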
def rotate_point_cloud_90(data, normal = None):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
Nx3 array, original point clouds
Return:
Nx3 array, rotated point clouds
"""
rotated_data = np.zeros(data.shape, dtype=np.float32)
rotation_angle = np.random.randint(low=0, high=4) * (np.pi/2.0)
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
rotated_data = np.dot(data.reshape((-1, 3)), rotation_matrix)
rotated_normal = np.dot(normal.reshape((-1, 3)), rotation_matrix) if normal is not None else None
return rotated_data, rotated_normal, rotation_matrix
def rotate_point_cloud(data, R = None, max_degree = None):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
Nx3 array, original point clouds
R:
3x3 array, optional Rotation matrix used to rotate the input
max_degree:
float, optional maximum DEGREE to randomly generate rotation
Return:
Nx3 array, rotated point clouds
"""
# rotated_data = np.zeros(data.shape, dtype=np.float32)
if R is not None:
rotation_angle = R
elif max_degree is not None:
rotation_angle = np.random.randint(0, max_degree, 3) * np.pi / 180.0
else:
rotation_angle = sciR.random().as_matrix() if R is None else R
if isinstance(rotation_angle, list) or rotation_angle.ndim == 1:
rotation_matrix = R_from_euler_np(rotation_angle)
else:
assert rotation_angle.shape[0] >= 3 and rotation_angle.shape[1] >= 3
rotation_matrix = rotation_angle[:3, :3]
if data is None:
return None, rotation_matrix
rotated_data = np.dot(rotation_matrix, data.reshape((-1, 3)).T)
return rotated_data.T, rotation_matrix
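# --- illustrative sketch (not part of the original vgtk module) -------------
# rotate_point_cloud accepts an explicit rotation R, a maximum random angle in
# degrees, or neither (fully random via scipy). The 100-point cloud below is
# hypothetical and used purely for illustration.
def _rotate_examples():
    pc = np.random.rand(100, 3).astype(np.float32)
    pc_rot, R = rotate_point_cloud(pc)                   # fully random rotation
    pc_small, _ = rotate_point_cloud(pc, max_degree=10)  # random, below 10 degrees per axis
    pc_same, _ = rotate_point_cloud(pc, R=R)             # reuse a known rotation
    return pc_rot, pc_small, pc_same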
def batch_rotate_point_cloud(data, R = None):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original point clouds (torch tensor)
R: numpy data
Return:
BxNx3 array, rotated point clouds
"""
rotation_angle = sciR.random().as_matrix() if R is None else R
if isinstance(rotation_angle, list) or rotation_angle.ndim == 1:
rotation_matrix = R_from_euler_np(rotation_angle)
else:
assert rotation_angle.shape[0] >= 3 and rotation_angle.shape[1] >= 3
rotation_matrix = rotation_angle[:3, :3]
# since we are using pytorch...
rotation_matrix = torch.from_numpy(rotation_matrix).to(data.device)
rotation_matrix = rotation_matrix[None].repeat(data.shape[0],1,1)
# Bx3x3, Bx3xN ->Bx3xN
rotated_data = torch.matmul(rotation_matrix.double(), data.transpose(1,2).double())
return rotated_data.transpose(1,2).contiguous().float(), rotation_matrix.float()
def rotate_point_cloud_with_normal(pc, surface_normal):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
Nx3 array, original point clouds
Return:
Nx3 array, rotated point clouds
"""
rotation_angle = np.random.uniform() * 2 * np.pi
# rotation_angle = np.random.randint(low=0, high=12) * (2*np.pi / 12.0)
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, 0, sinval],
[0, 1, 0],
[-sinval, 0, cosval]])
rotated_pc = np.dot(pc, rotation_matrix)
rotated_surface_normal =
|
np.dot(surface_normal, rotation_matrix)
|
numpy.dot
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.utils.data as data
import json
import h5py
import os
import numpy as np
import random
import copy
class HybridLoader:
"""
If db_path is a directory, then use normal file loading.
The loading method depends on the extension.
"""
def __init__(self, db_path, ext):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(x,encoding='latin1')
else:
if "sg" or "graph" in db_path:
self.loader = lambda x: np.load(x,allow_pickle=True,encoding='latin1')['feat'].tolist() # SG output, should be a dict
else:
self.loader = lambda x:
|
np.load(x,encoding='latin1')
|
numpy.load
|
#/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Author: frederic $
# $Date: 2016/07/12 13:50:29 $
# $Id: tide_funcs.py,v 1.4 2016/07/12 13:50:29 frederic Exp $
#
from __future__ import print_function, division
import numpy as np
import sys
import os
import pandas as pd
import json
import copy
# ---------------------------------------- Global constants -------------------------------------------
MAXLINES = 10000000
# ----------------------------------------- Conditional imports ---------------------------------------
try:
import nibabel as nib
nibabelexists = True
except ImportError:
nibabelexists = False
# ---------------------------------------- NIFTI file manipulation ---------------------------
if nibabelexists:
def readfromnifti(inputfile):
r"""Open a nifti file and read in the various important parts
Parameters
----------
inputfile : str
The name of the nifti file.
Returns
-------
nim : nifti image structure
nim_data : array-like
nim_hdr : nifti header
thedims : int array
thesizes : float array
"""
if os.path.isfile(inputfile):
inputfilename = inputfile
elif os.path.isfile(inputfile + '.nii.gz'):
inputfilename = inputfile + '.nii.gz'
elif os.path.isfile(inputfile + '.nii'):
inputfilename = inputfile + '.nii'
else:
print('nifti file', inputfile, 'does not exist')
sys.exit()
nim = nib.load(inputfilename)
nim_data = nim.get_fdata()
nim_hdr = nim.header.copy()
thedims = nim_hdr['dim'].copy()
thesizes = nim_hdr['pixdim'].copy()
return nim, nim_data, nim_hdr, thedims, thesizes
# dims are the array dimensions along each axis
def parseniftidims(thedims):
r"""Split the dims array into individual elements
Parameters
----------
thedims : int array
The nifti dims structure
Returns
-------
nx, ny, nz, nt : int
Number of points along each dimension
"""
return thedims[1], thedims[2], thedims[3], thedims[4]
# sizes are the mapping between voxels and physical coordinates
def parseniftisizes(thesizes):
r"""Split the size array into individual elements
Parameters
----------
thesizes : float array
The nifti voxel size structure
Returns
-------
dimx, dimy, dimz, dimt : float
Scaling from voxel number to physical coordinates
"""
return thesizes[1], thesizes[2], thesizes[3], thesizes[4]
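    # --- illustrative sketch (not part of the original tide_funcs module) --
    # Typical use of the three helpers above, assuming a 4-D functional image;
    # 'func.nii.gz' is a hypothetical file name, not one referenced elsewhere
    # in this module.
    def _example_read_nifti(inputfile='func.nii.gz'):
        nim, data, hdr, dims, sizes = readfromnifti(inputfile)
        nx, ny, nz, nt = parseniftidims(dims)
        dimx, dimy, dimz, dimt = parseniftisizes(sizes)
        return data.reshape((nx * ny * nz, nt)), dimt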
def savetonifti(thearray, theheader, thename):
r""" Save a data array out to a nifti file
Parameters
----------
thearray : array-like
The data array to save.
theheader : nifti header
A valid nifti header
thename : str
The name of the nifti file to save
Returns
-------
"""
outputaffine = theheader.get_best_affine()
qaffine, qcode = theheader.get_qform(coded=True)
saffine, scode = theheader.get_sform(coded=True)
if theheader['magic'] == 'n+2':
output_nifti = nib.Nifti2Image(thearray, outputaffine, header=theheader)
suffix = '.nii'
else:
output_nifti = nib.Nifti1Image(thearray, outputaffine, header=theheader)
suffix = '.nii.gz'
output_nifti.set_qform(qaffine, code=int(qcode))
output_nifti.set_sform(saffine, code=int(scode))
thedtype = thearray.dtype
if thedtype == np.uint8:
theheader.datatype = 2
elif thedtype == np.int16:
theheader.datatype = 4
elif thedtype == np.int32:
theheader.datatype = 8
elif thedtype == np.float32:
theheader.datatype = 16
elif thedtype == np.complex64:
theheader.datatype = 32
elif thedtype == np.float64:
theheader.datatype = 64
elif thedtype == np.int8:
theheader.datatype = 256
elif thedtype == np.uint16:
theheader.datatype = 512
elif thedtype == np.uint32:
theheader.datatype = 768
elif thedtype == np.int64:
theheader.datatype = 1024
elif thedtype == np.uint64:
theheader.datatype = 1280
elif thedtype == np.float128:
theheader.datatype = 1536
elif thedtype == np.complex128:
theheader.datatype = 1792
elif thedtype == np.complex256:
theheader.datatype = 2048
else:
print('type', thedtype, 'is not legal')
sys.exit()
output_nifti.to_filename(thename + suffix)
output_nifti = None
def checkifnifti(filename):
r"""Check to see if a file name is a valid nifti name.
Parameters
----------
filename : str
The file name
Returns
-------
isnifti : bool
True if name is a valid nifti file name.
"""
if filename.endswith(".nii") or filename.endswith(".nii.gz"):
return True
else:
return False
def niftisplitext(filename):
r"""Split nifti filename into name base and extensionn.
Parameters
----------
filename : str
The file name
Returns
-------
name : str
Base name of the nifti file.
ext : str
Extension of the nifti file.
"""
firstsplit = os.path.splitext(filename)
secondsplit = os.path.splitext(firstsplit[0])
if secondsplit[1] is not None:
return secondsplit[0], secondsplit[1] + firstsplit[1]
else:
return firstsplit[0], firstsplit[1]
def niftisplit(inputfile, outputroot, axis=3):
infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
theheader = copy.deepcopy(infile_hdr)
numpoints = infiledims[axis + 1]
print(infiledims)
theheader['dim'][axis + 1] = 1
for i in range(numpoints):
if infiledims[0] == 5:
if axis == 0:
thisslice = infile_data[i:i + 1, :, :, :, :]
elif axis == 1:
thisslice = infile_data[:, i:i + 1, :, :, :]
elif axis == 2:
thisslice = infile_data[:, :, i:i + 1, :, :]
elif axis == 3:
thisslice = infile_data[:, :, :, i:i + 1, :]
elif axis == 4:
thisslice = infile_data[:, :, :, :, i:i + 1]
else:
print('illegal axis')
sys.exit()
elif infiledims[0] == 4:
if axis == 0:
thisslice = infile_data[i:i + 1, :, :, :]
elif axis == 1:
thisslice = infile_data[:, i:i + 1, :, :]
elif axis == 2:
thisslice = infile_data[:, :, i:i + 1, :]
elif axis == 3:
thisslice = infile_data[:, :, :, i:i + 1]
else:
print('illegal axis')
sys.exit()
savetonifti(thisslice, theheader, outputroot + str(i).zfill(4))
def niftimerge(inputlist, outputname, writetodisk=True, axis=3, returndata=False, debug=False):
inputdata = []
for thefile in inputlist:
if debug:
print('reading', thefile)
infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(thefile)
if infiledims[0] == 3:
inputdata.append(infile_data.reshape((infiledims[1], infiledims[2], infiledims[3], 1)) + 0.0)
else:
inputdata.append(infile_data + 0.0)
theheader = copy.deepcopy(infile_hdr)
theheader['dim'][axis + 1] = len(inputdata)
output_data = np.concatenate(inputdata, axis=axis)
if writetodisk:
savetonifti(output_data, theheader, outputname)
if returndata:
return output_data, infile_hdr
def niftiroi(inputfile, outputfile, startpt, numpoints):
print(inputfile, outputfile, startpt, numpoints)
infile, infile_data, infile_hdr, infiledims, infilesizes = readfromnifti(inputfile)
theheader = copy.deepcopy(infile_hdr)
theheader['dim'][4] = numpoints
if infiledims[0] == 5:
output_data = infile_data[:, :, :, startpt:startpt + numpoints, :]
else:
output_data = infile_data[:, :, :, startpt:startpt + numpoints]
savetonifti(output_data, theheader, outputfile)
def checkiftext(filename):
r"""Check to see if the specified filename ends in '.txt'
Parameters
----------
filename : str
The file name
Returns
-------
istext : bool
True if filename ends with '.txt'
"""
if filename.endswith(".txt"):
return True
else:
return False
def getniftiroot(filename):
r"""Strip a nifti filename down to the root with no extensions
Parameters
----------
filename : str
The file name to strip
Returns
-------
strippedname : str
The file name without any nifti extensions
"""
if filename.endswith(".nii"):
return filename[:-4]
elif filename.endswith(".nii.gz"):
return filename[:-7]
else:
return filename
def fmritimeinfo(niftifilename):
r"""Retrieve the repetition time and number of timepoints from a nifti file
Parameters
----------
niftifilename : str
The name of the nifti file
Returns
-------
tr : float
The repetition time, in seconds
timepoints : int
The number of points along the time axis
"""
nim = nib.load(niftifilename)
hdr = nim.header.copy()
thedims = hdr['dim'].copy()
thesizes = hdr['pixdim'].copy()
if hdr.get_xyzt_units()[1] == 'msec':
tr = thesizes[4] / 1000.0
else:
tr = thesizes[4]
timepoints = thedims[4]
return tr, timepoints
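# Worked example (assumed header values, not from the source): if the header stores
# pixdim[4] == 2000.0 and get_xyzt_units()[1] == 'msec', fmritimeinfo reports
# tr == 2.0 seconds; with 'sec' units the stored pixdim[4] is returned unchanged.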
def checkspacematch(hdr1, hdr2):
r"""Check the headers of two nifti files to determine if the cover the same volume at the same resolution
Parameters
----------
hdr1 : nifti header structure
The header of the first file
hdr2 : nifti header structure
The header of the second file
Returns
-------
ismatched : bool
True if the spatial dimensions and resolutions of the two files match.
"""
dimmatch = checkspaceresmatch(hdr1['pixdim'], hdr2['pixdim'])
resmatch = checkspacedimmatch(hdr1['dim'], hdr2['dim'])
return dimmatch and resmatch
def checkspaceresmatch(sizes1, sizes2):
r"""Check the spatial pixdims of two nifti files to determine if they have the same resolution
Parameters
----------
sizes1 : float array
The size array from the first nifti file
sizes2 : float array
The size array from the second nifti file
Returns
-------
ismatched : bool
True if the spatial resolutions of the two files match.
"""
for i in range(1, 4):
if sizes1[i] != sizes2[i]:
print("File spatial resolutions do not match")
print("sizeension ", i, ":", sizes1[i], "!=", sizes2[i])
return False
else:
return True
def checkspacedimmatch(dims1, dims2):
r"""Check the dimension arrays of two nifti files to determine if the cover the same number of voxels in each dimension
Parameters
----------
dims1 : int array
The dimension array from the first nifti file
dims2 : int array
The dimension array from the second nifti file
Returns
-------
ismatched : bool
True if the spatial dimensions of the two files match.
"""
for i in range(1, 4):
if dims1[i] != dims2[i]:
print("File spatial voxels do not match")
print("dimension ", i, ":", dims1[i], "!=", dims2[i])
return False
else:
return True
def checktimematch(dims1, dims2, numskip1=0, numskip2=0):
r"""Check the dimensions of two nifti files to determine if the cover the same number of timepoints
Parameters
----------
dims1 : int array
The dimension array from the first nifti file
dims2 : int array
The dimension array from the second nifti file
numskip1 : int, optional
Number of timepoints skipped at the beginning of file 1
numskip2 : int, optional
Number of timepoints skipped at the beginning of file 2
Returns
-------
ismatched : bool
True if the time dimension of the two files match.
"""
if (dims1[4] - numskip1) != (dims2[4] - numskip2):
print("File numbers of timepoints do not match")
print("dimension ", 4, ":", dims1[4],
"(skip ", numskip1, ") !=",
dims2[4],
" (skip ", numskip2, ")")
return False
else:
return True
# --------------------------- non-NIFTI file I/O functions ------------------------------------------
def checkifparfile(filename):
r"""Checks to see if a file is an FSL style motion parameter file
Parameters
----------
filename : str
The name of the file in question.
Returns
-------
isparfile : bool
True if filename ends in '.par', False otherwise.
"""
if filename.endswith(".par"):
return True
else:
return False
def readparfile(filename):
r"""Checks to see if a file is an FSL style motion parameter file
Parameters
----------
filename : str
The name of the file in question.
Returns
-------
motiondict: dict
All the timecourses in the file, keyed by name
"""
labels = ['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ']
motiontimeseries = readvecs(filename)
motiondict = {}
for j in range(0, 6):
motiondict[labels[j]] = 1.0 * motiontimeseries[j, :]
return motiondict
def readmotion(filename, colspec=None):
r"""Reads motion regressors from filename (from the columns specified in colspec, if given)
Parameters
----------
filename : str
The name of the file in question.
colspec: str, optional
The column numbers from the input file to use for the 6 motion regressors
Returns
-------
motiondict: dict
All the timecourses in the file, keyed by name
"""
labels = ['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ']
motiontimeseries = readvecs(filename, colspec=colspec)
if motiontimeseries.shape[0] != 6:
print('readmotion: expected 6 motion regressors, got', motiontimeseries.shape[0])
sys.exit()
motiondict = {}
for j in range(0, 6):
motiondict[labels[j]] = 1.0 * motiontimeseries[j, :]
return motiondict
def calcmotregressors(motiondict, start=0, end=-1, position=True, deriv=True, derivdelayed=False):
r"""Calculates various motion related timecourses from motion data dict, and returns an array
Parameters
----------
motiondict: dict
A dictionary of the 6 motion direction vectors
Returns
-------
motionregressors: array
All the derivative timecourses to use in a numpy array
"""
labels = ['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ']
numpoints = len(motiondict[labels[0]])
if end == -1:
end = numpoints - 1
if (0 <= start <= numpoints - 1) and (start < end + 1):
numoutputpoints = end - start + 1
numoutputregressors = 0
if position:
numoutputregressors += 6
if deriv:
numoutputregressors += 6
if derivdelayed:
numoutputregressors += 6
if numoutputregressors > 0:
outputregressors = np.zeros((numoutputregressors, numoutputpoints), dtype=float)
else:
print('no output types selected - exiting')
sys.exit()
activecolumn = 0
if position:
for thelabel in labels:
outputregressors[activecolumn, :] = motiondict[thelabel][start:end + 1]
activecolumn += 1
if deriv:
for thelabel in labels:
outputregressors[activecolumn, 1:] = np.diff(motiondict[thelabel][start:end + 1])
activecolumn += 1
if derivdelayed:
for thelabel in labels:
outputregressors[activecolumn, 2:] = np.diff(motiondict[thelabel][start:end + 1])[1:]
activecolumn += 1
return outputregressors
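# Minimal sketch (toy all-zero motion, assumed values): with 200-point timecourses
# and the defaults (position=True, deriv=True, derivdelayed=False) the output has
# 12 rows: 6 positions followed by 6 first differences (whose first column stays 0).
_demo_motion = {lab: np.zeros(200) for lab in ['X', 'Y', 'Z', 'RotX', 'RotY', 'RotZ']}
_demo_regressors = calcmotregressors(_demo_motion)  # shape (12, 200)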
def sliceinfo(slicetimes, tr):
# find out what timepoints we have, and their spacing
sortedtimes = np.sort(slicetimes)
diffs = sortedtimes[1:] - sortedtimes[0:-1]
minstep = np.max(diffs)
numsteps = int(np.round(tr / minstep, 0))
sliceoffsets = np.around(slicetimes / minstep)  # api: numpy.around
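# Worked example for sliceinfo (assumed 4-slice interleaved acquisition with tr = 2.0 s):
_demo_slicetimes = np.array([0.0, 1.0, 0.5, 1.5])
_demo_minstep = np.max(np.diff(np.sort(_demo_slicetimes)))    # 0.5
_demo_numsteps = int(np.round(2.0 / _demo_minstep, 0))        # 4
_demo_offsets = np.around(_demo_slicetimes / _demo_minstep)   # [0., 2., 1., 3.]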
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
from auspex.log import logger
from numpy.fft import fft
from scipy.linalg import svd, eig, inv, pinv
"""
Produces initial guess for damped exponential using SVD method described in:
<NAME>. (1993). Enhanced resolution based on minimum variance estimation and exponential data modeling. Signal Processing, 33(3), 333-355. doi:10.1016/0165-1684(93)90130-3
"""
def hilbert(signal):
""" Construct the Hilbert transform of the signal via the Fast Fourier Transform.
This sets the negative frequency components to zero and multiplies positive frequencies by 2.
This is necessary since the fitted model refers only to positive frequency components e^(jwt)
"""
spectrum = np.fft.fft(signal)
n = len(signal)
midpoint = int(np.ceil(n/2))
kernel = np.zeros(n)
kernel[0] = 1
if n%2 == 0:
kernel[midpoint] = 1
kernel[1:midpoint] = 2
return np.fft.ifft(kernel * spectrum)
def hankel(signal,M):
# Create the Hankel matrix
N = len(signal)
L = N-M+1
H = np.zeros((L, M), dtype=np.complex128)
for ct in range(M):
H[:,ct] = signal[ct:ct+L]
return H
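# Quick check (toy signal, assumed values): a length-5 signal with M = 3 gives an
# L x M = 3 x 3 Hankel matrix whose columns are successive shifted windows.
_demo_H = hankel(np.arange(5), 3)
# [[0, 1, 2],
#  [1, 2, 3],
#  [2, 3, 4]]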
def cleandata(signal,M,K):
""" Clean data iteratively using minimum variance (MV) estimation described in:
<NAME>. (1993). Enhanced resolution based on minimum variance estimation and exponential data modeling. Signal Processing, 33(3), 333-355. doi:10.1016/0165-1684(93)90130-3
"""
L = len(signal)-M+1
H = hankel(signal,M)
#Try to separate the signal and noise subspaces via the SVD
#Note: numpy/scipy svd returns matrix = U x S x Vh (Vh already conjugate-transposed), not matrix = U x S x V*
U,S,V = svd(H, False)
#Reconstruct the approximate Hankel matrix with the first K singular values
#Here we can iterate and modify the singular values
S_k = np.diag(S[:K])
#Estimate the variance from the rest of the singular values
varEst = (1/((M-K)*L)) * np.sum(S[K:]**2)
Sfilt = np.matmul(S_k**2 - L*varEst*np.eye(K), inv(S_k))
HK = np.matmul(np.matmul(U[:,:K], Sfilt), V[:K,:])
#Reconstruct the data from the averaged anti-diagonals
cleanedData = np.zeros(len(signal), dtype=np.complex128)
tmpMat = np.flip(HK,1)
idx = -L+1
for ct in range(len(signal)-1,-1,-1):
cleanedData[ct] = np.mean(np.diag(tmpMat,idx))
idx += 1
#Iterate until variance of noise is less than signal eigenvalues, which should be the case for high SNR data
if L*varEst > min(np.diagonal(S_k))**2:
logger.warning("Noise variance greater than signal amplitudes. Consider taking more averages. Iterating cleanup ...")
cleanedData = cleandata(hilbert(cleanedData),M,K)
return cleanedData
def KT_estimation(data, times, order):
"""KT estimation of periodic signal components."""
K = order
N = len(data)
time_step = times[1]-times[0]
#clean data with M = K+1 so that L>>M is guaranteed as per MV estimation
cleanedData = cleandata(hilbert(data),order+1,order)
#Create a cleaned Hankel matrix, here the matrix is constructed so that L~M
cleanedAnalyticSig = hilbert(cleanedData)
cleanedH = hankel(cleanedAnalyticSig,(N//2) - 1)
#Compute Q with total least squares (TLS)
#<NAME> (1996) Numerical Methods for Least Squares Problems
#UK1*Q = UK2
U = svd(cleanedH, False)[0]
UK = U[:,0:K]
tmpMat = np.hstack((UK[:-1,:],UK[1:,:]))
V = svd(tmpMat, False)[2].T.conj()
n = np.size(UK,1)  # api: numpy.size
__version__ = '1.17.2'
import numpy as np
import matplotlib.pyplot as plt
import dynaphopy.projection as projection
import dynaphopy.parameters as parameters
import dynaphopy.interface.phonopy_link as pho_interface
import dynaphopy.interface.iofile as reading
import dynaphopy.analysis.energy as energy
import dynaphopy.analysis.fitting as fitting
import dynaphopy.analysis.modes as modes
import dynaphopy.analysis.coordinates as trajdist
import dynaphopy.analysis.thermal_properties as thm
from dynaphopy.power_spectrum import power_spectrum_functions
from scipy import integrate
class Quasiparticle:
def __init__(self,
dynamic,
last_steps=None,
vc=None):
self._dynamic = dynamic
self._vc = vc
self._eigenvectors = None
self._frequencies = None
self._vq = None
self._power_spectrum_phonon = None
self._power_spectrum_wave_vector = None
self._power_spectrum_direct = None
self._power_spectrum_partials = None
self._bands = None
self._renormalized_bands = None
self._renormalized_force_constants = None
self._commensurate_points_data = None
self._temperature = None
self._force_constants_qha = None
self._parameters = parameters.Parameters()
self.crop_trajectory(last_steps)
# print('Using {0} time steps for calculation'.format(len(self.dynamic.velocity)))
# Crop trajectory
def crop_trajectory(self, last_steps):
if self._vc is None:
self._dynamic.crop_trajectory(last_steps)
print("Using {0} steps".format(len(self._dynamic.velocity)))
else:
if last_steps is not None:
self._vc = self._vc[-last_steps:, :, :]
print("Using {0} steps".format(len(self._vc)))
# Memory clear methods
def full_clear(self):
self._eigenvectors = None
self._frequencies = None
self._vc = None
self._vq = None
self._power_spectrum_direct = None
self._power_spectrum_wave_vector = None
self._power_spectrum_phonon = None
def power_spectra_clear(self):
self._power_spectrum_phonon = None
self._power_spectrum_wave_vector = None
self._power_spectrum_direct = None
self.force_constants_clear()
def force_constants_clear(self):
self._renormalized_force_constants = None
self._commensurate_points_data = None
self.bands_clear()
def bands_clear(self):
self._bands = None
self._renormalized_bands = None
# Properties
@property
def dynamic(self):
return self._dynamic
@property
def parameters(self):
return self._parameters
def set_NAC(self, NAC):
self._bands = None
self.parameters.use_NAC = NAC
def write_to_xfs_file(self, file_name):
reading.write_xsf_file(file_name, self.dynamic.structure)
def save_velocity_hdf5(self, file_name, save_trajectory=True):
if save_trajectory:
trajectory = self.dynamic.trajectory
else:
trajectory = None
reading.save_data_hdf5(file_name,
self.dynamic.get_time(),
self.dynamic.get_supercell_matrix(),
velocity=self.dynamic.velocity,
trajectory=trajectory)
print("Velocity saved in file " + file_name)
def save_vc_hdf5(self, file_name):
reading.save_data_hdf5(file_name,
self.dynamic.get_time(),
self.dynamic.get_supercell_matrix(),
vc=self.get_vc(),
reduced_q_vector=self.get_reduced_q_vector())
print("Projected velocity saved in file " + file_name)
def set_number_of_mem_coefficients(self, coefficients):
self.power_spectra_clear()
self.parameters.number_of_coefficients_mem = coefficients
def set_projection_onto_atom_type(self, atom_type):
if atom_type in range(self.dynamic.structure.get_number_of_primitive_atoms()):
self.parameters.project_on_atom = atom_type
else:
print('Atom type {} does not exist'.format(atom_type))
exit()
def _set_frequency_range(self, frequency_range):
if not np.array_equiv(np.array(frequency_range), np.array(self.parameters.frequency_range)):
self.power_spectra_clear()
self.parameters.frequency_range = frequency_range
def set_spectra_resolution(self, resolution):
limits = [self.get_frequency_range()[0], self.get_frequency_range()[-1]]
self.parameters.spectrum_resolution = resolution
self._set_frequency_range(np.arange(limits[0], limits[1] + resolution, resolution))
def set_frequency_limits(self, limits):
resolution = self.parameters.spectrum_resolution
self._set_frequency_range(np.arange(limits[0], limits[1] + resolution, resolution))
def get_frequency_range(self):
return self.parameters.frequency_range
# Wave vector related methods
def set_reduced_q_vector(self, q_vector):
if len(q_vector) == len(self.parameters.reduced_q_vector):
if (np.array(q_vector) != self.parameters.reduced_q_vector).any():
self.full_clear()
self.parameters.reduced_q_vector = np.array(q_vector)
def get_reduced_q_vector(self):
return self.parameters.reduced_q_vector
def get_q_vector(self):
return np.dot(self.parameters.reduced_q_vector,
2.0 * np.pi * np.linalg.inv(self.dynamic.structure.get_primitive_cell()).T)
# Phonopy harmonic calculation related methods
def get_eigenvectors(self):
if self._eigenvectors is None:
# print("Getting frequencies & eigenvectors from Phonopy")
self._eigenvectors, self._frequencies = (
pho_interface.obtain_eigenvectors_and_frequencies(self.dynamic.structure,
self.parameters.reduced_q_vector))
return self._eigenvectors
def get_frequencies(self):
if self._frequencies is None:
# print("Getting frequencies & eigenvectors from Phonopy")
self._eigenvectors, self._frequencies = (
pho_interface.obtain_eigenvectors_and_frequencies(self.dynamic.structure,
self.parameters.reduced_q_vector))
return self._frequencies
def set_band_ranges(self, band_ranges):
self.bands_clear()
self.parameters.band_ranges = band_ranges
def get_band_ranges_and_labels(self):
# return self.parameters.band_ranges
if self.parameters.band_ranges is None:
self.parameters.band_ranges = self.dynamic.structure.get_path_using_seek_path()
return self.parameters.band_ranges
def plot_phonon_dispersion_bands(self):
bands = self.get_band_ranges_and_labels()
band_ranges = bands['ranges']
if self._bands is None:
self._bands = pho_interface.obtain_phonon_dispersion_bands(self.dynamic.structure,
band_ranges,
NAC=self.parameters.use_NAC)
for i, freq in enumerate(self._bands[1]):
plt.plot(self._bands[1][i], self._bands[2][i], color='r')
# plt.axes().get_xaxis().set_visible(False)
plt.axes().get_xaxis().set_ticks([])
plt.ylabel('Frequency [THz]')
plt.xlabel('Wave vector')
plt.xlim([0, self._bands[1][-1][-1]])
plt.axhline(y=0, color='k', ls='dashed')
plt.suptitle('Phonon dispersion')
if 'labels' in bands:
plt.rcParams.update({'mathtext.default': 'regular'})
labels = bands['labels']
labels_e = []
x_labels = []
for i, freq in enumerate(self._bands[1]):
if labels[i][0] == labels[i - 1][1]:
labels_e.append('$' + labels[i][0].replace('GAMMA', '\Gamma') + '$')
else:
labels_e.append(
'$' + labels[i - 1][1].replace('GAMMA', '\Gamma') + '/' + labels[i][0].replace('GAMMA',
'\Gamma') + '$')
x_labels.append(self._bands[1][i][0])
x_labels.append(self._bands[1][-1][-1])
labels_e.append('$' + labels[-1][1].replace('GAMMA', '\Gamma') + '$')
labels_e[0] = '$' + labels[0][0].replace('GAMMA', '\Gamma') + '$'
plt.xticks(x_labels, labels_e, rotation='horizontal')
plt.show()
def plot_renormalized_phonon_dispersion_bands(self, plot_linewidths=False, plot_harmonic=True):
bands_full_data = self.get_renormalized_phonon_dispersion_bands(with_linewidths=plot_linewidths)
plot_title = 'Renormalized phonon dispersion relations'
for i, path in enumerate(bands_full_data):
plt.plot(path['q_path_distances'], np.array(list(path['renormalized_frequencies'].values())).T, color='r',
label='Renormalized')
if plot_harmonic:
plt.plot(path['q_path_distances'], np.array(list(path['harmonic_frequencies'].values())).T, color='b',
label='Harmonic')
if plot_linewidths:
for freq, linewidth in zip(list(path['renormalized_frequencies'].values()),
list(path['linewidth'].values())):
plt.fill_between(path['q_path_distances'], freq + np.array(linewidth) / 2,
freq - np.array(linewidth) / 2, color='r', alpha=0.2, interpolate=True,
linewidth=0)
plot_title = 'Renormalized phonon dispersion relations and linewidths'
# plt.axes().get_xaxis().set_visible(False)
plt.suptitle(plot_title)
plt.axes().get_xaxis().set_ticks([])
plt.ylabel('Frequency [THz]')
plt.xlabel('Wave vector')
plt.xlim([0, bands_full_data[-1]['q_path_distances'][-1]])
plt.axhline(y=0, color='k', ls='dashed')
if plot_harmonic:
handles = plt.gca().get_legend_handles_labels()[0]
plt.legend([handles[-1], handles[0]], ['Harmonic', 'Renormalized'])
if 'labels' in bands_full_data[0]:
plt.rcParams.update({'mathtext.default': 'regular'})
labels = [[bands_full_data[i]['labels']['inf'],
bands_full_data[i]['labels']['sup']]
for i in range(len(bands_full_data))]
labels_e = []
x_labels = []
for i, freq in enumerate(bands_full_data):
if labels[i][0] == labels[i - 1][1]:
labels_e.append('$' + labels[i][0].replace('GAMMA', '\Gamma') + '$')
else:
labels_e.append(
'$' + labels[i - 1][1].replace('GAMMA', '\Gamma') + '/' + labels[i][0].replace('GAMMA',
'\Gamma') + '$')
x_labels.append(bands_full_data[i]['q_path_distances'][0])
x_labels.append(bands_full_data[-1]['q_path_distances'][-1])
labels_e.append('$' + labels[-1][1].replace('GAMMA', '\Gamma') + '$')
labels_e[0] = '$' + labels[0][0].replace('GAMMA', '\Gamma') + '$'
plt.xticks(x_labels, labels_e, rotation='horizontal')
plt.show()
def plot_linewidths_and_shifts_bands(self):
bands_full_data = self.get_renormalized_phonon_dispersion_bands(with_linewidths=True,
band_connection=True,
interconnect_bands=True)
number_of_branches = len(bands_full_data[0]['linewidth'])
# print('number_of branches', number_of_branches)
for i, path in enumerate(bands_full_data):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
for j in range(number_of_branches):
plt.figure(0)
branch = path['linewidth']['branch_{}'.format(j)]
plt.plot(path['q_path_distances'], branch, color=np.roll(colors, -j)[0], label='linewidth')
plt.figure(1)
branch = path['harmonic_frequencies']['branch_{}'.format(j)]
plt.plot(path['q_path_distances'], branch, color=np.roll(colors, -j)[0], label='linewidth')
plt.figure(2)
branch = path['frequency_shifts']['branch_{}'.format(j)]
plt.plot(path['q_path_distances'], branch, color=np.roll(colors, -j)[0], label='linewidth')
plt.figure(3)
branch = path['renormalized_frequencies']['branch_{}'.format(j)]
plt.plot(path['q_path_distances'], branch, color=np.roll(colors, -j)[0], label='linewidth')
plt.figure(0)
plt.suptitle('Phonon linewidths')
plt.figure(1)
plt.suptitle('Harmonic phonon dispersion relations')
plt.figure(2)
plt.suptitle('Frequency shifts')
plt.figure(3)
plt.suptitle('Renormalized phonon dispersion relations')
for ifig in [0, 1, 2, 3]:
plt.figure(ifig)
plt.axes().get_xaxis().set_ticks([])
plt.ylabel('Frequency [THz]')
plt.xlabel('Wave vector')
plt.xlim([0, bands_full_data[-1]['q_path_distances'][-1]])
plt.axhline(y=0, color='k', ls='dashed')
if 'labels' in bands_full_data[0]:
plt.rcParams.update({'mathtext.default': 'regular'})
labels = [[bands_full_data[i]['labels']['inf'],
bands_full_data[i]['labels']['sup']]
for i in range(len(bands_full_data))]
labels_e = []
x_labels = []
for i, freq in enumerate(bands_full_data):
if labels[i][0] == labels[i - 1][1]:
labels_e.append('$' + labels[i][0].replace('GAMMA', '\Gamma') + '$')
else:
labels_e.append(
'$' + labels[i - 1][1].replace('GAMMA', '\Gamma') + '/' + labels[i][0].replace('GAMMA',
'\Gamma') + '$')
x_labels.append(bands_full_data[i]['q_path_distances'][0])
x_labels.append(bands_full_data[-1]['q_path_distances'][-1])
labels_e.append('$' + labels[-1][1].replace('GAMMA', '\Gamma') + '$')
labels_e[0] = '$' + labels[0][0].replace('GAMMA', '\Gamma') + '$'
plt.xticks(x_labels, labels_e, rotation='horizontal')
plt.show()
def plot_frequencies_vs_linewidths(self):
qpoints, multiplicity, frequencies, linewidths = self.get_mesh_frequencies_and_linewidths()
plt.ylabel('Linewidth [THz]')
plt.xlabel('Frequency [THz]')
plt.axhline(y=0, color='k', ls='dashed')
plt.title('Frequency vs linewidths (from mesh: {})'.format(self.parameters.mesh_phonopy))
plt.scatter(np.array(frequencies).flatten(), np.array(linewidths).flatten(), s=multiplicity)
plt.show()
def get_renormalized_phonon_dispersion_bands(self,
with_linewidths=False,
interconnect_bands=False,
band_connection=False):
def reconnect_eigenvectors(bands):
order = range(bands[2][0].shape[1])
for i, ev_bands in enumerate(bands[3]):
if i > 0:
ref = bands[3][i-1][-1]
metric = np.abs(np.dot(ref.conjugate().T, ev_bands[0]))
order = np.argmax(metric, axis=1)
bands[2][i] = bands[2][i].T[order].T
bands[3][i] = bands[3][i].T[order].T
def reconnect_frequencies(bands):
order = range(bands[2][0].shape[1])
for i, f_bands in enumerate(bands[2]):
if i > 0:
order = []
ref = np.array(bands[2][i-1][-1]).copy()
for j, test_val in enumerate(f_bands[0]):
ov = np.argmin(np.abs(ref - test_val))
order.append(ov)
ref[ov] = 1000
# print(order)
bands[2][i] = bands[2][i].T[order].T
bands[3][i] = bands[3][i].T[order].T
def eigenvector_order(ev_bands, ev_renormalized):
metric = np.zeros_like(np.abs(np.dot(ev_bands[0].conjugate().T, ev_renormalized[0])))
for ev_ref, ev in zip(ev_bands, ev_renormalized):
metric += np.abs(np.dot(ev_ref.conjugate().T, ev))
order = np.argmax(metric, axis=1,)
return order
def set_order(bands, renormalized_bands):
order_list = []
for ev_bands, ev_renormalized in zip(bands[3], renormalized_bands[3]):
order = eigenvector_order(ev_bands, ev_renormalized)
order_list.append(order)
freq = np.array(renormalized_bands[2]).copy()
for i, o in enumerate(order_list):
renormalized_bands[2][i] = freq[i].T[o].T
renormalized_force_constants = self.get_renormalized_force_constants()
bands = self.get_band_ranges_and_labels()
band_ranges = bands['ranges']
_bands = pho_interface.obtain_phonon_dispersion_bands(self.dynamic.structure,
band_ranges,
NAC=self.parameters.use_NAC,
band_connection=band_connection,
band_resolution=self.parameters.band_resolution)
if interconnect_bands:
# reconnect_frequencies(_bands)
reconnect_eigenvectors(_bands)
_renormalized_bands = pho_interface.obtain_phonon_dispersion_bands(self.dynamic.structure,
band_ranges,
force_constants=renormalized_force_constants,
NAC=self.parameters.use_NAC,
band_connection=band_connection,
band_resolution=self.parameters.band_resolution)
if band_connection:
set_order(_bands, _renormalized_bands)
data = self.get_commensurate_points_data()
renormalized_frequencies = data['frequencies']
eigenvectors = data['eigenvectors']
linewidths = data['linewidths']
fc_supercell = data['fc_supercell']
sup_lim = pho_interface.get_renormalized_force_constants(renormalized_frequencies + linewidths / 2,
eigenvectors,
self.dynamic.structure,
fc_supercell,
symmetrize=self.parameters.symmetrize)
inf_lim = pho_interface.get_renormalized_force_constants(renormalized_frequencies - linewidths / 2,
eigenvectors,
self.dynamic.structure,
fc_supercell,
symmetrize=self.parameters.symmetrize)
if with_linewidths:
renormalized_bands_s = pho_interface.obtain_phonon_dispersion_bands(self.dynamic.structure,
band_ranges,
force_constants=sup_lim,
NAC=self.parameters.use_NAC,
band_connection=band_connection,
band_resolution=self.parameters.band_resolution)
renormalized_bands_i = pho_interface.obtain_phonon_dispersion_bands(self.dynamic.structure,
band_ranges,
force_constants=inf_lim,
NAC=self.parameters.use_NAC,
band_connection=band_connection,
band_resolution=self.parameters.band_resolution)
if band_connection:
set_order(_bands, renormalized_bands_s)
set_order(_bands, renormalized_bands_i)
bands_full_data = []
for i, q_path in enumerate(_bands[1]):
band = {'q_path_distances': q_path.tolist(),
'q_bounds': {'inf': list(band_ranges[i][0]), 'sup': list(band_ranges[i][1])},
'harmonic_frequencies': {'branch_{}'.format(key): value.tolist() for (key, value) in
enumerate(_bands[2][i].T)},
'renormalized_frequencies': {'branch_{}'.format(key): value.tolist() for (key, value) in
enumerate(_renormalized_bands[2][i].T)},
'frequency_shifts': {'branch_{}'.format(key): value.tolist() for (key, value) in
enumerate(_renormalized_bands[2][i].T - _bands[2][i].T)},
}
if with_linewidths:
band.update({'linewidth_minus': {'branch_{}'.format(key): value.tolist() for (key, value) in
enumerate(renormalized_bands_i[2][i].T)},
'linewidth_plus': {'branch_{}'.format(key): value.tolist() for (key, value) in
enumerate(renormalized_bands_s[2][i].T)},
'linewidth': {'branch_{}'.format(key): value.tolist() for (key, value) in
enumerate(renormalized_bands_s[2][i].T - renormalized_bands_i[2][i].T)}}
)
if 'labels' in bands:
labels = bands['labels']
band.update({'labels': {'inf': labels[i][0], 'sup': labels[i][1]}})
bands_full_data.append(band)
return bands_full_data
def get_mesh_frequencies_and_linewidths(self):
data = self.get_commensurate_points_data()
renormalized_frequencies = data['frequencies']
eigenvectors = data['eigenvectors']
linewidths = data['linewidths']
fc_supercell = data['fc_supercell']
linewidths_fc = pho_interface.get_renormalized_force_constants(linewidths,
eigenvectors,
self.dynamic.structure,
fc_supercell,
symmetrize=self.parameters.symmetrize)
_, _, linewidths_mesh = pho_interface.obtain_phonopy_mesh_from_force_constants(self.dynamic.structure,
force_constants=linewidths_fc,
mesh=self.parameters.mesh_phonopy,
NAC=None)
frequencies_fc = pho_interface.get_renormalized_force_constants(renormalized_frequencies,
eigenvectors,
self.dynamic.structure,
fc_supercell,
symmetrize=self.parameters.symmetrize)
qpoints, multiplicity, frequencies_mesh = pho_interface.obtain_phonopy_mesh_from_force_constants(self.dynamic.structure,
force_constants=frequencies_fc,
mesh=self.parameters.mesh_phonopy,
NAC=None)
return qpoints, multiplicity, frequencies_mesh, linewidths_mesh
def write_renormalized_phonon_dispersion_bands(self, filename='bands_data.yaml'):
bands_full_data = self.get_renormalized_phonon_dispersion_bands(with_linewidths=True, band_connection=True)
reading.save_bands_data_to_file(bands_full_data, filename)
def print_phonon_dispersion_bands(self):
if self._bands is None:
self._bands = pho_interface.obtain_phonon_dispersion_bands(self.dynamic.structure,
self.get_band_ranges_and_labels(),
NAC=self.parameters.use_NAC)
np.set_printoptions(linewidth=200)
for i, freq in enumerate(self._bands[1]):
print(str(np.hstack([self._bands[1][i][None].T, self._bands[2][i]])).replace('[', '').replace(']', ''))
def plot_eigenvectors(self):
modes.plot_phonon_modes(self.dynamic.structure,
self.get_eigenvectors(),
self.get_q_vector(),
vectors_scale=self.parameters.modes_vectors_scale)
def plot_dos_phonopy(self, force_constants=None):
phonopy_dos = pho_interface.obtain_phonopy_dos(self.dynamic.structure,
mesh=self.parameters.mesh_phonopy,
projected_on_atom=self.parameters.project_on_atom,
NAC=self.parameters.use_NAC)
plt.plot(phonopy_dos[0], phonopy_dos[1], 'b-', label='Harmonic')
if force_constants is not None:
phonopy_dos_r = pho_interface.obtain_phonopy_dos(self.dynamic.structure,
mesh=self.parameters.mesh_phonopy,
force_constants=force_constants,
projected_on_atom=self.parameters.project_on_atom,
NAC=self.parameters.use_NAC)
plt.plot(phonopy_dos_r[0], phonopy_dos_r[1], 'g-', label='Renormalized')
plt.title('Density of states (Normalized to unit cell)')
plt.xlabel('Frequency [THz]')
plt.ylabel('Density of states')
plt.legend()
plt.axhline(y=0, color='k', ls='dashed')
plt.show()
def check_commensurate(self, q_point, decimals=4):
supercell = self.dynamic.get_supercell_matrix()
commensurate = False
primitive_matrix = self.dynamic.structure.get_primitive_matrix()
transform = np.dot(q_point, np.linalg.inv(primitive_matrix))
transform = np.multiply(transform, supercell)
transform = np.around(transform, decimals=decimals)
if np.all(np.equal(np.mod(transform, 1), 0)):
commensurate = True
return commensurate
# Projections related methods
def get_vc(self):
if self._vc is None:
print("Projecting into wave vector")
# Check if commensurate point
if not self.check_commensurate(self.get_reduced_q_vector()):
print("warning! This wave vector is not a commensurate q-point in MD supercell")
if self.parameters.project_on_atom > -1:
element = self.dynamic.structure.get_atomic_elements(unique=True)[self.parameters.project_on_atom]
print('Project on atom {} : {}'.format(self.parameters.project_on_atom, element))
self._vc = projection.project_onto_wave_vector(self.dynamic,
self.get_q_vector(),
project_on_atom=self.parameters.project_on_atom)
return self._vc
def get_vq(self):
if self._vq is None:
print("Projecting into phonon mode")
self._vq = projection.project_onto_phonon(self.get_vc(), self.get_eigenvectors())
return self._vq
def plot_vq(self, modes=None):
if not modes:
modes = [0]
plt.suptitle('Phonon mode projection')
plt.xlabel('Time [ps]')
plt.ylabel('$u^{1/2}\AA/ps$')
time = np.linspace(0, self.get_vc().shape[0] * self.dynamic.get_time_step_average(),
num=self.get_vc().shape[0])
for mode in modes:
plt.plot(time, self.get_vq()[:, mode].real, label='mode: ' + str(mode))
plt.legend()
plt.show()
def plot_vc(self, atoms=None, coordinates=None):
if not atoms:
atoms = [0]
if not coordinates:
coordinates = [0]
time = np.linspace(0, self.get_vc().shape[0] * self.dynamic.get_time_step_average(),
num=self.get_vc().shape[0])
plt.suptitle('Wave vector projection')
plt.xlabel('Time [ps]')
plt.ylabel('$u^{1/2}\AA/ps$')
for atom in atoms:
for coordinate in coordinates:
plt.plot(time,
self.get_vc()[:, atom, coordinate].real,
label='atom: ' + str(atom) + ' coordinate:' + str(coordinate))
plt.legend()
plt.show()
def save_vc(self, file_name, atom=0):
print("Saving wave vector projection to file")
np.savetxt(file_name, self.get_vc()[:, atom, :].real)
def save_vq(self, file_name):
print("Saving phonon projection to file")
np.savetxt(file_name, self.get_vq().real)
# Power spectra related methods
def select_power_spectra_algorithm(self, algorithm):
if algorithm in power_spectrum_functions.keys():
if algorithm != self.parameters.power_spectra_algorithm:
self.power_spectra_clear()
self.parameters.power_spectra_algorithm = algorithm
print("Using {0} function".format(power_spectrum_functions[algorithm][1]))
else:
print("Power spectrum algorithm number not found!\nPlease select:")
for i in power_spectrum_functions.keys():
print('{0} : {1}'.format(i, power_spectrum_functions[i][1]))
exit()
def select_fitting_function(self, function):
from dynaphopy.analysis.fitting.fitting_functions import fitting_functions
if function in fitting_functions.keys():
if function != self.parameters.fitting_function:
self.force_constants_clear()
self.parameters.fitting_function = function
else:
print("Fitting function number not found!\nPlease select:")
for i in fitting_functions.keys():
print('{0} : {1}'.format(i, fitting_functions[i]))
exit()
def get_power_spectrum_phonon(self):
if self._power_spectrum_phonon is None:
print("Calculating phonon projection power spectra")
if self.parameters.use_symmetry:
initial_reduced_q_point = self.get_reduced_q_vector()
power_spectrum_phonon = []
q_points_equivalent = pho_interface.get_equivalent_q_points_by_symmetry(self.get_reduced_q_vector(),
self.dynamic.structure)
# print(q_points_equivalent)
for q_point in q_points_equivalent:
self.set_reduced_q_vector(q_point)
power_spectrum_phonon.append(
(power_spectrum_functions[self.parameters.power_spectra_algorithm])[0](self.get_vq(),
self.dynamic,
self.parameters))
self.set_reduced_q_vector(initial_reduced_q_point)
self._power_spectrum_phonon = np.average(power_spectrum_phonon, axis=0)  # api: numpy.average
#!/usr/bin/env python3
"""
Discriminative Bayesian Filtering Lends Momentum
to the Stochastic Newton Method for Minimizing Log-Convex Functions
Exhibits and tests a discriminative filtering strategy for the stochastic
(batch-based) Newton method that aims to minimize the mean of log-convex
functions using sub-sampled gradients and Hessians
Runs using the provided Dockerfile (https://www.docker.com):
```
docker build --no-cache -t hibiscus .
docker run --rm -ti -v $(pwd):/home/felixity hibiscus
```
or in a virtual environment with Python3.10:
```
python3.10 -m venv turquoise
source turquoise/bin/activate
pip3 install -r requirements.txt
python3.10 filtered_stochastic_newton.py
```
"""
from __future__ import annotations
import datetime
import functools
import inspect
import logging
import os
import platform
import re
import subprocess
import sys
import time
from typing import Callable
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize as sp_minimize
from numpy.linalg import *
class DiscriminativeKalmanFilter:
"""
Implements the Discriminative Kalman Filter as described in <NAME>.,
<NAME>., <NAME>., <NAME>., & <NAME>.'s "The
discriminative Kalman filter for Bayesian filtering with nonlinear and
nongaussian observation models." Neural Comput. 32(5), 969–1017 (2020).
"""
def __init__(
self,
stateModelA: np.mat,
stateModelGamma: np.mat,
stateModelS: np.mat,
posteriorMean: np.mat = None,
posteriorCovariance: np.mat = None,
) -> None:
"""
Specifies the
state model p(hidden_t|hidden_{t-1})
= eta_{dState}(hidden_t; stateModelA*hidden_{t-1}, stateModelGamma)
and measurement model p(hidden_t|observed_t)
= eta_{dState}(hidden_t; ft, Qt)
where ft, Qt must be supplied by the user at each time step for updates
:param stateModelA: A from eq. (2.1b)
:param stateModelGamma: Γ from eq. (2.1b)
:param stateModelS: S from eq. (2.1a)
:param posteriorMean: μ_t from eq. (2.6)
:param posteriorCovariance: Σ_t from eq. (2.6)
"""
self.stateModelA = stateModelA
self.stateModelGamma = stateModelGamma
self.stateModelS = stateModelS
self.dState = stateModelA.shape[0]
if posteriorMean is not None:
self.posteriorMean = posteriorMean
else:
self.posteriorMean = np.zeros((self.dState, 1))
if posteriorCovariance is not None:
self.posteriorCovariance = posteriorCovariance
else:
self.posteriorCovariance = self.stateModelS
def stateUpdate(self) -> None:
"""
Calculates the first 2 lines of eq. (2.7) in-place
"""
self.posteriorMean = self.stateModelA * self.posteriorMean
self.posteriorCovariance = (
self.stateModelA * self.posteriorCovariance * self.stateModelA.T
+ self.stateModelGamma
)
def measurementUpdate(self, ft: np.mat, Qt: np.mat) -> None:
"""
Given ft & Qt, calculates the last 2 lines of eq. (2.7)
:param ft: f(x_t) from eq. (2.2)
:param Qt: Q(x_t) from eq. (2.2)
:return:
"""
if not np.all(eigvals(inv(Qt) - inv(self.stateModelS)) > 1e-6):
Qt = inv(inv(Qt) + inv(self.stateModelS))
newPosteriorCovInv = (
inv(self.posteriorCovariance) + inv(Qt) - inv(self.stateModelS)
)
self.posteriorMean = solve(
newPosteriorCovInv,
solve(self.posteriorCovariance, self.posteriorMean)
+ solve(Qt, ft),
)
self.posteriorCovariance = inv(newPosteriorCovInv)
def predict(self, ft: np.mat, Qt: np.mat) -> tuple[np.mat, np.mat]:
"""
Given ft & Qt, performs stateUpdate() and measurementUpdate(ft, Qt)
:param ft: f(x_t) from eq. (2.2)
:param Qt: Q(x_t) from eq. (2.2)
:return: new posterior mean and covariance from applying eq. (2.7)
"""
self.stateUpdate()
self.measurementUpdate(ft, Qt)
return self.posteriorMean, self.posteriorCovariance
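# Minimal usage sketch (toy 1-D model with assumed parameter values, not from the paper):
#     dkf = DiscriminativeKalmanFilter(stateModelA=np.mat([[1.0]]),
#                                      stateModelGamma=np.mat([[0.1]]),
#                                      stateModelS=np.mat([[1.0]]))
#     mean, cov = dkf.predict(ft=np.mat([[0.5]]), Qt=np.mat([[0.2]]))
# Each predict() call applies the state update followed by the measurement update
# of eq. (2.7) and returns the new posterior mean and covariance.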
def ArmijoStyleSearch(
fn: Callable[[float], float],
t0: np.mat,
step_dir: np.mat,
grad_fn_t0: np.mat,
) -> np.mat:
"""
Implements a backtracking line search inspired by Armijo, L.'s
"Minimization of functions having Lipschitz continuous first partial
derivatives." Pacific J. Math. 16(1), 1–3 (1966).
:param fn: callable fn for which we seek a minimum
:param t0: starting point
:param step_dir: direction in which to seek a minimum of fn from t0
:param grad_fn_t0: gradient of fn at t0
:return: reasonable step length
"""
fn_x0 = fn(t0)
for k in range(5):
step_length = 2**-k
if fn(t0 + step_length * step_dir) - fn_x0 <= float(
0.95 * step_length * step_dir * grad_fn_t0.T
):
break
return step_length
def angular_distance(v1: np.ndarray, v2: np.ndarray) -> float:
"""
Returns the angle in radians between two equal-length vectors v1 and v2;
if in 2 dimensions, returns a signed angle
:param v1: first vector
:param v2: second vector
:return: angle between v1 and v2 (radians)
"""
v1_n = np.asarray(v1).ravel() / norm(v1)
v2_n = np.asarray(v2).ravel() / norm(v2)
if v1_n.size != 2:
return np.arccos(np.dot(v1_n, v2_n))
else:
# can assign a sign when vectors are 2-dimensional
theta1 = np.arctan2(v1_n[1], v1_n[0])
theta2 = np.arctan2(v2_n[1], v2_n[0])
diff_rad = theta1 - theta2
return min(
[diff_rad + 2 * j * np.pi for j in range(-1, 2)],
key=lambda x: np.abs(x),
)
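# Quick check (assumed vectors): in 2-D the returned angle is signed,
#     angular_distance(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # approx -pi/2
# while for higher dimensions only the unsigned angle arccos(v1 . v2) is returned.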
def log_calls(func: Callable) -> Callable:
"""
Logs information about a function call including inputs and output
:param func: function to call
:return: wrapped function with i/o logging
"""
logger = logging.getLogger("filtered_stochastic_newton")
@functools.wraps(func)
def log_io(*args, **kwargs):
func_args = inspect.signature(func).bind(*args, **kwargs).arguments
y = func(*args, **kwargs)
logger.debug(
f"Function {func.__name__} called with "
+ "; ".join([str(i) + ":" + str(j) for i, j in func_args.items()])
+ f" & returned: {y}"
)
return y
return log_io
class LinearGaussianExample:
"""
Example as described in section 5 of the manuscript
"""
def __init__(self, n_total: int, d_theta: int):
self.n_total = n_total
self.theta = np.mat(np.random.normal(loc=1.0, size=(1, d_theta)))
self.X = np.mat(
np.random.multivariate_normal(
mean=np.zeros(d_theta),
cov=0.9 * np.eye(d_theta) + 0.1 * np.ones((d_theta, d_theta)),
size=self.n_total,
)
)
self.zeta = np.mat(np.random.normal(size=(self.n_total, 1)))
self.Y = self.X * self.theta.T + self.zeta
self.minimum = self.find_minimum()
self.logger = logging.getLogger(
os.path.basename(__file__).split(".")[0]
)
self.logger.debug(f"{self.theta.tolist()=}")
self.logger.debug(f"{self.X.tolist()=}")
self.logger.debug(f"{self.zeta.tolist()=}")
self.logger.debug(f"{self.Y.tolist()=}")
self.logger.debug(f"{self.minimum.tolist()=}")
def g_i(self, theta: np.mat, i: int) -> float:
return 1 / 2 * float(self.Y[i] - theta * self.X[i].T) ** 2
def grad_i(self, theta: np.mat, i: int) -> np.mat:
return theta * (self.X[i].T * self.X[i]) - self.X[i] * float(self.Y[i])
def grad2_i(self, theta: np.mat, i: int) -> float:
return self.grad_i(theta, i) * self.grad_i(theta, i).T
def hess_i(self, theta: np.mat, i: int) -> float:
return self.X[i].T * self.X[i]
def g_idx(self, theta: np.mat, idx: list) -> float:
g = 0
if len(idx) == 0:
return g
for i in idx:
g += self.g_i(theta, i)
return g / len(idx)
def grad_idx(self, theta: np.mat, idx: list) -> np.mat:
grad = np.zeros_like(self.grad_i(self.theta, 0))
if len(idx) == 0:
return grad
for i in idx:
grad += self.grad_i(theta, i)
return grad / len(idx)
def grad2_idx(self, theta: np.mat, idx: list) -> float:
grad2 = np.zeros_like(self.grad2_i(self.theta, 0))
if len(idx) == 0:
return float(grad2)
for i in idx:
grad2 += self.grad2_i(theta, i)
return grad2 / len(idx)
def hess_idx(self, theta: np.mat, idx: list) -> float:
hess = np.zeros_like(self.hess_i(self.theta, 0))
if len(idx) == 0:
return float(hess)
for i in idx:
hess += self.hess_i(theta, i)
return hess / len(idx)
def g(self, theta: np.mat) -> float:
return self.g_idx(theta, list(range(self.n_total)))
def grad(self, theta: np.mat) -> np.mat:
return self.grad_idx(theta, list(range(self.n_total)))
def grad2(self, theta: np.mat) -> float:
return self.grad2_idx(theta, list(range(self.n_total)))
def hess(self, theta: np.mat) -> float:
return self.hess_idx(theta, list(range(self.n_total)))
@log_calls
def random_sample(self, n_sample: int) -> list:
return list(np.random.choice(range(self.n_total), n_sample))
def find_minimum(self) -> np.mat:
res = sp_minimize(self.g, self.theta, method="Nelder-Mead", tol=1e-6)
return np.mat(res.x)
def run_example(
n_total: int,
n_steps: int,
n_sample: int,
d_theta: int,
n_starts: int,
alpha: float,
beta: float,
) -> pd.DataFrame:
"""
Runs a single comparison test
:param n_total: n from the paper
:param n_steps: number of optimization steps to perform
:param n_sample: size of each sample (\abs{\mathcal S} from the paper)
:param d_theta: dimension of theta
:param n_starts: number of restarts for a given problem
:param alpha: positive parameter for state evolution eq. (21)
:param beta: positive parameter, <1, for state evolution eq. (21)
:return: dataframe containing test results
"""
eg = LinearGaussianExample(n_total=n_total, d_theta=d_theta)
theta0 = np.mat(np.random.normal(size=(1, d_theta)))
for run_n in range(n_starts):
# for recording the unfiltered estimate
theta_nf = theta0.copy()
theta_nf_list = [theta_nf.round(3).tolist()[0]]
g_theta_nf_list = [eg.g(theta_nf)]
step_direction_nf_list = []
step_direction_nf_at_theta_f_list = []
# for recording filtered estimate;
# initialized at same point as unfiltered
theta_f = theta0.copy()
theta_f_list = [theta_f.round(3).tolist()[0]]
g_theta_f_list = [eg.g(theta_f)]
step_direction_f_list = []
# for recording angular comparisons
true_step_at_theta_f_list = []
step_angle_nf_at_theta_f_list = []
step_angle_f_at_theta_f_list = []
# for recording Mt's at each step
Mt_list = [(0.0 * np.eye(d_theta))]  # api: numpy.eye (closing brackets assumed; statement truncated at this point in the source)
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import inspect
import os
import pickle
import platform
import unittest
import warnings
from copy import deepcopy
import numpy as np
from obspy import Stream, Trace, UTCDateTime, read, read_inventory
from obspy.core.compatibility import mock
from obspy.core.stream import _is_pickle, _read_pickle, _write_pickle
from obspy.core.util.attribdict import AttribDict
from obspy.core.util.base import NamedTemporaryFile
from obspy.io.xseed import Parser
class StreamTestCase(unittest.TestCase):
"""
Test suite for obspy.core.stream.Stream.
"""
def setUp(self):
# set specific seed value such that random numbers are reproducible
np.random.seed(815)
header = {'network': 'BW', 'station': 'BGLD',
'starttime': UTCDateTime(2007, 12, 31, 23, 59, 59, 915000),
'npts': 412, 'sampling_rate': 200.0,
'channel': 'EHE'}
trace1 = Trace(data=np.random.randint(0, 1000, 412).astype(np.float64),
header=deepcopy(header))
header['starttime'] = UTCDateTime(2008, 1, 1, 0, 0, 4, 35000)
header['npts'] = 824
trace2 = Trace(data=np.random.randint(0, 1000, 824).astype(np.float64),
header=deepcopy(header))
header['starttime'] = UTCDateTime(2008, 1, 1, 0, 0, 10, 215000)
trace3 = Trace(data=np.random.randint(0, 1000, 824).astype(np.float64),
header=deepcopy(header))
header['starttime'] = UTCDateTime(2008, 1, 1, 0, 0, 18, 455000)
header['npts'] = 50668
trace4 = Trace(
data=np.random.randint(0, 1000, 50668).astype(np.float64),
header=deepcopy(header))
self.mseed_stream = Stream(traces=[trace1, trace2, trace3, trace4])
header = {'network': '', 'station': 'RNON ', 'location': '',
'starttime': UTCDateTime(2004, 6, 9, 20, 5, 59, 849998),
'sampling_rate': 200.0, 'npts': 12000,
'channel': ' Z'}
trace = Trace(
data=np.random.randint(0, 1000, 12000).astype(np.float64),
header=header)
self.gse2_stream = Stream(traces=[trace])
self.data_path = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "data")
@staticmethod
def __remove_processing(st):
"""
Helper method removing the processing information from all traces
within a Stream object.
Useful for testing.
"""
for tr in st:
if "processing" not in tr.stats:
continue
del tr.stats.processing
def test_init(self):
"""
Tests the __init__ method of the Stream object.
"""
# empty
st = Stream()
self.assertEqual(len(st), 0)
# single trace
st = Stream(Trace())
self.assertEqual(len(st), 1)
# array of traces
st = Stream([Trace(), Trace()])
self.assertEqual(len(st), 2)
def test_setitem(self):
"""
Tests the __setitem__ method of the Stream object.
"""
stream = self.mseed_stream
stream[0] = stream[3]
self.assertEqual(stream[0], stream[3])
st = deepcopy(stream)
stream[0].data[0:10] = 999
self.assertNotEqual(st[0].data[0], 999)
st[0] = stream[0]
np.testing.assert_array_equal(stream[0].data[:10],
np.ones(10, dtype=np.int_) * 999)
def test_getitem(self):
"""
Tests the __getitem__ method of the Stream object.
"""
stream = read()
self.assertEqual(stream[0], stream.traces[0])
self.assertEqual(stream[-1], stream.traces[-1])
self.assertEqual(stream[2], stream.traces[2])
# out of index should fail
self.assertRaises(IndexError, stream.__getitem__, 3)
self.assertRaises(IndexError, stream.__getitem__, -99)
def test_add(self):
"""
Tests the adding of two stream objects.
"""
stream = self.mseed_stream
self.assertEqual(4, len(stream))
# Add the same stream object to itself.
stream = stream + stream
self.assertEqual(8, len(stream))
# This will not create copies of Traces and thus the objects should
# be identical (and the Traces attributes should be identical).
for _i in range(4):
self.assertEqual(stream[_i], stream[_i + 4])
self.assertEqual(stream[_i] == stream[_i + 4], True)
self.assertEqual(stream[_i] != stream[_i + 4], False)
self.assertEqual(stream[_i] is stream[_i + 4], True)
self.assertEqual(stream[_i] is not stream[_i + 4], False)
# Now add another stream to it.
other_stream = self.gse2_stream
self.assertEqual(1, len(other_stream))
new_stream = stream + other_stream
self.assertEqual(9, len(new_stream))
# The traces of all streams are copied.
for _i in range(8):
self.assertEqual(new_stream[_i], stream[_i])
self.assertEqual(new_stream[_i] is stream[_i], True)
# Also test for the newly added stream.
self.assertEqual(new_stream[8], other_stream[0])
self.assertEqual(new_stream[8].stats, other_stream[0].stats)
np.testing.assert_array_equal(new_stream[8].data, other_stream[0].data)
# adding something else than stream or trace results into TypeError
self.assertRaises(TypeError, stream.__add__, 1)
self.assertRaises(TypeError, stream.__add__, 'test')
def test_iadd(self):
"""
Tests the __iadd__ method of the Stream objects.
"""
stream = self.mseed_stream
self.assertEqual(4, len(stream))
other_stream = self.gse2_stream
self.assertEqual(1, len(other_stream))
# Add the other stream to the stream.
stream += other_stream
# This will leave the Traces of the new stream and create a deepcopy of
# the other Stream's Traces
self.assertEqual(5, len(stream))
self.assertEqual(other_stream[0], stream[-1])
self.assertEqual(other_stream[0].stats, stream[-1].stats)
np.testing.assert_array_equal(other_stream[0].data, stream[-1].data)
# adding something else than stream or trace results into TypeError
self.assertRaises(TypeError, stream.__iadd__, 1)
self.assertRaises(TypeError, stream.__iadd__, 'test')
def test_mul(self):
"""
Tests the __mul__ method of the Stream objects.
"""
st = Stream(Trace())
self.assertEqual(len(st), 1)
st = st * 4
self.assertEqual(len(st), 4)
# multiplying by something else than an integer results into TypeError
self.assertRaises(TypeError, st.__mul__, 1.2345)
self.assertRaises(TypeError, st.__mul__, 'test')
def test_add_trace_to_stream(self):
"""
Tests using a Trace on __add__ and __iadd__ methods of the Stream.
"""
st0 = read()
st1 = st0[0:2]
tr = st0[2]
# __add__
self.assertEqual(st1.__add__(tr), st0)
self.assertEqual(st1 + tr, st0)
# __iadd__
st1 += tr
self.assertEqual(st1, st0)
def test_append(self):
"""
Tests the append method of the Stream object.
"""
stream = self.mseed_stream
# Check current count of traces
self.assertEqual(len(stream), 4)
# Append first traces to the Stream object.
stream.append(stream[0])
self.assertEqual(len(stream), 5)
# This is supposed to make a deepcopy of the Trace and thus the two
# Traces are not identical.
self.assertEqual(stream[0], stream[-1])
# But the attributes and data values should be identical.
self.assertEqual(stream[0].stats, stream[-1].stats)
np.testing.assert_array_equal(stream[0].data, stream[-1].data)
# Append the same again
stream.append(stream[0])
self.assertEqual(len(stream), 6)
# Now the two objects should be identical.
self.assertEqual(stream[0], stream[-1])
# Using append with a list of Traces, or int, or ... should fail.
self.assertRaises(TypeError, stream.append, stream[:])
self.assertRaises(TypeError, stream.append, 1)
self.assertRaises(TypeError, stream.append, stream[0].data)
def test_count_and_len(self):
"""
Tests the count and __len__ methods of the Stream object.
"""
# empty stream without traces
stream = Stream()
self.assertEqual(len(stream), 0)
self.assertEqual(stream.count(), 0)
# stream with traces
stream = read()
self.assertEqual(len(stream), 3)
self.assertEqual(stream.count(), 3)
def test_extend(self):
"""
Tests the extend method of the Stream object.
"""
stream = self.mseed_stream
# Check current count of traces
self.assertEqual(len(stream), 4)
# Extend the Stream object with the first two traces.
stream.extend(stream[0:2])
self.assertEqual(len(stream), 6)
# This is NOT supposed to make a deepcopy of the Trace and thus the two
# Traces compare equal and are identical.
self.assertEqual(stream[0], stream[-2])
self.assertEqual(stream[1], stream[-1])
self.assertIs(stream[0], stream[-2])
self.assertIs(stream[1], stream[-1])
# Using extend with a single Traces, or a wrong list, or ...
# should fail.
self.assertRaises(TypeError, stream.extend, stream[0])
self.assertRaises(TypeError, stream.extend, 1)
self.assertRaises(TypeError, stream.extend, [stream[0], 1])
def test_insert(self):
"""
Tests the insert Method of the Stream object.
"""
stream = self.mseed_stream
self.assertEqual(4, len(stream))
# Insert the last Trace before the second trace.
stream.insert(1, stream[-1])
self.assertEqual(len(stream), 5)
# This is supposed to make a deepcopy of the Trace and thus the two
# Traces are not identical.
# self.assertNotEqual(stream[1], stream[-1])
self.assertEqual(stream[1], stream[-1])
# But the attributes and data values should be identical.
self.assertEqual(stream[1].stats, stream[-1].stats)
np.testing.assert_array_equal(stream[1].data, stream[-1].data)
# Do the same again
stream.insert(1, stream[-1])
self.assertEqual(len(stream), 6)
# Now the two Traces should be identical
self.assertEqual(stream[1], stream[-1])
# Do the same with a list of traces this time.
# Insert the last two Trace before the second trace.
stream.insert(1, stream[-2:])
self.assertEqual(len(stream), 8)
# This is supposed to make a deepcopy of the Trace and thus the two
# Traces are not identical.
self.assertEqual(stream[1], stream[-2])
self.assertEqual(stream[2], stream[-1])
# But the attributes and data values should be identical.
self.assertEqual(stream[1].stats, stream[-2].stats)
np.testing.assert_array_equal(stream[1].data, stream[-2].data)
self.assertEqual(stream[2].stats, stream[-1].stats)
np.testing.assert_array_equal(stream[2].data, stream[-1].data)
# Do the same again
stream.insert(1, stream[-2:])
self.assertEqual(len(stream), 10)
# Now the two Traces should be identical
self.assertEqual(stream[1], stream[-2])
self.assertEqual(stream[2], stream[-1])
# Using insert without a single Traces or a list of Traces should fail.
self.assertRaises(TypeError, stream.insert, 1, 1)
self.assertRaises(TypeError, stream.insert, stream[0], stream[0])
self.assertRaises(TypeError, stream.insert, 1, [stream[0], 1])
def test_get_gaps(self):
"""
Tests the get_gaps method of the Stream objects.
It is compared directly to the obspy.io.mseed method getGapsList which
is assumed to be correct.
"""
stream = self.mseed_stream
gap_list = stream.get_gaps()
# Gaps list created with obspy.io.mseed
mseed_gap_list = [
('BW', 'BGLD', '', 'EHE',
UTCDateTime(2008, 1, 1, 0, 0, 1, 970000),
UTCDateTime(2008, 1, 1, 0, 0, 4, 35000),
2.0599999999999999, 412.0),
('BW', 'BGLD', '', 'EHE',
UTCDateTime(2008, 1, 1, 0, 0, 8, 150000),
UTCDateTime(2008, 1, 1, 0, 0, 10, 215000),
2.0599999999999999, 412.0),
('BW', 'BGLD', '', 'EHE',
UTCDateTime(2008, 1, 1, 0, 0, 14, 330000),
UTCDateTime(2008, 1, 1, 0, 0, 18, 455000),
4.120, 824.0)]
# Assert the number of gaps.
self.assertEqual(len(mseed_gap_list), len(gap_list))
for _i in range(len(mseed_gap_list)):
# Compare the string values directly.
for _j in range(6):
self.assertEqual(gap_list[_i][_j], mseed_gap_list[_i][_j])
# The small differences are probably due to rounding errors.
self.assertAlmostEqual(float(mseed_gap_list[_i][6]),
float(gap_list[_i][6]),
places=3)
self.assertAlmostEqual(float(mseed_gap_list[_i][7]),
float(gap_list[_i][7]),
places=3)
def test_get_gaps_multiplexed_streams(self):
"""
Tests the get_gaps method of the Stream objects.
"""
data = np.random.randint(0, 1000, 412)
# different channels
st = Stream()
for channel in ['EHZ', 'EHN', 'EHE']:
st.append(Trace(data=data, header={'channel': channel}))
self.assertEqual(len(st.get_gaps()), 0)
# different locations
st = Stream()
for location in ['', '00', '01']:
st.append(Trace(data=data, header={'location': location}))
self.assertEqual(len(st.get_gaps()), 0)
# different stations
st = Stream()
for station in ['MANZ', 'ROTZ', 'BLAS']:
st.append(Trace(data=data, header={'station': station}))
self.assertEqual(len(st.get_gaps()), 0)
# different networks
st = Stream()
for network in ['BW', 'GE', 'GR']:
st.append(Trace(data=data, header={'network': network}))
self.assertEqual(len(st.get_gaps()), 0)
def test_pop(self):
"""
Test the pop method of the Stream object.
"""
stream = self.mseed_stream
# Make a copy of the Traces.
traces = deepcopy(stream[:])
# Remove and return the last Trace.
temp_trace = stream.pop()
self.assertEqual(3, len(stream))
        # Assert attributes. The objects themselves are not identical.
self.assertEqual(temp_trace.stats, traces[-1].stats)
np.testing.assert_array_equal(temp_trace.data, traces[-1].data)
# Remove the last copied Trace.
traces.pop()
# Remove and return the second Trace.
temp_trace = stream.pop(1)
        # Assert attributes. The objects themselves are not identical.
self.assertEqual(temp_trace.stats, traces[1].stats)
np.testing.assert_array_equal(temp_trace.data, traces[1].data)
# Remove the second copied Trace.
traces.pop(1)
# Compare all remaining Traces.
self.assertEqual(2, len(stream))
self.assertEqual(2, len(traces))
for _i in range(len(traces)):
self.assertEqual(traces[_i].stats, stream[_i].stats)
np.testing.assert_array_equal(traces[_i].data, stream[_i].data)
def test_slicing(self):
"""
Tests the __getslice__ method of the Stream object.
"""
stream = read()
self.assertEqual(stream[0:], stream[0:])
self.assertEqual(stream[:2], stream[:2])
self.assertEqual(stream[:], stream[:])
self.assertEqual(len(stream), 3)
new_stream = stream[1:3]
self.assertTrue(isinstance(new_stream, Stream))
self.assertEqual(len(new_stream), 2)
self.assertEqual(new_stream[0].stats, stream[1].stats)
self.assertEqual(new_stream[1].stats, stream[2].stats)
def test_slicing_with_steps(self):
"""
Tests the __getslice__ method of the Stream object with step.
"""
tr1 = Trace()
tr2 = Trace()
tr3 = Trace()
tr4 = Trace()
tr5 = Trace()
st = Stream([tr1, tr2, tr3, tr4, tr5])
self.assertEqual(st[0:6].traces, [tr1, tr2, tr3, tr4, tr5])
self.assertEqual(st[0:6:1].traces, [tr1, tr2, tr3, tr4, tr5])
self.assertEqual(st[0:6:2].traces, [tr1, tr3, tr5])
self.assertEqual(st[1:6:2].traces, [tr2, tr4])
self.assertEqual(st[1:6:6].traces, [tr2])
def test_slice(self):
"""
        Slice method should not lose attributes set on stream object itself.
"""
st = read()
st.test = 1
st.muh = "Muh"
st2 = st.slice(st[0].stats.starttime, st[0].stats.endtime)
self.assertEqual(st2.test, 1)
self.assertEqual(st2.muh, "Muh")
def test_slice_nearest_sample(self):
"""
Tests that the nearest_sample argument is correctly passed to the
trace function calls.
"""
# It defaults to True.
st = read()
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = st[0]
st.slice(1, 2)
self.assertEqual(patch.call_count, 3)
for arg in patch.call_args_list:
self.assertTrue(arg[1]["nearest_sample"])
# Force True.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = st[0]
st.slice(1, 2, nearest_sample=True)
self.assertEqual(patch.call_count, 3)
for arg in patch.call_args_list:
self.assertTrue(arg[1]["nearest_sample"])
# Set to False.
with mock.patch("obspy.core.trace.Trace.slice") as patch:
patch.return_value = st[0]
st.slice(1, 2, nearest_sample=False)
self.assertEqual(patch.call_count, 3)
for arg in patch.call_args_list:
self.assertFalse(arg[1]["nearest_sample"])
def test_cutout(self):
"""
Test cutout method of the Stream object. Compare against equivalent
trimming operations.
"""
t1 = UTCDateTime("2009-06-24")
t2 = UTCDateTime("2009-08-24T00:20:06.007Z")
t3 = UTCDateTime("2009-08-24T00:20:16.008Z")
t4 = UTCDateTime("2011-09-11")
st = read()
st_cut = read()
# 1
st_cut.cutout(t4, t4 + 10)
self.__remove_processing(st_cut)
self.assertEqual(st, st_cut)
# 2
st_cut.cutout(t1 - 10, t1)
self.__remove_processing(st_cut)
self.assertEqual(st, st_cut)
# 3
st_cut.cutout(t1, t2)
st.trim(starttime=t2, nearest_sample=True)
self.__remove_processing(st_cut)
self.__remove_processing(st)
self.assertEqual(st, st_cut)
# 4
st = read()
st_cut = read()
st_cut.cutout(t3, t4)
st.trim(endtime=t3, nearest_sample=True)
self.__remove_processing(st_cut)
self.__remove_processing(st)
self.assertEqual(st, st_cut)
# 5
st = read()
st.trim(endtime=t2, nearest_sample=True)
tmp = read()
tmp.trim(starttime=t3, nearest_sample=True)
st += tmp
st_cut = read()
st_cut.cutout(t2, t3)
self.__remove_processing(st_cut)
self.__remove_processing(st)
self.assertEqual(st, st_cut)
def test_pop2(self):
"""
Test the pop method of the Stream object.
"""
trace = Trace(data=np.arange(0, 1000))
st = Stream([trace])
st = st + st + st + st
self.assertEqual(len(st), 4)
st.pop()
self.assertEqual(len(st), 3)
st[1].stats.station = 'MUH'
st.pop(0)
self.assertEqual(len(st), 2)
self.assertEqual(st[0].stats.station, 'MUH')
def test_remove(self):
"""
Tests the remove method of the Stream object.
"""
stream = self.mseed_stream
# Make a copy of the Traces.
stream2 = deepcopy(stream)
        # Use the remove method of the Stream object and delete the
        # corresponding Traces from the copied Stream.
        stream.remove(stream[1])
        del stream2[1]
        stream.remove(stream[-1])
        del stream2[-1]
# Compare remaining Streams.
self.assertEqual(stream, stream2)
def test_reverse(self):
"""
Tests the reverse method of the Stream object.
"""
stream = self.mseed_stream
# Make a copy of the Traces.
traces = deepcopy(stream[:])
# Use reversing of the Stream object and of the list.
stream.reverse()
traces.reverse()
# Compare all Traces.
self.assertEqual(4, len(stream))
self.assertEqual(4, len(traces))
for _i in range(len(traces)):
self.assertEqual(traces[_i].stats, stream[_i].stats)
np.testing.assert_array_equal(traces[_i].data, stream[_i].data)
def test_select(self):
"""
Tests the select method of the Stream object.
"""
# Create a list of header dictionaries.
headers = [
{'starttime': UTCDateTime(1990, 1, 1), 'network': 'AA',
'station': 'ZZZZ', 'channel': 'EHZ', 'sampling_rate': 200.0,
'npts': 100},
{'starttime': UTCDateTime(1990, 1, 1), 'network': 'BB',
'station': 'YYYY', 'channel': 'EHN', 'sampling_rate': 200.0,
'npts': 100},
{'starttime': UTCDateTime(2000, 1, 1), 'network': 'AA',
'station': 'ZZZZ', 'channel': 'BHZ', 'sampling_rate': 20.0,
'npts': 100},
{'starttime': UTCDateTime(1989, 1, 1), 'network': 'BB',
'station': 'XXXX', 'channel': 'BHN', 'sampling_rate': 20.0,
'npts': 100},
{'starttime': UTCDateTime(2010, 1, 1), 'network': 'AA',
'station': 'XXXX', 'channel': 'EHZ', 'sampling_rate': 200.0,
'npts': 100, 'location': '00'}]
# Make stream object for test case
traces = []
for header in headers:
traces.append(Trace(data=np.random.randint(0, 1000, 100),
header=header))
stream = Stream(traces=traces)
# Test cases:
stream2 = stream.select()
self.assertEqual(stream, stream2)
self.assertRaises(Exception, stream.select, channel="EHZ",
component="N")
stream2 = stream.select(channel='EHE')
self.assertEqual(len(stream2), 0)
stream2 = stream.select(channel='EHZ')
self.assertEqual(len(stream2), 2)
self.assertIn(stream[0], stream2)
self.assertIn(stream[4], stream2)
stream2 = stream.select(component='Z')
self.assertEqual(len(stream2), 3)
self.assertIn(stream[0], stream2)
self.assertIn(stream[2], stream2)
self.assertIn(stream[4], stream2)
stream2 = stream.select(component='n')
self.assertEqual(len(stream2), 2)
self.assertIn(stream[1], stream2)
self.assertIn(stream[3], stream2)
stream2 = stream.select(channel='BHZ', npts=100, sampling_rate='20.0',
network='AA', component='Z', station='ZZZZ')
self.assertEqual(len(stream2), 1)
self.assertIn(stream[2], stream2)
stream2 = stream.select(channel='EHZ', station="XXXX")
self.assertEqual(len(stream2), 1)
self.assertIn(stream[4], stream2)
stream2 = stream.select(network='AA')
self.assertEqual(len(stream2), 3)
self.assertIn(stream[0], stream2)
self.assertIn(stream[2], stream2)
self.assertIn(stream[4], stream2)
stream2 = stream.select(sampling_rate=20.0)
self.assertEqual(len(stream2), 2)
self.assertIn(stream[2], stream2)
self.assertIn(stream[3], stream2)
# tests for wildcarded channel:
stream2 = stream.select(channel='B*')
self.assertEqual(len(stream2), 2)
self.assertIn(stream[2], stream2)
self.assertIn(stream[3], stream2)
stream2 = stream.select(channel='EH*')
self.assertEqual(len(stream2), 3)
self.assertIn(stream[0], stream2)
self.assertIn(stream[1], stream2)
self.assertIn(stream[4], stream2)
stream2 = stream.select(channel='*Z')
self.assertEqual(len(stream2), 3)
self.assertIn(stream[0], stream2)
self.assertIn(stream[2], stream2)
self.assertIn(stream[4], stream2)
# tests for other wildcard operations:
stream2 = stream.select(station='[XY]*')
self.assertEqual(len(stream2), 3)
self.assertIn(stream[1], stream2)
self.assertIn(stream[3], stream2)
self.assertIn(stream[4], stream2)
stream2 = stream.select(station='[A-Y]*')
self.assertEqual(len(stream2), 3)
self.assertIn(stream[1], stream2)
self.assertIn(stream[3], stream2)
self.assertIn(stream[4], stream2)
stream2 = stream.select(station='[A-Y]??*', network='A?')
self.assertEqual(len(stream2), 1)
self.assertIn(stream[4], stream2)
# test case insensitivity
stream2 = stream.select(channel='BhZ', npts=100, sampling_rate='20.0',
network='aA', station='ZzZz',)
self.assertEqual(len(stream2), 1)
self.assertIn(stream[2], stream2)
stream2 = stream.select(channel='e?z', network='aa', station='x?X*',
location='00', component='z')
self.assertEqual(len(stream2), 1)
self.assertIn(stream[4], stream2)
def test_select_on_single_letter_channels(self):
st = read()
st[0].stats.channel = "Z"
st[1].stats.channel = "N"
st[2].stats.channel = "E"
self.assertEqual([tr.stats.channel for tr in st], ["Z", "N", "E"])
self.assertEqual(st.select(component="Z")[0], st[0])
self.assertEqual(st.select(component="N")[0], st[1])
self.assertEqual(st.select(component="E")[0], st[2])
self.assertEqual(len(st.select(component="Z")), 1)
self.assertEqual(len(st.select(component="N")), 1)
self.assertEqual(len(st.select(component="E")), 1)
def test_sort(self):
"""
Tests the sort method of the Stream object.
"""
# Create new Stream
stream = Stream()
# Create a list of header dictionaries. The sampling rate serves as a
# unique identifier for each Trace.
headers = [
{'starttime': UTCDateTime(1990, 1, 1), 'network': 'AAA',
'station': 'ZZZ', 'channel': 'XXX', 'sampling_rate': 100.0},
{'starttime': UTCDateTime(1990, 1, 1), 'network': 'AAA',
'station': 'YYY', 'channel': 'CCC', 'sampling_rate': 200.0},
{'starttime': UTCDateTime(2000, 1, 1), 'network': 'AAA',
'station': 'EEE', 'channel': 'GGG', 'sampling_rate': 300.0},
{'starttime': UTCDateTime(1989, 1, 1), 'network': 'AAA',
'station': 'XXX', 'channel': 'GGG', 'sampling_rate': 400.0},
{'starttime': UTCDateTime(2010, 1, 1), 'network': 'AAA',
'station': 'XXX', 'channel': 'FFF', 'sampling_rate': 500.0}]
# Create a Trace object of it and append it to the Stream object.
for _i in headers:
new_trace = Trace(header=_i)
stream.append(new_trace)
# Use normal sorting.
stream.sort()
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[300.0, 500.0, 400.0, 200.0, 100.0])
# Sort after sampling_rate.
stream.sort(keys=['sampling_rate'])
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[100.0, 200.0, 300.0, 400.0, 500.0])
# Sort after channel and sampling rate.
stream.sort(keys=['channel', 'sampling_rate'])
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[200.0, 500.0, 300.0, 400.0, 100.0])
# Sort after npts and sampling_rate and endtime.
stream.sort(keys=['npts', 'sampling_rate', 'endtime'])
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[100.0, 200.0, 300.0, 400.0, 500.0])
# The same with reverted sorting
# Use normal sorting.
stream.sort(reverse=True)
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[100.0, 200.0, 400.0, 500.0, 300.0])
# Sort after sampling_rate.
stream.sort(keys=['sampling_rate'], reverse=True)
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[500.0, 400.0, 300.0, 200.0, 100.0])
# Sort after channel and sampling rate.
stream.sort(keys=['channel', 'sampling_rate'], reverse=True)
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[100.0, 400.0, 300.0, 500.0, 200.0])
# Sort after npts and sampling_rate and endtime.
stream.sort(keys=['npts', 'sampling_rate', 'endtime'], reverse=True)
self.assertEqual([i.stats.sampling_rate for i in stream.traces],
[500.0, 400.0, 300.0, 200.0, 100.0])
# Sorting without a list or a wrong item string should fail.
self.assertRaises(TypeError, stream.sort, keys=1)
self.assertRaises(TypeError, stream.sort, keys='sampling_rate')
self.assertRaises(KeyError, stream.sort, keys=['npts', 'wrong_value'])
def test_sorting_twice(self):
"""
Sorting twice should not change order.
"""
stream = Stream()
headers = [
{'starttime': UTCDateTime(1990, 1, 1),
'endtime': UTCDateTime(1990, 1, 2), 'network': 'AAA',
'station': 'ZZZ', 'channel': 'XXX', 'npts': 10000,
'sampling_rate': 100.0},
{'starttime': UTCDateTime(1990, 1, 1),
'endtime': UTCDateTime(1990, 1, 3), 'network': 'AAA',
'station': 'YYY', 'channel': 'CCC', 'npts': 10000,
'sampling_rate': 200.0},
{'starttime': UTCDateTime(2000, 1, 1),
'endtime': UTCDateTime(2001, 1, 2), 'network': 'AAA',
'station': 'EEE', 'channel': 'GGG', 'npts': 1000,
'sampling_rate': 300.0},
{'starttime': UTCDateTime(1989, 1, 1),
'endtime': UTCDateTime(2010, 1, 2), 'network': 'AAA',
'station': 'XXX', 'channel': 'GGG', 'npts': 10000,
'sampling_rate': 400.0},
{'starttime': UTCDateTime(2010, 1, 1),
'endtime': UTCDateTime(2011, 1, 2), 'network': 'AAA',
'station': 'XXX', 'channel': 'FFF', 'npts': 1000,
'sampling_rate': 500.0}]
# Create a Trace object of it and append it to the Stream object.
for _i in headers:
new_trace = Trace(header=_i)
stream.append(new_trace)
stream.sort()
a = [i.stats.sampling_rate for i in stream.traces]
stream.sort()
b = [i.stats.sampling_rate for i in stream.traces]
# should be equal
self.assertEqual(a, b)
def test_merge_with_different_calibration_factors(self):
"""
Test the merge method of the Stream object.
"""
# 1 - different calibration factors for the same channel should fail
tr1 = Trace(data=np.zeros(5))
tr1.stats.calib = 1.0
tr2 = Trace(data=np.zeros(5))
tr2.stats.calib = 2.0
st = Stream([tr1, tr2])
        # this also emits a UserWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
self.assertRaises(Exception, st.merge)
# 2 - different calibration factors for the different channels is ok
tr1 = Trace(data=np.zeros(5))
tr1.stats.calib = 2.00
tr1.stats.channel = 'EHE'
tr2 = Trace(data=np.zeros(5))
tr2.stats.calib = 5.0
tr2.stats.channel = 'EHZ'
tr3 = Trace(data=np.zeros(5))
tr3.stats.calib = 2.00
tr3.stats.channel = 'EHE'
tr4 = Trace(data=np.zeros(5))
tr4.stats.calib = 5.0
tr4.stats.channel = 'EHZ'
st = Stream([tr1, tr2, tr3, tr4])
st.merge()
def test_merge_with_different_sampling_rates(self):
"""
Test the merge method of the Stream object.
"""
# 1 - different sampling rates for the same channel should fail
tr1 = Trace(data=np.zeros(5))
tr1.stats.sampling_rate = 200
tr2 = Trace(data=np.zeros(5))
tr2.stats.sampling_rate = 50
st = Stream([tr1, tr2])
        # this also emits a UserWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
self.assertRaises(Exception, st.merge)
# 2 - different sampling rates for the different channels is ok
tr1 = Trace(data=np.zeros(5))
tr1.stats.sampling_rate = 200
tr1.stats.channel = 'EHE'
tr2 = Trace(data=np.zeros(5))
tr2.stats.sampling_rate = 50
tr2.stats.channel = 'EHZ'
tr3 = Trace(data=np.zeros(5))
tr3.stats.sampling_rate = 200
tr3.stats.channel = 'EHE'
tr4 = Trace(data=np.zeros(5))
tr4.stats.sampling_rate = 50
tr4.stats.channel = 'EHZ'
st = Stream([tr1, tr2, tr3, tr4])
st.merge()
def test_merge_with_different_data_types(self):
"""
Test the merge method of the Stream object.
"""
# 1 - different dtype for the same channel should fail
tr1 = Trace(data=np.zeros(5, dtype=np.int32))
tr2 = Trace(data=np.zeros(5, dtype=np.float32))
st = Stream([tr1, tr2])
        # this also emits a UserWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
self.assertRaises(Exception, st.merge)
        # 2 - different data types for different channels is ok
tr1 = Trace(data=np.zeros(5, dtype=np.int32))
tr1.stats.channel = 'EHE'
tr2 = Trace(data=np.zeros(5, dtype=np.float32))
tr2.stats.channel = 'EHZ'
tr3 = Trace(data=np.zeros(5, dtype=np.int32))
tr3.stats.channel = 'EHE'
tr4 = Trace(data=np.zeros(5, dtype=np.float32))
tr4.stats.channel = 'EHZ'
st = Stream([tr1, tr2, tr3, tr4])
st.merge()
def test_merge_gaps(self):
"""
Test the merge method of the Stream object.
"""
stream = self.mseed_stream
start = UTCDateTime("2007-12-31T23:59:59.915000")
end = UTCDateTime("2008-01-01T00:04:31.790000")
self.assertEqual(len(stream), 4)
self.assertEqual(len(stream[0]), 412)
self.assertEqual(len(stream[1]), 824)
self.assertEqual(len(stream[2]), 824)
self.assertEqual(len(stream[3]), 50668)
self.assertEqual(stream[0].stats.starttime, start)
self.assertEqual(stream[3].stats.endtime, end)
for i in range(4):
self.assertEqual(stream[i].stats.sampling_rate, 200)
self.assertEqual(stream[i].get_id(), 'BW.BGLD..EHE')
stream.verify()
# merge it
stream.merge()
stream.verify()
self.assertEqual(len(stream), 1)
self.assertEqual(len(stream[0]), stream[0].data.size)
self.assertEqual(stream[0].stats.starttime, start)
self.assertEqual(stream[0].stats.endtime, end)
self.assertEqual(stream[0].stats.sampling_rate, 200)
self.assertEqual(stream[0].get_id(), 'BW.BGLD..EHE')
def test_merge_gaps_2(self):
"""
Test the merge method of the Stream object on two traces with a gap in
between.
"""
tr1 = Trace(data=np.ones(4, dtype=np.int32) * 1)
tr2 = Trace(data=np.ones(3, dtype=np.int32) * 5)
tr2.stats.starttime = tr1.stats.starttime + 9
stream = Stream([tr1, tr2])
# 1 - masked array
# Trace 1: 1111
# Trace 2: 555
# 1 + 2 : 1111-----555
st = stream.copy()
st.merge()
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ma.masked_array))
self.assertEqual(st[0].data.tolist(),
[1, 1, 1, 1, None, None, None, None, None, 5, 5, 5])
# 2 - fill in zeros
# Trace 1: 1111
# Trace 2: 555
# 1 + 2 : 111100000555
st = stream.copy()
st.merge(fill_value=0)
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ndarray))
self.assertEqual(st[0].data.tolist(),
[1, 1, 1, 1, 0, 0, 0, 0, 0, 5, 5, 5])
# 2b - fill in some other user-defined value
# Trace 1: 1111
# Trace 2: 555
# 1 + 2 : 111199999555
st = stream.copy()
st.merge(fill_value=9)
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ndarray))
self.assertEqual(st[0].data.tolist(),
[1, 1, 1, 1, 9, 9, 9, 9, 9, 5, 5, 5])
# 3 - use last value of first trace
# Trace 1: 1111
# Trace 2: 555
# 1 + 2 : 111111111555
st = stream.copy()
st.merge(fill_value='latest')
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ndarray))
self.assertEqual(st[0].data.tolist(),
[1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 5, 5])
# 4 - interpolate
# Trace 1: 1111
# Trace 2: 555
# 1 + 2 : 111112334555
st = stream.copy()
st.merge(fill_value='interpolate')
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ndarray))
self.assertEqual(st[0].data.tolist(),
[1, 1, 1, 1, 1, 2, 3, 3, 4, 5, 5, 5])
def test_split(self):
"""
Testing splitting of streams containing masked arrays.
"""
# 1 - create a Stream with gaps
tr1 = Trace(data=np.ones(4, dtype=np.int32) * 1)
tr2 = Trace(data=np.ones(3, dtype=np.int32) * 5)
tr2.stats.starttime = tr1.stats.starttime + 9
st = Stream([tr1, tr2])
st.merge()
self.assertTrue(isinstance(st[0].data, np.ma.masked_array))
# now we split again
st2 = st.split()
self.assertEqual(len(st2), 2)
self.assertTrue(isinstance(st2[0].data, np.ndarray))
self.assertTrue(isinstance(st2[1].data, np.ndarray))
self.assertEqual(st2[0].data.tolist(), [1, 1, 1, 1])
self.assertEqual(st2[1].data.tolist(), [5, 5, 5])
# 2 - use default example
st = self.mseed_stream
st.merge()
self.assertTrue(isinstance(st[0].data, np.ma.masked_array))
# now we split again
st2 = st.split()
self.assertEqual(len(st2), 4)
self.assertEqual(len(st2[0]), 412)
self.assertEqual(len(st2[1]), 824)
self.assertEqual(len(st2[2]), 824)
self.assertEqual(len(st2[3]), 50668)
self.assertEqual(st2[0].stats.starttime,
UTCDateTime("2007-12-31T23:59:59.915000"))
self.assertEqual(st2[3].stats.endtime,
UTCDateTime("2008-01-01T00:04:31.790000"))
for i in range(4):
self.assertEqual(st2[i].stats.sampling_rate, 200)
self.assertEqual(st2[i].get_id(), 'BW.BGLD..EHE')
def test_merge_overlaps_default_method(self):
"""
Test the merge method of the Stream object.
"""
# 1 - overlapping trace with differing data
# Trace 1: 0000000
# Trace 2: 1111111
# 1 + 2 : 00000--11111
tr1 = Trace(data=np.zeros(7))
tr2 = Trace(data=np.ones(7))
tr2.stats.starttime = tr1.stats.starttime + 5
st = Stream([tr1, tr2])
st.merge()
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ma.masked_array))
self.assertEqual(st[0].data.tolist(),
[0, 0, 0, 0, 0, None, None, 1, 1, 1, 1, 1])
# 2 - overlapping trace with same data
# Trace 1: 0123456
# Trace 2: 56789
# 1 + 2 : 0123456789
tr1 = Trace(data=np.arange(7))
tr2 = Trace(data=np.arange(5, 10))
tr2.stats.starttime = tr1.stats.starttime + 5
st = Stream([tr1, tr2])
st.merge()
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ndarray))
np.testing.assert_array_equal(st[0].data, np.arange(10))
#
# 3 - contained overlap with same data
# Trace 1: 0123456789
# Trace 2: 56
# 1 + 2 : 0123456789
tr1 = Trace(data=np.arange(10))
tr2 = Trace(data=np.arange(5, 7))
tr2.stats.starttime = tr1.stats.starttime + 5
st = Stream([tr1, tr2])
st.merge()
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ndarray))
np.testing.assert_array_equal(st[0].data, np.arange(10))
#
# 4 - contained overlap with differing data
# Trace 1: 0000000000
# Trace 2: 11
# 1 + 2 : 00000--000
tr1 = Trace(data=np.zeros(10))
tr2 = Trace(data=np.ones(2))
tr2.stats.starttime = tr1.stats.starttime + 5
st = Stream([tr1, tr2])
st.merge()
self.assertEqual(len(st), 1)
self.assertTrue(isinstance(st[0].data, np.ma.masked_array))
self.assertEqual(st[0].data.tolist(),
[0, 0, 0, 0, 0, None, None, 0, 0, 0])
def test_tab_completion_trace(self):
"""
Test tab completion of Trace object.
"""
tr = Trace()
self.assertIn('sampling_rate', dir(tr.stats))
self.assertIn('npts', dir(tr.stats))
self.assertIn('station', dir(tr.stats))
self.assertIn('starttime', dir(tr.stats))
self.assertIn('endtime', dir(tr.stats))
self.assertIn('calib', dir(tr.stats))
self.assertIn('delta', dir(tr.stats))
def test_bugfix_merge_drop_trace_if_already_contained(self):
"""
Trace data already existing in another trace and ending on the same
end time was not correctly merged until now.
"""
trace1 = Trace(data=np.empty(10))
trace2 = Trace(data=np.empty(2))
trace2.stats.starttime = trace1.stats.endtime - trace1.stats.delta
st = Stream([trace1, trace2])
st.merge()
def test_bugfix_merge_multiple_traces(self):
"""
Bugfix for merging multiple traces in a row.
"""
# create a stream with multiple traces overlapping
trace1 = Trace(data=np.empty(10))
traces = [trace1]
for _ in range(10):
trace = Trace(data=np.empty(10))
trace.stats.starttime = \
traces[-1].stats.endtime - trace1.stats.delta
traces.append(trace)
st = Stream(traces)
st.merge()
def test_bugfix_merge_multiple_traces_2(self):
"""
Bugfix for merging multiple traces in a row.
"""
trace1 = Trace(data=np.empty(4190864))
trace1.stats.sampling_rate = 200
trace1.stats.starttime = UTCDateTime("2010-01-21T00:00:00.015000Z")
trace2 = Trace(data=np.empty(603992))
trace2.stats.sampling_rate = 200
trace2.stats.starttime = UTCDateTime("2010-01-21T05:49:14.330000Z")
trace3 = Trace(data=np.empty(222892))
trace3.stats.sampling_rate = 200
trace3.stats.starttime = UTCDateTime("2010-01-21T06:39:33.280000Z")
st = Stream([trace1, trace2, trace3])
st.merge()
def test_merge_with_small_sampling_rate(self):
"""
Bugfix for merging multiple traces with very small sampling rate.
"""
# create traces
np.random.seed(815)
trace1 = Trace(data=np.random.randn(1441))
trace1.stats.delta = 60.0
trace1.stats.starttime = UTCDateTime("2009-02-01T00:00:02.995000Z")
trace2 = Trace(data=np.random.randn(1441))
trace2.stats.delta = 60.0
trace2.stats.starttime = UTCDateTime("2009-02-02T00:00:12.095000Z")
trace3 = Trace(data=np.random.randn(1440))
trace3.stats.delta = 60.0
trace3.stats.starttime = UTCDateTime("2009-02-03T00:00:16.395000Z")
trace4 = Trace(data=np.random.randn(1440))
trace4.stats.delta = 60.0
trace4.stats.starttime = UTCDateTime("2009-02-04T00:00:11.095000Z")
# create stream
st = Stream([trace1, trace2, trace3, trace4])
# merge
st.merge()
# compare results
self.assertEqual(len(st), 1)
self.assertEqual(st[0].stats.delta, 60.0)
self.assertEqual(st[0].stats.starttime, trace1.stats.starttime)
# end time of last trace
endtime = trace1.stats.starttime + \
(4 * 1440 - 1) * trace1.stats.delta
self.assertEqual(st[0].stats.endtime, endtime)
def test_merge_overlaps_method_1(self):
"""
Test merging with method = 1.
"""
# Test merging three traces.
trace1 = Trace(data=np.ones(10))
trace2 = Trace(data=10 * np.ones(11))
trace3 = Trace(data=2 * np.ones(20))
st = Stream([trace1, trace2, trace3])
st.merge(method=1)
np.testing.assert_array_equal(st[0].data, 2 * np.ones(20))
# Any contained traces with different data will be discarded::
#
# Trace 1: 111111111111 (contained trace)
# Trace 2: 55
# 1 + 2 : 111111111111
trace1 = Trace(data=np.ones(12))
trace2 = Trace(data=5 * np.ones(2))
trace2.stats.starttime += 4
st = Stream([trace1, trace2])
st.merge(method=1)
np.testing.assert_array_equal(st[0].data, np.ones(12))
# No interpolation (``interpolation_samples=0``)::
#
# Trace 1: 11111111
# Trace 2: 55555555
# 1 + 2 : 111155555555
trace1 = Trace(data=np.ones(8))
trace2 = Trace(data=5 * np.ones(8))
trace2.stats.starttime += 4
st = Stream([trace1, trace2])
st.merge(method=1)
np.testing.assert_array_equal(st[0].data, np.array([1] * 4 + [5] * 8))
# Interpolate first two samples (``interpolation_samples=2``)::
#
# Trace 1: 00000000
# Trace 2: 66666666
# 1 + 2 : 000024666666 (interpolation_samples=2)
trace1 = Trace(data=np.zeros(8, dtype=np.int32))
trace2 = Trace(data=6 * np.ones(8, dtype=np.int32))
trace2.stats.starttime += 4
st = Stream([trace1, trace2])
st.merge(method=1, interpolation_samples=2)
np.testing.assert_array_equal(st[0].data,
np.array([0] * 4 + [2] + [4] + [6] * 6))
# Interpolate all samples (``interpolation_samples=-1``)::
#
# Trace 1: 00000000
# Trace 2: 55555555
# 1 + 2 : 000012345555
trace1 = Trace(data=np.zeros(8, dtype=np.int32))
trace2 = Trace(data=5 * np.ones(8, dtype=np.int32))
trace2.stats.starttime += 4
st = Stream([trace1, trace2])
st.merge(method=1, interpolation_samples=(-1))
np.testing.assert_array_equal(
st[0].data, np.array([0] * 4 + [1] + [2] + [3] + [4] + [5] * 4))
# Interpolate all samples (``interpolation_samples=5``)::
# Given number of samples is bigger than the actual overlap - should
# interpolate all samples
#
# Trace 1: 00000000
# Trace 2: 55555555
# 1 + 2 : 000012345555
trace1 = Trace(data=np.zeros(8, dtype=np.int32))
trace2 = Trace(data=5 * np.ones(8, dtype=np.int32))
trace2.stats.starttime += 4
st = Stream([trace1, trace2])
st.merge(method=1, interpolation_samples=5)
np.testing.assert_array_equal(
st[0].data, np.array([0] * 4 + [1] + [2] + [3] + [4] + [5] * 4))
def test_trim_removing_empty_traces(self):
"""
        Trimming a stream should throw away any traces that end up empty.
"""
# create Stream.
trace1 = Trace(data=np.zeros(10))
trace1.stats.delta = 1.0
trace2 = Trace(data=np.ones(10))
trace2.stats.delta = 1.0
trace2.stats.starttime = UTCDateTime(1000)
trace3 = Trace(data=np.arange(10))
trace3.stats.delta = 1.0
trace3.stats.starttime = UTCDateTime(2000)
stream = Stream([trace1, trace2, trace3])
stream.trim(UTCDateTime(900), UTCDateTime(1100))
# Check if only trace2 is still in the Stream object.
self.assertEqual(len(stream), 1)
np.testing.assert_array_equal(np.ones(10), stream[0].data)
self.assertEqual(stream[0].stats.starttime, UTCDateTime(1000))
self.assertEqual(stream[0].stats.npts, 10)
def test_trim_with_small_sampling_rate(self):
"""
Bugfix for cutting multiple traces with very small sampling rate.
"""
# create traces
trace1 = Trace(data=np.empty(1441))
trace1.stats.delta = 60.0
trace1.stats.starttime = UTCDateTime("2009-02-01T00:00:02.995000Z")
trace2 = Trace(data=np.empty(1441))
trace2.stats.delta = 60.0
trace2.stats.starttime = UTCDateTime("2009-02-02T00:00:12.095000Z")
trace3 = Trace(data=np.empty(1440))
trace3.stats.delta = 60.0
trace3.stats.starttime = UTCDateTime("2009-02-03T00:00:16.395000Z")
trace4 = Trace(data=np.empty(1440))
trace4.stats.delta = 60.0
trace4.stats.starttime = UTCDateTime("2009-02-04T00:00:11.095000Z")
# create stream
st = Stream([trace1, trace2, trace3, trace4])
# trim
st.trim(trace1.stats.starttime, trace4.stats.endtime)
# compare results
self.assertEqual(len(st), 4)
self.assertEqual(st[0].stats.delta, 60.0)
self.assertEqual(st[0].stats.starttime, trace1.stats.starttime)
self.assertEqual(st[3].stats.endtime, trace4.stats.endtime)
def test_writing_masked_array(self):
"""
Writing a masked array should raise an exception.
"""
# np.ma.masked_array with masked values
tr = Trace(data=np.ma.masked_all(10))
st = Stream([tr])
self.assertRaises(NotImplementedError, st.write, 'filename', 'MSEED')
# np.ma.masked_array without masked values
tr = Trace(data=np.ma.ones(10))
st = Stream([tr])
self.assertRaises(NotImplementedError, st.write, 'filename', 'MSEED')
def test_pickle(self):
"""
        Testing pickling of Stream objects.
"""
tr = Trace(data=np.random.randn(1441))
st = Stream([tr])
st.verify()
# protocol 0 (ASCII)
temp = pickle.dumps(st, protocol=0)
st2 = pickle.loads(temp)
np.testing.assert_array_equal(st[0].data, st2[0].data)
self.assertEqual(st[0].stats, st2[0].stats)
# protocol 1 (old binary)
temp = pickle.dumps(st, protocol=1)
st2 = pickle.loads(temp)
np.testing.assert_array_equal(st[0].data, st2[0].data)
self.assertEqual(st[0].stats, st2[0].stats)
# protocol 2 (new binary)
temp = pickle.dumps(st, protocol=2)
st2 = pickle.loads(temp)
np.testing.assert_array_equal(st[0].data, st2[0].data)
self.assertEqual(st[0].stats, st2[0].stats)
def test_cpickle(self):
"""
        Testing pickling of Stream objects.
"""
tr = Trace(data=np.random.randn(1441))
st = Stream([tr])
st.verify()
# protocol 0 (ASCII)
temp = pickle.dumps(st, protocol=0)
st2 = pickle.loads(temp)
np.testing.assert_array_equal(st[0].data, st2[0].data)
self.assertEqual(st[0].stats, st2[0].stats)
# protocol 1 (old binary)
temp = pickle.dumps(st, protocol=1)
st2 = pickle.loads(temp)
np.testing.assert_array_equal(st[0].data, st2[0].data)
self.assertEqual(st[0].stats, st2[0].stats)
# protocol 2 (new binary)
temp = pickle.dumps(st, protocol=2)
st2 = pickle.loads(temp)
np.testing.assert_array_equal(st[0].data, st2[0].data)
self.assertEqual(st[0].stats, st2[0].stats)
def test_is_pickle(self):
"""
Testing _is_pickle function.
"""
# existing file
st = read()
with NamedTemporaryFile() as tf:
st.write(tf.name, format='PICKLE')
# check using file name
self.assertTrue(_is_pickle(tf.name))
# check using file handler
self.assertTrue(_is_pickle(tf))
        # non-existent files
self.assertFalse(_is_pickle('/path/to/pickle.file'))
self.assertFalse(_is_pickle(12345))
def test_read_write_pickle(self):
"""
Testing _read_pickle and _write_pickle functions.
"""
st = read()
# write
with NamedTemporaryFile() as tf:
# write using file name
_write_pickle(st, tf.name)
self.assertTrue(_is_pickle(tf.name))
# write using file handler
_write_pickle(st, tf)
tf.seek(0)
self.assertTrue(_is_pickle(tf))
# write using stream write method
st.write(tf.name, format='PICKLE')
# check and read directly
st2 = _read_pickle(tf.name)
self.assertEqual(len(st2), 3)
np.testing.assert_array_equal(st2[0].data, st[0].data)
# use read() with given format
st2 = read(tf.name, format='PICKLE')
self.assertEqual(len(st2), 3)
np.testing.assert_array_equal(st2[0].data, st[0].data)
# use read() and automatically detect format
st2 = read(tf.name)
self.assertEqual(len(st2), 3)
np.testing.assert_array_equal(st2[0].data, st[0].data)
def test_read_pickle(self):
"""
Testing _read_pickle function.
Example pickles were written using obspy 1.0.3 on Py2 and Py3
respectively.
"""
for filename in ('example_py2.pickle', 'example_py3.pickle'):
pickle_file = os.path.join(self.data_path, filename)
st = read(pickle_file, format='PICKLE')
self.assertEqual(len(st), 3)
self.assertEqual(len(st[0]), 114)
self.assertEqual(len(st[1]), 110)
self.assertEqual(len(st[2]), 105)
for tr, comp in zip(st, 'ZNE'):
self.assertEqual(tr.stats.network, 'BW')
self.assertEqual(tr.stats.station, 'RJOB')
self.assertEqual(tr.stats.location, '')
self.assertEqual(tr.stats.channel, 'EH' + comp)
self.assertEqual(tr.stats.sampling_rate, 100.0)
self.assertEqual(st[0].stats.starttime,
UTCDateTime('2009-08-24T00:20:03.000000Z'))
self.assertEqual(st[1].stats.starttime,
UTCDateTime('2009-08-24T00:20:03.040000Z'))
self.assertEqual(st[2].stats.starttime,
UTCDateTime('2009-08-24T00:20:03.090000Z'))
self.assertEqual(st[0].stats.endtime,
UTCDateTime('2009-08-24T00:20:04.130000Z'))
self.assertEqual(st[1].stats.endtime,
UTCDateTime('2009-08-24T00:20:04.130000Z'))
self.assertEqual(st[2].stats.endtime,
UTCDateTime('2009-08-24T00:20:04.130000Z'))
np.testing.assert_array_equal(
st[0].data[:10], [0, 6, 75, 262, 549, 943, 1442, 1785, 2147,
3029])
np.testing.assert_array_equal(
st[1].data[:10], [624, 1125, 1647, 2607, 3320, 4389, 5764,
7078, 8063, 9458])
np.testing.assert_array_equal(
st[2].data[:10], [-9573, -11576, -14450, -16754, -20348,
-23220, -28837, -30811, -36024, -40052])
np.testing.assert_array_equal(
st[0].data[-10:], [-283742, -305558, -302737, -317144, -310056,
-308462, -302752, -304243, -296202,
-313968])
np.testing.assert_array_equal(
st[1].data[-10:], [90493, 105062, 100721, 98631, 100355,
106287, 115356, 117989, 97907, 113225])
np.testing.assert_array_equal(
st[2].data[-10:], [-144765, -149205, -123622, -139548, -137160,
-154283, -103584, -138578, -128339,
-125707])
def test_get_gaps_2(self):
"""
Test case for issue #73.
"""
tr1 = Trace(data=np.empty(720000))
tr1.stats.starttime = UTCDateTime("2010-02-09T00:19:19.850000Z")
tr1.stats.sampling_rate = 200.0
tr1.verify()
tr2 = Trace(data=np.empty(720000))
tr2.stats.starttime = UTCDateTime("2010-02-09T01:19:19.850000Z")
tr2.stats.sampling_rate = 200.0
tr2.verify()
tr3 = Trace(data=np.empty(720000))
tr3.stats.starttime = UTCDateTime("2010-02-09T02:19:19.850000Z")
tr3.stats.sampling_rate = 200.0
tr3.verify()
st = Stream([tr1, tr2, tr3])
st.verify()
# same sampling rate should have no gaps
gaps = st.get_gaps()
self.assertEqual(len(gaps), 0)
# different sampling rate should result in a gap
tr3.stats.sampling_rate = 50.0
gaps = st.get_gaps()
self.assertEqual(len(gaps), 1)
# but different ids will be skipped (if only one trace)
tr3.stats.station = 'MANZ'
gaps = st.get_gaps()
self.assertEqual(len(gaps), 0)
# multiple traces with same id will be handled again
tr2.stats.station = 'MANZ'
gaps = st.get_gaps()
self.assertEqual(len(gaps), 1)
def test_get_gaps_whole_overlap(self):
"""
Test get_gaps method with a trace completely overlapping another trace.
"""
tr1 = Trace(data=np.empty(3600))
tr1.stats.starttime = UTCDateTime("2018-09-25T00:00:00.000000Z")
tr1.stats.sampling_rate = 1.
tr2 = Trace(data=np.empty(60))
tr2.stats.starttime = UTCDateTime("2018-09-25T00:01:00.000000Z")
tr2.stats.sampling_rate = 1.
st = Stream([tr1, tr2])
gaps = st.get_gaps()
self.assertEqual(len(gaps), 1)
gap = gaps[0]
starttime = gap[4]
self.assertEqual(starttime, UTCDateTime("2018-09-25T00:01:59.000000Z"))
endtime = gap[5]
self.assertEqual(endtime, tr2.stats.starttime)
def test_comparisons(self):
"""
        Tests all rich comparison operators (==, !=, <, <=, >, >=).
        The latter four are not implemented due to their ambiguous meaning
        and raise an error instead.
"""
# create test streams
tr0 = Trace(np.arange(3))
tr1 = Trace(np.arange(3))
tr2 = Trace(np.arange(3), {'station': 'X'})
tr3 = Trace(np.arange(3),
{'processing': ["filter:lowpass:{'freq': 10}"]})
tr4 = Trace(np.arange(5))
tr5 = Trace(np.arange(5), {'station': 'X'})
tr6 = Trace(np.arange(5),
{'processing': ["filter:lowpass:{'freq': 10}"]})
tr7 = Trace(np.arange(5),
{'processing': ["filter:lowpass:{'freq': 10}"]})
st0 = Stream([tr0])
st1 = Stream([tr1])
st2 = Stream([tr0, tr1])
st3 = Stream([tr2, tr3])
st4 = Stream([tr1, tr2, tr3])
st5 = Stream([tr4, tr5, tr6])
st6 = Stream([tr0, tr6])
st7 = Stream([tr1, tr7])
st8 = Stream([tr7, tr1])
st9 = Stream()
st_a = Stream()
# tests that should raise a NotImplementedError (i.e. <=, <, >=, >)
self.assertRaises(NotImplementedError, st1.__lt__, st1)
self.assertRaises(NotImplementedError, st1.__le__, st1)
self.assertRaises(NotImplementedError, st1.__gt__, st1)
self.assertRaises(NotImplementedError, st1.__ge__, st1)
self.assertRaises(NotImplementedError, st1.__lt__, st2)
self.assertRaises(NotImplementedError, st1.__le__, st2)
self.assertRaises(NotImplementedError, st1.__gt__, st2)
self.assertRaises(NotImplementedError, st1.__ge__, st2)
# normal tests
for st in [st1]:
self.assertEqual(st0 == st, True)
self.assertEqual(st0 != st, False)
for st in [st2, st3, st4, st5, st6, st7, st8, st9, st_a]:
self.assertEqual(st0 == st, False)
self.assertEqual(st0 != st, True)
for st in [st0]:
self.assertEqual(st1 == st, True)
self.assertEqual(st1 != st, False)
for st in [st2, st3, st4, st5, st6, st7, st8, st9, st_a]:
self.assertEqual(st1 == st, False)
self.assertEqual(st1 != st, True)
for st in [st0, st1, st3, st4, st5, st6, st7, st8, st9, st_a]:
self.assertEqual(st2 == st, False)
self.assertEqual(st2 != st, True)
for st in [st0, st1, st2, st4, st5, st6, st7, st8, st9, st_a]:
self.assertEqual(st3 == st, False)
self.assertEqual(st3 != st, True)
for st in [st0, st1, st2, st3, st5, st6, st7, st8, st9, st_a]:
self.assertEqual(st4 == st, False)
self.assertEqual(st4 != st, True)
for st in [st0, st1, st2, st3, st4, st6, st7, st8, st9, st_a]:
self.assertEqual(st5 == st, False)
self.assertEqual(st5 != st, True)
for st in [st7, st8]:
self.assertEqual(st6 == st, True)
self.assertEqual(st6 != st, False)
for st in [st0, st1, st2, st3, st4, st5, st9, st_a]:
self.assertEqual(st6 == st, False)
self.assertEqual(st6 != st, True)
for st in [st6, st8]:
self.assertEqual(st7 == st, True)
self.assertEqual(st7 != st, False)
for st in [st0, st1, st2, st3, st4, st5, st9, st_a]:
self.assertEqual(st7 == st, False)
self.assertEqual(st7 != st, True)
for st in [st6, st7]:
self.assertEqual(st8 == st, True)
self.assertEqual(st8 != st, False)
for st in [st0, st1, st2, st3, st4, st5, st9, st_a]:
self.assertEqual(st8 == st, False)
self.assertEqual(st8 != st, True)
for st in [st_a]:
self.assertEqual(st9 == st, True)
self.assertEqual(st9 != st, False)
for st in [st0, st1, st2, st3, st4, st5, st6, st7, st8]:
self.assertEqual(st9 == st, False)
self.assertEqual(st9 != st, True)
for st in [st9]:
self.assertEqual(st_a == st, True)
self.assertEqual(st_a != st, False)
for st in [st0, st1, st2, st3, st4, st5, st6, st7, st8]:
self.assertEqual(st_a == st, False)
self.assertEqual(st_a != st, True)
# some weird tests against non-Stream objects
for object in [0, 1, 0.0, 1.0, "", "test", True, False, [], [tr0],
set(), set(tr0), {}, {"test": "test"}, Trace(), None]:
self.assertEqual(st0 == object, False)
self.assertEqual(st0 != object, True)
def test_trim_nearest_sample(self):
"""
Tests to trim at nearest sample
"""
head = {'sampling_rate': 1.0, 'starttime': UTCDateTime(0.0)}
tr1 = Trace(data=np.random.randint(0, 1000, 120), header=head)
tr2 = Trace(data=np.random.randint(0, 1000, 120), header=head)
tr2.stats.starttime += 0.4
st = Stream(traces=[tr1, tr2])
# STARTTIME
# check that trimming first selects the next best sample, and only
# then selects the following ones
# | S | | |
# | | | |
st.trim(UTCDateTime(0.6), endtime=None)
self.assertEqual(st[0].stats.starttime.timestamp, 1.0)
self.assertEqual(st[1].stats.starttime.timestamp, 1.4)
# ENDTIME
# check that trimming first selects the next best sample, and only
# then selects the following ones
# | | | E |
# | | | |
st.trim(starttime=None, endtime=UTCDateTime(2.6))
self.assertEqual(st[0].stats.endtime.timestamp, 3.0)
self.assertEqual(st[1].stats.endtime.timestamp, 3.4)
def test_trim_consistent_start_end_time_nearest_sample(self):
"""
Test case for #127. It ensures that the sample sizes stay
        consistent after trimming, i.e. that _ltrim and _rtrim
round in the same direction.
"""
data = np.zeros(10)
t = UTCDateTime(0)
traces = []
for delta in (0, 0.25, 0.5, 0.75, 1):
traces.append(Trace(data.copy()))
traces[-1].stats.starttime = t + delta
st = Stream(traces)
st.trim(t + 3.5, t + 6.5)
start = [4.0, 4.25, 4.5, 3.75, 4.0]
end = [6.0, 6.25, 6.50, 5.75, 6.0]
for i in range(len(st)):
self.assertEqual(3, st[i].stats.npts)
self.assertEqual(st[i].stats.starttime.timestamp, start[i])
self.assertEqual(st[i].stats.endtime.timestamp, end[i])
def test_trim_consistent_start_end_time_nearest_sample_padded(self):
"""
Test case for #127. It ensures that the sample sizes stay
        consistent after trimming, i.e. that _ltrim and _rtrim
round in the same direction. Padded version.
"""
data = np.zeros(10)
t = UTCDateTime(0)
traces = []
for delta in (0, 0.25, 0.5, 0.75, 1):
traces.append(Trace(data.copy()))
traces[-1].stats.starttime = t + delta
st = Stream(traces)
st.trim(t - 3.5, t + 16.5, pad=True)
start = [-4.0, -3.75, -3.5, -4.25, -4.0]
end = [17.0, 17.25, 17.50, 16.75, 17.0]
for i in range(len(st)):
self.assertEqual(22, st[i].stats.npts)
self.assertEqual(st[i].stats.starttime.timestamp, start[i])
self.assertEqual(st[i].stats.endtime.timestamp, end[i])
def test_trim_consistent_start_end_time(self):
"""
Test case for #127. It ensures that the sample start and end times
stay consistent after trimming.
"""
data = np.zeros(10)
t = UTCDateTime(0)
traces = []
for delta in (0, 0.25, 0.5, 0.75, 1):
traces.append(Trace(data.copy()))
traces[-1].stats.starttime = t + delta
st = Stream(traces)
st.trim(t + 3.5, t + 6.5, nearest_sample=False)
start = [4.00, 4.25, 3.50, 3.75, 4.00]
end = [6.00, 6.25, 6.50, 5.75, 6.00]
npts = [3, 3, 4, 3, 3]
for i in range(len(st)):
self.assertEqual(st[i].stats.npts, npts[i])
self.assertEqual(st[i].stats.starttime.timestamp, start[i])
self.assertEqual(st[i].stats.endtime.timestamp, end[i])
def test_trim_consistent_start_and_time_pad(self):
"""
Test case for #127. It ensures that the sample start and end times
stay consistent after trimming. Padded version.
"""
data = np.zeros(10)
t = UTCDateTime(0)
traces = []
for delta in (0, 0.25, 0.5, 0.75, 1):
traces.append(Trace(data.copy()))
traces[-1].stats.starttime = t + delta
st = Stream(traces)
st.trim(t - 3.5, t + 16.5, nearest_sample=False, pad=True)
start = [-3.00, -2.75, -3.50, -3.25, -3.00]
end = [16.00, 16.25, 16.50, 15.75, 16.00]
npts = [20, 20, 21, 20, 20]
for i in range(len(st)):
self.assertEqual(st[i].stats.npts, npts[i])
self.assertEqual(st[i].stats.starttime.timestamp, start[i])
self.assertEqual(st[i].stats.endtime.timestamp, end[i])
def test_str(self):
"""
Test case for issue #162 - print streams in a more consistent way.
"""
tr1 = Trace()
tr1.stats.station = "1"
tr2 = Trace()
tr2.stats.station = "12345"
st = Stream([tr1, tr2])
result = st.__str__()
expected = "2 Trace(s) in Stream:\n" + \
".1.. | 1970-01-01T00:00:00.000000Z - 1970-01-01" + \
"T00:00:00.000000Z | 1.0 Hz, 0 samples\n" + \
".12345.. | 1970-01-01T00:00:00.000000Z - 1970-01-01" + \
"T00:00:00.000000Z | 1.0 Hz, 0 samples"
self.assertEqual(result, expected)
# streams containing more than 20 lines will be compressed
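        # Multiplying a Stream by an integer repeats its traces, so st2
        # holds 40 copies of tr1.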
st2 = Stream([tr1]) * 40
result = st2.__str__()
self.assertIn('40 Trace(s) in Stream:', result)
self.assertIn('other traces', result)
def test_cleanup(self):
"""
        Test case for merging traces in the stream with method=-1. This
        should only merge traces that are exactly the same, contained and
        exactly the same, or directly adjacent.
"""
tr1 = self.mseed_stream[0]
start = tr1.stats.starttime
end = tr1.stats.endtime
dt = end - start
delta = tr1.stats.delta
# test traces that should be merged:
# contained traces with compatible data
tr2 = tr1.slice(start, start + dt / 3)
tr3 = tr1.copy()
tr4 = tr1.slice(start + dt / 4, end - dt / 4)
# adjacent traces
tr5 = tr1.copy()
tr5.stats.starttime = end + delta
tr6 = tr1.copy()
tr6.stats.starttime = start - dt - delta
# create overlapping traces with compatible data
tr_01 = tr1.copy()
tr_01.trim(starttime=start + 2 * delta)
tr_01.data = np.concatenate([tr_01.data, np.arange(5)])
tr_02 = tr1.copy()
tr_02.trim(endtime=end - 2 * delta)
tr_02.data = np.concatenate([np.arange(5), tr_02.data])
tr_02.stats.starttime -= 5 * delta
for _i in [tr1, tr2, tr3, tr4, tr5, tr6, tr_01, tr_02]:
if "processing" in _i.stats:
del _i.stats.processing
# test mergeable traces (contained ones)
for tr_b in [tr2, tr3, tr4]:
tr_a = tr1.copy()
st = Stream([tr_a, tr_b])
st._cleanup()
self.assertEqual(st, Stream([tr1]))
self.assertEqual(type(st[0].data), np.ndarray)
# test mergeable traces (adjacent ones)
for tr_b in [tr5, tr6]:
tr_a = tr1.copy()
st = Stream([tr_a, tr_b])
st._cleanup()
self.assertEqual(len(st), 1)
self.assertEqual(type(st[0].data), np.ndarray)
st_result = Stream([tr1, tr_b])
st_result.merge()
self.assertEqual(st, st_result)
# test mergeable traces (overlapping ones)
for tr_b in [tr_01, tr_02]:
tr_a = tr1.copy()
st = Stream([tr_a, tr_b])
st._cleanup()
self.assertEqual(len(st), 1)
self.assertEqual(type(st[0].data), np.ndarray)
st_result = Stream([tr1, tr_b])
st_result.merge()
self.assertEqual(st, st_result)
# test traces that should not be merged
tr7 = tr1.copy()
tr7.stats.sampling_rate *= 2
tr8 = tr1.copy()
tr8.stats.station = "AA"
tr9 = tr1.copy()
tr9.stats.starttime = end + 10 * delta
# test some weird gaps near to one sample:
tr10 = tr1.copy()
tr10.stats.starttime = end + 0.5 * delta
tr11 = tr1.copy()
tr11.stats.starttime = end + 0.1 * delta
tr12 = tr1.copy()
tr12.stats.starttime = end + 0.8 * delta
tr13 = tr1.copy()
tr13.stats.starttime = end + 1.2 * delta
# test non-mergeable traces
for tr_b in [tr7, tr8, tr9, tr10, tr11, tr12, tr13]:
tr_a = tr1.copy()
st = Stream([tr_a, tr_b])
# ignore UserWarnings
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore', UserWarning)
st._cleanup()
self.assertEqual(st, Stream([tr_a, tr_b]))
def test_integrate_and_differentiate(self):
"""
Test integration and differentiation methods of stream
"""
st1 = read()
st2 = read()
st1.filter('lowpass', freq=1.0)
st2.filter('lowpass', freq=1.0)
st1.differentiate()
st1.integrate()
st2.integrate()
st2.differentiate()
np.testing.assert_array_almost_equal(
st1[0].data[:-1], st2[0].data[:-1], decimal=5)
def test_read(self):
"""
Testing read function.
"""
# 1 - default example
# dtype
tr = read(dtype=np.int64)[0]
self.assertEqual(tr.data.dtype, np.int64)
# dtype is string
tr2 = read(dtype='i8')[0]
self.assertEqual(tr2.data.dtype, np.int64)
self.assertEqual(tr, tr2)
# start/end time
tr2 = read(starttime=tr.stats.starttime + 1,
endtime=tr.stats.endtime - 2)[0]
self.assertEqual(tr2.stats.starttime, tr.stats.starttime + 1)
self.assertEqual(tr2.stats.endtime, tr.stats.endtime - 2)
# headonly
tr = read(headonly=True)[0]
self.assertFalse(tr.data)
# 2 - via http
# now in separate test case "test_read_url_via_network"
# 3 - some example within obspy
# dtype
tr = read('/path/to/slist_float.ascii', dtype=np.int32)[0]
self.assertEqual(tr.data.dtype, np.int32)
# start/end time
tr2 = read('/path/to/slist_float.ascii',
starttime=tr.stats.starttime + 0.025,
endtime=tr.stats.endtime - 0.05)[0]
self.assertEqual(tr2.stats.starttime, tr.stats.starttime + 0.025)
self.assertEqual(tr2.stats.endtime, tr.stats.endtime - 0.05)
# headonly
tr = read('/path/to/slist_float.ascii', headonly=True)[0]
self.assertFalse(tr.data)
        # non-existent file
self.assertRaises(OSError, read, '/path/to/UNKNOWN')
# 4 - file patterns
path = os.path.dirname(__file__)
ascii_path = os.path.join(path, "..", "..", "io", "ascii", "tests",
"data")
filename = os.path.join(ascii_path, 'slist.*')
st = read(filename)
self.assertEqual(len(st), 2)
# exception if no file matches file pattern
filename = path + os.sep + 'data' + os.sep + 'NOTEXISTING.*'
self.assertRaises(Exception, read, filename)
# argument headonly should not be used with start or end time or dtype
with warnings.catch_warnings(record=True):
# will usually warn only but here we force to raise an exception
warnings.simplefilter('error', UserWarning)
self.assertRaises(UserWarning, read, '/path/to/slist_float.ascii',
headonly=True, starttime=0, endtime=1)
def test_read_url_via_network(self):
"""
        Testing read function with a URL fetching data via network connection
"""
# 2 - via http
# dtype
tr = read('https://examples.obspy.org/test.sac', dtype=np.int32)[0]
self.assertEqual(tr.data.dtype, np.int32)
# start/end time
tr2 = read('https://examples.obspy.org/test.sac',
starttime=tr.stats.starttime + 1,
endtime=tr.stats.endtime - 2)[0]
self.assertEqual(tr2.stats.starttime, tr.stats.starttime + 1)
self.assertEqual(tr2.stats.endtime, tr.stats.endtime - 2)
# headonly
tr = read('https://examples.obspy.org/test.sac', headonly=True)[0]
self.assertFalse(tr.data)
def test_copy(self):
"""
Testing the copy method of the Stream object.
"""
st = read()
st2 = st.copy()
self.assertEqual(st, st2)
self.assertEqual(st2, st)
self.assertFalse(st is st2)
self.assertFalse(st2 is st)
self.assertEqual(st.traces[0], st2.traces[0])
self.assertFalse(st.traces[0] is st2.traces[0])
def test_merge_with_empty_trace(self):
"""
        Merging a stream containing an empty trace with a differing sampling
rate should not fail.
"""
# preparing a dataset
tr = read()[0]
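        # Dividing a Trace by an integer splits it into that many
        # contiguous pieces and returns them as a Stream (3 traces here).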
st = tr / 3
# empty and change sampling rate of second trace
st[1].stats.sampling_rate = 0
st[1].data = np.array([])
# merge
st.merge(fill_value='interpolate')
self.assertEqual(len(st), 1)
def test_rotate(self):
"""
Testing the rotate method.
"""
st = read()
st += st.copy()
st[3:].normalize()
st2 = st.copy()
# rotate to RT and back with 6 traces
st.rotate(method='NE->RT', back_azimuth=30)
self.assertTrue((st[0].stats.channel[-1] + st[1].stats.channel[-1] +
st[2].stats.channel[-1]) == 'ZRT')
self.assertTrue((st[3].stats.channel[-1] + st[4].stats.channel[-1] +
st[5].stats.channel[-1]) == 'ZRT')
st.rotate(method='RT->NE', back_azimuth=30)
self.assertTrue((st[0].stats.channel[-1] + st[1].stats.channel[-1] +
st[2].stats.channel[-1]) == 'ZNE')
self.assertTrue((st[3].stats.channel[-1] + st[4].stats.channel[-1] +
st[5].stats.channel[-1]) == 'ZNE')
self.assertTrue(np.allclose(st[0].data, st2[0].data))
self.assertTrue(np.allclose(st[1].data, st2[1].data))
self.assertTrue(np.allclose(st[2].data, st2[2].data))
self.assertTrue(np.allclose(st[3].data, st2[3].data))
self.assertTrue(np.allclose(st[4].data, st2[4].data))
self.assertTrue(np.allclose(st[5].data, st2[5].data))
# again, with angles given in stats and just 2 components
st = st2.copy()
st = st[1:3] + st[4:]
st[0].stats.back_azimuth = 190
st[2].stats.back_azimuth = 200
st.rotate(method='NE->RT')
st.rotate(method='RT->NE')
self.assertTrue(np.allclose(st[0].data, st2[1].data))
self.assertTrue(
            np.allclose(st[1].data, st2[2].data))
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 23 12:21:27 2020.
@author: pielsticker
"""
import warnings
import numpy as np
import os
import pandas as pd
import json
import datetime
import h5py
from time import time
import matplotlib.pyplot as plt
from base_model.spectra import (
safe_arange_with_edges,
MeasuredSpectrum,
)
from base_model.figures import Figure
from sim import Simulation
# %%
class Creator:
"""Class for simulating mixed XPS spectra."""
def __init__(self, params=None):
"""
Prepare simulation run.
Loading the input spectra and creating the empty simulation
matrix based on the number of input spectra.
        Parameters
        ----------
        params : dict, optional
            Dictionary of simulation parameters. Every key given here
            overrides the corresponding default loaded from
            "default_params.json"; keys inside "sim_ranges" are merged
            individually. Typical keys are "no_of_simulations" (number
            of spectra to simulate), "input_filenames" (dict of lists
            with the seed files for the simulations), "single" and
            "variable_no_of_inputs" (see create_matrix for their
            meaning).
Returns
-------
None.
"""
default_param_filename = "default_params.json"
default_param_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), default_param_filename
)
with open(default_param_filepath, "r") as param_file:
self.params = json.load(param_file)
self.params["init_param_filepath"] = default_param_filepath
self.sim_ranges = self.params["sim_ranges"]
timestamp = datetime.datetime.now().strftime("%Y%m%d")
self.params["timestamp"] = timestamp
# Replace the default params with the supplied params
# if available.
if params is not None:
for key in params.keys():
if key != "sim_ranges":
self.params[key] = params[key]
else:
for subkey in params["sim_ranges"].keys():
self.sim_ranges[subkey] = params["sim_ranges"][subkey]
# Print parameter file name.
print(
"Parameters were taken from "
f"{self.params['init_param_filepath']}."
)
del self.params["init_param_filepath"]
self.name = self.params["timestamp"] + "_" + self.params["name"]
self.no_of_simulations = self.params["no_of_simulations"]
self.labels = self.params["labels"]
self.spectra = self.params["spectra"]
# Warning if core and auger spectra of same species are
# not scaled together.
if not self.params["same_auger_core_percentage"]:
warnings.warn(
"Auger and core spectra of the same species are not scaled"
" together. If you have Auger spectra, you may want to set"
' "same_auger_core_percentage" to True!'
)
# Load input spectra from all reference sets.
self.input_spectra = self.load_input_spectra(
self.params["input_filenames"]
)
        # No. of parameters = 1 (ref_set) + no. of linear parameters + 6
        # (one parameter each for resolution, shift_x, signal-to-noise,
        # scatterer, distance, pressure)
self.no_of_linear_params = len(self.spectra)
no_of_params = 1 + self.no_of_linear_params + 6
self.simulation_matrix = np.zeros(
(self.no_of_simulations, no_of_params)
)
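        # Illustrative row layout of simulation_matrix (sketch; the last
        # six columns follow the order named in the comment above):
        # [ref_set, scale_1, ..., scale_n,
        #  resolution, shift_x, signal_to_noise, scatterer, distance,
        #  pressure]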
# Create the parameter matrix for the simulation.
self.create_matrix(
single=self.params["single"],
variable_no_of_inputs=self.params["variable_no_of_inputs"],
always_auger=self.params["always_auger"],
always_core=self.params["always_core"],
)
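    # Minimal usage sketch (hypothetical file names and values; assumes
    # the listed reference files exist under data/references and that
    # all keys below are present in default_params.json):
    #
    #     params = {
    #         "no_of_simulations": 250,
    #         "input_filenames": {"set_0": ["Fe2p_Fe_metal.txt",
    #                                       "Fe2p_FeO.txt"]},
    #         "single": False,
    #         "variable_no_of_inputs": True,
    #     }
    #     creator = Creator(params=params)
    #     print(creator.simulation_matrix.shape)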
def load_input_spectra(self, filenames):
"""
Load input spectra.
Load input spectra from all reference sets into DataFrame.
Will store NaN value if no reference is available.
Parameters
----------
filenames : dict
Dictionary of list with filenames to load.
Returns
-------
pd.DataFrame
A DataFrame containing instances of MeasuredSpectrum.
Each row contains one set of reference spectra.
"""
input_spectra = pd.DataFrame(columns=self.spectra)
input_datapath = os.path.join(
*[
os.path.dirname(os.path.abspath(__file__)).partition(
"simulation"
)[0],
"data",
"references",
]
)
input_spectra_list = []
for set_key, value_list in filenames.items():
ref_spectra_dict = {}
for filename in value_list:
filepath = os.path.join(input_datapath, filename)
measured_spectrum = MeasuredSpectrum(filepath)
if self.params["normalize_inputs"]:
measured_spectrum.normalize()
label = next(iter(measured_spectrum.label.keys()))
ref_spectra_dict[label] = measured_spectrum
input_spectra_list.append(ref_spectra_dict)
return pd.concat(
[input_spectra, pd.DataFrame(input_spectra_list)], join="outer"
)
def create_matrix(
self,
single=False,
variable_no_of_inputs=True,
always_auger=False,
always_core=True,
):
"""
Create matrix for multiple simulations.
Creates the numpy array 'simulation_matrix' (instance
variable) that is used to simulate the new spectra.
simulation_matrix has the dimensions (n x p), where:
n: no. of spectra that will be created
p: number of parameters
            p = 1 (reference set) + no. of input spectra + 6
                (resolution, shift_x, signal-to-noise, scatterer,
                distance, pressure)
        The parameters are chosen randomly. For the last six parameters,
        the random values are drawn from specific intervals that create
        reasonable spectra.
Parameters
----------
single : bool, optional
If single, only one input spectrum is taken.
The default is False.
variable_no_of_inputs : bool, optional
If variable_no_of_inputs and if single, then the number of
input spectra used in the linear combination will be
randomly chosen from the interval
(1, No. of input spectra).
The default is True.
always_auger : bool, optional
If always_auger, there will always be at least one
Auger spectrum in the output spectrum.
The default is False.
always_core : bool, optional
If always_auger, there will always be at least one
core level spectrum in the output spectrum.
The default is True.
Returns
-------
None.
"""
for i in range(self.no_of_simulations):
key = self.select_reference_set() # select a set of references
self.simulation_matrix[i, 0] = int(key)
self.simulation_matrix[
i, 1 : self.no_of_linear_params + 1
] = self.select_scaling_params(
key, single, variable_no_of_inputs, always_auger
)
self.simulation_matrix[
i, self.no_of_linear_params + 1 :
] = self.select_sim_params(key)
print(
"Random parameters: "
+ str(i + 1)
+ "/"
+ str(self.no_of_simulations)
)
def select_reference_set(self):
"""
Randomly select a number for calling one of the reference sets.
Returns
-------
int
A number between 0 and the total number of input
reference sets.
"""
return np.random.randint(0, self.input_spectra.shape[0])
def select_scaling_params(
self,
key,
single=False,
variable_no_of_inputs=True,
always_auger=False,
always_core=True,
):
"""
Randomly select parameters for linear combination.
Select scaling parameters for a simulation from one set
of reference spectra (given by key).
Parameters
----------
key : int
Integer number of the reference spectrum set to use.
single : bool, optional
If single, only one input spectrum is taken.
The default is False.
variable_no_of_inputs : bool, optional
If variable_no_of_inputs and if single, then the number of
input spectra used in the linear combination will be
randomly chosen from the interval
(1, No. of input spectra).
The default is True.
always_auger : bool, optional
If always_auger, there will always be at least one
Auger spectrum in the output spectrum.
The default is False.
always_core : bool, optional
            If always_core, there will always be at least one
core level spectrum in the output spectrum.
The default is True.
Returns
-------
linear_params : list
            A list of parameters for the linear combination of
            reference spectra.
"""
linear_params = [0.0] * self.no_of_linear_params
# Get input spectra from one set of references.
inputs = self.input_spectra.iloc[[key]]
# Select indices where a spectrum is available for this key.
indices = [
self.spectra.index(j)
            for j in inputs.columns[~inputs.isnull().any()].tolist()
]
indices_empty = [
self.spectra.index(j)
for j in inputs.columns[inputs.isnull().any()].tolist()
]
# This ensures that always just one Auger region is used.
auger_spectra = []
core_spectra = []
for s in inputs.iloc[0]:
if str(s) != "nan":
if s.spectrum_type == "auger":
auger_spectra.append(s)
if s.spectrum_type == "core_level":
core_spectra.append(s)
auger_region = self._select_one_auger_region(auger_spectra)
selected_auger_spectra = [
auger_spectrum
for auger_spectrum in auger_spectra
if (auger_region in list(auger_spectrum.label.keys())[0])
]
unselected_auger_spectra = [
auger_spectrum
for auger_spectrum in auger_spectra
if (auger_region not in list(auger_spectrum.label.keys())[0])
]
selected_auger_indices = [
inputs.columns.get_loc(list(s.label.keys())[0])
for s in selected_auger_spectra
]
unselected_auger_indices = [
inputs.columns.get_loc(list(s.label.keys())[0])
for s in unselected_auger_spectra
]
if single:
# Set one parameter to 1 and others to 0.
q = np.random.choice(indices)
linear_params[q] = 1.0
else:
if variable_no_of_inputs:
# Randomly choose how many spectra shall be combined
no_of_spectra = np.random.randint(1, len(indices) + 1)
params = [0.0] * no_of_spectra
while sum(params) == 0.0:
params = [
np.random.uniform(0.1, 1.0)
for j in range(no_of_spectra)
]
params = self._normalize_float_list(params)
# Don't allow parameters below 0.1.
for p in params:
if p <= 0.1:
params[params.index(p)] = 0.0
params = self._normalize_float_list(params)
# Add zeros if no_of_spectra < no_of_linear_params.
for _ in range(len(indices) - no_of_spectra):
params.append(0.0)
else:
# Linear parameters
r = [np.random.uniform(0.1, 1.0) for j in range(len(indices))]
params = self._normalize_float_list(r)
                while not all(p >= 0.1 for p in params):
# sample again if one of the parameters is smaller
# than 0.1.
r = [
np.random.uniform(0.1, 1.0)
for j in range(len(indices))
]
params = self._normalize_float_list(r)
# Randomly shuffle so that zeros are equally distributed.
np.random.shuffle(params)
# Add linear params at the positions where there
# are reference spectra available
param_iter = iter(params)
for index in indices:
linear_params[index] = next(param_iter)
# Making sure that a single spectrum is not moved
# to the undesired Auger region.
test_indices = [
i for i in indices if i not in unselected_auger_indices
]
while all(
p == 0.0 for p in [linear_params[i] for i in test_indices]
):
np.random.shuffle(params)
param_iter = iter(params)
for index in indices:
linear_params[index] = next(param_iter)
# Remove undesired auger spectra
for index in unselected_auger_indices:
linear_params[index] = 0.0
if self.params["same_auger_core_percentage"]:
# Set percentage for core and auger spectra of the
# same species to the same value.
linear_params = self._scale_auger_core_together(
linear_params, selected_auger_spectra, core_spectra
)
linear_params = self._normalize_float_list(linear_params)
# If no spectrum is available, set the corresponding
# linear param to NaN.
for index in indices_empty:
linear_params[index] = float("NAN")
if always_auger:
# Always use at least one Auger spectrum
# when available.
if all(
p == 0.0
for p in [linear_params[i] for i in selected_auger_indices]
):
linear_params = self.select_scaling_params(
key=key,
single=single,
variable_no_of_inputs=variable_no_of_inputs,
always_auger=always_auger,
always_core=always_core,
)
if always_core:
# Always use at least one core level spectrum
# when available.
core_level_indices = [
inputs.columns.get_loc(list(s.label.keys())[0])
for s in core_spectra
]
if all(
p == 0.0
for p in [linear_params[i] for i in core_level_indices]
):
linear_params = self.select_scaling_params(
key=key,
single=single,
variable_no_of_inputs=variable_no_of_inputs,
always_auger=always_auger,
always_core=always_core,
)
return linear_params
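    # Editor's note (illustrative only, labels are hypothetical): for a reference set
    # whose columns are ["Fe2p Fe", "Fe2p FeO", "Fe LMM Fe", "O1s FeO"] and whose
    # O1s reference happens to be missing, a possible result is
    #   linear_params == [0.55, 0.25, 0.2, nan]
    # i.e. the scaling factors of available spectra sum to 1, unavailable references
    # are marked NaN, and spectra from unselected Auger regions are forced to 0.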
def select_sim_params(self, row):
"""
Select parameters for one row in the simulation matrix.
Parameters
----------
row : int
            Index of the reference spectrum set to use (selects the
            row of input_spectra).
Returns
-------
sim_params : list
List of parameters for changing a spectrum using
various processing steps.
"""
sim_params = [0.0] * 6
# FWHM
sim_params[-6] = self._select_random_fwhm()
# shift_x
# Get step from first existing spectrum
for spectrum in self.input_spectra.iloc[row]:
try:
step = spectrum.step
break
except AttributeError:
continue
sim_params[-5] = self._select_random_shift_x(step)
# Signal-to-noise
sim_params[-4] = self._select_random_noise()
# Scattering
# Scatterer
sim_params[-3] = self._select_random_scatterer()
        # Distance
        sim_params[-2] = self._select_random_scatter_distance()
        # Pressure
        sim_params[-1] = self._select_random_scatter_pressure()
return sim_params
def _select_random_fwhm(self):
if self.params["broaden"] is not False:
return np.random.randint(
self.sim_ranges["FWHM"][0], self.sim_ranges["FWHM"][1]
)
return 0
def _select_random_shift_x(self, step):
if self.params["shift_x"] is not False:
shift_range = np.arange(
self.sim_ranges["shift_x"][0],
self.sim_ranges["shift_x"][1],
step,
)
            r = np.random.randint(0, len(shift_range))
if -step < shift_range[r] < step:
shift_range[r] = 0
return shift_range[r]
return 0
def _select_random_noise(self):
if self.params["noise"] is not False:
return (
np.random.randint(
self.sim_ranges["noise"][0] * 1000,
self.sim_ranges["noise"][1] * 1000,
)
/ 1000
)
return 0
def _select_random_scatterer(self):
if self.params["scatter"] is not False:
# Scatterer ID
return np.random.randint(
0, len(self.sim_ranges["scatterers"].keys())
)
return None
def _select_random_scatter_pressure(self):
if self.params["scatter"] is not False:
return (
np.random.randint(
self.sim_ranges["pressure"][0] * 10,
self.sim_ranges["pressure"][1] * 10,
)
/ 10
)
return 0
def _select_random_scatter_distance(self):
if self.params["scatter"] is not False:
return (
np.random.randint(
self.sim_ranges["distance"][0] * 100,
self.sim_ranges["distance"][1] * 100,
)
/ 100
)
return 0
def _select_one_auger_region(self, auger_spectra):
"""
Randomly select one region of Auger spectra.
Checks for the available Auger spectra and randomly
selects one emission line.
Returns
-------
str
Name of the randomly selected Auger region.
"""
labels = [list(s.label.keys())[0] for s in auger_spectra]
if not labels:
return []
auger_regions = set([label.split(" ", 1)[0] for label in labels])
auger_regions = list(auger_regions)
r = np.random.randint(0, len(auger_regions))
return auger_regions[r]
def _scale_auger_core_together(
self, linear_params, selected_auger_spectra, core_spectra
):
"""
Set core/auger percentage of the same species to same value.
Parameters
----------
linear_params : list
List of all linear parameters.
selected_auger_spectra : list
List of all selected Auger spectra.
core_spectra : list
            List of all available core level spectra.
Returns
-------
linear_params : list
New list of linear parameters where core and auger spectra
of the same species have the same percentage.
"""
auger_labels = [
list(s.label.keys())[0] for s in selected_auger_spectra
]
auger_phases = [label.split(" ", 1)[1] for label in auger_labels]
core_labels = [list(s.label.keys())[0] for s in core_spectra]
core_phases = [label.split(" ", 1)[1] for label in core_labels]
overlapping_species = [
phase for phase in auger_phases if phase in core_phases
]
for species in overlapping_species:
label_auger = [
label for label in auger_labels if species in label
][0]
label_core = [label for label in core_labels if species in label][
0
]
i_auger = self.spectra.index(label_auger)
i_core = self.spectra.index(label_core)
max_value = np.max(
[linear_params[i_auger], linear_params[i_core]]
)
linear_params[i_auger] = max_value
linear_params[i_core] = max_value
return linear_params
def _normalize_float_list(self, list_of_floats):
"""
        Normalize a list of floats by its sum.
Parameters
----------
        list_of_floats : list
List of floats.
Returns
-------
list
Normalized list of floats
(or original list if all entries are 0.)
"""
try:
return [k / sum(list_of_floats) for k in list_of_floats]
except ZeroDivisionError:
return list_of_floats
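    # Editor's sketch (not from the original source; "creator" stands for any
    # instance of this class):
    #   >>> creator._normalize_float_list([1.0, 1.0, 2.0])
    #   [0.25, 0.25, 0.5]
    #   >>> creator._normalize_float_list([0.0, 0.0])
    #   [0.0, 0.0]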
def run(self):
"""
Run the simulations.
        The artificial spectra are created using the Simulation
class and the simulation matrix. All data is then stored in
a dataframe.
Returns
-------
        df : pd.DataFrame
            DataFrame containing all simulated spectra and their labels.
"""
dict_list = []
for i in range(self.no_of_simulations):
ref_set_key = int(self.simulation_matrix[i, 0])
            # Only select input spectra and scaling parameters
            # for the references that are available.
sim_input_spectra = [
spectrum
for spectrum in self.input_spectra.iloc[ref_set_key].tolist()
if str(spectrum) != "nan"
]
scaling_params = [
p
for p in self.simulation_matrix[i][
1 : self.no_of_linear_params + 1
]
if str(p) != "nan"
]
self.sim = Simulation(sim_input_spectra)
self.sim.combine_linear(scaling_params=scaling_params)
fwhm = self.simulation_matrix[i][-6]
shift_x = self.simulation_matrix[i][-5]
signal_to_noise = self.simulation_matrix[i][-4]
scatterer_id = self.simulation_matrix[i][-3]
distance = self.simulation_matrix[i][-2]
pressure = self.simulation_matrix[i][-1]
try:
# In order to assign a label, the scatterers are encoded
# by numbers.
scatterer_label = self.sim_ranges["scatterers"][
str(int(scatterer_id))
]
except ValueError:
scatterer_label = None
self.sim.change_spectrum(
fwhm=fwhm,
shift_x=shift_x,
signal_to_noise=signal_to_noise,
scatterer={
"label": scatterer_label,
"distance": distance,
"pressure": pressure,
},
)
if self.params["normalize_outputs"]:
self.sim.output_spectrum.normalize()
d1 = {"reference_set": ref_set_key}
d2 = self._dict_from_one_simulation(self.sim)
d = {**d1, **d2}
dict_list.append(d)
print(
"Simulation: "
+ str(i + 1)
+ "/"
+ str(self.no_of_simulations)
)
print("Number of created spectra: " + str(self.no_of_simulations))
self.df = pd.DataFrame(dict_list)
if self.params["ensure_same_length"]:
self.df = self._extend_spectra_in_df(self.df)
self._prepare_metadata_after_run()
return self.df
def _dict_from_one_simulation(self, sim):
"""
Create a dictionary with data from one simulation event.
Parameters
----------
sim : Simulation
The simulation for which the dictionary shall be created.
Returns
-------
d : dict
            Dictionary containing all simulation data.
"""
spectrum = sim.output_spectrum
# Add all percentages of one species together.
new_label = {}
out_phases = []
for key, value in spectrum.label.items():
phase = key.split(" ", 1)[1]
if phase not in out_phases:
new_label[phase] = value
else:
new_label[phase] += value
out_phases.append(phase)
spectrum.label = new_label
y = np.reshape(spectrum.lineshape, (spectrum.lineshape.shape[0], -1))
d = {
"label": spectrum.label,
"shift_x": spectrum.shift_x,
"noise": spectrum.signal_to_noise,
"FWHM": spectrum.fwhm,
"scatterer": spectrum.scatterer,
"distance": spectrum.distance,
"pressure": spectrum.pressure,
"x": spectrum.x,
"y": y,
}
for label_value in self.labels:
if label_value not in d["label"].keys():
d["label"][label_value] = 0.0
return d
def _extend_spectra_in_df(self, df):
"""
Extend all x and y columns in the dataframe to the same length.
Parameters
----------
df : pd.DataFrame
A dataframe with "x" and "y" columns containing 1D numpy
arrays.
Returns
-------
df : pd.DataFrame
The same dataframe as the input, with the arrays in the
"x" and "y" columns all having the same shape.
"""
        max_length = np.max([y.shape for y in df["y"].tolist()])
data_list = df[["x", "y"]].values.tolist()
new_spectra = []
for (x, y) in data_list:
x_new, y_new = self._extend_xy(x, y, max_length)
new_data_dict = {"x": x_new, "y": y_new}
new_spectra.append(new_data_dict)
df.update(pd.DataFrame(new_spectra))
return df
def _extend_xy(self, X0, Y0, new_length):
"""
Extend two 1D arrays to a new length.
Parameters
----------
        X0 : ndarray
            Regularly spaced 1D array.
        Y0 : ndarray
            1D array of the same size as X0.
        new_length : int
            Length of new array.
        Returns
        -------
        X, Y : ndarray
            The x and y arrays extended to the new length.
        """
def start_stop_step_from_x(x):
"""
            Calculate start, stop, and step from a regular array.
            Parameters
            ----------
            x : ndarray
                A numpy array with regular spacing,
                i.e. the same step size between all points.
            Returns
            -------
            start : float
                Minimal value of x.
            stop : float
                Maximal value of x.
step : float
Step size between points in x.
"""
start = np.min(x)
stop = np.max(x)
x1 = np.roll(x, -1)
diff = np.abs(np.subtract(x, x1))
step = np.round(np.min(diff[diff != 0]), 3)
return start, stop, step
len_diff = new_length - X0.shape[0]
if len_diff > 0.0:
start0, stop0, step0 = start_stop_step_from_x(X0)
start = start0 - int(len_diff / 2) * step0
stop = stop0 + int(len_diff / 2) * step0
X = np.flip(safe_arange_with_edges(start, stop, step0))
Y = np.zeros(shape=(X.shape[0], 1))
Y[: int(len_diff / 2)] = np.mean(Y0[:20])
Y[int(len_diff / 2) : -int(len_diff / 2)] = Y0
Y[-int(len_diff / 2) :] =
|
np.mean(Y0[-20:])
|
numpy.mean
|
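Editor's note: a minimal, self-contained sketch of the completion in the row above (the array values here are made up, not from the source). numpy.mean over an edge slice supplies the padding value used when a spectrum is extended to a common length.
import numpy as np
Y0 = np.linspace(0.2, 1.0, 50).reshape(-1, 1)   # hypothetical lineshape column vector
pad_left = np.mean(Y0[:20])                      # average of the first 20 samples
pad_right = np.mean(Y0[-20:])                    # average of the last 20 samples
print(pad_left, pad_right)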
import re
import os
import string
import ipdb
import pickle
import matplotlib
matplotlib.use('Agg')
from matplotlib import rcParams
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import TimeSeriesSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import normalize
from sklearn.preprocessing import RobustScaler
from sklearn import linear_model
from wordcloud import WordCloud
from nltk import pos_tag, word_tokenize
import gensim.downloader as api
MIN_DF = 10
MAX_DF = 100
WORD_CLOUD_NUMBER = 50
BOW = "bow"
TFIDF = "tfidf"
WORD2VEC = "word2vec"
SKIPTHOUGHT = "skipThought"
def select_by_pos_tag(sentence, tags):
word_tokens = word_tokenize(sentence)
tagged_word_token = pos_tag(word_tokens)
selected_words = [word for word, tag in tagged_word_token if tag in tags]
return ' '.join(selected_words)
def clean_sentence(s):
s = re.sub("\n", " ", s)
s = re.sub("[" + string.punctuation + "]", " ", s)
s = re.sub("\?", " ", s)
s = re.sub("[0-9]+", " ", s)
s = re.sub(" +", " ", s)
return s.strip()
def generate_bag_of_words(train, test, feature_args):
vectorizer = CountVectorizer(min_df=MIN_DF, max_df=MAX_DF, **feature_args)
train_bag_of_words = vectorizer.fit_transform(train['text'].apply(clean_sentence)).toarray()
test_bag_of_words = vectorizer.transform(test['text'].apply(clean_sentence)).toarray()
train_bag_of_words = normalize(train_bag_of_words)
test_bag_of_words = normalize(test_bag_of_words)
word_list = vectorizer.get_feature_names()
train_text_df = pd.DataFrame(train_bag_of_words, index=train.index, columns=word_list)
test_text_df = pd.DataFrame(test_bag_of_words, index=test.index, columns=word_list)
bag_of_words_df = pd.concat([train_text_df, test_text_df], axis=0)
return bag_of_words_df, vectorizer
def generate_tfidf(train, test, feature_args):
vectorizer = TfidfVectorizer(min_df=MIN_DF, max_df=MAX_DF, **feature_args)
train_bag_of_words = vectorizer.fit_transform(train['text'].apply(clean_sentence)).toarray()
test_bag_of_words = vectorizer.transform(test['text'].apply(clean_sentence)).toarray()
word_list = vectorizer.get_feature_names()
train_text_df = pd.DataFrame(train_bag_of_words, index=train.index, columns=word_list)
test_text_df = pd.DataFrame(test_bag_of_words, index=test.index, columns=word_list)
bag_of_words_df = pd.concat([train_text_df, test_text_df], axis=0)
return bag_of_words_df, vectorizer
def average_word2vec(sentence, model):
sentence = clean_sentence(sentence)
word2vecs = []
for word in sentence.split(" "):
word = word.lower()
if word in model:
word2vecs.append(model[word])
return pd.Series(
|
np.average(word2vecs, axis=0)
|
numpy.average
|
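Editor's note: a small standalone sketch of the completion in the row above (the vectors are stand-ins for gensim model lookups, not real embeddings). numpy.average with axis=0 turns a list of word vectors into one sentence vector; an empty list needs a guard.
import numpy as np
word2vecs = [np.array([0.1, 0.3]), np.array([0.5, 0.1])]  # stand-ins for model[word]
if word2vecs:
    sentence_vec = np.average(word2vecs, axis=0)           # element-wise mean -> [0.3, 0.2]
else:
    sentence_vec = np.zeros(2)                             # fallback for unseen vocabulary
print(sentence_vec)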
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Unit tests for the Dataset.py module'''
import unittest
from ocw.dataset import Dataset, Bounds
import numpy as np
import datetime as dt
class TestDatasetAttributes(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.variable = 'prec'
self.test_dataset = Dataset(self.lat, self.lon, self.time,
self.value, self.variable)
def test_lats(self):
self.assertItemsEqual(self.test_dataset.lats, self.lat)
def test_lons(self):
self.assertItemsEqual(self.test_dataset.lons, self.lon)
def test_times(self):
self.assertItemsEqual(self.test_dataset.times, self.time)
def test_values(self):
self.assertEqual(self.test_dataset.values.all(), self.value.all())
def test_variable(self):
self.assertEqual(self.test_dataset.variable, self.variable)
class TestInvalidDatasetInit(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon = np.array([100, 102, 104, 106, 108])
self.time = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
flat_array = np.array(range(300))
self.value = flat_array.reshape(12, 5, 5)
self.values_in_wrong_order = flat_array.reshape(5, 5, 12)
def test_bad_lat_shape(self):
self.lat = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_lon_shape(self):
self.lon = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_times_shape(self):
self.time = np.array([[1, 2], [3, 4]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_bad_values_shape(self):
self.value = np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_values_shape_mismatch(self):
# If we change lats to this the shape of value will not match
# up with the length of the lats array.
self.lat = self.lat[:-2]
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.value, 'prec')
def test_values_given_in_wrong_order(self):
with self.assertRaises(ValueError):
Dataset(self.lat, self.lon, self.time, self.values_in_wrong_order)
def test_lons_values_incorrectly_gridded(self):
times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
lats = np.arange(-30, 30)
bad_lons = np.arange(360)
flat_array = np.arange(len(times) * len(lats) * len(bad_lons))
values = flat_array.reshape(len(times), len(lats), len(bad_lons))
ds = Dataset(lats, bad_lons, times, values)
np.testing.assert_array_equal(ds.lons, np.arange(-180, 180))
def test_reversed_lats(self):
ds = Dataset(self.lat[::-1], self.lon, self.time, self.value)
np.testing.assert_array_equal(ds.lats, self.lat)
class TestDatasetFunctions(unittest.TestCase):
def setUp(self):
self.lat = np.array([10, 12, 14, 16, 18])
self.lon =
|
np.array([100, 102, 104, 106, 108])
|
numpy.array
|
import cv2
import numpy as np
import time
import os
import matplotlib.pyplot as plt
from easyocr import Reader
from PIL import Image
import datetime
def detect_etiqueta(etiquetas):
# display function to show image on Jupyter
def display_img(img,cmap=None):
fig = plt.figure(figsize = (12,12))
plt.axis(False)
ax = fig.add_subplot(111)
ax.imshow(img,cmap)
# Load the COCO class labels in which our YOLO model was trained on
labelsPath = os.path.join("obj.names")
LABELS = open(labelsPath).read().strip().split("\n")
# The COCO dataset contains 80 different classes
#LABELS
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.join("yolov4-obj_final.weights")
configPath = os.path.join("yolov4-obj.cfg")
# Loading the neural network framework Darknet (YOLO was created based on this framework)
net = cv2.dnn.readNetFromDarknet(configPath,weightsPath)
#net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
# Create the function which predict the frame input
def predict(image):
# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
(H, W) = image.shape[:2]
# determine only the "ouput" layers name which we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward pass of the YOLO object detector,
# giving us our bounding boxes and associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
layerOutputs = net.forward(ln)
#global boxes
boxes = []
confidences = []
classIDs = []
threshold = 0.50
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
# confidence type=float, default=0.5
if confidence > threshold:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] *
|
np.array([W, H, W, H])
|
numpy.array
|
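Editor's note: an illustrative, self-contained sketch of the completion in the row above (frame size and detection values invented). YOLO reports box centres and sizes normalised to [0, 1], so multiplying by np.array([W, H, W, H]) restores pixel coordinates before deriving the top-left corner.
import numpy as np
(H, W) = (480, 640)                                # hypothetical frame size
detection = np.array([0.5, 0.25, 0.2, 0.1, 0.9])   # cx, cy, w, h, objectness (normalised)
(centerX, centerY, width, height) = detection[0:4] * np.array([W, H, W, H])
x = int(centerX - width / 2)                       # top-left corner for drawing
y = int(centerY - height / 2)
print(x, y, int(width), int(height))               # -> 256 96 128 48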
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import warnings
import numpy as np
from sklearn.linear_model import LassoLarsIC, LinearRegression
from sklearn.utils import check_array, resample
from statsmodels.tsa.statespace.varmax import VARMAX
from .base import _BaseLiNGAM
from .bootstrap import BootstrapResult
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso, find_all_paths
class VARMALiNGAM:
"""Implementation of VARMA-LiNGAM Algorithm [1]_
References
----------
.. [1] <NAME>, <NAME>, <NAME>.
Analyzing relationships among ARMA processes based on non-Gaussianity
of external influences. Neurocomputing, Volume 74: 2212-2221, 2011
"""
def __init__(
self,
order=(1, 1),
criterion="bic",
prune=False,
max_iter=100,
ar_coefs=None,
ma_coefs=None,
lingam_model=None,
random_state=None,
):
"""Construct a VARMALiNGAM model.
Parameters
----------
        order : tuple, length = 2, optional (default=(1, 1))
Number of lags for AR and MA model.
criterion : {'aic', 'bic', 'hqic', None}, optional (default='bic')
            Criterion to decide the best order among all combinations of ``order``.
Searching the best order is disabled if ``criterion`` is ``None``.
prune : boolean, optional (default=False)
Whether to prune the adjacency matrix or not.
max_iter : int, optional (default=100)
            Maximum number of iterations to estimate the VARMA model.
ar_coefs : array-like, optional (default=None)
            Coefficients of the AR part of the ARMA model. Estimating the ARMA model is skipped if both ``ar_coefs`` and ``ma_coefs`` are specified.
Shape must be (``order[0]``, n_features, n_features).
ma_coefs : array-like, optional (default=None)
            Coefficients of the MA part of the ARMA model. Estimating the ARMA model is skipped if both ``ar_coefs`` and ``ma_coefs`` are specified.
Shape must be (``order[1]``, n_features, n_features).
lingam_model : lingam object inherits 'lingam._BaseLiNGAM', optional (default=None)
LiNGAM model for causal discovery. If None, DirectLiNGAM algorithm is selected.
random_state : int, optional (default=None)
``random_state`` is the seed used by the random number generator.
"""
self._order = order
self._criterion = criterion
self._prune = prune
self._max_iter = max_iter
self._ar_coefs = (
check_array(ar_coefs, allow_nd=True) if ar_coefs is not None else None
)
self._ma_coefs = (
check_array(ma_coefs, allow_nd=True) if ma_coefs is not None else None
)
self._lingam_model = lingam_model
self._random_state = random_state
def fit(self, X):
"""Fit the model to X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where ``n_samples`` is the number of samples
and ``n_features`` is the number of features.
        Returns
-------
self : object
Returns the instance itself.
"""
self._causal_order = None
self._adjacency_matrices = None
X = check_array(X)
lingam_model = self._lingam_model
if lingam_model is None:
lingam_model = DirectLiNGAM()
elif not isinstance(lingam_model, _BaseLiNGAM):
raise ValueError("lingam_model must be a subclass of _BaseLiNGAM")
phis = self._ar_coefs
thetas = self._ma_coefs
order = self._order
if phis is None or thetas is None:
phis, thetas, order, residuals = self._estimate_varma_coefs(X)
else:
p = phis.shape[0]
q = thetas.shape[0]
residuals = self._calc_residuals(X, phis, thetas, p, q)
model = lingam_model
model.fit(residuals)
psis, omegas = self._calc_psi_and_omega(
model.adjacency_matrix_, phis, thetas, order
)
if self._prune:
ee = np.dot(
np.eye(model.adjacency_matrix_.shape[0]) - model.adjacency_matrix_,
residuals.T,
).T
psis, omegas = self._pruning(X, ee, order, model.causal_order_)
self._ar_coefs = phis
self._ma_coefs = thetas
self._order = order
self._residuals = residuals
self._causal_order = model.causal_order_
self._adjacency_matrices = (psis, omegas)
return self
def bootstrap(self, X, n_sampling):
"""Evaluate the statistical reliability of DAG based on the bootstrapping.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where ``n_samples`` is the number of samples
and ``n_features`` is the number of features.
n_sampling : int
Number of bootstrapping samples.
Returns
-------
        result : VARMABootstrapResult
Returns the result of bootstrapping.
"""
X = check_array(X)
n_samples = X.shape[0]
n_features = X.shape[1]
(p, q) = self._order
criterion = self._criterion
self._criterion = None
self.fit(X)
residuals = self._residuals
ar_coefs = self._ar_coefs
ma_coefs = self._ma_coefs
total_effects = np.zeros([n_sampling, n_features, n_features * (1 + p)])
adjacency_matrices = []
for i in range(n_sampling):
sampled_residuals = resample(residuals, n_samples=n_samples)
resampled_X = np.zeros((n_samples, n_features))
for j in range(n_samples):
if j < max(p, q):
resampled_X[j, :] = sampled_residuals[j]
continue
ar = np.zeros((1, n_features))
for t, M in enumerate(ar_coefs):
ar += np.dot(M, resampled_X[j - t - 1, :].T).T
ma = np.zeros((1, n_features))
for t, M in enumerate(ma_coefs):
ma += np.dot(M, sampled_residuals[j - t - 1, :].T).T
resampled_X[j, :] = ar + sampled_residuals[j] + ma
self.fit(resampled_X)
psi = self._adjacency_matrices[0]
omega = self._adjacency_matrices[1]
am = np.concatenate([*psi, *omega], axis=1)
adjacency_matrices.append(am)
ee = np.dot(np.eye(psi[0].shape[0]) - psi[0], sampled_residuals.T).T
# total effects
for c, to in enumerate(reversed(self._causal_order)):
# time t
for from_ in self._causal_order[: n_features - (c + 1)]:
total_effects[i, to, from_] = self.estimate_total_effect(
resampled_X, ee, from_, to
)
# time t-tau
for lag in range(p):
for from_ in range(n_features):
total_effects[
i, to, from_ + n_features
] = self.estimate_total_effect(
resampled_X, ee, from_, to, lag + 1
)
self._criterion = criterion
return VARMABootstrapResult(adjacency_matrices, total_effects, self._order)
def estimate_total_effect(self, X, E, from_index, to_index, from_lag=0):
"""Estimate total effect using causal model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Original data, where n_samples is the number of samples
and n_features is the number of features.
E : array-like, shape (n_samples, n_features)
Original error data, where n_samples is the number of samples
and n_features is the number of features.
from_index :
Index of source variable to estimate total effect.
to_index :
Index of destination variable to estimate total effect.
Returns
-------
total_effect : float
Estimated total effect.
"""
X = check_array(X)
n_features = X.shape[1]
# Check from/to causal order
if from_lag == 0:
from_order = self._causal_order.index(from_index)
to_order = self._causal_order.index(to_index)
if from_order > to_order:
warnings.warn(
f"The estimated causal effect may be incorrect because "
f"the causal order of the destination variable (to_index={to_index}) "
f"is earlier than the source variable (from_index={from_index})."
)
# X + lagged X
X_joined = np.zeros(
(X.shape[0], X.shape[1] * (1 + from_lag + self._order[0] + self._order[1]))
)
for p in range(1 + self._order[0]):
pos = n_features * p
X_joined[:, pos : pos + n_features] = np.roll(X[:, 0:n_features], p, axis=0)
for q in range(self._order[1]):
pos = n_features * (1 + self._order[0]) + n_features * q
X_joined[:, pos : pos + n_features] = np.roll(
E[:, 0:n_features], q + 1, axis=0
)
# concat psi and omega
psi = self._adjacency_matrices[0]
omega = self._adjacency_matrices[1]
am = np.concatenate([*psi, *omega], axis=1)
# from_index + parents indices
parents = np.where(np.abs(am[from_index]) > 0)[0]
from_index = from_index if from_lag == 0 else from_index + n_features
parents = parents if from_lag == 0 else parents + n_features
predictors = [from_index]
predictors.extend(parents)
# Estimate total effect
coefs = predict_adaptive_lasso(X_joined, predictors, to_index)
return coefs[0]
def get_error_independence_p_values(self):
"""Calculate the p-value matrix of independence between error variables.
Returns
-------
independence_p_values : array-like, shape (n_features, n_features)
p-value matrix of independence between error variables.
"""
eps = self.residuals_
psi0 = self._adjacency_matrices[0][0]
E = np.dot(np.eye(psi0.shape[0]) - psi0, eps.T).T
n_samples = E.shape[0]
n_features = E.shape[1]
p_values = np.zeros([n_features, n_features])
for i, j in itertools.combinations(range(n_features), 2):
_, p_value = hsic_test_gamma(
np.reshape(E[:, i], [n_samples, 1]), np.reshape(E[:, j], [n_samples, 1])
)
p_values[i, j] = p_value
p_values[j, i] = p_value
return p_values
def _estimate_varma_coefs(self, X):
if self._criterion not in ["aic", "bic", "hqic"]:
result = VARMAX(X, order=self._order, trend="c").fit(maxiter=self._max_iter)
else:
min_value = float("Inf")
result = None
orders = [
(p, q)
for p in range(self._order[0] + 1)
for q in range(self._order[1] + 1)
]
orders.remove((0, 0))
for order in orders:
fitted = VARMAX(X, order=order, trend="c").fit(maxiter=self._max_iter)
value = getattr(fitted, self._criterion)
if value < min_value:
min_value = value
result = fitted
return (
result.coefficient_matrices_var,
result.coefficient_matrices_vma,
result.specification["order"],
result.resid,
)
def _calc_residuals(self, X, ar_coefs, ma_coefs, p, q):
X = X.T
n_features = X.shape[0]
n_samples = X.shape[1]
start_index = max(p, q)
epsilon = np.zeros([n_features, n_samples])
for t in range(n_samples):
if t < start_index:
epsilon[:, t] = np.random.normal(size=(n_features))
continue
ar = np.zeros([n_features, 1])
for i in range(p):
ar += np.dot(ar_coefs[i], X[:, t - i - 1].reshape(-1, 1))
ma = np.zeros([n_features, 1])
for j in range(q):
ma += np.dot(ma_coefs[j], epsilon[:, t - j - 1].reshape(-1, 1))
epsilon[:, t] = X[:, t] - (ar.flatten() + ma.flatten())
residuals = epsilon[:, start_index:].T
return residuals
def _calc_psi_and_omega(self, psi0, phis, thetas, order):
psis = [psi0]
for i in range(order[0]):
psi = np.dot(np.eye(psi0.shape[0]) - psi0, phis[i])
psis.append(psi)
omegas = []
for j in range(order[1]):
omega = np.dot(
np.eye(psi0.shape[0]) - psi0,
thetas[j],
np.linalg.inv(
|
np.eye(psi0.shape[0])
|
numpy.eye
|
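Editor's note: a toy sketch of the transformation the row above belongs to (matrices invented). Each lagged AR coefficient matrix phi_i is mapped to psi_i = (I - psi0) phi_i, with np.eye supplying the identity.
import numpy as np
psi0 = np.array([[0.0, 0.0], [0.4, 0.0]])   # instantaneous effects from LiNGAM
phi1 = np.array([[0.3, 0.1], [0.0, 0.2]])   # lag-1 AR coefficients
psi1 = np.dot(np.eye(psi0.shape[0]) - psi0, phi1)
print(psi1)                                  # [[ 0.3   0.1 ] [-0.12  0.16]]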
import numpy as np
import pandas as pd
import random
import math,time,sys
from matplotlib import pyplot
from datetime import datetime
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import csv
# from sklearn.neural_network import MLPClassifier
# from sklearn.ensemble import RandomForestClassifier
################################################################################################################
def sigmoid(gamma): #convert to probability
if gamma < 0:
return 1 - 1/(1 + math.exp(gamma))
else:
return 1/(1 + math.exp(-gamma))
def Vfunction(gamma):
val = 1 + gamma*gamma
val = math.sqrt(val)
val = gamma/val
return abs(val)
def fitness(particle):
cols=np.flatnonzero(particle)
val=1
if np.shape(cols)[0]==0:
return val
# clf = RandomForestClassifier(n_estimators=300)
clf=KNeighborsClassifier(n_neighbors=5)
# clf=MLPClassifier( alpha=0.01, max_iter=1000) #hidden_layer_sizes=(1000,500,100)
#cross=4
#test_size=(1/cross)
#X_train, X_test, y_train, y_test = train_test_split(trainX, trainy, stratify=trainy,test_size=test_size)
train_data=trainX[:,cols]
test_data=testX[:,cols]
clf.fit(train_data,trainy)
val=1-clf.score(test_data,testy)
#print('particle: ',particle)
#in case of multi objective []
set_cnt=sum(particle)
set_cnt=set_cnt/np.shape(particle)[0]
val=omega*val+(1-omega)*set_cnt
return val
def onecount(particle):
cnt=0
for i in particle:
if i==1.0:
cnt+=1
return cnt
def allfit(population):
x=np.shape(population)[0]
acc=np.zeros(x)
for i in range(x):
acc[i]=fitness(population[i])
#print(acc[i])
return acc
def initialize(partCount,dim):
population=np.zeros((partCount,dim))
minn = 1
maxx = math.floor(0.5*dim)
if maxx<minn:
maxx = minn + 1
for i in range(partCount):
random.seed(i**3 + 10 + time.time() )
no = random.randint(minn,maxx)
if no == 0:
no = 1
random.seed(time.time()+ 100)
pos = random.sample(range(0,dim-1),no)
for j in pos:
population[i][j]=1
#print(population[i])
return population
def avg_concentration(eqPool,poolSize,dimension):
# simple average
# print(np.shape(eqPool[0]))
(r,) = np.shape(eqPool[0])
avg = np.zeros(np.shape(eqPool[0]))
for i in range(poolSize):
x = np.array(eqPool[i])
avg = avg + x
#print(avg)
avg = avg/poolSize
#print(avg)
#not actual average; but voting
# for i in range(dimension):
# if avg[i]>=0.5:
# avg[i] = 1
# else:
# avg[i] = 0
return avg
#weighted avg (using Correlation/MI)
def signFunc(x): #signum function? or just sign ?
if x<0:
return -1
return 1
def neighbor(particle,population):
percent = 30
percent /= 100
numFeatures = np.shape(population)[1]
numChange = int(numFeatures*percent)
pos = np.random.randint(0,numFeatures-1,numChange)
particle[pos] = 1 - particle[pos]
return particle
def SA(population,accList):
#dispPop()
[partCount,numFeatures] = np.shape(population)
T0 = numFeatures
#print('T0: ',T0)
i = 0
for partNo in range(partCount):
i = i+1
#print('i: ',i)
T=2*numFeatures
curPar = population[partNo].copy()
curAcc = accList[partNo].copy()
#print('Par:',partNo, 'curAcc:',curAcc, 'curFeat:', onecount(curPar), 'fitness_check:', fitness(curPar))
bestPar = curPar.copy()
bestAcc = curAcc.copy()
while T>T0:
#print('T: ',T)
newPar = neighbor(curPar,population)
newAcc = fitness(newPar)/1.0
if newAcc<bestAcc:
curPar=newPar.copy()
curAcc=newAcc.copy()
bestPar=curPar.copy()
bestAcc=curAcc.copy()
elif newAcc==bestAcc:
if onecount(newPar)<onecount(bestPar):
curPar=newPar.copy()
curAcc=newAcc
bestPar=curPar.copy()
bestAcc=curAcc
else:
prob=np.exp((bestAcc-curAcc)/T)
if(random.random()<=prob):
curPar=newPar.copy()
curAcc=newAcc
T=int(T*0.7)
#print('one_count: ', onecount(curPar))
#print('bestAcc: ',bestAcc)
#print('Par:',partNo, 'newAcc:',bestAcc, 'newFeat:', onecount(bestPar), 'fitness_check: ', fitness(bestPar))
population[partNo]=bestPar.copy()
accList[partNo]=bestAcc.copy()
return population
def EO_SA(population,poolSize,max_iter,partCount,dimension):
eqPool = np.zeros((poolSize+1,dimension))
# print(eqPool)
eqfit = np.zeros(poolSize+1)
# print(eqfit)
for i in range(poolSize+1):
eqfit[i] = 100
for curriter in range(max_iter):
# print("iter no: ",curriter)
# print(eqPool)
popnew = np.zeros((partCount,dimension))
accList = allfit(population)
# x_axis.append(curriter)
# y_axis.aend(min(accList))
for i in range(partCount):
for j in range(poolSize):
if accList[i] <= eqfit[j]:
eqfit[j] = accList[i].copy()
eqPool[j] = population[i].copy()
break
# print("till best: ",eqfit[0],onecount(eqPool[0]))
Cave = avg_concentration(eqPool,poolSize,dimension)
eqPool[poolSize] = Cave.copy()
t = (1 - (curriter/max_iter))**(a2*curriter/max_iter)
for i in range(partCount):
#randomly choose one candidate from the equillibrium pool
random.seed(time.time() + 100 + 0.02*i)
inx = random.randint(0,poolSize)
Ceq = np.array(eqPool[inx])
lambdaVec = np.zeros(np.shape(Ceq))
rVec = np.zeros(np.shape(Ceq))
for j in range(dimension):
random.seed(time.time() + 1.1)
lambdaVec[j] = random.random()
random.seed(time.time() + 10.01)
rVec[j] = random.random()
FVec = np.zeros(np.shape(Ceq))
for j in range(dimension):
x = -1*lambdaVec[j]*t
x = math.exp(x) - 1
x = a1 * signFunc(rVec[j] - 0.5) * x
random.seed(time.time() + 200)
r1 = random.random()
random.seed(time.time() + 20)
r2 = random.random()
if r2 < GP:
GCP = 0
else:
GCP = 0.5 * r1
G0 = np.zeros(np.shape(Ceq))
G = np.zeros(np.shape(Ceq))
for j in range(dimension):
G0[j] = GCP * (Ceq[j] - lambdaVec[j]*population[i][j])
G[j] = G0[j]*FVec[j]
# print('popnew[',i,']: ')
for j in range(dimension):
temp = Ceq[j] + (population[i][j] - Ceq[j])*FVec[j] + G[j]*(1 - FVec[j])/lambdaVec[j]
temp = Vfunction(temp)
if temp>0.5:
popnew[i][j] = 1 - population[i][j]
else:
popnew[i][j] = population[i][j]
# print(popnew[i][j],end=',')
# print()
population = popnew.copy()
popnew = SA(popnew,accList)
population = popnew.copy()
return eqPool,population
############################################################################################################
maxRun = 10
omega = 0.9 # weighting between classification error and feature-count terms in the fitness
partCountAll = [10]
max_iterAll = [20]
a2 = 1
a1 = 2
GP = 0.5
poolSize = 4
best_accuracy=np.zeros((1,4))
best_no_features=
|
np.zeros((1,4))
|
numpy.zeros
|
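Editor's note: a small self-contained sketch (numbers invented) of the V-shaped transfer function used above to binarise the equilibrium-optimizer update: values of Vfunction(temp) above 0.5 flip the feature bit, smaller values keep it.
import math
def Vfunction(gamma):
    return abs(gamma / math.sqrt(1 + gamma * gamma))
bit = 1.0
for temp in (0.2, 1.5):
    new_bit = 1 - bit if Vfunction(temp) > 0.5 else bit
    print(temp, round(Vfunction(temp), 3), new_bit)   # 0.2 keeps the bit, 1.5 flips it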
# -*- coding: utf-8 -*-
"""Comutations about two-body Orbits (v1.0.0)
This module provide computations about two-body orbits, including:
Define the orbit by position and velocity of an object
Define the orbit by classical orbital elements of an object
Compute position and velocity of an object at given time
Provide seriese of points on orbital trajectory for visualization
Solve Lambert's problem (From given two positions and flight time
between them, lambert() computes initial and terminal velocity of
the object)
@author: <NAME>/whiskie14142
"""
import numpy as np
import math
from scipy.optimize import newton
from scipy.optimize import bisect
class TwoBodyOrbit:
"""A class of a two-body orbit of a celestial object
"""
def timeFperi(self, ta):
"""Computes time from periapsis passage for given true anomaly
Args:
ta: True Anomaly in radians
Returns: sec_from_peri
sec_from_peri: Time from periapsis passage (float). Unit of time
depends on gravitational parameter (mu)
"""
if not self._setOrb:
raise(RuntimeError('Orbit has not been defined: in TwoBodyOrbit.timeFperi'))
r = self.a * (1.0 - self.e ** 2) / (1.0 + self.e * np.cos(ta))
if self.e < 1.0:
b_over_a = np.sqrt(1.0 - self.e ** 2)
ecc_anm = np.arctan2(r * np.sin(ta) / b_over_a, self.a * self.e \
+ r * np.cos(ta))
if ecc_anm < 0.0: ecc_anm += math.pi * 2.0
sec_from_peri = np.sqrt(self.a **3 / self.mu) * (ecc_anm - self.e \
* np.sin(ecc_anm))
elif self.e == 1.0:
ecc_anm = np.sqrt(self.p) * np.tan(ta / 2.0)
sec_from_peri = (self.p * ecc_anm + ecc_anm ** 3 / 3.0) / 2.0 / \
np.sqrt(self.mu)
else:
sy = (self.e + np.cos(ta)) / (1.0 + self.e * np.cos(ta))
lf = np.log(sy + np.sqrt(sy ** 2 - 1.0))
if (ta < 0.0) or (ta > math.pi): lf = lf * (-1.0)
sec_from_peri = np.sqrt((-1.0) * self.a ** 3 / self.mu) * (self.e \
* np.sinh(lf) - lf)
return sec_from_peri
def posvel(self, ta):
"""Comuputs position and velocity for given true anomaly
Args:
ta: True Anomaly in radians
Returns: rv, vv
rv: Position (x,y,z) as numpy array
vv: Velocity (xd,yd,zd) as numpy array
            Units depend on the gravitational parameter (mu)
"""
if not self._setOrb:
raise(RuntimeError('Orbit has not been defined: in TwoBodyOrbit.posvel'))
PV = self.evd
QV = np.cross(self.hv, PV) / np.sqrt(np.dot(self.hv, self.hv))
r = self.p / (1.0 + self.e * np.cos(ta))
rv = r * np.cos(ta) * PV + r * np.sin(ta) * QV
vv = np.sqrt(self.mu / self.p) * ((-1.0) * np.sin(ta) * PV + (self.e \
+ np.cos(ta)) * QV)
return rv, vv
def __init__(self, bname, mname='Sun', mu=1.32712440041e20):
"""
Args:
bname: Name of the object which orbit around the central body
mname: Name of the central body
mu : Gravitational parameter (mu) of the central body
Default value is gravitational parameter of the Sun.
mu should be:
if Mc >> Mo
mu = G*Mc
else
mu = G*(Mc + Mo)
where:
G: Newton's gravitational constant
Mc: mass of the central body
Mo: mass of the object
"""
self._setOrb = False
self.bodyname = bname
self.mothername = mname
self.mu = mu
def setOrbCart(self, t, pos, vel):
"""Define the orbit by epoch, position, and velocity of the object
Args:
t: Epoch
pos: Position (x,y,z), array-like object
vel: Velocity (xd,yd,zd), array-like object
            Units depend on the gravitational parameter (mu)
            Origin of coordinates is the central body
Exceptions:
ValueError: when angular momentum is zero, the method raises
ValueError
when e becomes 1.0, the method raises ValueError
"""
self.t0 = t
self.pos = np.array(pos)
self.vel = np.array(vel)
self._setOrb = True
# Computes Classical orbital elements
r0 = np.array(self.pos)
r0len = np.sqrt(np.dot(r0, r0))
rd0 = np.array(self.vel)
rd0len2 = np.dot(rd0, rd0)
h = np.cross(r0, rd0)
hlen2 = np.dot(h, h)
hlen = np.sqrt(hlen2)
if hlen == 0.0:
self._setOrb = False
raise(ValueError('Inappropriate pos and vel in TwoBodyOrbit.setOrbCart'))
# eccentricity vector; it can be zero
ev = ((rd0len2 - self.mu/r0len) * r0 - np.dot(r0, rd0) * rd0) / self.mu
evlen = np.sqrt(np.dot(ev, ev)) #evlen can be zero (circular orbit)
K = np.array([0., 0., 1.])
n = np.cross(K, h) # direction of the ascending node
nlen = np.sqrt(np.dot(n, n)) # nlen can be zero (orbital inclination is zero)
if evlen == 0.0:
if nlen == 0.0:
ev_norm = np.array([1.0, 0.0, 0.0])
else:
ev_norm = n / nlen
else:
ev_norm = ev / evlen
he = np.cross(h, ev)
he_norm = he / np.sqrt(np.dot(he, he))
if nlen == 0.0:
self.lan = 0.0
self.parg = np.arctan2(ev[1], ev[0])
if self.parg < 0.0:
self.parg += math.pi * 2.0
else:
n_norm = n / nlen
hn = np.cross(h, n)
hn_norm = hn / np.sqrt(np.dot(hn, hn))
self.lan = np.arctan2(n[1], n[0]) # longitude of ascending node (radians)
if self.lan < 0.0:
self.lan += math.pi * 2.0
self.parg = np.arctan2(np.dot(ev, hn_norm), np.dot(ev, n_norm)) # periapsis argument (radians)
if self.parg < 0.0:
self.parg += math.pi * 2.0
        self.hv = h # orbital momentum vector
self.p = hlen2 / self.mu # semi-latus rectum
self.ev = ev # eccentricity vector
self.evd = ev_norm # normalized eccentricity vector
self.e = np.sqrt(np.dot(ev, ev)) # eccentricity
if self.e == 1.0:
self._setOrb = False
raise(ValueError('Inappropriate pos and vel in TwoBodyOrbit.setOrbCart'))
self.a = self.p / (1.0 - self.e ** 2) # semi-major axis
self.i = np.arccos(h[2] / hlen) # inclination (radians)
self.ta0 = np.arctan2(np.dot(he_norm, r0), np.dot(ev_norm, r0)) # true anomaly at epoch
if self.ta0 < 0.0:
self.ta0 += math.pi * 2.0
# time from recent periapsis, mean anomaly, periapsis passage time
timef = self.timeFperi(self.ta0)
self.ma = None
self.pr = None
self.mm = None
if self.e < 1.0:
self.pr = (2.0 * math.pi * np.sqrt(self.a ** 3 /self.mu)) # orbital period
self.ma = timef / self.pr * math.pi * 2.0 # Mean anomaly (rad)
self.mm = 2.0 * math.pi / self.pr # mean motion (rad/time)
self.T = self.t0 - timef # periapsis passage time
def setOrbKepl(self, epoch, a, e, i, LoAN, AoP, TA=None, T=None, MA=None):
"""Define the orbit by classical orbital elements
Args:
epoch: Epoch
a: Semi-major axis
e: Eccentricity (should not be 1.0)
i: Inclination (degrees)
LoAN: longitude of ascending node (degrees)
                If inclination is zero, this value defines the reference
                longitude for AoP
            AoP: Argument of periapsis (degrees)
                For a circular orbit, this value indicates an imaginary
                periapsis.
TA: True anomaly on epoch (degrees)
For a circular orbit, the value defines angle from the
imaginary periapsis defined by AoP
T: Periapsis passage time
for a circular orbit, the value defines passage time for
the imaginary periapsis defined by AoP
MA: Mean anomaly on epoch (degrees)
For a hyperbolic trajectory, you cannot specify this
argument
For a circular orbit, the value defines anomaly from
the imaginary periapsis defined by AoP
TA, T, and MA are mutually exclusive arguments. You should
specify one of them. If TA is specified, other arguments
will be ignored. If T is specified, MA will be ignored.
Exceptions:
ValueError: If classical orbital element(s) are inconsistent, the
method raises ValueError
"""
# changed keys
Lomega = LoAN
Somega = AoP
TAoE = TA
ma = MA
self._setOrb = False
if e < 0.0:
raise ValueError('Invalid orbital element (e<0.0) in TwoBodyOrbit.setOrbKepl')
if e == 1.0:
raise ValueError('Invalid orbital element (e=1.0) in TwoBodyOrbit.setOrbKepl')
if (e > 1.0 and a >= 0.0) or (e < 1.0 and a <= 0.0):
raise ValueError('Invalid Orbital Element(s) (inconsistent e and a) in TwoBodyOrbit.setOrbKepl')
if e > 1.0 and TAoE is None and T is None:
raise ValueError('Missing Orbital Element (TA or T) in TwoBodyOrbit.setOrbKepl')
if TAoE is None and T is None and ma is None:
raise ValueError('Missing Orbital Elements (TA, T, or MA) in TwoBodyOrbit.setOrbKepl')
taError = False
if TAoE is not None and e > 1.0:
mta = math.degrees(math.acos((-1.0) / e))
if TAoE >= mta and TAoE <= 180.0:
taError = True
elif TAoE >= 180.0 and TAoE <= (360.0 - mta):
taError = True
elif TAoE <= (-1.0) * mta:
taError = True
if taError:
raise ValueError('Invalid Orbital Element (TA) in TwoBodyOrbit.setOrbKepl')
self.t0 = epoch
self.a = a
self.e = e
self.i = math.radians(i)
self.lan = math.radians(Lomega)
self.parg = math.radians(Somega)
self.pr = None
self.ma = None
self.mm = None
self._setOrb = True
# semi-latus rectum
self.p = a * (1.0 - e * e)
# orbital period and mean motion
if e < 1.0:
self.pr = math.pi * 2.0 / math.sqrt(self.mu) * a ** 1.5
self.mm = math.pi * 2.0 / self.pr
# R: rotation matrix
R1n = np.array([math.cos(self.lan)*math.cos(self.parg)
- math.sin(self.lan)*math.sin(self.parg)*math.cos(self.i),
(-1.0)*math.cos(self.lan)*math.sin(self.parg)
- math.sin(self.lan)*math.cos(self.parg)*math.cos(self.i),
math.sin(self.lan)*math.sin(self.i)])
R2n = np.array([math.sin(self.lan)*math.cos(self.parg)
+ math.cos(self.lan)*math.sin(self.parg)*math.cos(self.i),
(-1.0)*math.sin(self.lan)*math.sin(self.parg)
+ math.cos(self.lan)*math.cos(self.parg)*math.cos(self.i),
(-1.0)*math.cos(self.lan)*math.sin(self.i)])
R3n = np.array([math.sin(self.parg)*math.sin(self.i),
math.cos(self.parg)*math.sin(self.i),
math.cos(self.i)])
R = np.array([R1n, R2n, R3n])
# eccentricity vector
self.evd = (np.dot(R, np.array([[1.0], [0.0], [0.0]]))).T[0]
self.ev = self.evd * self.e
# angular momentum vector
h = math.sqrt(self.p * self.mu)
self.hv = (np.dot(R, np.array([[0.0], [0.0], [1.0]]))).T[0] * h
nv = (np.dot(R, np.array([[0.0], [1.0], [0.0]]))).T[0]
# ta0, T, ma
if TAoE is not None:
# true anomaly at epoch
self.ta0 = math.radians(TAoE)
# periapsis passage time
self.T = self.t0 - self.timeFperi(self.ta0)
# mean anomaly at epoch
if self.e < 1.0:
self.ma = (self.t0 - self.T) / self.pr * math.pi * 2.0
else:
self.ma = None
elif T is not None:
# periapsis passage time
self.T = T
# position and velocity on periapsis
self.pos, self.vel = self.posvel(0.0)
# position and velocity at epoch
self.t0 = self.T # temporary setting
pos, vel = self.posvelatt(epoch)
# true anomaly at epoch
ev_norm = self.evd
nv_norm = nv / np.sqrt(np.dot(nv, nv))
pos_norm = pos / np.sqrt(np.dot(pos, pos))
# true anomaly at epoch
self.ta0 = np.arctan2(np.dot(pos_norm, nv_norm), np.dot(pos_norm, ev_norm))
# mean anomaly at epoch
if self.e < 1.0:
self.ma = (epoch - self.T) / self.pr * math.pi * 2.0
if self.ma < 0.0:
self.ma += math.pi * 2.0
else:
self.ma = None
else:
# mean anomaly at epoch
self.ma = math.radians(ma)
# periapsis passage time
self.T = epoch - self.pr * self.ma / (math.pi * 2.0)
# position and velocity on periapsis
self.pos, self.vel = self.posvel(0.0)
# position and velocity at epoch
self.t0 = self.T # temporary setting
pos, vel = self.posvelatt(epoch)
# true anomaly at epoch
ev_norm = self.ev / np.sqrt(np.dot(self.ev, self.ev))
nv_norm = nv / np.sqrt(np.dot(nv, nv))
pos_norm = pos / np.sqrt(np.dot(pos, pos))
# true anomaly at epoch
self.ta0 = np.arctan2(np.dot(pos_norm, nv_norm), np.dot(pos_norm, ev_norm))
# epoch
self.t0 = epoch
# position and velocity at epoch
if e != 0.0:
self.pos, self.vel = self.posvel(self.ta0)
else:
r = np.array([[math.cos(self.ta0)], [math.sin(self.ta0)], [0.0]]) * self.a
self.pos = (np.dot(R, r).T)[0]
v = np.array([[(-1.0)*math.sin(self.ta0)], [math.cos(self.ta0)], [0.0]]) * math.sqrt(self.mu / self.a)
self.vel = (np.dot(R, v).T)[0]
def points(self, ndata):
"""Returns points on orbital trajectory for visualization
Args:
ndata: Number of points
Returns: xs, ys, zs, times
xs: Array of x-coordinates (Numpy array)
ys: Array of y-coordinates (Numpy array)
zs: Array of z-coordinates (Numpy array)
times: Array of times (Numpy array)
            Origin of coordinates is the position of the central body
"""
if not self._setOrb:
raise(RuntimeError('Orbit has not been defined: TwoBodyOrbit.points'))
times = np.zeros(ndata)
xs = np.zeros(ndata)
ys = np.zeros(ndata)
zs = np.zeros(ndata)
tas = np.zeros(ndata)
if self.e < 1.0:
            tas = np.linspace(0.0, math.pi * 2.0, ndata)
else:
stop = math.pi - np.arccos(1.0 / self.e)
start = (-1.) * stop
delta = (stop - start) / (ndata + 1)
tas = np.linspace(start + delta, stop - delta, ndata)
for j in range(ndata):
            ta = tas[j]
times[j] = self.timeFperi(ta) + self.T
            xyz, xdydzd = self.posvel(ta)
xs[j] = xyz[0]
ys[j] = xyz[1]
zs[j] = xyz[2]
return xs, ys, zs, times
def posvelatt(self, t):
"""Returns position and velocity of the object at given t
Args:
t: Time
Returns: newpos, newvel
newpos: Position of the object at t (x,y,z) (Numpy array)
newvel: Velocity of the object at t (xd,yd,zd) (Numpy array)
Exception:
            RuntimeError: If the computation fails, raises RuntimeError
            Origin of coordinates is the position of the central body
"""
def _Cz(z):
if z < 0:
return (1.0 - np.cosh(np.sqrt((-1)*z))) / z
else:
return (1.0 - np.cos(np.sqrt(z))) / z
def _Sz(z):
if z < 0:
sqz = np.sqrt((-1)*z)
return (np.sinh(sqz) - sqz) / sqz ** 3
else:
sqz = np.sqrt(z)
return (sqz - np.sin(sqz)) / sqz ** 3
def _func(xn, targett):
z = xn * xn / self.a
sr = np.sqrt(np.dot(self.pos, self.pos))
tn = (np.dot(self.pos, self.vel) / np.sqrt(self.mu) * xn * xn \
* _Cz(z) + (1.0 - sr / self.a) * xn ** 3 * _Sz(z) + sr * xn) \
/ np.sqrt(self.mu) - targett
return tn
def _fprime(x, targett):
z = x * x / self.a
sqmu = np.sqrt(self.mu)
sr = np.sqrt(np.dot(self.pos, self.pos))
dtdx = (x * x * _Cz(z) + np.dot(self.pos, self.vel) / sqmu * x \
* (1.0 - z * _Sz(z)) + sr * (1.0 - z * _Cz(z))) / sqmu
return dtdx
if not self._setOrb:
raise(RuntimeError('Orbit has not been defined: TwoBodyOrbit.posvelatt'))
delta_t = (t - self.t0)
if delta_t == 0.0:
return self.pos + 0.0, self.vel + 0.0
# you should not return self.pos. it can cause trouble!
x0 = np.sqrt(self.mu) * delta_t / self.a
try:
# compute with scipy.optimize.newton
xn = newton(_func, x0, args=(delta_t,), fprime=_fprime)
except RuntimeError:
# Configure boundaries for scipy.optimize.bisect
# b1: Lower boundary
# b2: Upper boundary
f0 = _func(x0, delta_t)
if f0 < 0.0:
b1 = x0
found = False
for i in range(50):
x1 = x0 + 10 ** (i + 1)
test = _func(x1, delta_t)
if test > 0.0:
found = True
b2 = x1
break
if not found:
raise(RuntimeError('Could not compute position and ' +
'velocity: TwoBodyOrbit.posvelatt'))
else:
b2 = x0
found = False
for i in range(50):
x1 = x0 - 10 ** (i + 1)
test = _func(x1, delta_t)
if test < 0.0:
found = True
b1 = x1
break
if not found:
raise(RuntimeError('Could not compute position and ' +
'velocity: TwoBodyOrbit.posvelatt'))
# compute with scipy.optimize.bisect
xn = bisect(_func, b1, b2, args=(delta_t,), maxiter=200)
z = xn * xn / self.a
sr = np.sqrt(np.dot(self.pos, self.pos))
sqmu = np.sqrt(self.mu)
val_f = 1.0 - xn * xn / sr * _Cz(z)
val_g = delta_t - xn ** 3 / sqmu * _Sz(z)
newpos = self.pos * val_f + self.vel * val_g
newr = np.sqrt(np.dot(newpos, newpos))
val_fd = sqmu / sr / newr * xn * (z * _Sz(z) - 1.0)
val_gd = 1.0 - xn * xn / newr * _Cz(z)
newvel = self.pos * val_fd + self.vel * val_gd
return newpos, newvel
def elmKepl(self):
"""Returns Classical orbital element
Returns:
kepl: Dictionary of orbital elements. Keys are as follows
'epoch': Epoch
'a': Semimajor axis
'e': Eccentricity
'i': Inclination in degrees
'LoAN': Longitude of ascending node in degrees
If inclination is zero, LoAN yields reference longitude
for AoP
'AoP': Argument of periapsis in degrees
If inclination is zero, AoP yields angle from reference
longitude (LoAN)
For circular orbit, AoP yields imaginary periapsis
'TA': True anomaly at epoch in degrees
                    For a circular orbit, TA yields the angle from the imaginary
periapsis (AoP)
'T': Periapsis passage time
For circular orbit, T yields passage time of imaginary
periapsis (AoP)
'MA': Mean anomaly at epoch in degrees (elliptic orbit only)
                    For a circular orbit, MA is the same as TA
'n': Mean motion in degrees (elliptic orbit only)
'P': Orbital period (elliptic orbit only)
For a hyperbolic trajectory, values for keys 'MA', 'n', and 'P'
are None for each
"""
if not self._setOrb:
raise(RuntimeError('Orbit has not been defined: TwoBodyOrbit'))
kepl = {'epoch':self.t0, \
'a':self.a, \
'e':self.e, \
'i':math.degrees(self.i), \
'LoAN':math.degrees(self.lan), \
'AoP':math.degrees(self.parg), \
'TA':math.degrees(self.ta0), \
'T':self.T}
if self.e < 1.0:
kepl['MA'] = math.degrees(self.ma)
kepl['n'] = math.degrees(self.mm)
kepl['P'] = self.pr
else:
kepl['MA'] = None
kepl['n'] = None
kepl['P'] = None
return kepl
def lambert(ipos, tpos, targett, mu=1.32712440041e20, ccw=True):
"""A function to solve 'Lambert's Problem'
From given initial position, terminal position, and flight time,
compute initial velocity and terminal velocity.
Args: ipos, tpos, targett, mu, ccw
ipos: Initial position of the object (x,y,z) (array-like object)
tpos: Terminal position of the object (x,y,z) (array-like object)
targett: Flight time
mu: Gravitational parameter of the central body (default value is for the Sun)
ccw: Flag for orbital direction. If True, counter clockwise
Returns: ivel, tvel
ivel: Initial velocity of the object (xd,yd,zd) as Numpy array
tvel: Terminal velocity of the object (xd,yd,zd) as Numpy array
Exception:
ValueError: When input data (ipos, tpos, targett) are inappropriate,
the function raises ValueError
        Origin of coordinates is the position of the central body
"""
def _Cz(z):
if z < 0:
return (1.0 - np.cosh(np.sqrt((-1)*z))) / z
else:
return (1.0 - np.cos(np.sqrt(z))) / z
def _Sz(z):
if z < 0:
sqz = np.sqrt((-1)*z)
return (np.sinh(sqz) - sqz) / sqz ** 3
else:
sqz = np.sqrt(z)
return (sqz - np.sin(sqz)) / sqz ** 3
def _func(z, targett, r1pr2, A, mu):
val_y = r1pr2 - A * (1.0 - z * _Sz(z)) / np.sqrt(_Cz(z))
val_x = np.sqrt(val_y / _Cz(z))
t = (val_x ** 3 * _Sz(z) + A * np.sqrt(val_y)) / np.sqrt(mu)
return t - targett
sipos = np.array(ipos)
stpos = np.array(tpos)
tsec = targett * 1.0
r1 = np.sqrt(np.dot(sipos, sipos))
r2 = np.sqrt(np.dot(stpos, stpos))
r1cr2 = np.cross(sipos, stpos)
r1dr2 =
|
np.dot(sipos, stpos)
|
numpy.dot
|
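Editor's note: a brief standalone sketch of the geometry the row above feeds into (positions invented, units arbitrary). The dot product of the two position vectors, together with their norms, gives the transfer angle used by the Lambert solver.
import numpy as np
sipos = np.array([1.0, 0.0, 0.0])        # initial position
stpos = np.array([0.0, 2.0, 0.0])        # terminal position
r1 = np.sqrt(np.dot(sipos, sipos))
r2 = np.sqrt(np.dot(stpos, stpos))
r1dr2 = np.dot(sipos, stpos)
dnu = np.arccos(r1dr2 / (r1 * r2))       # transfer angle, here pi/2
print(r1, r2, round(dnu, 4))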
from numpy import array, ones, identity, zeros, full, exp
from numpy.random import normal
from numpy.ma.testutils import assert_array_equal, assert_array_almost_equal
from sstspack.Utilities import block_diag, identity_fn, jacobian, hessian
def test_block_diag():
""""""
expected = identity(4)
assert_array_equal(block_diag([identity(2), identity(2)]), expected)
def test_jacobian():
""""""
def test_func(x):
return
|
full((1, 1), x[0] ** 2 + 3 * x[1])
|
numpy.full
|
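Editor's note: a generic finite-difference check (the scheme below is the editor's own and is only assumed to approximate what sstspack.Utilities.jacobian computes) for the test function above: f(x) = x0**2 + 3*x1 has Jacobian [[2, 3]] at x = (1, 2).
import numpy as np
def test_func(x):
    return np.full((1, 1), x[0] ** 2 + 3 * x[1])
def fd_jacobian(f, x, eps=1e-6):
    x = np.asarray(x, dtype=float)
    cols = []
    for i in range(x.size):
        dx = np.zeros_like(x)
        dx[i] = eps
        cols.append((f(x + dx) - f(x - dx)).ravel() / (2 * eps))
    return np.column_stack(cols)
print(fd_jacobian(test_func, [1.0, 2.0]))   # approximately [[2., 3.]]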
from openpathsampling.tests.test_helpers import (data_filename)
import openpathsampling as paths
import openpathsampling.engines.toy as toys
from openpathsampling.pathsimulators.sshooting_simulator import SShootingSimulation
import numpy as np
import os
class TestSShootingSimulation(object):
def setup(self):
# PES is one-dimensional zero function (y(x) = 0)
pes = toys.LinearSlope(m=[0.0], c=[0.0])
topology = toys.Topology(n_spatial=1, masses=[1.0], pes=pes)
integrator = toys.LeapfrogVerletIntegrator(0.02)
options = {
'integ' : integrator,
'n_frames_max' : 1000,
'n_steps_per_frame' : 1
}
self.engine = toys.Engine(options=options, topology=topology)
# test uses snapshots with different velocities
self.initial_snapshots = [toys.Snapshot(
coordinates=
|
np.array([[0.0]])
|
numpy.array
|