code | apis | extract_api
---|---|---|
"""
A group of spiking neurons where noise `~U(0, potential_noise_scale)` is added
to `n_neurons * prob_rand_fire` randomly selected neurons at each step.
Each spiking neuron has an internal membrane potential that
increases with each incoming spike. The potential persists but slowly
decreases over time. Each neuron fires when its potential surpasses
some firing threshold and does not fire again for the duration
of its refractory period.
"""
import numpy as np
from spikey.module import Key
from spikey.snn.neuron.template import Neuron
class RandPotential(Neuron):
"""
A group of spiking neurons where noise `~U(0, potential_noise_scale)` is added
to `n_neurons * prob_rand_fire` randomly selected neurons at each step.
Each spiking neuron has an internal membrane potential that
increases with each incoming spike. The potential persists but slowly
decreases over time. Each neuron fires when its potential surpasses
some firing threshold and does not fire again for the duration
of its refractory period.
Parameters
----------
kwargs: dict
Dictionary with values for each key in NECESSARY_KEYS.
Examples
--------
.. code-block:: python
config = {
"magnitude": 2,
"n_neurons": 100,
"neuron_pct_inhibitory": .2,
"potential_decay": .2,
"prob_rand_fire": .08,
"refractory_period": 1,
"resting_mv": 0,
"spike_delay": 0,
"potential_noise_scale": .1,
}
neurons = Neuron(**config)
neurons.reset()
weights = np.random.uniform(0, 2, size=(config['n_neurons'], config['n_neurons']))
for i in range(100):
spikes = neurons()
neurons += np.sum(
weights * spikes.reshape((-1, 1)), axis=0
)
.. code-block:: python
class network_template(Network):
keys = {
"magnitude": 2,
"n_neurons": 100,
"neuron_pct_inhibitory": .2,
"potential_decay": .2,
"prob_rand_fire": .08,
"refractory_period": 1,
"potential_noise_scale": .1,
}
parts = {
"neurons": Neuron
}
"""
NECESSARY_KEYS = Neuron.extend_keys(
[Key("potential_noise_scale", "Multiplier of leak to add to potential.", float)]
)
def __call__(self) -> np.ndarray:
"""
Add noise `~U(0, potential_noise_scale)` to `n_neurons * prob_rand_fire` neurons
then determine whether each neuron will fire or not according to threshold.
Called once per network step.
Parameters
----------
None. The spiking threshold is taken from the `firing_threshold` configuration
key (`self._firing_threshold`); neurons schedule spikes if potentials >= threshold.
Returns
-------
ndarray[n_neurons, dtype=bool] Spike output from each neuron at the current timestep.
Examples
--------
.. code-block:: python
config = {
"magnitude": 2,
"n_neurons": 100,
"neuron_pct_inhibitory": .2,
"potential_decay": .2,
"prob_rand_fire": .08,
"refractory_period": 1,
"potential_noise_scale": .1,
"firing_threshold": 16,
}
neurons = Neuron(**config)
neurons.reset()
weights = np.random.uniform(0, 2, size=(config['n_neurons'], config['n_neurons']))
for i in range(100):
spikes = neurons()
neurons += np.sum(
weights * spikes.reshape((-1, 1)), axis=0
)
"""
noise = np.random.uniform(0, self._potential_noise_scale, size=self._n_neurons)
noise[
~(np.random.uniform(0, 1, size=self._n_neurons) <= self._prob_rand_fire)
] = 0
self.potentials += noise
spike_occurences = self.potentials >= self._firing_threshold
self.refractory_timers[spike_occurences] = self._refractory_period + 1
self.schedule += self.spike_shape * np.int_(spike_occurences)
output = self.schedule[0] * self.polarities * self._magnitude
return output
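# Usage sketch (illustrative, not part of the original module): the masked-noise
# pattern used in __call__ above, written with plain numpy. The helper name and
# default values are assumptions chosen to mirror the example config keys.
def _example_masked_noise(n_neurons=100, prob_rand_fire=0.08, potential_noise_scale=0.1):
    noise = np.random.uniform(0, potential_noise_scale, size=n_neurons)
    # zero the noise everywhere except a random ~prob_rand_fire fraction of neurons
    noise[~(np.random.uniform(0, 1, size=n_neurons) <= prob_rand_fire)] = 0
    return noise  # on average ~n_neurons * prob_rand_fire entries are nonzero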
|
[
"numpy.int_",
"numpy.random.uniform",
"spikey.module.Key"
] |
[((3757, 3828), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self._potential_noise_scale'], {'size': 'self._n_neurons'}), '(0, self._potential_noise_scale, size=self._n_neurons)\n', (3774, 3828), True, 'import numpy as np\n'), ((2339, 2417), 'spikey.module.Key', 'Key', (['"""potential_noise_scale"""', '"""Multiplier of leak to add to potential."""', 'float'], {}), "('potential_noise_scale', 'Multiplier of leak to add to potential.', float)\n", (2342, 2417), False, 'from spikey.module import Key\n'), ((4171, 4196), 'numpy.int_', 'np.int_', (['spike_occurences'], {}), '(spike_occurences)\n', (4178, 4196), True, 'import numpy as np\n'), ((3858, 3903), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'self._n_neurons'}), '(0, 1, size=self._n_neurons)\n', (3875, 3903), True, 'import numpy as np\n')]
|
from skimage import color
import numpy as np
from matplotlib import pyplot as plt
def plot_cielab(l):
min_range = -110
max_range = 110
ab_range = np.linspace(min_range, max_range, 500)
b, a = np.meshgrid(ab_range, ab_range)
color_cielab = np.array([np.ones(a.shape) * l, a, b]).T
color_rgb = color.lab2rgb(color_cielab)
# Cut out saturated colors
saturated = np.any((color_rgb == 1.) | (color_rgb == 0.), axis=2)
color_rgb[saturated] = (1., 1., 1.)
fig, ax = plt.subplots()
ax.imshow(color_rgb[::-1], extent=[min_range, max_range, min_range, max_range])
def cielab_circle(l, r_a, r_b, count=8):
theta = np.arange(0, 2*np.pi, 2*np.pi / count)
# ab_range = np.linspace(np.ones(100), np.sin(theta), 100)
a = np.cos(theta) * r_a
b = np.sin(theta) * r_b
color_cielab = np.array([np.ones(a.shape) * l, a, b]).T
color_rgb = color.lab2rgb([color_cielab])[0]
fig, ax = plt.subplots()
# ax.scatter(a, b, s=200, c=color_rgb)
# ax.set_facecolor(color.lab2rgb([[[10, 0, -5]]])[0][0])
ax.imshow(color_rgb[None, :, :])
int_color_rgb = (color_rgb * 255).astype(int)
for r, g, b in int_color_rgb:
test_str = '███ abcdefghijklmnopqrstuvwxyz'
print(f"\033[38;2;{r};{g};{b}m{test_str} {r:3d} {g:3d} {b:3d}\033[0m")
def plot_cielab_line(l_start, a_start, b_start, l_end, a_end, b_end, count=1000):
l = np.linspace(l_start, l_end, count)
a = np.linspace(a_start, a_end, count)
b = np.linspace(b_start, b_end, count)
color_cielab = np.array([l, a, b]).T
color_rgb = color.lab2rgb([color_cielab])[0]
fig, ax = plt.subplots()
# ax.scatter(a, b, s=200, c=color_rgb)
# ax.set_facecolor(color.lab2rgb([[[10, 0, -5]]])[0][0])
ax.imshow(color_rgb[None, :, ::-1], aspect='auto')
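# Usage sketch (illustrative, not part of the original script); the L*, a*, b*
# values below are arbitrary choices.
if __name__ == "__main__":
    plot_cielab(70)                          # a*b* slice at L* = 70
    cielab_circle(65, 40, 40)                # 8 evenly spaced hues at L* = 65
    plot_cielab_line(20, 0, -40, 90, 0, 40)  # dark blue-ish to light yellow-ish
    plt.show()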
|
[
"numpy.meshgrid",
"numpy.ones",
"numpy.any",
"skimage.color.lab2rgb",
"numpy.sin",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] |
[((159, 197), 'numpy.linspace', 'np.linspace', (['min_range', 'max_range', '(500)'], {}), '(min_range, max_range, 500)\n', (170, 197), True, 'import numpy as np\n'), ((210, 241), 'numpy.meshgrid', 'np.meshgrid', (['ab_range', 'ab_range'], {}), '(ab_range, ab_range)\n', (221, 241), True, 'import numpy as np\n'), ((318, 345), 'skimage.color.lab2rgb', 'color.lab2rgb', (['color_cielab'], {}), '(color_cielab)\n', (331, 345), False, 'from skimage import color\n'), ((394, 449), 'numpy.any', 'np.any', (['((color_rgb == 1.0) | (color_rgb == 0.0))'], {'axis': '(2)'}), '((color_rgb == 1.0) | (color_rgb == 0.0), axis=2)\n', (400, 449), True, 'import numpy as np\n'), ((503, 517), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (515, 517), True, 'from matplotlib import pyplot as plt\n'), ((656, 698), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / count)'], {}), '(0, 2 * np.pi, 2 * np.pi / count)\n', (665, 698), True, 'import numpy as np\n'), ((940, 954), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (952, 954), True, 'from matplotlib import pyplot as plt\n'), ((1404, 1438), 'numpy.linspace', 'np.linspace', (['l_start', 'l_end', 'count'], {}), '(l_start, l_end, count)\n', (1415, 1438), True, 'import numpy as np\n'), ((1447, 1481), 'numpy.linspace', 'np.linspace', (['a_start', 'a_end', 'count'], {}), '(a_start, a_end, count)\n', (1458, 1481), True, 'import numpy as np\n'), ((1490, 1524), 'numpy.linspace', 'np.linspace', (['b_start', 'b_end', 'count'], {}), '(b_start, b_end, count)\n', (1501, 1524), True, 'import numpy as np\n'), ((1631, 1645), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1643, 1645), True, 'from matplotlib import pyplot as plt\n'), ((767, 780), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (773, 780), True, 'import numpy as np\n'), ((795, 808), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (801, 808), True, 'import numpy as np\n'), ((892, 921), 'skimage.color.lab2rgb', 'color.lab2rgb', (['[color_cielab]'], {}), '([color_cielab])\n', (905, 921), False, 'from skimage import color\n'), ((1545, 1564), 'numpy.array', 'np.array', (['[l, a, b]'], {}), '([l, a, b])\n', (1553, 1564), True, 'import numpy as np\n'), ((1583, 1612), 'skimage.color.lab2rgb', 'color.lab2rgb', (['[color_cielab]'], {}), '([color_cielab])\n', (1596, 1612), False, 'from skimage import color\n'), ((271, 287), 'numpy.ones', 'np.ones', (['a.shape'], {}), '(a.shape)\n', (278, 287), True, 'import numpy as np\n'), ((845, 861), 'numpy.ones', 'np.ones', (['a.shape'], {}), '(a.shape)\n', (852, 861), True, 'import numpy as np\n')]
|
from Statistics.Mean import mean
from numpy import absolute, asarray
def var(data):
x = (absolute(asarray(data) - mean(data)))
y = x ** 2
z = mean(y)
return round(z, 13)
# variance is the mean of the squared deviations from the mean
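# Usage sketch (illustrative, not part of the original module): var() computes the
# population variance, so it should agree with numpy.var. This assumes
# Statistics.Mean.mean is the plain arithmetic mean.
if __name__ == "__main__":
    from numpy import var as np_var
    data = [2, 4, 4, 4, 5, 5, 7, 9]
    print(var(data))     # -> 4.0
    print(np_var(data))  # population variance, also 4.0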
|
[
"Statistics.Mean.mean",
"numpy.asarray"
] |
[((155, 162), 'Statistics.Mean.mean', 'mean', (['y'], {}), '(y)\n', (159, 162), False, 'from Statistics.Mean import mean\n'), ((104, 117), 'numpy.asarray', 'asarray', (['data'], {}), '(data)\n', (111, 117), False, 'from numpy import absolute, asarray\n'), ((120, 130), 'Statistics.Mean.mean', 'mean', (['data'], {}), '(data)\n', (124, 130), False, 'from Statistics.Mean import mean\n')]
|
"""
Matrix related utility functions
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import functools as _functools
import itertools as _itertools
import warnings as _warnings
import numpy as _np
import scipy.linalg as _spl
import scipy.optimize as _spo
import scipy.sparse as _sps
import scipy.sparse.linalg as _spsl
from pygsti.tools.basistools import change_basis
try:
from . import fastcalc as _fastcalc
except ImportError:
_fastcalc = None
#EXPM_DEFAULT_TOL = 1e-7
EXPM_DEFAULT_TOL = 2**-53 # Scipy default
def trace(m): # memory leak in numpy causes repeated trace calls to eat up all memory --TODO: Cython this
"""
The trace of a matrix, sum_i m[i,i].
A memory leak in some version of numpy can cause repeated calls to numpy's
trace function to eat up all available system memory, and this function
does not have this problem.
Parameters
----------
m : numpy array
the matrix (any object that can be double-indexed)
Returns
-------
element type of m
The trace of m.
"""
return sum([m[i, i] for i in range(m.shape[0])])
# with warnings.catch_warnings():
# warnings.filterwarnings('error')
# try:
# ret =
# except Warning:
# print "BAD trace from:\n"
# for i in range(M.shape[0]):
# print M[i,i]
# raise ValueError("STOP")
# return ret
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False.
"""
(m, n) = mx.shape
for i in range(m):
if abs(mx[i, i].imag) > tol: return False
for j in range(i + 1, n):
if abs(mx[i, j] - mx[j, i].conjugate()) > tol: return False
return True
def is_pos_def(mx, tol=1e-9):
"""
Test whether mx is a positive-definite matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is positive-semidefinite, otherwise False.
"""
evals = _np.linalg.eigvals(mx)
return all([ev > -tol for ev in evals])
def is_valid_density_mx(mx, tol=1e-9):
"""
Test whether mx is a valid density matrix (hermitian, positive-definite, and unit trace).
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magnitude of elements.
Returns
-------
bool
True if mx is a valid density matrix, otherwise False.
"""
return is_hermitian(mx, tol) and is_pos_def(mx, tol) and abs(trace(mx) - 1.0) < tol
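# Usage sketch (illustrative, not part of the original pyGSTi source): the three
# checks above compose for a simple qubit density matrix. The helper name is
# hypothetical.
def _example_density_mx_checks():
    rho = _np.array([[0.5, 0.5], [0.5, 0.5]])  # the pure state |+><+|
    assert is_hermitian(rho)
    assert is_pos_def(rho)           # eigenvalues 1 and 0 (PSD within tolerance)
    assert is_valid_density_mx(rho)  # hermitian, positive, unit trace
    return True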
def frobeniusnorm(ar):
"""
Compute the frobenius norm of an array (or matrix),
sqrt( sum( each_element_of_a^2 ) )
Parameters
----------
ar : numpy array
What to compute the frobenius norm of. Note that ar can be any shape
or number of dimensions.
Returns
-------
float or complex
depending on the element type of ar.
"""
return _np.sqrt(_np.sum(ar**2))
def frobeniusnorm_squared(ar):
"""
Compute the squared frobenius norm of an array (or matrix),
sum( each_element_of_a^2 )
Parameters
----------
ar : numpy array
What to compute the squared frobenius norm of. Note that ar can be any
shape or number of dimensions.
Returns
-------
float or complex
depending on the element type of ar.
"""
return _np.sum(ar**2)
def nullspace(m, tol=1e-7):
"""
Compute the nullspace of a matrix.
Parameters
----------
m : numpy array
A matrix of shape (M,N) whose nullspace to compute.
tol : float , optional
Nullspace tolerance, used when comparing singular values with zero.
Returns
-------
A matrix of shape (M,K) whose columns contain nullspace basis vectors.
"""
_, s, vh = _np.linalg.svd(m)
rank = (s > tol).sum()
return vh[rank:].T.copy()
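# Usage sketch (illustrative, not part of the original pyGSTi source): the columns
# returned by nullspace() are annihilated by m. The helper name is hypothetical.
def _example_nullspace():
    m = _np.array([[1., 2., 3.],
                   [2., 4., 6.]])  # rank 1, so the nullspace is 2-dimensional
    ns = nullspace(m)
    assert ns.shape == (3, 2)
    assert _np.linalg.norm(_np.dot(m, ns)) < 1e-10
    return ns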
def nullspace_qr(m, tol=1e-7):
"""
Compute the nullspace of a matrix using the QR decomposition.
The QR decomposition is faster but less accurate than the SVD
used by :func:`nullspace`.
Parameters
----------
m : numpy array
A matrix of shape (M,N) whose nullspace to compute.
tol : float , optional
Nullspace tolerance, used when comparing diagonal values of R with zero.
Returns
-------
A matrix of shape (M,K) whose columns contain nullspace basis vectors.
"""
#if M,N = m.shape, and q,r,p = _spl.qr(...)
# q.shape == (N,N), r.shape = (N,M), p.shape = (M,)
q, r, _ = _spl.qr(m.T, mode='full', pivoting=True)
rank = (_np.abs(_np.diagonal(r)) > tol).sum()
#DEBUG: requires q,r,p = _sql.qr(...) above
#assert( _np.linalg.norm(_np.dot(q,r) - m.T[:,p]) < 1e-8) #check QR decomp
#print("Rank QR = ",rank)
#print('\n'.join(map(str,_np.abs(_np.diagonal(r)))))
#print("Ret = ", q[:,rank:].shape, " Q = ",q.shape, " R = ",r.shape)
return q[:, rank:]
def nice_nullspace(m, tol=1e-7):
"""
Computes the nullspace of a matrix, and tries to return a "nice" basis for it.
Columns of the returned value (a basis for the nullspace) each have a maximum
absolute value of 1.0 and are chosen so as to align with the original
matrix's basis as much as possible (the basis is found by projecting each
original basis vector onto an arbitrarily-found nullspace and keeping only
a set of linearly independent projections).
Parameters
----------
m : numpy array
A matrix of shape (M,N) whose nullspace to compute.
tol : float , optional
Nullspace tolerance, used when comparing singular values with zero.
Returns
-------
A matrix of shape (M,K) whose columns contain nullspace basis vectors.
"""
nullsp = nullspace(m, tol)
nullsp_projector = _np.dot(nullsp, nullsp.conj().T)
keepers = []; current_rank = 0
for i in range(nullsp_projector.shape[1]): # same as mx.shape[1]
rank = _np.linalg.matrix_rank(nullsp_projector[:, 0:i + 1], tol=tol)
if rank > current_rank:
keepers.append(i)
current_rank = rank
ret = _np.take(nullsp_projector, keepers, axis=1)
for j in range(ret.shape[1]): # normalize columns so largest element is +1.0
mx = abs(max(ret[:, j]))
if mx > 1e-6: ret[:, j] /= mx
return ret
def normalize_columns(m, return_norms=False, ord=None):
"""
Normalizes the columns of a matrix.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
return_norms : bool, optional
If `True`, also return a 1D array containing the norms
of the columns (before they were normalized).
ord : int, optional
The order of the norm. See :function:`numpy.linalg.norm`.
Returns
-------
normalized_m : numpy.ndarray
The matrix after columns are normalized
column_norms : numpy.ndarray
Only returned when `return_norms=True`, a 1-dimensional array
of the pre-normalization norm of each column.
"""
norms = column_norms(m, ord)
norms[norms == 0.0] = 1.0 # avoid division of zero-column by zero
normalized_m = scale_columns(m, 1 / norms)
return (normalized_m, norms) if return_norms else normalized_m
def column_norms(m, ord=None):
"""
Compute the norms of the columns of a matrix.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
ord : int, optional
The order of the norm. See :function:`numpy.linalg.norm`.
Returns
-------
numpy.ndarray
A 1-dimensional array of the column norms (length is number of columns of `m`).
"""
if _sps.issparse(m):
#this could be done more efficiently, e.g. by converting to csc and taking column norms directly
norms = _np.array([_np.linalg.norm(m[:, j].todense(), ord=ord) for j in range(m.shape[1])])
else:
norms = _np.array([_np.linalg.norm(m[:, j], ord=ord) for j in range(m.shape[1])])
return norms
def scale_columns(m, scale_values):
"""
Scale each column of a matrix by a given value.
Usually used for normalization purposes, when the
matrix columns represent vectors.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
scale_values : numpy.ndarray
A 1-dimensional array of scale values, one per
column of `m`.
Returns
-------
numpy.ndarray or scipy sparse matrix
A copy of `m` with scaled columns, possibly with different sparsity structure.
"""
if _sps.issparse(m):
assert(len(scale_values) == m.shape[1])
m_csc = _sps.csc_matrix(m)
for j, scale in enumerate(scale_values):
m_csc.data[m_csc.indptr[j]:m_csc.indptr[j + 1]] *= scale
return m_csc
else:
return m * scale_values[None, :]
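# Usage sketch (illustrative, not part of the original pyGSTi source): round-trip
# of normalize_columns / column_norms / scale_columns on a dense matrix; the same
# calls work for scipy sparse matrices. The helper name is hypothetical.
def _example_normalize_columns():
    m = _np.array([[3., 0.],
                   [4., 2.]])
    normalized, norms = normalize_columns(m, return_norms=True)
    assert _np.allclose(norms, [5., 2.])                      # column 2-norms
    assert _np.allclose(column_norms(normalized), 1.0)
    assert _np.allclose(scale_columns(normalized, norms), m)  # undoes the normalization
    return normalized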
def columns_are_orthogonal(m, tol=1e-7):
"""
Checks whether a matrix contains orthogonal columns.
The columns do not need to be normalized. In the
complex case, two vectors v and w are considered orthogonal
if `dot(v.conj(), w) == 0`.
Parameters
----------
m : numpy.ndarray
The matrix to check.
tol : float, optional
Tolerance for checking whether dot products are zero.
Returns
-------
bool
"""
if m.size == 0: return True # boundary case
check = _np.dot(m.conj().T, m)
check[_np.diag_indices_from(check)] = 0.0
return bool(_np.linalg.norm(check) / check.size < tol)
def columns_are_orthonormal(m, tol=1e-7):
"""
Checks whether a matrix contains orthonormal columns.
In the complex case, two vectors v and w are considered orthonormal
when `dot(v.conj(), w) == 0` and each has unit norm.
Parameters
----------
m : numpy.ndarray
The matrix to check.
tol : float, optional
Tolerance for checking whether dot products are zero.
Returns
-------
bool
"""
if m.size == 0: return True # boundary case
check = _np.dot(m.conj().T, m)
return bool(_np.allclose(check, _np.identity(check.shape[0], 'd'), atol=tol))
def independent_columns(m, initial_independent_cols=None, tol=1e-7):
"""
Computes the indices of the linearly-independent columns in a matrix.
Optionally starts with a "base" matrix of independent columns, so that
the returned indices indicate the columns of `m` that are independent
of all the base columns and the other independent columns of `m`.
Parameters
----------
m : numpy.ndarray or scipy sparse matrix
The matrix.
initial_independent_cols : numpy.ndarray or scipy sparse matrix, optional
If not `None`, a matrix of columns that are already known to be independent,
against which the columns of `m` are tested (in addition to the already
chosen independent columns of `m`).
tol : float, optional
Tolerance threshold used to decide whether a singular value is nonzero
(it is considered nonzero if it is greater than `tol`).
Returns
-------
list
A list of the independent-column indices of `m`.
"""
indep_cols = []
if not _sps.issparse(m):
running_indep_cols = initial_independent_cols.copy() \
if (initial_independent_cols is not None) else _np.empty((m.shape[0], 0), m.dtype)
num_indep_cols = running_indep_cols.shape[1]  # number of already-independent columns
for j in range(m.shape[1]):
trial = _np.concatenate((running_indep_cols, m[:, j:j + 1]), axis=1)
if _np.linalg.matrix_rank(trial, tol=tol) == num_indep_cols + 1:
running_indep_cols = trial
indep_cols.append(j)
num_indep_cols += 1
else: # sparse case
from scipy.sparse.linalg.eigen.arpack.arpack import ArpackNoConvergence as _ArpackNoConvergence
running_indep_cols = initial_independent_cols.copy() \
if (initial_independent_cols is not None) else _sps.csc_matrix((m.shape[0], 0), dtype=m.dtype)
num_indep_cols = running_indep_cols.shape[1]
for j in range(m.shape[1]):
trial = _sps.hstack((running_indep_cols, m[:, j]))
try:
lowest_sval = _spsl.svds(trial, k=1, which="SM", return_singular_vectors=False)
except _ArpackNoConvergence:
lowest_sval = 0 # assume lack of convergence means smallest singular value was too small (?)
if lowest_sval > tol: # trial fogi dirs still linearly independent (full rank)
running_indep_cols = trial
indep_cols.append(j)
# else trial column made fogi dirs linearly dependent and so don't tally indep column
return indep_cols
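# Usage sketch (illustrative, not part of the original pyGSTi source): a column
# that is a linear combination of earlier columns is not reported as independent.
# The helper name is hypothetical.
def _example_independent_columns():
    m = _np.array([[1., 0., 1.],
                   [0., 1., 1.],
                   [0., 0., 0.]])  # third column = first + second
    assert independent_columns(m) == [0, 1]
    return True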
def pinv_of_matrix_with_orthogonal_columns(m):
""" TODO: docstring """
col_scaling = _np.sum(_np.abs(m)**2, axis=0)
m_with_scaled_cols = m.conj() / col_scaling[None, :]  # divide each column by its squared norm
return m_with_scaled_cols.T
def matrix_sign(m):
"""
The "sign" matrix of `m`
Parameters
----------
m : numpy.ndarray
the matrix.
Returns
-------
numpy.ndarray
"""
#Notes: sign(m) defined s.t. eigvecs of sign(m) are evecs of m
# and evals of sign(m) are +/-1 or 0 based on sign of eigenvalues of m
#Using the extremely numerically stable (but expensive) Schur method
# see http://www.maths.manchester.ac.uk/~higham/fm/OT104HighamChapter5.pdf
N = m.shape[0]; assert(m.shape == (N, N)), "m must be square!"
T, Z = _spl.schur(m, 'complex') # m = Z T Z^H where Z is unitary and T is upper-triangular
U = _np.zeros(T.shape, 'complex') # will be sign(T), which is easy to compute
# (U is also upper triangular), and then sign(m) = Z U Z^H
# diagonals are easy
U[_np.diag_indices_from(U)] = _np.sign(_np.diagonal(T))
#Off diagonals: use U^2 = I or TU = UT
# Note: Tij = Uij = 0 when i > j and i==j easy so just consider i<j case
# 0 = sum_k Uik Ukj = (i!=j b/c off-diag)
# FUTURE: speed this up by using np.dot instead of sums below
for j in range(1, N):
for i in range(j - 1, -1, -1):
S = U[i, i] + U[j, j]
if _np.isclose(S, 0): # then use TU = UT
if _np.isclose(T[i, i] - T[j, j], 0): # then just set to zero
U[i, j] = 0.0 # TODO: check correctness of this case
else:
U[i, j] = T[i, j] * (U[i, i] - U[j, j]) / (T[i, i] - T[j, j]) + \
sum([U[i, k] * T[k, j] - T[i, k] * U[k, j] for k in range(i + 1, j)]) \
/ (T[i, i] - T[j, j])
else: # use U^2 = I
U[i, j] = - sum([U[i, k] * U[k, j] for k in range(i + 1, j)]) / S
return _np.dot(Z, _np.dot(U, _np.conjugate(Z.T)))
#Quick & dirty - not always stable:
#U,_,Vt = _np.linalg.svd(M)
#return _np.dot(U,Vt)
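# Usage sketch (illustrative, not part of the original pyGSTi source): for a matrix
# with no purely-imaginary eigenvalues, matrix_sign() squares to the identity and
# commutes with its argument. The helper name is hypothetical.
def _example_matrix_sign():
    m = _np.array([[2., 1.],
                   [0., -3.]])  # eigenvalues 2 and -3
    s = matrix_sign(m)
    assert _np.allclose(_np.dot(s, s), _np.identity(2))
    assert _np.allclose(_np.dot(s, m), _np.dot(m, s))
    return s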
def print_mx(mx, width=9, prec=4, withbrackets=False):
"""
Print matrix in pretty format.
Will print real or complex matrices with a desired precision and
"cell" width.
Parameters
----------
mx : numpy array
the matrix (2-D array) to print.
width : int, optional
the width (in characters) of each printed element
prec : int, optional
the precision (in characters) of each printed element
withbrackets : bool, optional
whether to print brackets and commas to make the result
something that Python can read back in.
Returns
-------
None
"""
print(mx_to_string(mx, width, prec, withbrackets))
def mx_to_string(m, width=9, prec=4, withbrackets=False):
"""
Generate a "pretty-format" string for a matrix.
Will generate strings for real or complex matrices with a desired
precision and "cell" width.
Parameters
----------
m : numpy.ndarray
array to print.
width : int, optional
the width (in characters) of each converted element
prec : int, optional
the precision (in characters) of each converted element
withbrackets : bool, optional
whether to print brackets and commas to make the result
something that Python can read back in.
Returns
-------
string
matrix m as a pretty formatted string.
"""
if m.size == 0: return ""
s = ""; tol = 10**(-prec)
if _np.max(abs(_np.imag(m))) > tol:
return mx_to_string_complex(m, width, width, prec)
if len(m.shape) == 1: m = _np.array(m)[None, :] # so it works w/vectors too
if withbrackets: s += "["
for i in range(m.shape[0]):
if withbrackets: s += " [" if i > 0 else "["
for j in range(m.shape[1]):
if abs(m[i, j]) < tol: s += '{0: {w}.0f}'.format(0, w=width)
else: s += '{0: {w}.{p}f}'.format(m[i, j].real, w=width, p=prec)
if withbrackets and j + 1 < m.shape[1]: s += ","
if withbrackets: s += "]," if i + 1 < m.shape[0] else "]]"
s += "\n"
return s
def mx_to_string_complex(m, real_width=9, im_width=9, prec=4):
"""
Generate a "pretty-format" string for a complex-valued matrix.
Parameters
----------
m : numpy array
array to format.
real_width : int, optional
the width (in characters) of the real part of each element.
im_width : int, optional
the width (in characters) of the imaginary part of each element.
prec : int, optional
the precision (in characters) of each element's real and imaginary parts.
Returns
-------
string
matrix m as a pretty formatted string.
"""
if len(m.shape) == 1: m = m[None, :] # so it works w/vectors too
s = ""; tol = 10**(-prec)
for i in range(m.shape[0]):
for j in range(m.shape[1]):
if abs(m[i, j].real) < tol: s += "{0: {w}.0f}".format(0, w=real_width)
else: s += "{0: {w}.{p}f}".format(m[i, j].real, w=real_width, p=prec)
if abs(m[i, j].imag) < tol: s += "{0: >+{w}.0f}j".format(0, w=im_width)
else: s += "{0: >+{w}.{p}f}j".format(m[i, j].imag, w=im_width, p=prec)
s += "\n"
return s
def unitary_superoperator_matrix_log(m, mx_basis):
"""
Construct the logarithm of superoperator matrix `m`.
This function assumes that `m` acts as a unitary on density-matrix space,
(`m: rho -> U rho Udagger`) so that log(m) can be written as the action
by Hamiltonian `H`:
`log(m): rho -> -i[H,rho]`.
Parameters
----------
m : numpy array
The superoperator matrix whose logarithm is taken
mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
Returns
-------
numpy array
A matrix `logM`, of the same shape as `m`, such that `m = exp(logM)`
and `logM` can be written as the action `rho -> -i[H,rho]`.
"""
from . import lindbladtools as _lt # (would create circular imports if at top)
from . import optools as _ot # (would create circular imports if at top)
M_std = change_basis(m, mx_basis, "std")
evals = _np.linalg.eigvals(M_std)
assert(_np.allclose(_np.abs(evals), 1.0)) # simple but technically incomplete check for a unitary superop
# (e.g. could be anti-unitary: diag(1, -1, -1, -1))
U = _ot.process_mx_to_unitary(M_std)
H = _spl.logm(U) / -1j # U = exp(-iH)
logM_std = _lt.hamiltonian_to_lindbladian(H) # rho --> -i[H, rho] * sqrt(d)/2
logM = change_basis(logM_std * (2.0 / _np.sqrt(H.shape[0])), "std", mx_basis)
assert(_np.linalg.norm(_spl.expm(logM) - m) < 1e-8) # expensive b/c of expm - could comment for performance
return logM
def near_identity_matrix_log(m, tol=1e-8):
"""
Construct the logarithm of superoperator matrix `m` that is near the identity.
If `m` is real, the resulting logarithm will be real.
Parameters
----------
m : numpy array
The superoperator matrix whose logarithm is taken
tol : float, optional
The tolerance used when testing for zero imaginary parts.
Returns
-------
numpy array
A matrix `logM`, of the same shape as `m`, such that `m = exp(logM)`
and `logM` is real when `m` is real.
"""
# A near-identity matrix should have a unique logarithm, and it should be
# real if the original matrix is real
M_is_real = bool(_np.linalg.norm(m.imag) < tol)
logM = _spl.logm(m)
if M_is_real:
assert(_np.linalg.norm(logM.imag) < tol), \
"Failed to construct a real logarithm! " \
+ "This is probably because m is not near the identity.\n" \
+ "Its eigenvalues are: " + str(_np.linalg.eigvals(m))
logM = logM.real
return logM
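# Usage sketch (illustrative, not part of the original pyGSTi source): the logarithm
# of a real matrix close to the identity is real and exponentiates back to the
# input. The helper name is hypothetical.
def _example_near_identity_matrix_log():
    m = _np.identity(3) + 0.01 * _np.array([[0., 1., 0.],
                                            [-1., 0., 0.],
                                            [0., 0., 1.]])
    logm = near_identity_matrix_log(m)
    assert _np.isrealobj(logm)
    assert _np.linalg.norm(_spl.expm(logm) - m) < 1e-9
    return logm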
def approximate_matrix_log(m, target_logm, target_weight=10.0, tol=1e-6):
"""
Construct an approximate logarithm of superoperator matrix `m` that is real and near the `target_logm`.
The equation `m = exp( logM )` is allowed to become inexact in order to make
`logM` close to `target_logm`. In particular, the objective function that is
minimized is (where `||` indicates the 2-norm):
`|exp(logM) - m|_1 + target_weight * ||logM - target_logm||^2`
Parameters
----------
m : numpy array
The superoperator matrix whose logarithm is taken
target_logm : numpy array
The target logarithm
target_weight : float
A weighting factor used to balance the exactness-of-log term
with the closeness-to-target term in the optimized objective
function. This value multiplies the latter term.
tol : float, optional
Optimizer tolerance.
Returns
-------
logM : numpy array
A matrix of the same shape as `m`.
"""
assert(_np.linalg.norm(m.imag) < 1e-8), "Argument `m` must be a *real* matrix!"
mx_shape = m.shape
def _objective(flat_logm):
logM = flat_logm.reshape(mx_shape)
testM = _spl.expm(logM)
ret = target_weight * _np.linalg.norm(logM - target_logm)**2 + \
_np.linalg.norm(testM.flatten() - m.flatten(), 1)
#print("DEBUG: ",ret)
return ret
#Alt objective1: puts L1 on target term
#return _np.linalg.norm(testM-m)**2 + target_weight*_np.linalg.norm(
# logM.flatten() - target_logm.flatten(), 1)
#Alt objective2: all L2 terms (ridge regression)
#return target_weight*_np.linalg.norm(logM-target_logm)**2 + \
# _np.linalg.norm(testM - m)**2
#from .. import optimize as _opt
#print_obj_func = _opt.create_obj_func_printer(_objective) #only ever prints to stdout!
print_obj_func = None
logM = _np.real(real_matrix_log(m, action_if_imaginary="ignore")) # just drop any imaginary part
initial_flat_logM = logM.flatten() # + 0.1*target_logm.flatten()
# Note: adding some of target_logm doesn't seem to help; and hurts in easy cases
if _objective(initial_flat_logM) > 1e-16: # otherwise initial logM is fine!
#print("Initial objective fn val = ",_objective(initial_flat_logM))
#print("Initial inexactness = ",_np.linalg.norm(_spl.expm(logM)-m),
# _np.linalg.norm(_spl.expm(logM).flatten()-m.flatten(), 1),
# _np.linalg.norm(logM-target_logm)**2)
solution = _spo.minimize(_objective, initial_flat_logM, options={'maxiter': 1000},
method='L-BFGS-B', callback=print_obj_func, tol=tol)
logM = solution.x.reshape(mx_shape)
#print("Final objective fn val = ",_objective(solution.x))
#print("Final inexactness = ",_np.linalg.norm(_spl.expm(logM)-m),
# _np.linalg.norm(_spl.expm(logM).flatten()-m.flatten(), 1),
# _np.linalg.norm(logM-target_logm)**2)
return logM
def real_matrix_log(m, action_if_imaginary="raise", tol=1e-8):
"""
Construct a *real* logarithm of real matrix `m`.
This is possible when negative eigenvalues of `m` come in pairs, so
that they can be viewed as complex conjugate pairs.
Parameters
----------
m : numpy array
The matrix to take the logarithm of
action_if_imaginary : {"raise","warn","ignore"}, optional
What action should be taken if a real-valued logarithm cannot be found.
"raise" raises a ValueError, "warn" issues a warning, and "ignore"
ignores the condition and simply returns the complex-valued result.
tol : float, optional
An internal tolerance used when testing for equivalence and zero
imaginary parts (real-ness).
Returns
-------
logM : numpy array
A matrix `logM`, of the same shape as `m`, such that `m = exp(logM)`
"""
assert(_np.linalg.norm(_np.imag(m)) < tol), "real_matrix_log must be passed a *real* matrix!"
evals, U = _np.linalg.eig(m)
U = U.astype("complex")
used_indices = set()
neg_real_pairs_real_evecs = []
neg_real_pairs_conj_evecs = []
unpaired_indices = []
for i, ev in enumerate(evals):
if i in used_indices: continue
used_indices.add(i)
if abs(_np.imag(ev)) < tol and _np.real(ev) < 0:
evec1 = U[:, i]
if _np.linalg.norm(_np.imag(evec1)) < tol:
# evec1 is real, so look for ev2 corresponding to another real evec
for j, ev2 in enumerate(evals[i + 1:], start=i + 1):
if abs(ev - ev2) < tol and _np.linalg.norm(_np.imag(U[:, j])) < tol:
used_indices.add(j)
neg_real_pairs_real_evecs.append((i, j)); break
else: unpaired_indices.append(i)
else:
# evec1 is complex, so look for ev2 corresponding to the conjugate of evec1
evec1C = evec1.conjugate()
for j, ev2 in enumerate(evals[i + 1:], start=i + 1):
if abs(ev - ev2) < tol and _np.linalg.norm(evec1C - U[:, j]) < tol:
used_indices.add(j)
neg_real_pairs_conj_evecs.append((i, j)); break
else: unpaired_indices.append(i)
log_evals = _np.log(evals.astype("complex"))
# astype guards against case all evals are real but some are negative
#DEBUG
#print("DB: evals = ",evals)
#print("DB: log_evals:",log_evals)
#for i,ev in enumerate(log_evals):
# print(i,": ",ev, ",".join([str(j) for j in range(U.shape[0]) if abs(U[j,i]) > 0.05]))
#print("DB: neg_real_pairs_real_evecs = ",neg_real_pairs_real_evecs)
#print("DB: neg_real_pairs_conj_evecs = ",neg_real_pairs_conj_evecs)
#print("DB: evec[5] = ",mx_to_string(U[:,5]))
#print("DB: evec[6] = ",mx_to_string(U[:,6]))
for (i, j) in neg_real_pairs_real_evecs: # need to adjust evecs as well
log_evals[i] = _np.log(-evals[i]) + 1j * _np.pi
log_evals[j] = log_evals[i].conjugate()
U[:, i] = (U[:, i] + 1j * U[:, j]) / _np.sqrt(2)
U[:, j] = U[:, i].conjugate()
for (i, j) in neg_real_pairs_conj_evecs: # evecs already conjugates of each other
log_evals[i] = _np.log(-evals[i].real) + 1j * _np.pi
log_evals[j] = log_evals[i].conjugate()
#Note: if *don't* conjugate j-th, then this picks *consistent* branch cut (what scipy would do), which
# results, in general, in a complex logarithm BUT one which seems more intuitive (?) - at least permits
# expected angle extraction, etc.
logM = _np.dot(U, _np.dot(_np.diag(log_evals), _np.linalg.inv(U)))
#if there are unpaired negative real eigenvalues, the logarithm might be imaginary
mayBeImaginary = bool(len(unpaired_indices) > 0)
imMag = _np.linalg.norm(_np.imag(logM))
if mayBeImaginary and imMag > tol:
if action_if_imaginary == "raise":
raise ValueError("Cannot construct a real log: unpaired negative"
+ " real eigenvalues: %s" % [evals[i] for i in unpaired_indices])
#+ "\nDEBUG m = \n%s" % m + "\nDEBUG evals = %s" % evals)
elif action_if_imaginary == "warn":
_warnings.warn("Cannot construct a real log: unpaired negative"
+ " real eigenvalues: %s" % [evals[i] for i in unpaired_indices])
elif action_if_imaginary == "ignore":
pass
else:
assert(False), "Invalid 'action_if_imaginary' argument: %s" % action_if_imaginary
else:
assert(imMag <= tol), "real_matrix_log failed to construct a real logarithm!"
logM = _np.real(logM)
return logM
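# Usage sketch (illustrative, not part of the original pyGSTi source): a 180-degree
# rotation has a pair of -1 eigenvalues, so real_matrix_log() can return a *real*
# generator for it. The helper name is hypothetical.
def _example_real_matrix_log():
    m = -_np.identity(2)  # rotation by pi
    logm = real_matrix_log(m)
    assert _np.isrealobj(logm)
    assert _np.linalg.norm(_spl.expm(logm) - m) < 1e-8
    return logm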
## ------------------------ Erik : Matrix tools that Tim has moved here -----------
from scipy.linalg import sqrtm as _sqrtm
import itertools as _ittls
def column_basis_vector(i, dim):
"""
Returns the ith standard basis vector in dimension dim.
Parameters
----------
i : int
Basis vector index.
dim : int
Vector dimension.
Returns
-------
numpy.ndarray
An array of shape `(dim, 1)` that is all zeros except for
its `i`-th element, which equals 1.
"""
output = _np.zeros([dim, 1], float)
output[i] = 1.
return output
def vec(matrix_in):
"""
Stacks the columns of a matrix to return a vector
Parameters
----------
matrix_in : numpy.ndarray
Returns
-------
numpy.ndarray
"""
return [b for a in _np.transpose(matrix_in) for b in a]
def unvec(vector_in):
"""
Slices a vector into the columns of a matrix.
Parameters
----------
vector_in : numpy.ndarray
Returns
-------
numpy.ndarray
"""
dim = int(_np.sqrt(len(vector_in)))
return _np.transpose(_np.array(list(
zip(*[_ittls.chain(vector_in,
_ittls.repeat(None, dim - 1))] * dim))))
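# Usage sketch (illustrative, not part of the original pyGSTi source): vec()
# column-stacks a matrix and unvec() undoes it. The helper name is hypothetical.
def _example_vec_unvec():
    m = _np.array([[1, 2],
                   [3, 4]])
    v = vec(m)  # column-stacking gives [1, 3, 2, 4]
    assert _np.array_equal(v, [1, 3, 2, 4])
    assert _np.array_equal(unvec(v), m)
    return v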
def norm1(m):
"""
Returns the trace norm (Schatten 1-norm) of a matrix
Parameters
----------
m : numpy.ndarray
The matrix.
Returns
-------
float
"""
return float(_np.real(_np.trace(_sqrtm(_np.dot(m.conj().T, m)))))
def random_hermitian(dim):
"""
Generates a random Hermitian matrix
Parameters
----------
dim : int
the matrix dimension.
Returns
-------
numpy.ndarray
"""
my_norm = 0.
while my_norm < 0.5:
dim = int(dim)
a = _np.random.random(size=[dim, dim])
b = _np.random.random(size=[dim, dim])
c = a + 1.j * b + (a + 1.j * b).conj().T
my_norm = norm1(c)
return c / my_norm
def norm1to1(operator, num_samples=10000, mx_basis="gm", return_list=False):
"""
The Hermitian 1-to-1 norm of a superoperator represented in the standard basis.
This is calculated via Monte-Carlo sampling. The definition of Hermitian 1-to-1
norm can be found in arxiv:1109.6887.
Parameters
----------
operator : numpy.ndarray
The operator matrix to take the norm of.
num_samples : int, optional
Number of Monte-Carlo samples.
mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis
The basis of `operator`.
return_list : bool, optional
Whether the entire list of sampled values is returned or just the maximum.
Returns
-------
float or list
Depends on the value of `return_list`.
"""
std_operator = change_basis(operator, mx_basis, 'std')
rand_dim = int(_np.sqrt(float(len(std_operator))))
vals = [norm1(unvec(_np.dot(std_operator, vec(random_hermitian(rand_dim)))))
for n in range(num_samples)]
if return_list:
return vals
else:
return max(vals)
## ------------------------ General utility fns -----------------------------------
def complex_compare(a, b):
"""
Comparison function for complex numbers that compares real part, then imaginary part.
Parameters
----------
a : complex
b : complex
Returns
-------
-1 if a < b
0 if a == b
+1 if a > b
"""
if a.real < b.real: return -1
elif a.real > b.real: return 1
elif a.imag < b.imag: return -1
elif a.imag > b.imag: return 1
else: return 0
def prime_factors(n):
"""
Trial-division algorithm to produce the prime factors of `n`
Parameters
----------
n : int
The number to factorize.
Returns
-------
list
The prime factors of `n`.
"""
i = 2; factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
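# Usage sketch (illustrative, not part of the original pyGSTi source).
# The helper name is hypothetical.
def _example_prime_factors():
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(97) == [97]  # a prime factors as itself
    return True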
def minweight_match(a, b, metricfn=None, return_pairs=True,
pass_indices_to_metricfn=False):
"""
Matches the elements of two vectors, `a` and `b` by minimizing the weight between them.
The weight is defined as the sum of `metricfn(x,y)` over all `(x,y)` pairs
(`x` in `a` and `y` in `b`).
Parameters
----------
a : list or numpy.ndarray
First 1D array to match elements between.
b : list or numpy.ndarray
Second 1D array to match elements between.
metricfn : function, optional
A function of two float parameters, `x` and `y`, which defines the cost
associated with matching `x` with `y`. If None, `abs(x-y)` is used.
return_pairs : bool, optional
If True, the matching is also returned.
pass_indices_to_metricfn : bool, optional
If True, the metric function is passed two *indices* into the `a` and
`b` arrays, respectively, instead of the values.
Returns
-------
weight_array : numpy.ndarray
The array of weights corresponding to the min-weight matching. The sum
of this array's elements is the minimized total weight.
pairs : list
Only returned when `return_pairs == True`, a list of 2-tuple pairs of
indices `(ix,iy)` giving the indices into `a` and `b` respectively of
each matched pair. The first (ix) indices will be in continuous
ascending order starting at zero.
"""
assert(len(a) == len(b))
if metricfn is None:
def metricfn(x, y): return abs(x - y)
D = len(a)
weightMx = _np.empty((D, D), 'd')
if pass_indices_to_metricfn:
for i, x in enumerate(a):
weightMx[i, :] = [metricfn(i, j) for j, y in enumerate(b)]
else:
for i, x in enumerate(a):
weightMx[i, :] = [metricfn(x, y) for j, y in enumerate(b)]
a_inds, b_inds = _spo.linear_sum_assignment(weightMx)
assert(_np.allclose(a_inds, range(D))), "linear_sum_assignment returned unexpected row indices!"
matched_pairs = list(zip(a_inds, b_inds))
min_weights = weightMx[a_inds, b_inds]
if return_pairs:
return min_weights, matched_pairs
else:
return min_weights
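# Usage sketch (illustrative, not part of the original pyGSTi source): with the
# default abs(x - y) metric the optimal matching pairs a[0]-b[1], a[1]-b[2] and
# a[2]-b[0], with total weight 0.2 + 0.1 + 0.1. The helper name is hypothetical.
def _example_minweight_match():
    a = _np.array([0.0, 1.0, 2.0])
    b = _np.array([2.1, 0.2, 0.9])
    weights, pairs = minweight_match(a, b)
    assert abs(weights.sum() - 0.4) < 1e-9
    return pairs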
def minweight_match_realmxeigs(a, b, metricfn=None,
pass_indices_to_metricfn=False, eps=1e-9):
"""
Matches the elements of `a` and `b`, which are assumed to be either real or one-half of a conjugate pair.
Matching is performed by minimizing the weight between elements,
defined as the sum of `metricfn(x,y)` over all `(x,y)` pairs
(`x` in `a` and `y` in `b`). If straightforward matching fails
to preserve eigenvalue conjugacy relations, then real and conjugate-
pair eigenvalues are matched *separately* to ensure relations are
preserved (but this can result in a sub-optimal matching). A
ValueError is raised when the elements of `a` and `b` have incompatible
conjugacy structures (#'s of conjugate vs. real pairs).
Parameters
----------
a : numpy.ndarray
First 1D array to match.
b : numpy.ndarray
Second 1D array to match.
metricfn : function, optional
A function of two float parameters, `x` and `y`, which defines the cost
associated with matching `x` with `y`. If None, `abs(x-y)` is used.
pass_indices_to_metricfn : bool, optional
If True, the metric function is passed two *indices* into the `a` and
`b` arrays, respectively, instead of the values.
eps : float, optional
Tolerance when checking if eigenvalues are equal to each other.
Returns
-------
pairs : list
A list of 2-tuple pairs of indices `(ix,iy)` giving the indices into
`a` and `b` respectively of each matched pair.
"""
def check(pairs):
for i, (p0, p1) in enumerate(pairs):
for q0, q1 in pairs[i + 1:]:
a_conj = _np.isclose(a[p0], _np.conjugate(a[q0]))
b_conj = _np.isclose(b[p1], _np.conjugate(b[q1]))
if (abs(a[p0].imag) > 1e-6 and a_conj and not b_conj) or \
(abs(b[p1].imag) > 1e-6 and b_conj and not a_conj):
#print("DB: FALSE at: ",(p0,p1),(q0,q1),(a[p0],b[p1]),(a[q0],b[q1]),a_conj,b_conj)
return False
return True
#First attempt:
# See if matching everything at once satisfies conjugacy relations
# (if this works, this is the best, since it considers everything)
_, pairs = minweight_match(a, b, metricfn, True,
pass_indices_to_metricfn)
if check(pairs):
return pairs # we're done! that was easy
#Otherwise we fall back to considering real values and conj pairs separately
#identify real values and conjugate pairs
def split_real_conj(ar):
real_inds = []; conj_inds = []
for i, v in enumerate(ar):
if abs(v.imag) < eps: real_inds.append(i)
else:
for pair in conj_inds:
if i in pair: break # ok, we've already found v's pair
else:
for j, v2 in enumerate(ar[i + 1:], start=i + 1):
if _np.isclose(_np.conj(v), v2) and all([(j not in cpair) for cpair in conj_inds]):
conj_inds.append((i, j)); break
else:
raise ValueError("No conjugate pair found for %s" % str(v))
# choose 'a+ib' to be representative of pair
conj_rep_inds = [p0 if (ar[p0].imag > ar[p1].imag) else p1
for (p0, p1) in conj_inds]
return real_inds, conj_inds, conj_rep_inds
def add_conjpair(ar, conj_inds, conj_rep_inds, real_inds):
for ii, i in enumerate(real_inds):
for jj, j in enumerate(real_inds[ii + 1:], start=ii + 1):
if _np.isclose(ar[i], ar[j]):
conj_inds.append((i, j))
conj_rep_inds.append(i)
del real_inds[jj]; del real_inds[ii] # note: we know jj > ii
return True
return False
a_real, a_conj, a_reps = split_real_conj(a) # hold indices to a & b arrays
b_real, b_conj, b_reps = split_real_conj(b) # hold indices to a & b arrays
while len(a_conj) > len(b_conj): # try to add real-pair(s) to b_conj
if not add_conjpair(b, b_conj, b_reps, b_real):
raise ValueError(("Vectors `a` and `b` don't have the same conjugate-pair structure, "
" and so they cannot be matched in a way the preserves this structure."))
while len(b_conj) > len(a_conj): # try to add real-pair(s) to a_conj
if not add_conjpair(a, a_conj, a_reps, a_real):
raise ValueError(("Vectors `a` and `b` don't have the same conjugate-pair structure, "
" and so they cannot be matched in a way the preserves this structure."))
#Note: problem with this approach is that we might convert a
# real-pair -> conj-pair sub-optimally (i.e. there might be multiple
# such conversions and we just choose one at random).
_, pairs1 = minweight_match(a[a_real], b[b_real], metricfn, True,
pass_indices_to_metricfn)
_, pairs2 = minweight_match(a[a_reps], b[b_reps], metricfn, True,
pass_indices_to_metricfn)
#pair1 gives matching of real values, pairs2 gives that of conj pairs.
# Now just need to assemble a master pairs list to return.
pairs = []
for p0, p1 in pairs1: # p0 & p1 are indices into a_real & b_real
pairs.append((a_real[p0], b_real[p1]))
for p0, p1 in pairs2: # p0 & p1 are indices into a_reps & b_reps
pairs.append((a_reps[p0], b_reps[p1]))
a_other = a_conj[p0][0] if (a_conj[p0][0] != a_reps[p0]) else a_conj[p0][1]
b_other = b_conj[p1][0] if (b_conj[p1][0] != b_reps[p1]) else b_conj[p1][1]
pairs.append((a_other, b_other))
return sorted(pairs, key=lambda x: x[0]) # sort by a's index
def _fas(a, inds, rhs, add=False):
"""
Fancy Assignment, equivalent to `a[*inds] = rhs` but with
the elements of inds (allowed to be integers, slices, or
integer arrays) always specifying a generalized slice along
the given dimension. This avoids some weird numpy indexing
rules that make using square brackets a pain.
"""
inds = tuple([slice(None) if (i is None) else i for i in inds])
#Mixes of ints and tuples are fine, and a single
# index-list index is fine too. The case we need to
# deal with is indexing a multi-dimensional array with
# one or more index-lists
if all([isinstance(i, (int, slice)) for i in inds]) or len(inds) == 1:
if add:
a[inds] += rhs # all integers or slices behave nicely
else:
a[inds] = rhs # all integers or slices behave nicely
else:
#convert each dimension's index to a list, take a product of
# these lists, and flatten the right hand side to get the
# proper assignment:
b = []
single_int_inds = [] # for Cython, a and rhs must have the same
# number of dims. This keeps track of single-ints
for ii, i in enumerate(inds):
if isinstance(i, (int, _np.int64)):
b.append(_np.array([i], _np.int64))
single_int_inds.append(ii)
elif isinstance(i, slice):
b.append(_np.array(list(range(*i.indices(a.shape[ii]))), _np.int64))
else:
b.append(_np.array(i, _np.int64))
nDims = len(b)
if nDims > 0 and all([len(x) > 0 for x in b]): # b/c a[()] just returns the entire array!
if _fastcalc is not None and not add:
#Note: we rarely/never use add=True, so don't bother implementing in Cython yet...
if len(single_int_inds) > 0:
remove_single_int_dims = [b[i][0] if (i in single_int_inds) else slice(None)
for i in range(nDims)] # e.g. [:,2,:] if index 1 is a single int
for ii in reversed(single_int_inds): del b[ii] # remove single-int els of b
av = a[tuple(remove_single_int_dims)] # a view into a
nDims -= len(single_int_inds) # for cython routines below
else:
av = a
#Note: we do not require these arrays to be contiguous
if nDims == 1:
_fastcalc.fast_fas_helper_1d(av, rhs, b[0])
elif nDims == 2:
_fastcalc.fast_fas_helper_2d(av, rhs, b[0], b[1])
elif nDims == 3:
_fastcalc.fast_fas_helper_3d(av, rhs, b[0], b[1], b[2])
else:
raise NotImplementedError("No fas helper for nDims=%d" % nDims)
else:
indx_tups = list(_itertools.product(*b))
inds = tuple(zip(*indx_tups)) # un-zips to one list per dim
if add:
a[inds] += rhs.flatten()
else:
a[inds] = rhs.flatten()
#OLD DEBUG: just a reference for building the C-implementation (this is very slow in python!)
##Alt: C-able impl
#indsPerDim = b # list of indices per dimension
#nDims = len(inds)
#b = [0]*nDims
#a_strides = []; stride = 1
#for s in reversed(a.shape):
# a_strides.insert(0,stride)
# stride *= s
#rhs_dims = rhs.shape
#
#a_indx = 0
#for i in range(nDims):
# a_indx += indsPerDim[i][0] * a_strides[i]
#rhs_indx = 0
#
#while(True):
#
# #a.flat[a_indx] = rhs.flat[rhs_indx]
# assert(_np.isclose(a.flat[a_indx],rhs.flat[rhs_indx]))
# rhs_indx += 1 # always increments by 1
#
# #increment b ~ itertools.product & update vec_index_noop = _np.dot(self.multipliers, b)
# for i in range(nDims-1,-1,-1):
# if b[i]+1 < rhs_dims[i]:
# a_indx -= indsPerDim[i][b[i]] * a_strides[i]
# b[i] += 1
# a_indx += indsPerDim[i][b[i]] * a_strides[i]
# break
# else:
# a_indx -= indsPerDim[i][b[i]] * a_strides[i]
# b[i] = 0
# a_indx += indsPerDim[i][b[i]] * a_strides[i]
# else:
# break # can't increment anything - break while(True) loop
return a
def _findx_shape(a, inds):
""" Returns the shape of a fancy-indexed array (`a[*inds].shape`) """
shape = []
for ii, N in enumerate(a.shape):
indx = inds[ii] if ii < len(inds) else None
if indx is None: shape.append(N)
elif isinstance(indx, slice):
shape.append(len(range(*indx.indices(N))))
else: # assume indx is an index list or array
shape.append(len(indx))
return shape
def _findx(a, inds, always_copy=False):
"""
Fancy Indexing, equivalent to `a[*inds].copy()` but with
the elements of inds (allowed to be integers, slices, or
integer arrays) always specifying a generalized slice along
the given dimension. This avoids some weird numpy indexing
rules that make using square brackets a pain.
"""
inds = tuple([slice(None) if (i is None) else i for i in inds])
#Mixes of ints and tuples are fine, and a single
# index-list index is fine too. The case we need to
# deal with is indexing a multi-dimensional array with
# one or more index-lists
if all([isinstance(i, (int, slice)) for i in inds]) or len(inds) == 1:
return a[inds].copy() if always_copy else a[inds] # all integers or slices behave nicely
else:
#Need to copy to a new array
b = []; squeeze = []
for ii, i in enumerate(inds):
if isinstance(i, int):
b.append([i]); squeeze.append(ii) # squeeze ii-th dimension at end
elif isinstance(i, slice):
b.append(list(range(*i.indices(a.shape[ii]))))
else:
b.append(list(i))
a_inds_shape = [len(x) for x in b]
indx_tups = list(_itertools.product(*b))
if len(indx_tups) > 0: # b/c a[()] just returns the entire array!
inds = tuple(zip(*indx_tups)) # un-zips to one list per dim
a_inds = a[inds].copy() # a 1D array of flattened "fancy" a[inds]
a_inds.shape = a_inds_shape # reshape
else:
a_inds = _np.zeros(a_inds_shape, a.dtype) # has zero elements
assert(a_inds.size == 0)
a_inds = a_inds.squeeze(axis=tuple(squeeze))
return a_inds
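# Usage sketch (illustrative, not part of the original pyGSTi source): _findx treats
# every index as a generalized slice, so mixing slices with index lists behaves like
# an outer product of the per-axis indices. The helper name is hypothetical.
def _example_findx():
    a = _np.arange(24).reshape(2, 3, 4)
    sub = _findx(a, ([0, 1], slice(None), [1, 3]))
    assert sub.shape == (2, 3, 2)
    assert _np.array_equal(sub, a[:, :, [1, 3]])
    return sub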
def safe_dot(a, b):
"""
Performs dot(a,b) correctly when neither, either, or both arguments are sparse matrices.
Parameters
----------
a : numpy.ndarray or scipy.sparse matrix.
First matrix.
b : numpy.ndarray or scipy.sparse matrix.
Second matrix.
Returns
-------
numpy.ndarray or scipy.sparse matrix
"""
if _sps.issparse(a):
return a.dot(b) # sparseMx.dot works for both sparse and dense args
elif _sps.issparse(b):
# to return a sparse mx even when a is dense (asymmetric behavior):
# --> return _sps.csr_matrix(a).dot(b) # numpyMx.dot can't handle sparse argument
return _np.dot(a, b.toarray())
else:
return _np.dot(a, b)
def safe_real(a, inplace=False, check=False):
"""
Get the real-part of `a`, where `a` can be either a dense array or a sparse matrix.
Parameters
----------
a : numpy.ndarray or scipy.sparse matrix.
Array to take real part of.
inplace : bool, optional
Whether this operation should be done in-place.
check : bool, optional
If True, raise a `ValueError` if `a` has a nonzero imaginary part.
Returns
-------
numpy.ndarray or scipy.sparse matrix
"""
if check:
assert(safe_norm(a, 'imag') < 1e-6), "Check failed: taking real-part of matrix w/nonzero imaginary part"
if _sps.issparse(a):
if _sps.isspmatrix_csr(a):
if inplace:
ret = _sps.csr_matrix((_np.real(a.data), a.indices, a.indptr), shape=a.shape, dtype='d')
else: # copy
ret = _sps.csr_matrix((_np.real(a.data).copy(), a.indices.copy(),
a.indptr.copy()), shape=a.shape, dtype='d')
ret.eliminate_zeros()
return ret
else:
raise NotImplementedError("safe_real() doesn't work with %s matrices yet" % str(type(a)))
else:
return _np.real(a)
def safe_imag(a, inplace=False, check=False):
"""
Get the imaginary-part of `a`, where `a` can be either a dense array or a sparse matrix.
Parameters
----------
a : numpy.ndarray or scipy.sparse matrix.
Array to take imaginary part of.
inplace : bool, optional
Whether this operation should be done in-place.
check : bool, optional
If True, raise a `ValueError` if `a` has a nonzero real part.
Returns
-------
numpy.ndarray or scipy.sparse matrix
"""
if check:
assert(safe_norm(a, 'real') < 1e-6), "Check failed: taking imag-part of matrix w/nonzero real part"
if _sps.issparse(a):
if _sps.isspmatrix_csr(a):
if inplace:
ret = _sps.csr_matrix((_np.imag(a.data), a.indices, a.indptr), shape=a.shape, dtype='d')
else: # copy
ret = _sps.csr_matrix((_np.imag(a.data).copy(), a.indices.copy(),
a.indptr.copy()), shape=a.shape, dtype='d')
ret.eliminate_zeros()
return ret
else:
raise NotImplementedError("safe_real() doesn't work with %s matrices yet" % str(type(a)))
else:
return _np.imag(a)
def safe_norm(a, part=None):
"""
Get the frobenius norm of a matrix or vector, `a`, when it is either a dense array or a sparse matrix.
Parameters
----------
a : ndarray or scipy.sparse matrix
The matrix or vector to take the norm of.
part : {None,'real','imag'}
If not None, return the norm of the real or imaginary
part of `a`.
Returns
-------
float
"""
if part == 'real': takepart = _np.real
elif part == 'imag': takepart = _np.imag
else: takepart = lambda x: x
if _sps.issparse(a):
assert(_sps.isspmatrix_csr(a)), "Non-CSR sparse formats not implemented"
return _np.linalg.norm(takepart(a.data))
else:
return _np.linalg.norm(takepart(a))
# could also use _spsl.norm(A)
def safe_onenorm(a):
"""
Computes the 1-norm of the dense or sparse matrix `a`.
Parameters
----------
a : ndarray or sparse matrix
The matrix or vector to take the norm of.
Returns
-------
float
"""
if _sps.isspmatrix(a):
return sparse_onenorm(a)
else:
return _np.linalg.norm(a, 1)
def csr_sum_indices(csr_matrices):
"""
Precomputes the indices needed to sum a set of CSR sparse matrices.
Computes the index-arrays needed for use in :method:`csr_sum`,
along with the index pointer and column-indices arrays for constructing
a "template" CSR matrix to be the destination of `csr_sum`.
Parameters
----------
csr_matrices : list
The SciPy CSR matrices to be summed.
Returns
-------
ind_arrays : list
A list of numpy arrays giving the destination data-array indices
of each element of `csr_matrices`.
indptr, indices : numpy.ndarray
The row-pointer and column-indices arrays specifying the sparsity
structure of the destination CSR matrix.
N : int
The dimension of the destination matrix (and of each member of
`csr_matrices`)
"""
if len(csr_matrices) == 0: return [], _np.empty(0, int), _np.empty(0, int), 0
N = csr_matrices[0].shape[0]
for mx in csr_matrices:
assert(mx.shape == (N, N)), "Matrices must have the same square shape!"
indptr = [0]
indices = []
csr_sum_array = [list() for mx in csr_matrices]
#FUTURE sort column indices
for iRow in range(N):
dataInds = {} # keys = column indices, values = data indices (for data in current row)
for iMx, mx in enumerate(csr_matrices):
for i in range(mx.indptr[iRow], mx.indptr[iRow + 1]):
iCol = mx.indices[i]
if iCol not in dataInds: # add a new element to final mx
indices.append(iCol)
dataInds[iCol] = len(indices) - 1 # marks the final data index for this column
csr_sum_array[iMx].append(dataInds[iCol])
indptr.append(len(indices))
#convert lists -> arrays
csr_sum_array = [_np.array(lst, _np.int64) for lst in csr_sum_array]
indptr = _np.array(indptr)
indices = _np.array(indices)
return csr_sum_array, indptr, indices, N
def csr_sum(data, coeffs, csr_mxs, csr_sum_indices):
"""
Accelerated summation of several CSR-format sparse matrices.
:method:`csr_sum_indices` precomputes the necessary indices for
summing directly into the data-array of a destination CSR sparse matrix.
If `data` is the data-array of matrix `D` (for "destination"), then this
method performs:
`D += sum_i( coeff[i] * csr_mxs[i] )`
Note that `D` is not returned; the sum is done internally into D's
data-array.
Parameters
----------
data : numpy.ndarray
The data-array of the destination CSR-matrix.
coeffs : iterable
The weight coefficients which multiply each summed matrix.
csr_mxs : iterable
A list of CSR matrix objects whose data-array is given by
`obj.data` (e.g. a SciPy CSR sparse matrix).
csr_sum_indices : list
A list of precomputed index arrays as returned by
:method:`csr_sum_indices`.
Returns
-------
None
"""
for coeff, mx, inds in zip(coeffs, csr_mxs, csr_sum_indices):
data[inds] += coeff * mx.data
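# Usage sketch (illustrative, not part of the original pyGSTi source): build a
# destination CSR "template" with csr_sum_indices, then accumulate 2*A + 3*B into
# its data array with csr_sum. The helper name is hypothetical.
def _example_csr_sum():
    A = _sps.csr_matrix(_np.array([[1., 0.], [0., 2.]]))
    B = _sps.csr_matrix(_np.array([[0., 3.], [4., 0.]]))
    sum_inds, indptr, indices, N = csr_sum_indices([A, B])
    data = _np.zeros(len(indices), 'd')
    csr_sum(data, [2.0, 3.0], [A, B], sum_inds)
    D = _sps.csr_matrix((data, indices, indptr), shape=(N, N))
    assert _np.allclose(D.toarray(), 2 * A.toarray() + 3 * B.toarray())
    return D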
def csr_sum_flat_indices(csr_matrices):
"""
Precomputes quantities allowing fast computation of linear combinations of CSR sparse matrices.
The returned quantities can later be used to quickly compute a linear
combination of the CSR sparse matrices `csr_matrices`.
Computes the index and data arrays needed for use in :method:`csr_sum_flat`,
along with the index pointer and column-indices arrays for constructing
a "template" CSR matrix to be the destination of `csr_sum_flat`.
Parameters
----------
csr_matrices : list
The SciPy CSR matrices to be summed.
Returns
-------
flat_dest_index_array : numpy array
A 1D array of one element per nonzero element in any of
`csr_matrices`, giving the destination-index of that element.
flat_csr_mx_data : numpy array
A 1D array of the same length as `flat_dest_index_array`, which
simply concatenates the data arrays of `csr_matrices`.
mx_nnz_indptr : numpy array
A 1D array of length `len(csr_matrices)+1` such that the data
for the i-th element of `csr_matrices` lie in the index-range of
mx_nnz_indptr[i] to mx_nnz_indptr[i+1]-1 of the flat arrays.
indptr, indices : numpy.ndarray
The row-pointer and column-indices arrays specifying the sparsity
        structure of the destination CSR matrix.
N : int
The dimension of the destination matrix (and of each member of
`csr_matrices`)
"""
csr_sum_array, indptr, indices, N = csr_sum_indices(csr_matrices)
if len(csr_sum_array) == 0:
return (_np.empty(0, int), _np.empty(0, 'd'), _np.zeros(1, int), indptr, indices, N)
flat_dest_index_array = _np.ascontiguousarray(_np.concatenate(csr_sum_array, axis=0), dtype=int)
flat_csr_mx_data = _np.ascontiguousarray(_np.concatenate([mx.data for mx in csr_matrices], axis=0), dtype=complex)
mx_nnz_indptr = _np.cumsum([0] + [mx.nnz for mx in csr_matrices], dtype=int)
return flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr, indptr, indices, N
if _fastcalc is None:
def csr_sum_flat(data, coeffs, flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr):
"""
Computation of the summation of several CSR-format sparse matrices.
:method:`csr_sum_flat_indices` precomputes the necessary indices for
summing directly into the data-array of a destination CSR sparse matrix.
If `data` is the data-array of matrix `D` (for "destination"), then this
method performs:
`D += sum_i( coeff[i] * csr_mxs[i] )`
Note that `D` is not returned; the sum is done internally into D's
data-array.
Parameters
----------
data : numpy.ndarray
The data-array of the destination CSR-matrix.
coeffs : ndarray
The weight coefficients which multiply each summed matrix.
flat_dest_index_array : ndarray
The index array generated by :function:`csr_sum_flat_indices`.
flat_csr_mx_data : ndarray
The data array generated by :function:`csr_sum_flat_indices`.
mx_nnz_indptr : ndarray
The number-of-nonzero-elements pointer array generated by
:function:`csr_sum_flat_indices`.
Returns
-------
None
"""
Nmxs = len(mx_nnz_indptr) - 1 # the number of CSR matrices
for iMx in range(Nmxs):
coeff = coeffs[iMx]
for i in range(mx_nnz_indptr[iMx], mx_nnz_indptr[iMx + 1]):
data[flat_dest_index_array[i]] += coeff * flat_csr_mx_data[i]
else:
def csr_sum_flat(data, coeffs, flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr):
"""
Computes the summation of several CSR-format sparse matrices.
:method:`csr_sum_flat_indices` precomputes the necessary indices for
summing directly into the data-array of a destination CSR sparse matrix.
If `data` is the data-array of matrix `D` (for "destination"), then this
method performs:
`D += sum_i( coeff[i] * csr_mxs[i] )`
Note that `D` is not returned; the sum is done internally into D's
data-array.
Parameters
----------
data : numpy.ndarray
The data-array of the destination CSR-matrix.
coeffs : ndarray
The weight coefficients which multiply each summed matrix.
flat_dest_index_array : ndarray
The index array generated by :function:`csr_sum_flat_indices`.
flat_csr_mx_data : ndarray
The data array generated by :function:`csr_sum_flat_indices`.
mx_nnz_indptr : ndarray
The number-of-nonzero-elements pointer array generated by
:function:`csr_sum_flat_indices`.
"""
coeffs_complex = _np.ascontiguousarray(coeffs, dtype=complex)
return _fastcalc.fast_csr_sum_flat(data, coeffs_complex, flat_dest_index_array, flat_csr_mx_data, mx_nnz_indptr)
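# --- Illustrative usage sketch (added commentary; not part of the original source) ---
# Same pattern as csr_sum above, but with the "flat" precomputed arrays; note the
# destination data array is complex, matching flat_csr_mx_data.  Matrix values are made up.
def _example_csr_sum_flat():  # hypothetical helper, never called by this module
    A = _sps.csr_matrix(_np.array([[1., 0.], [0., 2.]]))
    B = _sps.csr_matrix(_np.array([[0., 3.], [0., 0.]]))
    fd, fdata, nnzptr, indptr, indices, N = csr_sum_flat_indices([A, B])
    D = _sps.csr_matrix((_np.zeros(len(indices), complex), indices, indptr), shape=(N, N))
    csr_sum_flat(D.data, _np.array([2.0, 1.0], complex), fd, fdata, nnzptr)  # D == 2*A + B
    return D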
def expm_multiply_prep(a, tol=EXPM_DEFAULT_TOL):
"""
Computes "prepared" meta-info about matrix `a`, to be used in `expm_multiply_fast`.
This includes a shifted version of `a`.
Parameters
----------
a : numpy.ndarray
        the matrix that will later be exponentiated.
tol : float, optional
        Tolerance used within matrix exponentiation routines.
Returns
-------
tuple
A tuple of values to pass to `expm_multiply_fast`.
"""
if len(a.shape) != 2 or a.shape[0] != a.shape[1]:
raise ValueError('expected a to be like a square matrix')
assert(_sps.isspmatrix_csr(a)) # assuming this allows faster computations
n = a.shape[0]
n0 = 1 # always act exp(a) on *single* vectors
mu = _spsl._expm_multiply._trace(a) / float(n)
#ident = _spsl._expm_multiply._ident_like(a) #general case
if _fastcalc is None:
ident = _sps.identity(a.shape[0], dtype=a.dtype, format='csr') # CSR specific
a = a - mu * ident # SLOW!
else:
indptr = _np.empty(n + 1, _np.int64)
indices = _np.empty(a.data.shape[0] + n, _np.int64) # pessimistic (assume no diags exist)
data = _np.empty(a.data.shape[0] + n, a.dtype) # pessimistic (assume no diags exist)
nxt = _fastcalc.csr_subtract_identity(a.data,
_np.ascontiguousarray(a.indptr, _np.int64),
_np.ascontiguousarray(a.indices, _np.int64),
data, indptr, indices, -mu, n)
a = _sps.csr_matrix((data[0:nxt], indices[0:nxt], indptr), shape=(n, n))
#DB: CHECK: assert(_spsl.norm(A1 - A2) < 1e-6); a = A1
#exact_1_norm specific for CSR
A_1_norm = max(_np.sum(_np.abs(a.data[_np.where(a.indices == iCol)])) for iCol in range(n))
#A_1_norm = _spsl._expm_multiply._exact_1_norm(a) # general case
t = 1.0 # always
if t * A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = _spsl._expm_multiply.LazyOperatorNormInfo(t * a, A_1_norm=t * A_1_norm, ell=ell)
m_star, s = _spsl._expm_multiply._fragment_3_1(norm_info, n0, tol, ell=ell)
eta = _np.exp(t * mu / float(s))
assert(_sps.isspmatrix_csr(a))
return a, mu, m_star, s, eta
if _fastcalc is None:
def expm_multiply_fast(prep_a, v, tol=EXPM_DEFAULT_TOL):
"""
Multiplies `v` by an exponentiated matrix.
Parameters
----------
prep_a : tuple
A tuple of values from :function:`expm_multiply_prep` that
defines the matrix to be exponentiated and holds other pre-computed
quantities.
v : numpy.ndarray
Vector to multiply (take dot product with).
tol : float, optional
            Tolerance used within matrix exponentiation routines.
Returns
-------
numpy.ndarray
"""
A, mu, m_star, s, eta = prep_a
return _custom_expm_multiply_simple_core(
A, v, mu, m_star, s, tol, eta) # t == 1.0 always, `balance` not implemented so removed
else:
def expm_multiply_fast(prep_a, v, tol=EXPM_DEFAULT_TOL):
"""
Multiplies `v` by an exponentiated matrix.
Parameters
----------
prep_a : tuple
A tuple of values from :function:`expm_multiply_prep` that
defines the matrix to be exponentiated and holds other pre-computed
quantities.
v : numpy.ndarray
Vector to multiply (take dot product with).
tol : float, optional
            Tolerance used within matrix exponentiation routines.
Returns
-------
numpy.ndarray
"""
#Note: copy v for now since it's modified by simple_core fn
A, mu, m_star, s, eta = prep_a
indices = _np.array(A.indices, dtype=int) # convert to 64-bit ints if needed
indptr = _np.array(A.indptr, dtype=int)
return _fastcalc.custom_expm_multiply_simple_core(A.data, indptr, indices,
v.copy(), mu, m_star, s, tol, eta)
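# --- Illustrative usage sketch (added commentary; not part of the original source) ---
# expm_multiply_prep does the one-time analysis of a CSR matrix; the returned tuple can
# then be reused for many expm_multiply_fast calls, each computing expm(a) @ v without
# forming the dense exponential.  The matrix and vector below are made up.
def _example_expm_multiply():  # hypothetical helper, never called by this module
    a = _sps.csr_matrix(_np.array([[0., 1.], [-1., 0.]], complex))
    prep = expm_multiply_prep(a)
    v = _np.array([1., 0.], complex)
    return expm_multiply_fast(prep, v)  # ~ expm(a.toarray()).dot(v)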
def _custom_expm_multiply_simple_core(a, b, mu, m_star, s, tol, eta): # t == 1.0 replaced below
"""
a helper function. Note that this (python) version works when a is a LinearOperator
as well as a SciPy CSR sparse matrix.
"""
#if balance:
# raise NotImplementedError
F = b
for i in range(s):
#if m_star > 0: #added
# c1 = _np.linalg.norm(b, _np.inf) #_exact_inf_norm(b)
for j in range(m_star):
coeff = 1.0 / float(s * (j + 1)) # t == 1.0
b = coeff * a.dot(b)
F = F + b
# if j % 3 == 0: #every == 3 #TODO: work on this
# c2 = _np.linalg.norm(b, _np.inf) #_exact_inf_norm(b)
# if c1 + c2 <= tol * _np.linalg.norm(F, _np.inf): #_exact_inf_norm(F)
# break
# c1 = c2
F = eta * F
b = F
return F
#From SciPy source, as a reference - above we assume A is a sparse csr matrix
# and B is a dense vector
#def _exact_inf_norm(A):
# # A compatibility function which should eventually disappear.
# if scipy.sparse.isspmatrix(A):
# return max(abs(A).sum(axis=1).flat)
# else:
# return np.linalg.norm(A, np.inf)
#
#
#def _exact_1_norm(A):
# # A compatibility function which should eventually disappear.
# if scipy.sparse.isspmatrix(A):
# return max(abs(A).sum(axis=0).flat)
# else:
# return np.linalg.norm(A, 1)
def expop_multiply_prep(op, a_1_norm=None, tol=EXPM_DEFAULT_TOL):
"""
Returns "prepared" meta-info about operation op, which is assumed to be traceless (so no shift is needed).
Used as input for use with `_custom_expm_multiply_simple_core` or fast C-reps.
Parameters
----------
op : scipy.sparse.linalg.LinearOperator
The operator to exponentiate.
a_1_norm : float, optional
The 1-norm (if computed separately) of `op`.
tol : float, optional
        Tolerance used within matrix exponentiation routines.
Returns
-------
tuple
A tuple of values to pass to `expm_multiply_fast`.
"""
assert(isinstance(op, _spsl.LinearOperator))
if len(op.shape) != 2 or op.shape[0] != op.shape[1]:
raise ValueError('expected op to have equal input and output dimensions')
# n = op.shape[0]
n0 = 1 # always act exp(op) on *single* vectors
mu = 0 # _spsl._expm_multiply._trace(A) / float(n)
#ASSUME op is *traceless*
#FUTURE: get exact_1_norm specific for our ops - now just use approximate
if a_1_norm is None:
a_1_norm = _spsl.onenormest(op)
#t = 1.0 # always, so t*<X> => just <X> below
if a_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = _spsl._expm_multiply.LazyOperatorNormInfo(op, A_1_norm=a_1_norm, ell=ell)
m_star, s = _spsl._expm_multiply._fragment_3_1(norm_info, n0, tol, ell=ell)
eta = 1.0 # _np.exp(t*mu / float(s)) # b/c mu always == 0 (traceless assumption)
return mu, m_star, s, eta
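# --- Illustrative usage sketch (added commentary; not part of the original source) ---
# For a traceless LinearOperator, expop_multiply_prep supplies the (mu, m_star, s, eta)
# values that _custom_expm_multiply_simple_core needs.  The operator below is made up.
def _example_expop_multiply():  # hypothetical helper, never called by this module
    op = _spsl.aslinearoperator(_sps.csr_matrix(_np.array([[0., 1.], [-1., 0.]], complex)))
    mu, m_star, s, eta = expop_multiply_prep(op)
    v = _np.array([1., 0.], complex)
    return _custom_expm_multiply_simple_core(op, v, mu, m_star, s, EXPM_DEFAULT_TOL, eta)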
def sparse_equal(a, b, atol=1e-8):
"""
Checks whether two Scipy sparse matrices are (almost) equal.
Parameters
----------
a : scipy.sparse matrix
First matrix.
b : scipy.sparse matrix
Second matrix.
atol : float, optional
The tolerance to use, passed to `numpy.allclose`, when comparing
the elements of `a` and `b`.
Returns
-------
bool
"""
if _np.array_equal(a.shape, b.shape) == 0:
return False
r1, c1 = a.nonzero()
r2, c2 = b.nonzero()
lidx1 = _np.ravel_multi_index((r1, c1), a.shape)
lidx2 = _np.ravel_multi_index((r2, c2), b.shape)
sidx1 = lidx1.argsort()
sidx2 = lidx2.argsort()
index_match = _np.array_equal(lidx1[sidx1], lidx2[sidx2])
if index_match == 0:
return False
else:
v1 = a.data
v2 = b.data
V1 = v1[sidx1]
V2 = v2[sidx2]
return _np.allclose(V1, V2, atol=atol)
def sparse_onenorm(a):
"""
Computes the 1-norm of the scipy sparse matrix `a`.
Parameters
----------
a : scipy sparse matrix
The matrix or vector to take the norm of.
Returns
-------
float
"""
return max(abs(a).sum(axis=0).flat)
# also == return _spsl.norm(a, ord=1) (comparable speed)
def ndarray_base(a, verbosity=0):
"""
Get the base memory object for numpy array `a`.
This is found by following `.base` until it comes up None.
Parameters
----------
a : numpy.ndarray
Array to get base of.
verbosity : int, optional
Print additional debugging information if this is > 0.
Returns
-------
numpy.ndarray
"""
if verbosity: print("ndarray_base debug:")
while a.base is not None:
if verbosity: print(" -> base = ", id(a.base))
a = a.base
if verbosity: print(" ==> ", id(a))
return a
def to_unitary(scaled_unitary):
"""
    Compute the scaling factor required to turn a scalar multiple of a unitary matrix into a unitary matrix.
Parameters
----------
scaled_unitary : ndarray
A scaled unitary matrix
Returns
-------
scale : float
unitary : ndarray
Such that `scale * unitary == scaled_unitary`.
"""
scaled_identity = _np.dot(scaled_unitary, _np.conjugate(scaled_unitary.T))
scale = _np.sqrt(scaled_identity[0, 0])
assert(_np.allclose(scaled_identity / (scale**2), _np.identity(scaled_identity.shape[0], 'd'))), \
"Given `scaled_unitary` does not appear to be a scaled unitary matrix!"
return scale, (scaled_unitary / scale)
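# Illustrative example (added commentary; not part of the original source): to_unitary
# factors a scaled unitary into its scalar and unitary parts.
def _example_to_unitary():  # hypothetical helper, never called by this module
    scale, u = to_unitary(3.0 * _np.identity(2))
    return scale, u  # -> 3.0 and the 2x2 identity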
def sorted_eig(mx):
"""
    Similar to `numpy.linalg.eig`, but returns sorted output.
    In particular, the eigenvalues and eigenvectors are sorted by eigenvalue,
    where sorting is done according to the (real_part, imaginary_part) tuple.
Parameters
----------
mx : numpy.ndarray
Matrix to act on.
Returns
-------
eigenvalues : numpy.ndarray
eigenvectors : numpy.ndarray
"""
ev, U = _np.linalg.eig(mx)
sorted_evals = sorted(list(enumerate(ev)), key=lambda x: (x[1].real, x[1].imag))
sorted_ev = ev.copy()
sorted_U = U.copy()
for idest, (isrc, _) in enumerate(sorted_evals):
sorted_ev[idest] = ev[isrc]
sorted_U[:, idest] = U[:, isrc]
return sorted_ev, sorted_U
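# Illustrative example (added commentary; not part of the original source): eigenvalues
# come back ordered by (real, imaginary) part, with the eigenvector columns permuted to match.
def _example_sorted_eig():  # hypothetical helper, never called by this module
    evals, evecs = sorted_eig(_np.diag([2.0, 1.0, 1.0 + 1.0j]))
    return evals, evecs  # evals ordered as [1.0, 1.0+1.0j, 2.0]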
def compute_kite(eigenvalues):
"""
Computes the "kite" corresponding to a list of eigenvalues.
The kite is defined as a list of integers, each indicating that
    there is a degenerate block of that many eigenvalues within
`eigenvalues`. Thus the sum of the list values equals `len(eigenvalues)`.
Parameters
----------
eigenvalues : numpy.ndarray
A *sorted* array of eigenvalues.
Returns
-------
list
        A list giving the multiplicity structure of `eigenvalues`.
"""
kite = []
blk = 0; last_ev = eigenvalues[0]
for ev in eigenvalues:
if _np.isclose(ev, last_ev):
blk += 1
else:
kite.append(blk)
blk = 1; last_ev = ev
kite.append(blk)
return kite
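# Illustrative example (added commentary; not part of the original source): for a sorted
# eigenvalue array with repeated values, the kite records the size of each degenerate block.
def _example_compute_kite():  # hypothetical helper, never called by this module
    return compute_kite(_np.array([1.0, 1.0, 1.0, 3.0, 5.0, 5.0]))  # -> [3, 1, 2]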
def find_zero_communtant_connection(u, u_inv, u0, u0_inv, kite):
"""
Find a matrix `R` such that u_inv R u0 is diagonal AND log(R) has no projection onto the commutant of G0.
More specifically, find a matrix `R` such that u_inv R u0 is diagonal
(so G = R G0 Rinv if G and G0 share the same eigenvalues and have eigenvectors u
and u0 respectively) AND log(R) has no (zero) projection onto the commutant of
G0 = u0 diag(evals) u0_inv.
Parameters
----------
u : numpy.ndarray
Usually the eigenvector matrix of a gate (G).
u_inv : numpy.ndarray
Inverse of `u`.
u0 : numpy.ndarray
Usually the eigenvector matrix of the corresponding target gate (G0).
u0_inv : numpy.ndarray
Inverse of `u0`.
kite : list
The kite structure of `u0`.
Returns
-------
numpy.ndarray
"""
#0. Let R be a matrix that maps G0 -> Gp, where Gp has evecs of G and evals of G0.
#1. Does R vanish on the commutant of G0? If so, we’re done.
#2. Define x = PROJ_COMMUTANT[ log(R) ], and X = exp(-x).
#3. Redefine R = X.R.
#4. GOTO 1.
# G0 = u0 * diag * u0_inv, G = u * diag * u_inv
D = project_onto_kite(_np.dot(u_inv, u0), kite)
R = _np.dot(u, _np.dot(D, u0_inv)) # Include D so R is as close to identity as possible
assert(_np.linalg.norm(R.imag) < 1e-8)
def project_onto_commutant(x):
a = _np.dot(u0_inv, _np.dot(x, u0))
a = project_onto_kite(a, kite)
return _np.dot(u0, _np.dot(a, u0_inv))
iter = 0; lastR = R
while iter < 100:
#Starting condition = u_inv * R * u0 is diagonal, so
# G' = R G0 Rinv where G' has the same spectrum as G0 but different eigenvecs (u vs u0)
assert(_np.linalg.norm(R.imag) < 1e-8)
test = _np.dot(u_inv, _np.dot(R, u0))
assert(_np.linalg.norm(project_onto_antikite(test, kite)) < 1e-8)
r = real_matrix_log(R)
assert(_np.linalg.norm(r.imag) < 1e-8), "log of real matrix should be real!"
r_on_comm = project_onto_commutant(r)
assert(_np.linalg.norm(r_on_comm.imag) < 1e-8), "projection to commutant should not make complex!"
oncomm_norm = _np.linalg.norm(r_on_comm)
#print("Iter %d: onkite-norm = %g lastdiff = %g" % (iter, oncomm_norm, _np.linalg.norm(R-lastR)))
# if r has desired form or we didn't really update R
if oncomm_norm < 1e-12 or (iter > 0 and _np.linalg.norm(R - lastR) < 1e-8):
break # STOP - converged!
X = _spl.expm(-r_on_comm)
assert(_np.linalg.norm(X.imag) < 1e-8)
lastR = R
R = _np.dot(R, X)
iter += 1
assert(_np.linalg.norm(R.imag) < 1e-8), "R should always be real!"
return R.real
def project_onto_kite(mx, kite):
"""
Project `mx` onto `kite`, so `mx` is zero everywhere except on the kite.
Parameters
----------
mx : numpy.ndarray
Matrix to project.
kite : list
A kite structure.
Returns
-------
numpy.ndarray
"""
#Kite is a list of block sizes, such that sum(kite) == dimension of `mx`
mx = mx.copy()
dim = mx.shape[0]
assert(dim == mx.shape[1]), "`mx` must be square!"
k0 = 0
for k in kite:
mx[k0:k0 + k, k0 + k:] = 0
mx[k0 + k:, k0:k0 + k] = 0
k0 += k
assert(k0 == dim), "Invalid kite %d-dimensional matrix: %s" % (dim, str(kite))
return mx
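# Illustrative example (added commentary; not part of the original source): with kite [2, 1]
# a 3x3 matrix keeps only its leading 2x2 and trailing 1x1 diagonal blocks; the antikite
# projection defined below keeps exactly the complementary entries.
def _example_project_onto_kite():  # hypothetical helper, never called by this module
    mx = _np.arange(9.0).reshape(3, 3)
    return project_onto_kite(mx, [2, 1]), project_onto_antikite(mx, [2, 1])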
def project_onto_antikite(mx, kite):
"""
Project `mx` onto the complement of `kite`, so `mx` is zero everywhere *on* the kite.
Parameters
----------
mx : numpy.ndarray
Matrix to project.
kite : list
A kite structure.
Returns
-------
numpy.ndarray
"""
#Kite is a list of block sizes, such that sum(kite) == dimension of `mx`
mx = mx.copy()
dim = mx.shape[0]
assert(dim == mx.shape[1]), "`mx` must be square!"
k0 = 0
for k in kite:
mx[k0:k0 + k, k0:k0 + k] = 0
k0 += k
assert(k0 == dim), "Invalid kite %d-dimensional matrix: %s" % (dim, str(kite))
return mx
def remove_dependent_cols(mx, tol=1e-7):
"""
Removes the linearly dependent columns of a matrix.
Parameters
----------
mx : numpy.ndarray
The input matrix
Returns
-------
A linearly independent subset of the columns of `mx`.
"""
last_rank = 0; cols_to_remove = []
for j in range(mx.shape[1]):
rnk = _np.linalg.matrix_rank(mx[:, 0:j + 1], tol)
if rnk == last_rank:
cols_to_remove.append(j)
else:
last_rank = rnk
#print("Removing %d cols" % len(cols_to_remove))
return _np.delete(mx, cols_to_remove, axis=1)
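# Illustrative example (added commentary; not part of the original source): the third
# column repeats the second, so it is dropped.
def _example_remove_dependent_cols():  # hypothetical helper, never called by this module
    m = _np.array([[1., 2., 2.], [0., 1., 1.], [1., 0., 0.]])
    return remove_dependent_cols(m)  # -> only the first two columns remain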
def intersection_space(space1, space2, tol=1e-7, use_nice_nullspace=False):
"""
TODO: docstring
"""
VW = _np.concatenate((space1, -space2), axis=1)
nullsp = nice_nullspace(VW, tol) if use_nice_nullspace else nullspace(VW, tol)
#nullsp = _spl.null_space(VW, rcond=1e-3) # alternative
return _np.dot(space1, nullsp[0:space1.shape[1], :])
def union_space(space1, space2, tol=1e-7):
"""
TODO: docstring
"""
VW = _np.concatenate((space1, space2), axis=1)
return remove_dependent_cols(VW, tol)
#UNUSED
#def spectral_radius(x):
# if hasattr(x, 'ndim') and x.ndim == 2: # then interpret as a numpy array and take norm
# evals = _np.sort(_np.linalg.eigvals(x))
# return abs(evals[-1] - evals[0])
# else:
# return x
def jamiolkowski_angle(hamiltonian_mx):
"""
TODO: docstring
"""
Hmx = hamiltonian_mx
d = Hmx.shape[0]
I = _np.identity(d)
errmap = _np.kron(I, _spl.expm(1j * Hmx))
psi = _np.zeros(d**2) # will be a maximally entangled state
for i in range(d):
x = _np.zeros(d); x[i] = 1.0
xx = _np.kron(x, x)
psi += xx / _np.sqrt(d)
assert(_np.isclose(_np.dot(psi, psi), 1.0))
cos_theta = abs(_np.dot(psi.conj(), _np.dot(errmap, psi)))
return _np.real_if_close(_np.arccos(cos_theta))
#cos_squared_theta = entanglement_infidelity(expm(1j * Hmx), identity)
#return _np.arccos(_np.sqrt(cos_squared_theta))
def zvals_to_dense(self, zvals, superket=True):
"""
Construct the dense operator or superoperator representation of a computational basis state.
Parameters
----------
zvals : list or numpy.ndarray
The z-values, each 0 or 1, defining the computational basis state.
superket : bool, optional
If `True`, the super-ket representation of the state is returned. If `False`,
then the complex ket representation is returned.
Returns
-------
numpy.ndarray
"""
if superket:
factor_dim = 4
v0 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, 1), 'd') # '0' qubit state as Pauli dmvec
v1 = 1.0 / _np.sqrt(2) * _np.array((1, 0, 0, -1), 'd') # '1' qubit state as Pauli dmvec
else:
factor_dim = 2
v0 = _np.array((1, 0), complex) # '0' qubit state as complex state vec
v1 = _np.array((0, 1), complex) # '1' qubit state as complex state vec
v = (v0, v1)
if _fastcalc is None: # do it the slow way using numpy
return _functools.reduce(_np.kron, [v[i] for i in zvals])
else:
fast_kron_array = _np.ascontiguousarray(
_np.empty((len(self._zvals), factor_dim), v0.dtype))
fast_kron_factordims = _np.ascontiguousarray(_np.array([factor_dim] * len(self._zvals), _np.int64))
for i, zi in enumerate(self._zvals):
fast_kron_array[i, :] = v[zi]
ret = _np.ascontiguousarray(_np.empty(factor_dim**len(self._zvals), v0.dtype))
if superket:
_fastcalc.fast_kron(ret, fast_kron_array, fast_kron_factordims)
else:
_fastcalc.fast_kron_complex(ret, fast_kron_array, fast_kron_factordims)
return ret
def int64_parity(x):
"""
    Compute the parity of x.
Recursively divide a (64-bit) integer (x) into two equal
halves and take their XOR until only 1 bit is left.
Parameters
----------
x : int64
Returns
-------
int64
"""
x = (x & 0x00000000FFFFFFFF) ^ (x >> 32)
x = (x & 0x000000000000FFFF) ^ (x >> 16)
x = (x & 0x00000000000000FF) ^ (x >> 8)
x = (x & 0x000000000000000F) ^ (x >> 4)
x = (x & 0x0000000000000003) ^ (x >> 2)
x = (x & 0x0000000000000001) ^ (x >> 1)
return x & 1 # return the last bit (0 or 1)
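# Illustrative note (added commentary; not part of the original source): the result is 1
# when x has an odd number of set bits, e.g. int64_parity(0b1011) == 1 (three set bits)
# and int64_parity(0b1001) == 0 (two set bits).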
def zvals_int64_to_dense(zvals_int, nqubits, outvec=None, trust_outvec_sparsity=False, abs_elval=None):
"""
Fills a dense array with the super-ket representation of a computational basis state.
Parameters
----------
zvals_int : int64
The array of (up to 64) z-values, encoded as the 0s and 1s in the binary representation
of this integer.
nqubits : int
The number of z-values (up to 64)
outvec : numpy.ndarray, optional
The output array, which must be a 1D array of length 4**nqubits or `None`, in
which case a new array is allocated.
trust_outvec_sparsity : bool, optional
When `True`, it is assumed that the provided `outvec` starts as all zeros
and so only non-zero elements of outvec need to be set.
abs_elval : float
the value `1 / (sqrt(2)**nqubits)`, which can be passed here so that
it doesn't need to be recomputed on every call to this function. If
`None`, then we just compute the value.
Returns
-------
numpy.ndarray
"""
if outvec is None:
outvec = _np.zeros(4**nqubits, 'd')
if abs_elval is None:
abs_elval = 1 / (_np.sqrt(2)**nqubits)
# when trust_outvec_sparsity is True, assume we only need to fill in the
# non-zero elements of outvec (i.e. that outvec is already zero wherever
# this vector is zero).
if not trust_outvec_sparsity:
outvec[:] = 0 # reset everything to zero
N = nqubits
# there are nQubits factors
# each factor (4-element, 1Q dmvec) has 2 zero elements and 2 nonzero ones
# loop is over all non-zero elements of the final outvec by looping over
# all the sets of *entirely* nonzero elements from the factors.
# Let the two possible nonzero elements of the k-th factor be represented
# by the k-th bit of `finds` below, which ranges from 0 to 2^nFactors-1
for finds in range(2**N):
#Create the final index (within outvec) corresponding to finds
# assume, like tensorprod, that factor ordering == kron ordering
# so outvec = kron( factor[0], factor[1], ... factor[N-1] ).
# Let factorDim[k] == 4**(N-1-k) be the stride associated with the k-th index
# Whenever finds[bit k] == 0 => finalIndx += 0*factorDim[k]
# finds[bit k] == 1 => finalIndx += 3*factorDim[k] (3 b/c factor's 2nd nonzero el is at index 3)
finalIndx = sum([3 * (4**(N - 1 - k)) for k in range(N) if bool(finds & (1 << k))])
#Determine the sign of this element (the element is either +/- (1/sqrt(2))^N )
# A minus sign is picked up whenever finds[bit k] == 1 (which means we're looking
# at the index=3 element of the factor vec) AND zvals_int[bit k] == 1
# (which means it's a [1 0 0 -1] state rather than a [1 0 0 1] state).
# Since we only care whether the number of minus signs is even or odd, we can
# BITWISE-AND finds with zvals_int (giving an integer whose binary-expansion's
# number of 1's == the number of minus signs) and compute the parity of this.
minus_sign = int64_parity(finds & zvals_int)
outvec[finalIndx] = -abs_elval if minus_sign else abs_elval
return outvec
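# --- Illustrative usage sketch (added commentary; not part of the original source) ---
# For a single qubit, zvals_int 0 encodes |0> and 1 encodes |1>; the returned length-4
# Pauli superket is (1/sqrt(2)) * [1, 0, 0, +1] or (1/sqrt(2)) * [1, 0, 0, -1] respectively.
def _example_zvals_int64_to_dense():  # hypothetical helper, never called by this module
    return zvals_int64_to_dense(0, 1), zvals_int64_to_dense(1, 1)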
|
[
"numpy.linalg.eigvals",
"numpy.sum",
"numpy.diag_indices_from",
"numpy.abs",
"scipy.sparse.issparse",
"numpy.empty",
"numpy.allclose",
"pygsti.tools.basistools.change_basis",
"numpy.imag",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.isclose",
"scipy.sparse.linalg._expm_multiply.LazyOperatorNormInfo",
"scipy.linalg.schur",
"numpy.diag",
"numpy.conjugate",
"scipy.sparse.isspmatrix",
"scipy.sparse.isspmatrix_csr",
"scipy.optimize.minimize",
"scipy.sparse.linalg.svds",
"numpy.transpose",
"numpy.identity",
"numpy.linalg.eig",
"numpy.cumsum",
"numpy.linalg.matrix_rank",
"numpy.ravel_multi_index",
"scipy.sparse.identity",
"numpy.real",
"scipy.sparse.linalg.onenormest",
"numpy.kron",
"itertools.product",
"numpy.arccos",
"numpy.diagonal",
"scipy.linalg.logm",
"numpy.conj",
"functools.reduce",
"scipy.linalg.qr",
"scipy.sparse.csr_matrix",
"numpy.linalg.inv",
"numpy.dot",
"numpy.delete",
"numpy.concatenate",
"scipy.optimize.linear_sum_assignment",
"itertools.repeat",
"scipy.linalg.expm",
"numpy.log",
"scipy.sparse.linalg._expm_multiply._trace",
"numpy.zeros",
"scipy.sparse.csc_matrix",
"numpy.random.random",
"numpy.take",
"numpy.array",
"scipy.sparse.hstack",
"scipy.sparse.linalg._expm_multiply._fragment_3_1",
"numpy.array_equal",
"warnings.warn",
"numpy.where",
"numpy.ascontiguousarray",
"numpy.sqrt"
] |
[((2965, 2987), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['mx'], {}), '(mx)\n', (2983, 2987), True, 'import numpy as _np\n'), ((4368, 4384), 'numpy.sum', '_np.sum', (['(ar ** 2)'], {}), '(ar ** 2)\n', (4375, 4384), True, 'import numpy as _np\n'), ((4800, 4817), 'numpy.linalg.svd', '_np.linalg.svd', (['m'], {}), '(m)\n', (4814, 4817), True, 'import numpy as _np\n'), ((5528, 5568), 'scipy.linalg.qr', '_spl.qr', (['m.T'], {'mode': '"""full"""', 'pivoting': '(True)'}), "(m.T, mode='full', pivoting=True)\n", (5535, 5568), True, 'import scipy.linalg as _spl\n'), ((7127, 7170), 'numpy.take', '_np.take', (['nullsp_projector', 'keepers'], {'axis': '(1)'}), '(nullsp_projector, keepers, axis=1)\n', (7135, 7170), True, 'import numpy as _np\n'), ((8701, 8717), 'scipy.sparse.issparse', '_sps.issparse', (['m'], {}), '(m)\n', (8714, 8717), True, 'import scipy.sparse as _sps\n'), ((9608, 9624), 'scipy.sparse.issparse', '_sps.issparse', (['m'], {}), '(m)\n', (9621, 9624), True, 'import scipy.sparse as _sps\n'), ((14543, 14567), 'scipy.linalg.schur', '_spl.schur', (['m', '"""complex"""'], {}), "(m, 'complex')\n", (14553, 14567), True, 'import scipy.linalg as _spl\n'), ((14636, 14665), 'numpy.zeros', '_np.zeros', (['T.shape', '"""complex"""'], {}), "(T.shape, 'complex')\n", (14645, 14665), True, 'import numpy as _np\n'), ((20233, 20265), 'pygsti.tools.basistools.change_basis', 'change_basis', (['m', 'mx_basis', '"""std"""'], {}), "(m, mx_basis, 'std')\n", (20245, 20265), False, 'from pygsti.tools.basistools import change_basis\n'), ((20278, 20303), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['M_std'], {}), '(M_std)\n', (20296, 20303), True, 'import numpy as _np\n'), ((21601, 21613), 'scipy.linalg.logm', '_spl.logm', (['m'], {}), '(m)\n', (21610, 21613), True, 'import scipy.linalg as _spl\n'), ((26029, 26046), 'numpy.linalg.eig', '_np.linalg.eig', (['m'], {}), '(m)\n', (26043, 26046), True, 'import numpy as _np\n'), ((30301, 30327), 'numpy.zeros', '_np.zeros', (['[dim, 1]', 'float'], {}), '([dim, 1], float)\n', (30310, 30327), True, 'import numpy as _np\n'), ((32523, 32562), 'pygsti.tools.basistools.change_basis', 'change_basis', (['operator', 'mx_basis', '"""std"""'], {}), "(operator, mx_basis, 'std')\n", (32535, 32562), False, 'from pygsti.tools.basistools import change_basis\n'), ((35381, 35403), 'numpy.empty', '_np.empty', (['(D, D)', '"""d"""'], {}), "((D, D), 'd')\n", (35390, 35403), True, 'import numpy as _np\n'), ((35680, 35716), 'scipy.optimize.linear_sum_assignment', '_spo.linear_sum_assignment', (['weightMx'], {}), '(weightMx)\n', (35706, 35716), True, 'import scipy.optimize as _spo\n'), ((49205, 49221), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (49218, 49221), True, 'import scipy.sparse as _sps\n'), ((50225, 50241), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (50238, 50241), True, 'import scipy.sparse as _sps\n'), ((51462, 51478), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (51475, 51478), True, 'import scipy.sparse as _sps\n'), ((52598, 52614), 'scipy.sparse.issparse', '_sps.issparse', (['a'], {}), '(a)\n', (52611, 52614), True, 'import scipy.sparse as _sps\n'), ((53089, 53107), 'scipy.sparse.isspmatrix', '_sps.isspmatrix', (['a'], {}), '(a)\n', (53104, 53107), True, 'import scipy.sparse as _sps\n'), ((55097, 55114), 'numpy.array', '_np.array', (['indptr'], {}), '(indptr)\n', (55106, 55114), True, 'import numpy as _np\n'), ((55129, 55147), 'numpy.array', '_np.array', (['indices'], {}), '(indices)\n', (55138, 55147), True, 
'import numpy as _np\n'), ((58243, 58303), 'numpy.cumsum', '_np.cumsum', (['([0] + [mx.nnz for mx in csr_matrices])'], {'dtype': 'int'}), '([0] + [mx.nnz for mx in csr_matrices], dtype=int)\n', (58253, 58303), True, 'import numpy as _np\n'), ((61952, 61974), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (61971, 61974), True, 'import scipy.sparse as _sps\n'), ((63592, 63614), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (63611, 63614), True, 'import scipy.sparse as _sps\n'), ((69080, 69120), 'numpy.ravel_multi_index', '_np.ravel_multi_index', (['(r1, c1)', 'a.shape'], {}), '((r1, c1), a.shape)\n', (69101, 69120), True, 'import numpy as _np\n'), ((69133, 69173), 'numpy.ravel_multi_index', '_np.ravel_multi_index', (['(r2, c2)', 'b.shape'], {}), '((r2, c2), b.shape)\n', (69154, 69173), True, 'import numpy as _np\n'), ((69249, 69292), 'numpy.array_equal', '_np.array_equal', (['lidx1[sidx1]', 'lidx2[sidx2]'], {}), '(lidx1[sidx1], lidx2[sidx2])\n', (69264, 69292), True, 'import numpy as _np\n'), ((69446, 69477), 'numpy.allclose', '_np.allclose', (['V1', 'V2'], {'atol': 'atol'}), '(V1, V2, atol=atol)\n', (69458, 69477), True, 'import numpy as _np\n'), ((70872, 70903), 'numpy.sqrt', '_np.sqrt', (['scaled_identity[0, 0]'], {}), '(scaled_identity[0, 0])\n', (70880, 70903), True, 'import numpy as _np\n'), ((71549, 71567), 'numpy.linalg.eig', '_np.linalg.eig', (['mx'], {}), '(mx)\n', (71563, 71567), True, 'import numpy as _np\n'), ((77330, 77368), 'numpy.delete', '_np.delete', (['mx', 'cols_to_remove'], {'axis': '(1)'}), '(mx, cols_to_remove, axis=1)\n', (77340, 77368), True, 'import numpy as _np\n'), ((77492, 77534), 'numpy.concatenate', '_np.concatenate', (['(space1, -space2)'], {'axis': '(1)'}), '((space1, -space2), axis=1)\n', (77507, 77534), True, 'import numpy as _np\n'), ((77690, 77735), 'numpy.dot', '_np.dot', (['space1', 'nullsp[0:space1.shape[1], :]'], {}), '(space1, nullsp[0:space1.shape[1], :])\n', (77697, 77735), True, 'import numpy as _np\n'), ((77826, 77867), 'numpy.concatenate', '_np.concatenate', (['(space1, space2)'], {'axis': '(1)'}), '((space1, space2), axis=1)\n', (77841, 77867), True, 'import numpy as _np\n'), ((78290, 78305), 'numpy.identity', '_np.identity', (['d'], {}), '(d)\n', (78302, 78305), True, 'import numpy as _np\n'), ((78362, 78379), 'numpy.zeros', '_np.zeros', (['(d ** 2)'], {}), '(d ** 2)\n', (78371, 78379), True, 'import numpy as _np\n'), ((3933, 3949), 'numpy.sum', '_np.sum', (['(ar ** 2)'], {}), '(ar ** 2)\n', (3940, 3949), True, 'import numpy as _np\n'), ((6961, 7022), 'numpy.linalg.matrix_rank', '_np.linalg.matrix_rank', (['nullsp_projector[:, 0:i + 1]'], {'tol': 'tol'}), '(nullsp_projector[:, 0:i + 1], tol=tol)\n', (6983, 7022), True, 'import numpy as _np\n'), ((9691, 9709), 'scipy.sparse.csc_matrix', '_sps.csc_matrix', (['m'], {}), '(m)\n', (9706, 9709), True, 'import scipy.sparse as _sps\n'), ((10466, 10494), 'numpy.diag_indices_from', '_np.diag_indices_from', (['check'], {}), '(check)\n', (10487, 10494), True, 'import numpy as _np\n'), ((12225, 12241), 'scipy.sparse.issparse', '_sps.issparse', (['m'], {}), '(m)\n', (12238, 12241), True, 'import scipy.sparse as _sps\n'), ((14806, 14830), 'numpy.diag_indices_from', '_np.diag_indices_from', (['U'], {}), '(U)\n', (14827, 14830), True, 'import numpy as _np\n'), ((14843, 14858), 'numpy.diagonal', '_np.diagonal', (['T'], {}), '(T)\n', (14855, 14858), True, 'import numpy as _np\n'), ((20328, 20342), 'numpy.abs', '_np.abs', (['evals'], {}), '(evals)\n', 
(20335, 20342), True, 'import numpy as _np\n'), ((20520, 20532), 'scipy.linalg.logm', '_spl.logm', (['U'], {}), '(U)\n', (20529, 20532), True, 'import scipy.linalg as _spl\n'), ((22955, 22978), 'numpy.linalg.norm', '_np.linalg.norm', (['m.imag'], {}), '(m.imag)\n', (22970, 22978), True, 'import numpy as _np\n'), ((23142, 23157), 'scipy.linalg.expm', '_spl.expm', (['logM'], {}), '(logM)\n', (23151, 23157), True, 'import scipy.linalg as _spl\n'), ((24514, 24642), 'scipy.optimize.minimize', '_spo.minimize', (['_objective', 'initial_flat_logM'], {'options': "{'maxiter': 1000}", 'method': '"""L-BFGS-B"""', 'callback': 'print_obj_func', 'tol': 'tol'}), "(_objective, initial_flat_logM, options={'maxiter': 1000},\n method='L-BFGS-B', callback=print_obj_func, tol=tol)\n", (24527, 24642), True, 'import scipy.optimize as _spo\n'), ((28889, 28903), 'numpy.imag', '_np.imag', (['logM'], {}), '(logM)\n', (28897, 28903), True, 'import numpy as _np\n'), ((29726, 29740), 'numpy.real', '_np.real', (['logM'], {}), '(logM)\n', (29734, 29740), True, 'import numpy as _np\n'), ((31544, 31578), 'numpy.random.random', '_np.random.random', ([], {'size': '[dim, dim]'}), '(size=[dim, dim])\n', (31561, 31578), True, 'import numpy as _np\n'), ((31591, 31625), 'numpy.random.random', '_np.random.random', ([], {'size': '[dim, dim]'}), '(size=[dim, dim])\n', (31608, 31625), True, 'import numpy as _np\n'), ((49309, 49325), 'scipy.sparse.issparse', '_sps.issparse', (['b'], {}), '(b)\n', (49322, 49325), True, 'import scipy.sparse as _sps\n'), ((50254, 50276), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (50273, 50276), True, 'import scipy.sparse as _sps\n'), ((50796, 50807), 'numpy.real', '_np.real', (['a'], {}), '(a)\n', (50804, 50807), True, 'import numpy as _np\n'), ((51491, 51513), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (51510, 51513), True, 'import scipy.sparse as _sps\n'), ((52033, 52044), 'numpy.imag', '_np.imag', (['a'], {}), '(a)\n', (52041, 52044), True, 'import numpy as _np\n'), ((52631, 52653), 'scipy.sparse.isspmatrix_csr', '_sps.isspmatrix_csr', (['a'], {}), '(a)\n', (52650, 52653), True, 'import scipy.sparse as _sps\n'), ((53167, 53188), 'numpy.linalg.norm', '_np.linalg.norm', (['a', '(1)'], {}), '(a, 1)\n', (53182, 53188), True, 'import numpy as _np\n'), ((55032, 55057), 'numpy.array', '_np.array', (['lst', '_np.int64'], {}), '(lst, _np.int64)\n', (55041, 55057), True, 'import numpy as _np\n'), ((58053, 58091), 'numpy.concatenate', '_np.concatenate', (['csr_sum_array'], {'axis': '(0)'}), '(csr_sum_array, axis=0)\n', (58068, 58091), True, 'import numpy as _np\n'), ((58149, 58206), 'numpy.concatenate', '_np.concatenate', (['[mx.data for mx in csr_matrices]'], {'axis': '(0)'}), '([mx.data for mx in csr_matrices], axis=0)\n', (58164, 58206), True, 'import numpy as _np\n'), ((61164, 61208), 'numpy.ascontiguousarray', '_np.ascontiguousarray', (['coeffs'], {'dtype': 'complex'}), '(coeffs, dtype=complex)\n', (61185, 61208), True, 'import numpy as _np\n'), ((62101, 62131), 'scipy.sparse.linalg._expm_multiply._trace', '_spsl._expm_multiply._trace', (['a'], {}), '(a)\n', (62128, 62131), True, 'import scipy.sparse.linalg as _spsl\n'), ((62250, 62304), 'scipy.sparse.identity', '_sps.identity', (['a.shape[0]'], {'dtype': 'a.dtype', 'format': '"""csr"""'}), "(a.shape[0], dtype=a.dtype, format='csr')\n", (62263, 62304), True, 'import scipy.sparse as _sps\n'), ((62384, 62411), 'numpy.empty', '_np.empty', (['(n + 1)', '_np.int64'], {}), '(n + 1, _np.int64)\n', 
(62393, 62411), True, 'import numpy as _np\n'), ((62430, 62471), 'numpy.empty', '_np.empty', (['(a.data.shape[0] + n)', '_np.int64'], {}), '(a.data.shape[0] + n, _np.int64)\n', (62439, 62471), True, 'import numpy as _np\n'), ((62526, 62565), 'numpy.empty', '_np.empty', (['(a.data.shape[0] + n)', 'a.dtype'], {}), '(a.data.shape[0] + n, a.dtype)\n', (62535, 62565), True, 'import numpy as _np\n'), ((62929, 62997), 'scipy.sparse.csr_matrix', '_sps.csr_matrix', (['(data[0:nxt], indices[0:nxt], indptr)'], {'shape': '(n, n)'}), '((data[0:nxt], indices[0:nxt], indptr), shape=(n, n))\n', (62944, 62997), True, 'import scipy.sparse as _sps\n'), ((63378, 63463), 'scipy.sparse.linalg._expm_multiply.LazyOperatorNormInfo', '_spsl._expm_multiply.LazyOperatorNormInfo', (['(t * a)'], {'A_1_norm': '(t * A_1_norm)', 'ell': 'ell'}), '(t * a, A_1_norm=t * A_1_norm, ell=ell\n )\n', (63419, 63463), True, 'import scipy.sparse.linalg as _spsl\n'), ((63479, 63542), 'scipy.sparse.linalg._expm_multiply._fragment_3_1', '_spsl._expm_multiply._fragment_3_1', (['norm_info', 'n0', 'tol'], {'ell': 'ell'}), '(norm_info, n0, tol, ell=ell)\n', (63513, 63542), True, 'import scipy.sparse.linalg as _spsl\n'), ((65216, 65247), 'numpy.array', '_np.array', (['A.indices'], {'dtype': 'int'}), '(A.indices, dtype=int)\n', (65225, 65247), True, 'import numpy as _np\n'), ((65301, 65331), 'numpy.array', '_np.array', (['A.indptr'], {'dtype': 'int'}), '(A.indptr, dtype=int)\n', (65310, 65331), True, 'import numpy as _np\n'), ((68085, 68105), 'scipy.sparse.linalg.onenormest', '_spsl.onenormest', (['op'], {}), '(op)\n', (68101, 68105), True, 'import scipy.sparse.linalg as _spsl\n'), ((68250, 68323), 'scipy.sparse.linalg._expm_multiply.LazyOperatorNormInfo', '_spsl._expm_multiply.LazyOperatorNormInfo', (['op'], {'A_1_norm': 'a_1_norm', 'ell': 'ell'}), '(op, A_1_norm=a_1_norm, ell=ell)\n', (68291, 68323), True, 'import scipy.sparse.linalg as _spsl\n'), ((68344, 68407), 'scipy.sparse.linalg._expm_multiply._fragment_3_1', '_spsl._expm_multiply._fragment_3_1', (['norm_info', 'n0', 'tol'], {'ell': 'ell'}), '(norm_info, n0, tol, ell=ell)\n', (68378, 68407), True, 'import scipy.sparse.linalg as _spsl\n'), ((68955, 68988), 'numpy.array_equal', '_np.array_equal', (['a.shape', 'b.shape'], {}), '(a.shape, b.shape)\n', (68970, 68988), True, 'import numpy as _np\n'), ((70827, 70858), 'numpy.conjugate', '_np.conjugate', (['scaled_unitary.T'], {}), '(scaled_unitary.T)\n', (70840, 70858), True, 'import numpy as _np\n'), ((70958, 71001), 'numpy.identity', '_np.identity', (['scaled_identity.shape[0]', '"""d"""'], {}), "(scaled_identity.shape[0], 'd')\n", (70970, 71001), True, 'import numpy as _np\n'), ((72478, 72502), 'numpy.isclose', '_np.isclose', (['ev', 'last_ev'], {}), '(ev, last_ev)\n', (72489, 72502), True, 'import numpy as _np\n'), ((73858, 73876), 'numpy.dot', '_np.dot', (['u_inv', 'u0'], {}), '(u_inv, u0)\n', (73865, 73876), True, 'import numpy as _np\n'), ((73903, 73921), 'numpy.dot', '_np.dot', (['D', 'u0_inv'], {}), '(D, u0_inv)\n', (73910, 73921), True, 'import numpy as _np\n'), ((73988, 74011), 'numpy.linalg.norm', '_np.linalg.norm', (['R.imag'], {}), '(R.imag)\n', (74003, 74011), True, 'import numpy as _np\n'), ((74850, 74876), 'numpy.linalg.norm', '_np.linalg.norm', (['r_on_comm'], {}), '(r_on_comm)\n', (74865, 74876), True, 'import numpy as _np\n'), ((75181, 75202), 'scipy.linalg.expm', '_spl.expm', (['(-r_on_comm)'], {}), '(-r_on_comm)\n', (75190, 75202), True, 'import scipy.linalg as _spl\n'), ((75281, 75294), 'numpy.dot', '_np.dot', (['R', 
'X'], {}), '(R, X)\n', (75288, 75294), True, 'import numpy as _np\n'), ((75325, 75348), 'numpy.linalg.norm', '_np.linalg.norm', (['R.imag'], {}), '(R.imag)\n', (75340, 75348), True, 'import numpy as _np\n'), ((77114, 77157), 'numpy.linalg.matrix_rank', '_np.linalg.matrix_rank', (['mx[:, 0:j + 1]', 'tol'], {}), '(mx[:, 0:j + 1], tol)\n', (77136, 77157), True, 'import numpy as _np\n'), ((78331, 78352), 'scipy.linalg.expm', '_spl.expm', (['(1.0j * Hmx)'], {}), '(1.0j * Hmx)\n', (78340, 78352), True, 'import scipy.linalg as _spl\n'), ((78452, 78464), 'numpy.zeros', '_np.zeros', (['d'], {}), '(d)\n', (78461, 78464), True, 'import numpy as _np\n'), ((78490, 78504), 'numpy.kron', '_np.kron', (['x', 'x'], {}), '(x, x)\n', (78498, 78504), True, 'import numpy as _np\n'), ((78560, 78577), 'numpy.dot', '_np.dot', (['psi', 'psi'], {}), '(psi, psi)\n', (78567, 78577), True, 'import numpy as _np\n'), ((78677, 78698), 'numpy.arccos', '_np.arccos', (['cos_theta'], {}), '(cos_theta)\n', (78687, 78698), True, 'import numpy as _np\n'), ((79627, 79653), 'numpy.array', '_np.array', (['(1, 0)', 'complex'], {}), '((1, 0), complex)\n', (79636, 79653), True, 'import numpy as _np\n'), ((79707, 79733), 'numpy.array', '_np.array', (['(0, 1)', 'complex'], {}), '((0, 1), complex)\n', (79716, 79733), True, 'import numpy as _np\n'), ((79867, 79917), 'functools.reduce', '_functools.reduce', (['_np.kron', '[v[i] for i in zvals]'], {}), '(_np.kron, [v[i] for i in zvals])\n', (79884, 79917), True, 'import functools as _functools\n'), ((82235, 82263), 'numpy.zeros', '_np.zeros', (['(4 ** nqubits)', '"""d"""'], {}), "(4 ** nqubits, 'd')\n", (82244, 82263), True, 'import numpy as _np\n'), ((11155, 11188), 'numpy.identity', '_np.identity', (['check.shape[0]', '"""d"""'], {}), "(check.shape[0], 'd')\n", (11167, 11188), True, 'import numpy as _np\n'), ((12365, 12400), 'numpy.empty', '_np.empty', (['(m.shape[0], 0)', 'm.dtype'], {}), '((m.shape[0], 0), m.dtype)\n', (12374, 12400), True, 'import numpy as _np\n'), ((12511, 12565), 'numpy.concatenate', '_np.concatenate', (['(running_indep_cols, m[:, j])'], {'axis': '(1)'}), '((running_indep_cols, m[:, j]), axis=1)\n', (12526, 12565), True, 'import numpy as _np\n'), ((13012, 13059), 'scipy.sparse.csc_matrix', '_sps.csc_matrix', (['(m.shape[0], 0)'], {'dtype': 'm.dtype'}), '((m.shape[0], 0), dtype=m.dtype)\n', (13027, 13059), True, 'import scipy.sparse as _sps\n'), ((13170, 13212), 'scipy.sparse.hstack', '_sps.hstack', (['(running_indep_cols, m[:, j])'], {}), '((running_indep_cols, m[:, j]))\n', (13181, 13212), True, 'import scipy.sparse as _sps\n'), ((13875, 13885), 'numpy.abs', '_np.abs', (['m'], {}), '(m)\n', (13882, 13885), True, 'import numpy as _np\n'), ((15208, 15225), 'numpy.isclose', '_np.isclose', (['S', '(0)'], {}), '(S, 0)\n', (15219, 15225), True, 'import numpy as _np\n'), ((15798, 15816), 'numpy.conjugate', '_np.conjugate', (['Z.T'], {}), '(Z.T)\n', (15811, 15816), True, 'import numpy as _np\n'), ((17518, 17530), 'numpy.array', '_np.array', (['m'], {}), '(m)\n', (17527, 17530), True, 'import numpy as _np\n'), ((21559, 21582), 'numpy.linalg.norm', '_np.linalg.norm', (['m.imag'], {}), '(m.imag)\n', (21574, 21582), True, 'import numpy as _np\n'), ((21647, 21673), 'numpy.linalg.norm', '_np.linalg.norm', (['logM.imag'], {}), '(logM.imag)\n', (21662, 21673), True, 'import numpy as _np\n'), ((25943, 25954), 'numpy.imag', '_np.imag', (['m'], {}), '(m)\n', (25951, 25954), True, 'import numpy as _np\n'), ((28010, 28028), 'numpy.log', '_np.log', (['(-evals[i])'], {}), '(-evals[i])\n', 
(28017, 28028), True, 'import numpy as _np\n'), ((28136, 28147), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (28144, 28147), True, 'import numpy as _np\n'), ((28297, 28320), 'numpy.log', '_np.log', (['(-evals[i].real)'], {}), '(-evals[i].real)\n', (28304, 28320), True, 'import numpy as _np\n'), ((28679, 28698), 'numpy.diag', '_np.diag', (['log_evals'], {}), '(log_evals)\n', (28687, 28698), True, 'import numpy as _np\n'), ((28700, 28717), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (28714, 28717), True, 'import numpy as _np\n'), ((30584, 30608), 'numpy.transpose', '_np.transpose', (['matrix_in'], {}), '(matrix_in)\n', (30597, 30608), True, 'import numpy as _np\n'), ((48328, 48350), 'itertools.product', '_itertools.product', (['*b'], {}), '(*b)\n', (48346, 48350), True, 'import itertools as _itertools\n'), ((48665, 48697), 'numpy.zeros', '_np.zeros', (['a_inds_shape', 'a.dtype'], {}), '(a_inds_shape, a.dtype)\n', (48674, 48697), True, 'import numpy as _np\n'), ((49557, 49570), 'numpy.dot', '_np.dot', (['a', 'b'], {}), '(a, b)\n', (49564, 49570), True, 'import numpy as _np\n'), ((54095, 54112), 'numpy.empty', '_np.empty', (['(0)', 'int'], {}), '(0, int)\n', (54104, 54112), True, 'import numpy as _np\n'), ((54114, 54131), 'numpy.empty', '_np.empty', (['(0)', 'int'], {}), '(0, int)\n', (54123, 54131), True, 'import numpy as _np\n'), ((57925, 57942), 'numpy.empty', '_np.empty', (['(0)', 'int'], {}), '(0, int)\n', (57934, 57942), True, 'import numpy as _np\n'), ((57944, 57961), 'numpy.empty', '_np.empty', (['(0)', '"""d"""'], {}), "(0, 'd')\n", (57953, 57961), True, 'import numpy as _np\n'), ((57963, 57980), 'numpy.zeros', '_np.zeros', (['(1)', 'int'], {}), '(1, int)\n', (57972, 57980), True, 'import numpy as _np\n'), ((62705, 62747), 'numpy.ascontiguousarray', '_np.ascontiguousarray', (['a.indptr', '_np.int64'], {}), '(a.indptr, _np.int64)\n', (62726, 62747), True, 'import numpy as _np\n'), ((62795, 62838), 'numpy.ascontiguousarray', '_np.ascontiguousarray', (['a.indices', '_np.int64'], {}), '(a.indices, _np.int64)\n', (62816, 62838), True, 'import numpy as _np\n'), ((74084, 74098), 'numpy.dot', '_np.dot', (['x', 'u0'], {}), '(x, u0)\n', (74091, 74098), True, 'import numpy as _np\n'), ((74166, 74184), 'numpy.dot', '_np.dot', (['a', 'u0_inv'], {}), '(a, u0_inv)\n', (74173, 74184), True, 'import numpy as _np\n'), ((74405, 74428), 'numpy.linalg.norm', '_np.linalg.norm', (['R.imag'], {}), '(R.imag)\n', (74420, 74428), True, 'import numpy as _np\n'), ((74467, 74481), 'numpy.dot', '_np.dot', (['R', 'u0'], {}), '(R, u0)\n', (74474, 74481), True, 'import numpy as _np\n'), ((74604, 74627), 'numpy.linalg.norm', '_np.linalg.norm', (['r.imag'], {}), '(r.imag)\n', (74619, 74627), True, 'import numpy as _np\n'), ((74735, 74766), 'numpy.linalg.norm', '_np.linalg.norm', (['r_on_comm.imag'], {}), '(r_on_comm.imag)\n', (74750, 74766), True, 'import numpy as _np\n'), ((75218, 75241), 'numpy.linalg.norm', '_np.linalg.norm', (['X.imag'], {}), '(X.imag)\n', (75233, 75241), True, 'import numpy as _np\n'), ((78525, 78536), 'numpy.sqrt', '_np.sqrt', (['d'], {}), '(d)\n', (78533, 78536), True, 'import numpy as _np\n'), ((78625, 78645), 'numpy.dot', '_np.dot', (['errmap', 'psi'], {}), '(errmap, psi)\n', (78632, 78645), True, 'import numpy as _np\n'), ((79421, 79449), 'numpy.array', '_np.array', (['(1, 0, 0, 1)', '"""d"""'], {}), "((1, 0, 0, 1), 'd')\n", (79430, 79449), True, 'import numpy as _np\n'), ((79517, 79546), 'numpy.array', '_np.array', (['(1, 0, 0, -1)', '"""d"""'], {}), "((1, 0, 0, -1), 
'd')\n", (79526, 79546), True, 'import numpy as _np\n'), ((8961, 8994), 'numpy.linalg.norm', '_np.linalg.norm', (['m[:, j]'], {'ord': 'ord'}), '(m[:, j], ord=ord)\n', (8976, 8994), True, 'import numpy as _np\n'), ((10519, 10541), 'numpy.linalg.norm', '_np.linalg.norm', (['check'], {}), '(check)\n', (10534, 10541), True, 'import numpy as _np\n'), ((12581, 12619), 'numpy.linalg.matrix_rank', '_np.linalg.matrix_rank', (['trial'], {'tol': 'tol'}), '(trial, tol=tol)\n', (12603, 12619), True, 'import numpy as _np\n'), ((13261, 13326), 'scipy.sparse.linalg.svds', '_spsl.svds', (['trial'], {'k': '(1)', 'which': '"""SM"""', 'return_singular_vectors': '(False)'}), "(trial, k=1, which='SM', return_singular_vectors=False)\n", (13271, 13326), True, 'import scipy.sparse.linalg as _spsl\n'), ((15266, 15299), 'numpy.isclose', '_np.isclose', (['(T[i, i] - T[j, j])', '(0)'], {}), '(T[i, i] - T[j, j], 0)\n', (15277, 15299), True, 'import numpy as _np\n'), ((17407, 17418), 'numpy.imag', '_np.imag', (['m'], {}), '(m)\n', (17415, 17418), True, 'import numpy as _np\n'), ((20680, 20700), 'numpy.sqrt', '_np.sqrt', (['H.shape[0]'], {}), '(H.shape[0])\n', (20688, 20700), True, 'import numpy as _np\n'), ((20747, 20762), 'scipy.linalg.expm', '_spl.expm', (['logM'], {}), '(logM)\n', (20756, 20762), True, 'import scipy.linalg as _spl\n'), ((21856, 21877), 'numpy.linalg.eigvals', '_np.linalg.eigvals', (['m'], {}), '(m)\n', (21874, 21877), True, 'import numpy as _np\n'), ((26338, 26350), 'numpy.real', '_np.real', (['ev'], {}), '(ev)\n', (26346, 26350), True, 'import numpy as _np\n'), ((29287, 29421), 'warnings.warn', '_warnings.warn', (["('Cannot construct a real log: unpaired negative' + ' real eigenvalues: %s' %\n [evals[i] for i in unpaired_indices])"], {}), "('Cannot construct a real log: unpaired negative' + \n ' real eigenvalues: %s' % [evals[i] for i in unpaired_indices])\n", (29301, 29421), True, 'import warnings as _warnings\n'), ((39703, 39728), 'numpy.isclose', '_np.isclose', (['ar[i]', 'ar[j]'], {}), '(ar[i], ar[j])\n', (39714, 39728), True, 'import numpy as _np\n'), ((79407, 79418), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (79415, 79418), True, 'import numpy as _np\n'), ((79503, 79514), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (79511, 79514), True, 'import numpy as _np\n'), ((82313, 82324), 'numpy.sqrt', '_np.sqrt', (['(2)'], {}), '(2)\n', (82321, 82324), True, 'import numpy as _np\n'), ((5589, 5604), 'numpy.diagonal', '_np.diagonal', (['r'], {}), '(r)\n', (5601, 5604), True, 'import numpy as _np\n'), ((23188, 23223), 'numpy.linalg.norm', '_np.linalg.norm', (['(logM - target_logm)'], {}), '(logM - target_logm)\n', (23203, 23223), True, 'import numpy as _np\n'), ((26314, 26326), 'numpy.imag', '_np.imag', (['ev'], {}), '(ev)\n', (26322, 26326), True, 'import numpy as _np\n'), ((26415, 26430), 'numpy.imag', '_np.imag', (['evec1'], {}), '(evec1)\n', (26423, 26430), True, 'import numpy as _np\n'), ((37759, 37779), 'numpy.conjugate', '_np.conjugate', (['a[q0]'], {}), '(a[q0])\n', (37772, 37779), True, 'import numpy as _np\n'), ((37825, 37845), 'numpy.conjugate', '_np.conjugate', (['b[q1]'], {}), '(b[q1])\n', (37838, 37845), True, 'import numpy as _np\n'), ((43188, 43213), 'numpy.array', '_np.array', (['[i]', '_np.int64'], {}), '([i], _np.int64)\n', (43197, 43213), True, 'import numpy as _np\n'), ((44814, 44836), 'itertools.product', '_itertools.product', (['*b'], {}), '(*b)\n', (44832, 44836), True, 'import itertools as _itertools\n'), ((75093, 75119), 'numpy.linalg.norm', '_np.linalg.norm', (['(R 
- lastR)'], {}), '(R - lastR)\n', (75108, 75119), True, 'import numpy as _np\n'), ((43425, 43448), 'numpy.array', '_np.array', (['i', '_np.int64'], {}), '(i, _np.int64)\n', (43434, 43448), True, 'import numpy as _np\n'), ((50341, 50357), 'numpy.real', '_np.real', (['a.data'], {}), '(a.data)\n', (50349, 50357), True, 'import numpy as _np\n'), ((51578, 51594), 'numpy.imag', '_np.imag', (['a.data'], {}), '(a.data)\n', (51586, 51594), True, 'import numpy as _np\n'), ((63135, 63163), 'numpy.where', '_np.where', (['(a.indices == iCol)'], {}), '(a.indices == iCol)\n', (63144, 63163), True, 'import numpy as _np\n'), ((27115, 27148), 'numpy.linalg.norm', '_np.linalg.norm', (['(evec1C - U[:, j])'], {}), '(evec1C - U[:, j])\n', (27130, 27148), True, 'import numpy as _np\n'), ((26655, 26672), 'numpy.imag', '_np.imag', (['U[:, j]'], {}), '(U[:, j])\n', (26663, 26672), True, 'import numpy as _np\n'), ((39043, 39054), 'numpy.conj', '_np.conj', (['v'], {}), '(v)\n', (39051, 39054), True, 'import numpy as _np\n'), ((50472, 50488), 'numpy.real', '_np.real', (['a.data'], {}), '(a.data)\n', (50480, 50488), True, 'import numpy as _np\n'), ((51709, 51725), 'numpy.imag', '_np.imag', (['a.data'], {}), '(a.data)\n', (51717, 51725), True, 'import numpy as _np\n'), ((30969, 30997), 'itertools.repeat', '_ittls.repeat', (['None', '(dim - 1)'], {}), '(None, dim - 1)\n', (30982, 30997), True, 'import itertools as _ittls\n')]
|
'''
Created on Jun 27, 2016
@author: rajajosh
'''
import numpy
from scipy.spatial.distance import euclidean
class KNNClassifier(object):
"K-Nearest Neighbors classifier class"
len=0
x_train=[]
y_train=[]
kVal=1
clusters = set()
def __init__(self):
'''
Constructor
'''
pass
    def fit(self, x_train, y_train, kVal=3):
        "Fit the training data."
self.len=len(x_train)
if self.len>len(y_train): self.len=len(y_train)
if kVal>self.len: kVal=self.len
self.x_train=x_train
self.y_train=y_train
self.clusters = set(y_train)
self.kVal=kVal
def predict(self, x_test):
retArr = []
for testData in x_test:
distArray =[]
for i in range(0,self.len):
distArray.append([euclidean(testData, self.x_train[i]), self.y_train[i]])
distArray.sort()
counts = [0] * len(self.clusters)
for i in range(0,self.kVal):
index=distArray[i][1]
counts[index]=counts[index]+1
largest=0
indexOfLargest=0
for i in range(0,len(counts)):
if counts[i]>largest:
largest=counts[i]
indexOfLargest=i
retArr.append(indexOfLargest)
return numpy.asarray(retArr)
print("done")
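# --- Hedged usage sketch (added commentary; not part of the original module) ---
# Labels must be integers 0..n_classes-1, because predict() uses them directly as indices
# into its vote-count array.  The data values below are made up.
#   clf = KNNClassifier()
#   clf.fit([[0, 0], [0, 1], [5, 5], [6, 5]], [0, 0, 1, 1], kVal=3)
#   clf.predict([[0.2, 0.1], [5.5, 5.0]])   # -> array([0, 1])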
|
[
"numpy.asarray",
"scipy.spatial.distance.euclidean"
] |
[((1417, 1438), 'numpy.asarray', 'numpy.asarray', (['retArr'], {}), '(retArr)\n', (1430, 1438), False, 'import numpy\n'), ((883, 919), 'scipy.spatial.distance.euclidean', 'euclidean', (['testData', 'self.x_train[i]'], {}), '(testData, self.x_train[i])\n', (892, 919), False, 'from scipy.spatial.distance import euclidean\n')]
|
import os
import argparse
import base64
import warnings
from multiprocessing import Pool
import shutil
import zlib
import numpy as np
import cv2
import h5py
from tqdm import tqdm
def encode_single(info):
source_path, target_path, video, video_index, num_videos, delete = info
print('Encoding {} / {} file.'.format(video_index, num_videos))
if os.path.exists(os.path.join(target_path, f'{video}.h5')):
return
file = h5py.File(os.path.join(target_path, f'{video}.h5'), 'w',
driver='core')
for frame in os.listdir(os.path.join(source_path, video)):
with open(os.path.join(source_path, video, frame), 'rb') as frame_file:
string_image = frame_file.read()
string_image = np.void(string_image)
file.create_dataset(frame, data=string_image)
file.close()
if delete:
shutil.rmtree(os.path.join(source_path, video))
return
def encode(opt):
assert os.path.exists(opt.source_path)
if not os.path.exists(opt.target_path):
os.mkdir(opt.target_path)
videos = os.listdir(opt.source_path)
videos = list(filter(lambda x: os.path.isdir(os.path.join(opt.source_path,
x)), videos))
num_videos = len(videos)
if opt.num_worker == 1:
        for video_index, video in enumerate(tqdm(videos)):
            encode_single((opt.source_path, opt.target_path, video, video_index,
                           num_videos, opt.delete_original))
else:
pool = Pool(opt.num_worker)
pool.map(encode_single, zip([opt.source_path] * num_videos,
[opt.target_path] * num_videos,
videos, range(num_videos),
[num_videos] * num_videos,
[opt.delete_original] * num_videos))
def decode_single(info):
source_path, target_path, video_file, video_index, num_videos, delete = info
print('Decoding {} / {} file.'.format(video_index, num_videos))
video_name = video_file.split('.')[0]
if not os.path.exists(os.path.join(target_path, video_name)):
os.mkdir(os.path.join(target_path, video_name))
file = h5py.File(os.path.join(source_path, video_file), 'r', driver='core')
for key in file.keys():
frame = open(os.path.join(target_path, video_name, key), 'wb')
frame.write(file[key][()].tobytes())
frame.close()
file.close()
if delete:
shutil.rmtree(os.path.join(source_path, video_file))
def decode(opt):
assert os.path.exists(opt.source_path)
if not os.path.exists(opt.target_path):
os.mkdir(opt.target_path)
video_files = os.listdir(opt.source_path)
num_videos = len(video_files)
if opt.num_worker == 1:
        for video_index, video_file in enumerate(tqdm(video_files)):
            decode_single((opt.source_path, opt.target_path, video_file, video_index,
                           num_videos, opt.delete_original))
else:
pool = Pool(opt.num_worker)
pool.map(decode_single, zip([opt.source_path] * num_videos,
[opt.target_path] * num_videos,
video_files, range(num_videos),
[num_videos] * num_videos,
[opt.delete_original] * num_videos))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-sp', '--source_path', type=str,
default='/home/yhzhai/Downloads/tmp-frames')
parser.add_argument('-tp', '--target_path', type=str,
default='/home/yhzhai/Downloads/tmp-hdf5')
parser.add_argument('--num_worker', type=int, default=4)
parser.add_argument('--single_video', action='store_true', default=False)
parser.add_argument('--decode', action='store_true', default=False)
parser.add_argument('-d', '--delete_original', action='store_true',
default=False)
opt = parser.parse_args()
if not opt.single_video:
if opt.decode:
decode(opt)
else:
encode(opt)
else:
if opt.decode:
source_path, video_file = os.path.split(opt.source_path)
decode_single((source_path, opt.target_path, video_file, 1, 1,
opt.delete_original))
else:
source_path, video_name = os.path.split(opt.source_path)
encode_single((source_path, opt.target_path, video_name, 1, 1,
opt.delete_original))
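# --- Hedged usage sketch (added commentary; not part of the original script) ---
# Example invocations, assuming this file is saved as frames_hdf5.py (the name is arbitrary):
#   python frames_hdf5.py -sp /data/frames -tp /data/hdf5 --num_worker 4      # frames -> HDF5
#   python frames_hdf5.py -sp /data/hdf5 -tp /data/frames --decode            # HDF5 -> frames
# With --single_video, source_path points at one video directory (or one .h5 file when
# decoding) instead of a folder of videos.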
|
[
"os.mkdir",
"tqdm.tqdm",
"numpy.void",
"argparse.ArgumentParser",
"os.path.exists",
"multiprocessing.Pool",
"os.path.split",
"os.path.join",
"os.listdir"
] |
[((942, 973), 'os.path.exists', 'os.path.exists', (['opt.source_path'], {}), '(opt.source_path)\n', (956, 973), False, 'import os\n'), ((1066, 1093), 'os.listdir', 'os.listdir', (['opt.source_path'], {}), '(opt.source_path)\n', (1076, 1093), False, 'import os\n'), ((2466, 2497), 'os.path.exists', 'os.path.exists', (['opt.source_path'], {}), '(opt.source_path)\n', (2480, 2497), False, 'import os\n'), ((2603, 2630), 'os.listdir', 'os.listdir', (['opt.source_path'], {}), '(opt.source_path)\n', (2613, 2630), False, 'import os\n'), ((3242, 3267), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3265, 3267), False, 'import argparse\n'), ((373, 413), 'os.path.join', 'os.path.join', (['target_path', 'f"""{video}.h5"""'], {}), "(target_path, f'{video}.h5')\n", (385, 413), False, 'import os\n'), ((452, 492), 'os.path.join', 'os.path.join', (['target_path', 'f"""{video}.h5"""'], {}), "(target_path, f'{video}.h5')\n", (464, 492), False, 'import os\n'), ((550, 582), 'os.path.join', 'os.path.join', (['source_path', 'video'], {}), '(source_path, video)\n', (562, 582), False, 'import os\n'), ((985, 1016), 'os.path.exists', 'os.path.exists', (['opt.target_path'], {}), '(opt.target_path)\n', (999, 1016), False, 'import os\n'), ((1026, 1051), 'os.mkdir', 'os.mkdir', (['opt.target_path'], {}), '(opt.target_path)\n', (1034, 1051), False, 'import os\n'), ((1273, 1285), 'tqdm.tqdm', 'tqdm', (['videos'], {}), '(videos)\n', (1277, 1285), False, 'from tqdm import tqdm\n'), ((1381, 1401), 'multiprocessing.Pool', 'Pool', (['opt.num_worker'], {}), '(opt.num_worker)\n', (1385, 1401), False, 'from multiprocessing import Pool\n'), ((2118, 2155), 'os.path.join', 'os.path.join', (['source_path', 'video_file'], {}), '(source_path, video_file)\n', (2130, 2155), False, 'import os\n'), ((2509, 2540), 'os.path.exists', 'os.path.exists', (['opt.target_path'], {}), '(opt.target_path)\n', (2523, 2540), False, 'import os\n'), ((2550, 2575), 'os.mkdir', 'os.mkdir', (['opt.target_path'], {}), '(opt.target_path)\n', (2558, 2575), False, 'import os\n'), ((2719, 2736), 'tqdm.tqdm', 'tqdm', (['video_files'], {}), '(video_files)\n', (2723, 2736), False, 'from tqdm import tqdm\n'), ((2835, 2855), 'multiprocessing.Pool', 'Pool', (['opt.num_worker'], {}), '(opt.num_worker)\n', (2839, 2855), False, 'from multiprocessing import Pool\n'), ((737, 758), 'numpy.void', 'np.void', (['string_image'], {}), '(string_image)\n', (744, 758), True, 'import numpy as np\n'), ((867, 899), 'os.path.join', 'os.path.join', (['source_path', 'video'], {}), '(source_path, video)\n', (879, 899), False, 'import os\n'), ((2001, 2038), 'os.path.join', 'os.path.join', (['target_path', 'video_name'], {}), '(target_path, video_name)\n', (2013, 2038), False, 'import os\n'), ((2058, 2095), 'os.path.join', 'os.path.join', (['target_path', 'video_name'], {}), '(target_path, video_name)\n', (2070, 2095), False, 'import os\n'), ((2226, 2268), 'os.path.join', 'os.path.join', (['target_path', 'video_name', 'key'], {}), '(target_path, video_name, key)\n', (2238, 2268), False, 'import os\n'), ((2397, 2434), 'os.path.join', 'os.path.join', (['source_path', 'video_file'], {}), '(source_path, video_file)\n', (2409, 2434), False, 'import os\n'), ((4051, 4081), 'os.path.split', 'os.path.split', (['opt.source_path'], {}), '(opt.source_path)\n', (4064, 4081), False, 'import os\n'), ((4247, 4277), 'os.path.split', 'os.path.split', (['opt.source_path'], {}), '(opt.source_path)\n', (4260, 4277), False, 'import os\n'), ((603, 642), 'os.path.join', 'os.path.join', 
(['source_path', 'video', 'frame'], {}), '(source_path, video, frame)\n', (615, 642), False, 'import os\n'), ((1143, 1175), 'os.path.join', 'os.path.join', (['opt.source_path', 'x'], {}), '(opt.source_path, x)\n', (1155, 1175), False, 'import os\n')]
|
import math
import operator
import numpy as np
def _convert_to_float(fl):
""" This method converts ONLY the numeric values of a string into floats """
try:
return float(fl)
except (ValueError, TypeError):
return fl
def _wrap_to_pi(angle):
    """ This method wraps the input angle to the range [-pi, pi]
        angle : [rad] """
    ang = angle - (angle // (2 * np.pi)) * 2 * np.pi
    if ang > np.pi or ang < -np.pi:
        ang = ang - np.sign(ang) * 2 * np.pi
    return ang
def _quaternion_multiply(quat1, quat0):
""" This method performs a standard quaternion multiplication
quat0 : [qR0, qV0], with qR0 being the real part of the quaternion
quat1 : [qR1, qV1], with qR1 being the real part of the quaternion """
w0, x0, y0, z0 = quat0
w1, x1, y1, z1 = quat1
return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
-x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)
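# Illustrative sketch, not part of the original module: a minimal sanity check of
# _quaternion_multiply. Multiplying by the identity quaternion leaves the other
# operand unchanged, and a unit quaternion times its conjugate gives the identity.
def _example_quaternion_multiply():
    identity = [1.0, 0.0, 0.0, 0.0]
    q = [np.cos(np.pi / 8), 0.0, 0.0, np.sin(np.pi / 8)]  # 45 deg about z
    q_conj = [q[0], -q[1], -q[2], -q[3]]
    assert np.allclose(_quaternion_multiply(identity, q), q)
    assert np.allclose(_quaternion_multiply(q, q_conj), identity)
    return True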
def _solve_fk(eva, joints):
""" This method solves the forward kinematics problem and extract the results directly as an array
joints : joint angles in [rad]
pos : cartesian position, with respect to robot's origin [m]
orient : orientation quaternion of the end effector """
fk_results = eva.calc_forward_kinematics(joints)
pos_json = fk_results['position']
orient_json = fk_results['orientation']
pos = [pos_json['x'], pos_json['y'], pos_json['z']]
orient = [orient_json['w'], orient_json['x'], orient_json['y'], orient_json['z']]
return pos, orient
def solve_ik_head_down(eva, guess, theta, xyz_absolute):
""" This method solves the inverse kinematics problem for the special case of the end-effector
pointing downwards, perpendicular to the ground.
guess : is the IK guess, a 1x6 array of joint angles in [rad]
theta : angular rotation of axis 6 [deg]
xyz_absolute : cartesian position, with respect to robot's origin [m] """
pos = [xyz_absolute[0], xyz_absolute[1], xyz_absolute[2]] # [m]
pos_json = {'x': (pos[0]), 'y': (pos[1]), 'z': (pos[2])} # [m]
orient_rel = [math.cos(np.deg2rad(theta) / 2), 0, 0, math.sin(np.deg2rad(theta) / 2)]
orient_abs = _quaternion_multiply([0, 0, 1, 0], orient_rel)
orient_json = {'w': (orient_abs[0]), 'x': (orient_abs[1]), 'y': (orient_abs[2]), 'z': (orient_abs[3])}
# Compute IK
result_ik = eva.calc_inverse_kinematics(guess, pos_json, orient_json)
success_ik = result_ik['ik']['result']
joints_ik = result_ik['ik']['joints']
return success_ik, joints_ik
def read_tcp_ip(sock, objects):
""" This method reads and decodes the string sent from the camera """
result = sock.recv(4000)
string_read = result.decode('utf-8')
string_split = string_read.split(",")
camera_string_raw = list(string_split)
passed = False
camera_string = ['']
    if len(camera_string_raw) != 0:
if camera_string_raw[0] == 'start' and camera_string_raw[19] == 'end' and len(camera_string_raw) == 20:
camera_string_raw = [_convert_to_float(fl) for fl in camera_string_raw]
passes = [camera_string_raw[6], camera_string_raw[12], camera_string_raw[18]]
scores = [camera_string_raw[5], camera_string_raw[11], camera_string_raw[17]]
passed_score = [passes[0] * scores[0], passes[1] * scores[1], passes[2] * scores[2]]
max_index, max_value = max(enumerate(passed_score), key=operator.itemgetter(1))
select_obj = objects[max_index]
if max_value > 0:
passed = True
# Extract the best matching object from the string
camera_string = _extract_camera_serial(objects, select_obj, camera_string_raw)
# String format = ['start', 'object_name', float x_mm, float y_mm, float angle]
return passed, camera_string
def _extract_camera_serial(objects, index, camera_string_raw):
""" This method extracts only the best matching object data from the entire string """
camera_string = ['', 0, 0, 0, 0]
if index not in objects:
print('Wrong object in the list')
    elif index == 'C':
camera_string[0] = 'start'
camera_string[1] = camera_string_raw[1]
camera_string[2] = camera_string_raw[2]
camera_string[3] = camera_string_raw[3]
camera_string[4] = camera_string_raw[4]
    elif index == 'M':
camera_string[0] = 'start'
camera_string[1] = camera_string_raw[7]
camera_string[2] = camera_string_raw[8]
camera_string[3] = camera_string_raw[9]
camera_string[4] = camera_string_raw[10]
    elif index == 'R':
camera_string[0] = 'start'
camera_string[1] = camera_string_raw[13]
camera_string[2] = camera_string_raw[14]
camera_string[3] = camera_string_raw[15]
camera_string[4] = camera_string_raw[16]
return camera_string
class EvaVision:
""" This class performs the machine vision operations in order to obtain the object position in Eva's frame """
def __init__(self, eva, string, cal_zero, obj_height=0.0, surf_height=0.0, ee_length=0.0):
self.eva = eva
self.string = string
self.cal = cal_zero
self.obj = obj_height
self.surf = surf_height
self.ee = ee_length
def locate_object(self):
print('Pattern identified is: ', self.string[1])
# Relative object position in camera frame:
x_obj_rel_cam = 0.001*self.string[2] # transform X value from [mm] into [m]
y_obj_rel_cam = 0.001*self.string[3] # transform Y value from [mm] into [m]
# Compute relative object position in Eva's frame:
# Need to known Eva's frame rotation wrt to camera frame
# Convention: start from camera frame and rotate of ang [deg] to get to Eva's frame
ang_cam = 180 # [deg]
x_obj_rel = np.cos(np.deg2rad(ang_cam)) * x_obj_rel_cam + np.sin(np.deg2rad(ang_cam)) * y_obj_rel_cam # [m]
y_obj_rel = -np.sin(np.deg2rad(ang_cam)) * x_obj_rel_cam + np.cos(np.deg2rad(ang_cam)) * y_obj_rel_cam # [m]
# Compute absolute object position of calibration board origin in Eva's frame:
pos_cal = self.eva.calc_forward_kinematics(self.cal)['position']
# Compute absolute object position by summing the calibration board origin to the relative object position
x_obj_abs = x_obj_rel + pos_cal['x'] # [m]
y_obj_abs = y_obj_rel + pos_cal['y'] # [m]
# Compute absolute value of Z
z_obj_abs = abs(self.obj) + self.surf + abs(self.ee)
pos_abs = [x_obj_abs, y_obj_abs, z_obj_abs]
return pos_abs
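# Illustrative sketch, not part of the original module: the frame-rotation step
# from EvaVision.locate_object, isolated so it can be run without a camera or a
# robot connection. ang_cam = 180 mirrors the convention used above; the sample
# offsets are arbitrary.
def _example_camera_to_robot_frame(x_cam=0.10, y_cam=0.05, ang_cam=180):
    x_rel = np.cos(np.deg2rad(ang_cam)) * x_cam + np.sin(np.deg2rad(ang_cam)) * y_cam
    y_rel = -np.sin(np.deg2rad(ang_cam)) * x_cam + np.cos(np.deg2rad(ang_cam)) * y_cam
    # With ang_cam = 180 deg both axes flip sign: (0.10, 0.05) -> (-0.10, -0.05)
    return x_rel, y_rel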
|
[
"operator.itemgetter",
"numpy.array",
"numpy.sign",
"numpy.deg2rad"
] |
[((835, 1029), 'numpy.array', 'np.array', (['[-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, x1 * w0 + y1 * z0 - z1 * y0 + w1 *\n x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 + z1 * w0 +\n w1 * z0]'], {'dtype': 'np.float64'}), '([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, x1 * w0 + y1 * z0 - z1 *\n y0 + w1 * x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 +\n z1 * w0 + w1 * z0], dtype=np.float64)\n', (843, 1029), True, 'import numpy as np\n'), ((2242, 2259), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (2252, 2259), True, 'import numpy as np\n'), ((2281, 2298), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (2291, 2298), True, 'import numpy as np\n'), ((469, 484), 'numpy.sign', 'np.sign', (['ang2pi'], {}), '(ang2pi)\n', (476, 484), True, 'import numpy as np\n'), ((3573, 3595), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3592, 3595), False, 'import operator\n'), ((6007, 6026), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6017, 6026), True, 'import numpy as np\n'), ((6053, 6072), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6063, 6072), True, 'import numpy as np\n'), ((6171, 6190), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6181, 6190), True, 'import numpy as np\n'), ((6125, 6144), 'numpy.deg2rad', 'np.deg2rad', (['ang_cam'], {}), '(ang_cam)\n', (6135, 6144), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide some reward processors.
It processes the rewards before returning them; this can be useful to standardize, normalize, center them for instance.
"""
import numpy as np
from pyrobolearn.rewards.reward import Reward
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class RewardProcessor(Reward):
r"""Reward Processor
    Wraps the reward and processes it. It also acts as a memory of the last received reward signal, which can be
accessed via the `value` attribute.
Examples:
reward = Reward1() + Reward2()
reward = RewardProcessor(reward, <args>)
"""
def __init__(self, reward, range=None):
"""
Initialize the reward processor.
Args:
reward (Reward): reward to process.
range (tuple of float/int, None): range of the reward processor.
"""
super(RewardProcessor, self).__init__()
# set the reward to process
if not isinstance(reward, Reward):
raise TypeError("Expecting the given 'reward' to be an instance of `Reward`, instead got: "
"{}".format(type(reward)))
self.reward = reward
# set the range
self.range = self.reward.range if range is None else range
# set the initial value (randomly)
self.value = np.random.uniform(low=self.range[0], high=self.range[1])
def _compute(self):
"""Compute the reward and cache its value."""
self.value = self.reward._compute()
return self.value
class ShiftRewardProcessor(RewardProcessor):
r"""Shift Reward Processor
Shift the reward by the given amount; that is, it returned: :math:`\hat{r} = r + x` where :math:`x` is the
specified amount to shift the original reward.
"""
def __init__(self, reward, x):
"""
Initialize the shift reward processor.
Args:
reward (Reward): Reward instance to shift.
x (int, float): amount to be shifted.
"""
if not isinstance(x, (int, float)):
raise TypeError("Expecting the given 'x' (=the amount to be shifted) to be an int or float, instead got: "
"{}".format(type(x)))
self.x = x
        super(ShiftRewardProcessor, self).__init__(reward, range=reward.range + x)  # note: self.reward is only set by the parent constructor
def _compute(self):
reward = self.reward._compute()
self.value = reward + self.x
return self.value
class ClipRewardProcessor(RewardProcessor):
r"""Clip Reward Processor
Processor that clips the given reward to be between [low, high], where `low` and `high` are respectively the
specified lower and higher bound.
"""
def __init__(self, reward, low=-10, high=10):
"""
Initialize the Clip processor.
Args:
reward (Reward): Reward instance to clip.
low (int, float): lower bound
high (int, float): higher bound
"""
super(ClipRewardProcessor, self).__init__(reward)
self.low = low
self.high = high
def _compute(self):
reward = self.reward._compute()
self.value = np.clip(reward, self.low, self.high)
return self.value
class CenterRewardProcessor(RewardProcessor):
r"""Center Reward Processor
Center the reward using the running mean.
"""
def __init__(self, reward):
"""
Initialize the center reward processor.
Args:
reward (Reward): Reward instance to center.
"""
super(CenterRewardProcessor, self).__init__(reward)
self.mean = 0
self.N = 0
def reset(self):
self.mean = 0
self.N = 0
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
# update the mean
self.mean = self.N / (self.N + 1.) * self.mean + 1. / (self.N + 1) * reward
self.N += 1
# center reward
        self.value = reward - self.mean
return self.value
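# Illustrative sketch, not part of the original library: the incremental update
# used above, mean <- N/(N+1) * mean + 1/(N+1) * r, reproduces the ordinary
# arithmetic mean of all rewards seen so far.
def _example_running_mean(rewards=(1.0, 3.0, 2.0, 6.0)):
    mean, n = 0.0, 0
    for r in rewards:
        mean = n / (n + 1.) * mean + 1. / (n + 1) * r
        n += 1
    assert np.isclose(mean, np.mean(rewards))
    return mean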
class NormalizeRewardProcessor(RewardProcessor):
r"""Normalize Reward Processor
Normalize the reward such that it is between 0 and 1. That is, it returned
:math:`\hat{r} = \frac{r - r_{min}}{r_{max} - r_{min}}`, where :math:`r \in [r_{min}, r_{max}]`.
Warnings: the first returned reward will be 0.
"""
def __init__(self, reward):
"""
Initialize the normalizer reward processor.
Args:
reward (Reward): Reward instance to normalize.
"""
super(NormalizeRewardProcessor, self).__init__(reward)
self.min = np.infty
self.max = -np.infty
def reset(self):
self.min = np.infty
self.max = -np.infty
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
self.min = np.minimum(reward, self.min)
self.max = np.maximum(reward, self.max)
den = self.max - self.min
if den == 0:
den = 1.
self.value = (reward - self.min) / den
return self.value
class StandardizeRewardProcessor(RewardProcessor):
r"""Standardize Reward Processor
Standardize the reward such that it returns :math:`\hat{r} = \frac{r - \mu}{\sigma}` where :math:`\mu` is the
running mean, and :math:`\sigma` is the running standard deviation. The returned reward will have a mean of 0
and standard deviation of 1.
"""
def __init__(self, reward, epsilon=1.e-4, center=True):
"""
Initialize the standardizer reward processor.
Args:
reward (Reward): Reward instance to standardize.
epsilon (float): threshold to be added to the standard deviation in order to avoid a division by 0.
center (bool): if we should center the data.
"""
super(StandardizeRewardProcessor, self).__init__(reward)
self.eps = epsilon
self.mean = 0
self.var = 1
self.N = 0
self.center = center
def reset(self):
self.mean = 0
self.var = 1
self.N = 0
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
# update the mean
old_mean = self.mean
self.mean = self.N / (self.N + 1.) * self.mean + 1. / (self.N + 1) * reward
# update the var / stddev
frac = 1. / (self.N + 1)
        self.var = self.N * frac * self.var + frac * (reward - old_mean) * (reward - self.mean)
std = np.sqrt(self.var)
# update total number of data points
self.N += 1
# standardize the reward
if self.center:
self.value = (reward - self.mean) / (std + self.eps)
else:
self.value = reward / (std + self.eps)
return self.value
class GammaAccumulatedRewardProcessor(RewardProcessor):
r"""Gamma reward processor
It will return the accumulated reward until now: :math:`R = \sum_{t'=0}^t \gamma^{t'} r_{t'}`.
"""
def __init__(self, reward, gamma=0.99):
"""
Initialize the gamma accumulator reward processor.
Args:
reward (Reward): Reward instance to process.
gamma (float): discount factor.
"""
super(GammaAccumulatedRewardProcessor, self).__init__(reward)
self.gamma = gamma
self.value = 0. # return value
def reset(self):
self.value = 0.
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
self.value = reward + self.gamma * self.value
return self.value
class GammaStandardizeRewardProcessor(RewardProcessor):
r"""Gamma Standardize Reward Processor
References:
[1] https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_normalize.py
"""
def __init__(self, reward, gamma=0.99, epsilon=1.e-4):
"""
Initialize the gamma standardizer reward processor.
Args:
reward (Reward): Reward instance to process.
gamma (float): discount factor.
epsilon (float): threshold to be added to the standard deviation in order to avoid a division by 0.
"""
super(GammaStandardizeRewardProcessor, self).__init__(reward)
self.gamma = gamma
self.eps = epsilon
self.ret = 0
self.mean = 0
self.var = 1
self.N = 0
def reset(self):
self.ret = 0
self.mean = 0
self.var = 1
self.N = 0
self.reward.reset()
def _compute(self):
reward = self.reward._compute()
# update return
self.ret = reward + self.gamma * self.ret
# update the return mean
old_mean = self.mean
self.mean = self.N / (self.N + 1.) * self.mean + 1. / (self.N + 1) * self.ret
# update the return variance
self.var = self.N / (self.N + 1) * self.var + 1. / (self.N + 1) * (self.ret - old_mean) * (self.ret - self.mean)
std = np.sqrt(self.var)
# update total number of data points
self.N += 1
self.value = reward / (std + self.eps)
return self.value
class ScaleRewardProcessor(RewardProcessor):
r"""Scale Reward Processor
Processor that scales the reward x which is between [x1, x2] to the output y which is between [y1, y2].
"""
def __init__(self, reward, x1, x2, y1, y2):
"""
Initialize the scale reward processor
Args:
reward (Reward): reward function to scale.
x1 (int, float): lower bound of the original reward
x2 (int, float): upper bound of the original reward
y1 (int, float): lower bound of the final reward
y2 (int, float): upper bound of the final reward
"""
super(ScaleRewardProcessor, self).__init__(reward)
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.ratio = (self.y2 - self.y1) / (self.x2 - self.x1)
self.range = (self.y1, self.y2)
def _compute(self):
reward = self.reward._compute()
self.value = self.y1 + (reward - self.x1) * self.ratio
return self.value
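# Illustrative sketch, not part of the original library: ScaleRewardProcessor
# applies the affine rescaling y = y1 + (x - x1) * (y2 - y1) / (x2 - x1), so a
# reward of 0.5 on [0, 1] maps to 0 on [-1, 1]. The numbers are arbitrary.
def _example_scale_mapping(x=0.5, x1=0., x2=1., y1=-1., y2=1.):
    y = y1 + (x - x1) * (y2 - y1) / (x2 - x1)
    assert np.isclose(y, 0.0)
    return y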
|
[
"numpy.random.uniform",
"numpy.minimum",
"numpy.maximum",
"numpy.clip",
"numpy.sqrt"
] |
[((1534, 1590), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.range[0]', 'high': 'self.range[1]'}), '(low=self.range[0], high=self.range[1])\n', (1551, 1590), True, 'import numpy as np\n'), ((3361, 3397), 'numpy.clip', 'np.clip', (['reward', 'self.low', 'self.high'], {}), '(reward, self.low, self.high)\n', (3368, 3397), True, 'import numpy as np\n'), ((5037, 5065), 'numpy.minimum', 'np.minimum', (['reward', 'self.min'], {}), '(reward, self.min)\n', (5047, 5065), True, 'import numpy as np\n'), ((5085, 5113), 'numpy.maximum', 'np.maximum', (['reward', 'self.max'], {}), '(reward, self.max)\n', (5095, 5113), True, 'import numpy as np\n'), ((6694, 6711), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (6701, 6711), True, 'import numpy as np\n'), ((9191, 9208), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (9198, 9208), True, 'import numpy as np\n')]
|
from __future__ import print_function
import threading
import math
from numpy import sign, clip
import rospy
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle
# Mode enum
MODE_FORWARD = 0
MODE_OBSTACLE_PLAN = 1
MODE_OBSTACLE_TURN = 2
class PilotNode:
def __init__(self, loophz, turn_radians, turn_radians_tolerance, cmd_vel_pub, pcontroller):
"""
Parameters:
:param int loophz:
:param float turn_radians:
:param float turn_radians_tolerance:
:param rospy.Publisher cmd_vel_pub:
:param PContoller pcontroller:
"""
self._loophz = loophz
self._turn_radians = turn_radians
self._turn_radians_tolerance = turn_radians_tolerance
self._cmd_vel_pub = cmd_vel_pub
self._pcontroller = pcontroller
self._prox_sensor = False
self._odom = Odometry()
self._state_lock = threading.RLock()
self._current_heading = 0.0 # radians
self._mode = MODE_OBSTACLE_PLAN # MODE_XXX enum
self._heading_goal = 0.0 # radians
self._obstacle_forward = None # True/False/None
self._obstacle_right = None # True/False/None
self._obstacle_left = None # True/False/None
self._reverse_plan = False # True/False
def run(self):
looprate = rospy.Rate(self._loophz)
try:
while not rospy.is_shutdown():
# Update the heading state
with self._state_lock:
self._current_heading = heading_from_odometry(self._odom)
rospy.logdebug(
"Current heading: {} deg (goal: {} deg)".format(
round(math.degrees(normalize_theta(self._current_heading)), 2),
round(math.degrees(self._heading_goal)), 2))
self._decide()
looprate.sleep()
except rospy.ROSInterruptException:
rospy.logwarn("ROSInterruptException received in main loop")
# ~~~~~~~~~~~~~~~~~~~~~~~~
# Subscription callbacks
# ~~~~~~~~~~~~~~~~~~~~~~~~
def prox_callback(self, msg):
"""
:param Proximity msg: The Proximity message
"""
with self._state_lock:
self._prox_sensor = msg.sensors[0]
def odom_callback(self, msg):
"""
:param Odometry msg: The Odometry message
"""
with self._state_lock:
self._odom = msg
# ~~~~~~~~~~~~~~~~~~~~
# Non-public methods
# ~~~~~~~~~~~~~~~~~~~~
def _send_drive_cmd(self, speed):
""" Sends the Twist command for linear.x speed in meters/sec
:param float speed: Speed in meters/sec for linear.x
"""
cmd = Twist()
cmd.linear.x = speed
self._cmd_vel_pub.publish(cmd)
def _send_turn_cmd(self, radians_sec):
""" Sends the Twist command for angular.z speed in radians/sec
:param float radians_sec: Angular speed in radians/sec for angular.z
"""
cmd = Twist()
cmd.angular.z = radians_sec
self._cmd_vel_pub.publish(cmd)
rospy.logdebug("sent cmd_vel: {}".format(cmd.angular.z))
def _set_forward_mode(self):
self._obstacle_forward = None
self._obstacle_right = None
self._obstacle_left = None
self._reverse_plan = False
self._mode = MODE_FORWARD
def _decide(self):
if self._mode == MODE_FORWARD:
if self._prox_sensor is True:
# If driving forward, and center sensor detects obstacle
# --> stop and enter obstacle mode
self._send_drive_cmd(0)
self._mode = MODE_OBSTACLE_PLAN
rospy.logdebug("Obstacle detected while moving forward")
else:
# No obstacle, so command base forward some more
linear_v = self._pcontroller.linear_velocity()
self._send_drive_cmd(linear_v)
rospy.logdebug("Forward is clear, proceeding to drive forward")
else: # Mode is either _PLAN or _TURN
# Need to calculate the heading to which to turn next
if self._mode == MODE_OBSTACLE_PLAN:
rospy.logdebug("Planning next movement")
self._process_obstacle_plan()
# Execute the turn to the target heading
if self._mode == MODE_OBSTACLE_TURN:
rospy.logdebug("Turning base")
self._process_obstacle_turn()
def _process_obstacle_plan(self):
"""
Note, the logic here assumes that if self._obstacle_XXX is None
then we haven't yet been in position to test it. Once we test that
position, we set the value to either True (obstacle) or False (clear)
then calculate the turn and switch into TURN mode.
Therefore, if we are in PLAN mode, we can determine which sides we need
to test still by examiming the self._obstacle_XXX state.
Example:
If in PLAN mode and self._obstacle_forward is NONE, we need to
test the front position, and if TRUE, turn to the right side.
If in PLAN mode and self._obstacle_forward is TRUE,
and self._obstacle_right is NONE: we have turned to the right
but have not yet tested the right side for an obstacle. So test
the position and if TRUE, we need to turn to the left side.
"""
if self._obstacle_forward in (None, False):
if self._prox_sensor is True:
# Calculate the turn to check the right side
self._obstacle_forward = True
rospy.logdebug("(Planner) Forward is blocked")
self._heading_goal = normalize_theta(
self._current_heading - self._turn_radians)
rospy.logdebug(
"(Planner) Turning to check right side. New heading: {}".format(
math.degrees(self._heading_goal)))
self._mode = MODE_OBSTACLE_TURN
else:
self._set_forward_mode()
elif self._obstacle_right is None:
if self._prox_sensor is True:
# Calculate the turn to check the left side
# We've already turned to the right, so we need to turn 180 to test
# the left side
self._obstacle_right = True
rospy.logdebug("(Planner) Right side is blocked")
self._heading_goal = normalize_theta(
self._current_heading + self._turn_radians * 2)
rospy.logdebug("(Planner) Turning to check left side. New heading: {}".format(
math.degrees(self._heading_goal)))
self._mode = MODE_OBSTACLE_TURN
else:
self._set_forward_mode()
elif self._obstacle_left is None:
if self._prox_sensor is True:
# All three of fwd, right, left are blocked
self._obstacle_left = True
rospy.logdebug("(Planner) left is blocked")
self._heading_goal = normalize_theta(
self._current_heading + self._turn_radians)
rospy.logdebug("(Planner) Turning to rear to backtrack. New heading: {}".format(
math.degrees(self._heading_goal)))
self._mode = MODE_OBSTACLE_TURN
self._reverse_plan = True
else:
self._set_forward_mode()
elif self._reverse_plan is True:
# We were performing a turn to reverse. Since we're in plan mode
# again, this means the turn is complete
rospy.logdebug("(Planner) Turn to rear complete, moving forward")
self._set_forward_mode()
else:
# This should not be possible
message = "Obstacle plan logic reached else block that should not be possible"
rospy.logerr(message)
raise RuntimeError(message)
def _process_obstacle_turn(self):
steering_angle = calc_steering_angle(self._current_heading, self._heading_goal)
rospy.logdebug("Steering angle: {} radians".format(round(steering_angle, 2)))
if abs(steering_angle) > self._turn_radians_tolerance:
# We still need to turn some more
angular_v = self._pcontroller.angular_velocity(
self._current_heading, self._heading_goal)
self._send_turn_cmd(angular_v)
else:
# We are done turning, back to obstacle planning
self._mode = MODE_OBSTACLE_PLAN
rospy.logdebug(
"Turn is complete (delta {} < turn radians tolerance {})".format(
steering_angle, self._turn_radians_tolerance))
class PVelocityController:
def __init__(self, min_linear_v, max_linear_v,
min_angular_v, max_angular_v, linear_k=1, angular_k=1):
self.min_linear_v = min_linear_v
self.max_linear_v = max_linear_v
self.max_angular_v = max_angular_v
self.min_angular_v = min_angular_v
self.linear_k = linear_k
self.angular_k = angular_k
def linear_velocity(self, distance_m=1):
"""Calculat the linear velocity using a Proportional (P) method,
clamped to within the min and max linear speeds.
Parameters:
:param float distance_m: Distance to drive
Returns:
The linear velocity in m/sec
:rtype: float
"""
linear_v = self.linear_k * distance_m
_sign = sign(linear_v)
        return clip(abs(linear_v), self.min_linear_v, self.max_linear_v) * _sign
def angular_velocity(self, current_angle, target_angle):
"""Calculate the angular velocity using a Proportional (P) method,
clamped to within the min and max angular speeds.
Parameters:
:param float current_angle: The current heading of the robot in radians
:param float target_angle: The goal heading of the robot in radians
Returns:
The angular velocity in radians/sec
:rtype: float
"""
angular_v = self.angular_k * calc_steering_angle(current_angle, target_angle)
_sign = sign(angular_v)
return clip(abs(angular_v), self.min_angular_v, self.max_angular_v) * _sign
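# Illustrative sketch, not part of the original node: how PilotNode's values map
# through the controller. The gains and limits below are arbitrary, not the b2
# configuration, and the angular example assumes calc_steering_angle returns the
# signed shortest rotation (about +pi/2 here).
def _example_velocity_controller():
    pc = PVelocityController(min_linear_v=0.1, max_linear_v=0.5,
                             min_angular_v=0.2, max_angular_v=1.5,
                             linear_k=1.0, angular_k=2.0)
    v = pc.linear_velocity(distance_m=2.0)       # clamped to max_linear_v -> 0.5 m/s
    w = pc.angular_velocity(0.0, math.pi / 2.0)  # clamped to max_angular_v -> 1.5 rad/s
    return v, w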
|
[
"rospy.logwarn",
"b2_logic.odometry_helpers.heading_from_odometry",
"rospy.logerr",
"nav_msgs.msg.Odometry",
"b2_logic.odometry_helpers.calc_steering_angle",
"b2_logic.odometry_helpers.normalize_theta",
"threading.RLock",
"geometry_msgs.msg.Twist",
"rospy.Rate",
"numpy.clip",
"rospy.is_shutdown",
"rospy.logdebug",
"numpy.sign",
"math.degrees"
] |
[((994, 1004), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (1002, 1004), False, 'from nav_msgs.msg import Odometry\n'), ((1032, 1049), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1047, 1049), False, 'import threading\n'), ((1480, 1504), 'rospy.Rate', 'rospy.Rate', (['self._loophz'], {}), '(self._loophz)\n', (1490, 1504), False, 'import rospy\n'), ((2916, 2923), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (2921, 2923), False, 'from geometry_msgs.msg import Twist\n'), ((3210, 3217), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (3215, 3217), False, 'from geometry_msgs.msg import Twist\n'), ((8316, 8378), 'b2_logic.odometry_helpers.calc_steering_angle', 'calc_steering_angle', (['self._current_heading', 'self._heading_goal'], {}), '(self._current_heading, self._heading_goal)\n', (8335, 8378), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((9831, 9845), 'numpy.sign', 'sign', (['linear_v'], {}), '(linear_v)\n', (9835, 9845), False, 'from numpy import sign, clip\n'), ((10506, 10521), 'numpy.sign', 'sign', (['angular_v'], {}), '(angular_v)\n', (10510, 10521), False, 'from numpy import sign, clip\n'), ((9861, 9913), 'numpy.clip', 'clip', (['linear_v', 'self.min_linear_v', 'self.max_linear_v'], {}), '(linear_v, self.min_linear_v, self.max_linear_v)\n', (9865, 9913), False, 'from numpy import sign, clip\n'), ((10441, 10489), 'b2_logic.odometry_helpers.calc_steering_angle', 'calc_steering_angle', (['current_angle', 'target_angle'], {}), '(current_angle, target_angle)\n', (10460, 10489), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((1540, 1559), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1557, 1559), False, 'import rospy\n'), ((2118, 2178), 'rospy.logwarn', 'rospy.logwarn', (['"""ROSInterruptException received in main loop"""'], {}), "('ROSInterruptException received in main loop')\n", (2131, 2178), False, 'import rospy\n'), ((3903, 3959), 'rospy.logdebug', 'rospy.logdebug', (['"""Obstacle detected while moving forward"""'], {}), "('Obstacle detected while moving forward')\n", (3917, 3959), False, 'import rospy\n'), ((4169, 4232), 'rospy.logdebug', 'rospy.logdebug', (['"""Forward is clear, proceeding to drive forward"""'], {}), "('Forward is clear, proceeding to drive forward')\n", (4183, 4232), False, 'import rospy\n'), ((4413, 4453), 'rospy.logdebug', 'rospy.logdebug', (['"""Planning next movement"""'], {}), "('Planning next movement')\n", (4427, 4453), False, 'import rospy\n'), ((4619, 4649), 'rospy.logdebug', 'rospy.logdebug', (['"""Turning base"""'], {}), "('Turning base')\n", (4633, 4649), False, 'import rospy\n'), ((5867, 5913), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) Forward is blocked"""'], {}), "('(Planner) Forward is blocked')\n", (5881, 5913), False, 'import rospy\n'), ((5952, 6011), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['(self._current_heading - self._turn_radians)'], {}), '(self._current_heading - self._turn_radians)\n', (5967, 6011), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((1688, 1721), 'b2_logic.odometry_helpers.heading_from_odometry', 'heading_from_odometry', (['self._odom'], {}), '(self._odom)\n', (1709, 1721), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((6639, 6688), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) 
Right side is blocked"""'], {}), "('(Planner) Right side is blocked')\n", (6653, 6688), False, 'import rospy\n'), ((6726, 6789), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['(self._current_heading + self._turn_radians * 2)'], {}), '(self._current_heading + self._turn_radians * 2)\n', (6741, 6789), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((6175, 6207), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (6187, 6207), False, 'import math\n'), ((7276, 7319), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) left is blocked"""'], {}), "('(Planner) left is blocked')\n", (7290, 7319), False, 'import rospy\n'), ((7357, 7416), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['(self._current_heading + self._turn_radians)'], {}), '(self._current_heading + self._turn_radians)\n', (7372, 7416), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n'), ((7927, 7992), 'rospy.logdebug', 'rospy.logdebug', (['"""(Planner) Turn to rear complete, moving forward"""'], {}), "('(Planner) Turn to rear complete, moving forward')\n", (7941, 7992), False, 'import rospy\n'), ((8190, 8211), 'rospy.logerr', 'rospy.logerr', (['message'], {}), '(message)\n', (8202, 8211), False, 'import rospy\n'), ((6930, 6962), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (6942, 6962), False, 'import math\n'), ((1957, 1989), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (1969, 1989), False, 'import math\n'), ((7559, 7591), 'math.degrees', 'math.degrees', (['self._heading_goal'], {}), '(self._heading_goal)\n', (7571, 7591), False, 'import math\n'), ((1878, 1916), 'b2_logic.odometry_helpers.normalize_theta', 'normalize_theta', (['self._current_heading'], {}), '(self._current_heading)\n', (1893, 1916), False, 'from b2_logic.odometry_helpers import heading_from_odometry, normalize_theta, calc_steering_angle\n')]
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
import numpy as np
def isSampleFree(sample, obs, dimW):
for o in range(0, obs.shape[0] // (2 * dimW)):
isFree = 0
for d in range(0, sample.shape[0]):
if (sample[d] < obs[2 * dimW * o + d] or sample[d] > obs[2 * dimW * o + d + dimW]):
isFree = 1
break
if isFree == 0:
return 0
return 1
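# Illustrative sketch, not part of the original script: isSampleFree expects the
# obstacles as a flat array of axis-aligned boxes, 2*dimW floats per box (min
# corner followed by max corner); only the first len(sample) dimensions are
# checked. The single box below is arbitrary.
def _example_isSampleFree():
    dimW = 3
    obs = np.array([0.2, 0.2, -0.5, 0.4, 0.4, 1.5])  # one box with x, y in [0.2, 0.4]
    assert isSampleFree(np.array([0.3, 0.3]), obs, dimW) == 0  # inside the box -> occupied
    assert isSampleFree(np.array([0.6, 0.6]), obs, dimW) == 1  # outside every box -> free
    return True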
def gap2obs(condition):
dw = 0.1
dimW = 3
gap1 = condition[0:3]
gap2 = condition[3:6]
gap3 = condition[6:9]
obs1 = [0, gap1[1] - dw, -0.5, gap1[0], gap1[1], 1.5]
obs2 = [gap2[0] - dw, 0, -0.5, gap2[0], gap2[1], 1.5]
obs3 = [gap2[0] - dw, gap2[1] + dw, -0.5, gap2[0], 1, 1.5]
obs4 = [gap1[0] + dw, gap1[1] - dw, -0.5, gap3[0], gap1[1], 1.5]
obs5 = [gap3[0] + dw, gap1[1] - dw, -0.5, 1, gap1[1], 1.5]
obsBounds = [-0.1, -0.1, -0.5, 0, 1.1, 1.5,
-0.1, -0.1, -0.5, 1.1, 0, 1.5,
-0.1, 1, -0.5, 1.1, 1.1, 1.5,
1, -0.1, -0.5, 1.1, 1.1, 1.5, ]
obs = np.concatenate((obs1, obs2, obs3, obs4, obs5, obsBounds), axis=0)
return obs, dimW
def getOccGrid(gridSize):
gridPointsRange = np.linspace(0, 1, num=gridSize)
occGridSamples = np.zeros([gridSize * gridSize, 2])
idx = 0
for i in gridPointsRange:
for j in gridPointsRange:
occGridSamples[idx, 0] = i
occGridSamples[idx, 1] = j
idx += 1
return occGridSamples
def gap2occ(conditions, gridSize):
obs, dimW = gap2obs(conditions)
occGridSamples = getOccGrid(gridSize)
occGrid = np.zeros(gridSize * gridSize)
for i in range(0, gridSize * gridSize):
occGrid[i] = isSampleFree(occGridSamples[i, :], obs, dimW)
return occGrid
def plotCondition(condition):
fig1 = plt.figure(figsize=(10, 6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
obs, dimW = gap2obs(condition)
for i in range(0, obs.shape[0] // (2 * dimW)): # plot obstacle patches
ax1.add_patch(
patches.Rectangle(
(obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), # (x,y)
obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW], # width
obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1], # height
alpha=0.6
))
gridSize = 11
occGrid = gap2occ(condition, gridSize)
occGridSamples = getOccGrid(gridSize)
for i in range(0, gridSize * gridSize): # plot occupancy grid
if occGrid[i] == 0:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="red", s=70, alpha=0.8)
else:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="green", s=70, alpha=0.8)
init = condition[9:15]
goal = condition[15:21]
plt.scatter(init[0], init[1], color="red", s=250, edgecolors='black') # init
plt.scatter(goal[0], goal[1], color="blue", s=250, edgecolors='black') # goal
plt.show()
def plotSample(s, condition):
fig1 = plt.figure(figsize=(10, 6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
plt.scatter(s[:, 0], s[:, 1], color="green", s=70, alpha=0.1)
obs, dimW = gap2obs(condition)
for i in range(0, obs.shape[0] // (2 * dimW)): # plot obstacle patches
ax1.add_patch(
patches.Rectangle(
(obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), # (x,y)
obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW], # width
obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1], # height
alpha=0.6
))
gridSize = 11
occGrid = gap2occ(condition, gridSize)
occGridSamples = getOccGrid(gridSize)
for i in range(0, gridSize * gridSize): # plot occupancy grid
if occGrid[i] == 0:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="red", s=70, alpha=0.8)
else:
plt.scatter(occGridSamples[i, 0], occGridSamples[i, 1], color="green", s=70, alpha=0.8)
init = condition[9:15]
goal = condition[15:21]
plt.scatter(init[0], init[1], color="red", s=250, edgecolors='black') # init
plt.scatter(goal[0], goal[1], color="blue", s=250, edgecolors='black') # goal
plt.show()
def plotSpeed(s, c):
plt.figure(figsize=(10, 6), dpi=80)
viz1 = 1
viz2 = 4
dim = 6
plt.scatter(s[:, viz1], s[:, viz2], color="green", s=70, alpha=0.1)
plt.scatter(c[viz1 + 9], c[viz2 + 9], color="red", s=250, edgecolors='black') # init
plt.scatter(c[viz1 + 9 + dim], c[viz2 + 9 + dim], color="blue", s=500, edgecolors='black') # goal
plt.show()
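# Illustrative sketch, not part of the original script: a condition vector packs
# three gaps (9 values) followed by a 6-dim init state and a 6-dim goal state;
# gap2occ only reads the first 9 entries and plotCondition only reads the x, y
# components of init/goal. All numbers below are arbitrary placeholders.
if __name__ == "__main__":
    condition = np.array([0.3, 0.5, 0.0,                  # gap1
                          0.6, 0.3, 0.0,                  # gap2
                          0.7, 0.5, 0.0,                  # gap3
                          0.1, 0.1, 0.0, 0.0, 0.0, 0.0,   # init
                          0.9, 0.9, 0.0, 0.0, 0.0, 0.0])  # goal
    occ = gap2occ(condition, 11)
    print("free grid cells:", int(occ.sum()), "of", occ.size)
    # plotCondition(condition) would render the obstacles, occupancy grid and
    # init/goal markers (requires a display).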
|
[
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.concatenate"
] |
[((1127, 1192), 'numpy.concatenate', 'np.concatenate', (['(obs1, obs2, obs3, obs4, obs5, obsBounds)'], {'axis': '(0)'}), '((obs1, obs2, obs3, obs4, obs5, obsBounds), axis=0)\n', (1141, 1192), True, 'import numpy as np\n'), ((1264, 1295), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'gridSize'}), '(0, 1, num=gridSize)\n', (1275, 1295), True, 'import numpy as np\n'), ((1317, 1351), 'numpy.zeros', 'np.zeros', (['[gridSize * gridSize, 2]'], {}), '([gridSize * gridSize, 2])\n', (1325, 1351), True, 'import numpy as np\n'), ((1683, 1712), 'numpy.zeros', 'np.zeros', (['(gridSize * gridSize)'], {}), '(gridSize * gridSize)\n', (1691, 1712), True, 'import numpy as np\n'), ((1886, 1921), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)', 'dpi': '(80)'}), '(figsize=(10, 6), dpi=80)\n', (1896, 1921), True, 'import matplotlib.pyplot as plt\n'), ((2866, 2935), 'matplotlib.pyplot.scatter', 'plt.scatter', (['init[0]', 'init[1]'], {'color': '"""red"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(init[0], init[1], color='red', s=250, edgecolors='black')\n", (2877, 2935), True, 'import matplotlib.pyplot as plt\n'), ((2948, 3018), 'matplotlib.pyplot.scatter', 'plt.scatter', (['goal[0]', 'goal[1]'], {'color': '"""blue"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(goal[0], goal[1], color='blue', s=250, edgecolors='black')\n", (2959, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3041), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3039, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)', 'dpi': '(80)'}), '(figsize=(10, 6), dpi=80)\n', (3095, 3120), True, 'import matplotlib.pyplot as plt\n'), ((3174, 3235), 'matplotlib.pyplot.scatter', 'plt.scatter', (['s[:, 0]', 's[:, 1]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.1)'}), "(s[:, 0], s[:, 1], color='green', s=70, alpha=0.1)\n", (3185, 3235), True, 'import matplotlib.pyplot as plt\n'), ((4133, 4202), 'matplotlib.pyplot.scatter', 'plt.scatter', (['init[0]', 'init[1]'], {'color': '"""red"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(init[0], init[1], color='red', s=250, edgecolors='black')\n", (4144, 4202), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4285), 'matplotlib.pyplot.scatter', 'plt.scatter', (['goal[0]', 'goal[1]'], {'color': '"""blue"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(goal[0], goal[1], color='blue', s=250, edgecolors='black')\n", (4226, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4298, 4308), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4306, 4308), True, 'import matplotlib.pyplot as plt\n'), ((4336, 4371), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)', 'dpi': '(80)'}), '(figsize=(10, 6), dpi=80)\n', (4346, 4371), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4481), 'matplotlib.pyplot.scatter', 'plt.scatter', (['s[:, viz1]', 's[:, viz2]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.1)'}), "(s[:, viz1], s[:, viz2], color='green', s=70, alpha=0.1)\n", (4425, 4481), True, 'import matplotlib.pyplot as plt\n'), ((4486, 4563), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c[viz1 + 9]', 'c[viz2 + 9]'], {'color': '"""red"""', 's': '(250)', 'edgecolors': '"""black"""'}), "(c[viz1 + 9], c[viz2 + 9], color='red', s=250, edgecolors='black')\n", (4497, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4576, 4670), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c[viz1 + 9 + dim]', 'c[viz2 + 9 + dim]'], {'color': 
'"""blue"""', 's': '(500)', 'edgecolors': '"""black"""'}), "(c[viz1 + 9 + dim], c[viz2 + 9 + dim], color='blue', s=500,\n edgecolors='black')\n", (4587, 4670), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4689), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4687, 4689), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2296), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(obs[i * 2 * dimW], obs[i * 2 * dimW + 1])', '(obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW])', '(obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1])'], {'alpha': '(0.6)'}), '((obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), obs[i * 2 *\n dimW + dimW] - obs[i * 2 * dimW], obs[i * 2 * dimW + dimW + 1] - obs[i *\n 2 * dimW + 1], alpha=0.6)\n', (2133, 2296), True, 'import matplotlib.patches as patches\n'), ((2607, 2696), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""red"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='red', s=70,\n alpha=0.8)\n", (2618, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2719, 2810), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='green', s=70,\n alpha=0.8)\n", (2730, 2810), True, 'import matplotlib.pyplot as plt\n'), ((3383, 3563), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(obs[i * 2 * dimW], obs[i * 2 * dimW + 1])', '(obs[i * 2 * dimW + dimW] - obs[i * 2 * dimW])', '(obs[i * 2 * dimW + dimW + 1] - obs[i * 2 * dimW + 1])'], {'alpha': '(0.6)'}), '((obs[i * 2 * dimW], obs[i * 2 * dimW + 1]), obs[i * 2 *\n dimW + dimW] - obs[i * 2 * dimW], obs[i * 2 * dimW + dimW + 1] - obs[i *\n 2 * dimW + 1], alpha=0.6)\n', (3400, 3563), True, 'import matplotlib.patches as patches\n'), ((3873, 3962), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""red"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='red', s=70,\n alpha=0.8)\n", (3884, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3985, 4076), 'matplotlib.pyplot.scatter', 'plt.scatter', (['occGridSamples[i, 0]', 'occGridSamples[i, 1]'], {'color': '"""green"""', 's': '(70)', 'alpha': '(0.8)'}), "(occGridSamples[i, 0], occGridSamples[i, 1], color='green', s=70,\n alpha=0.8)\n", (3996, 4076), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import pandas as pd
import os
from psrqpy import QueryATNF
from utmost_psr import utils, plot
def UTMOST_NS_module_params():
"""
System parameters for a single UTMOST-2D North-South module.
output:
-------
UTMOST_NS_module: dict
Dictionary containing module parameters (Gain [K/Jy], Bandwidth [MHz],
Freq [MHz], T_sys [K], N_pol, Latitude [deg])
"""
UTMOST_NS_module = {
"Gain": 0.0028,
"Bandwidth": 45.0,
"Freq": "843 MHz",
"T_sys": 70.0,
"N_pol": 2.0,
"Latitude": -35.3707088333
}
return UTMOST_NS_module
def radiometer_signal_to_noise(obs_params, flux_density, period, width,
psr_Tsky, t_int=300.0):
"""
Predicted signal to noise ratio from the radiometer equation: see Equation
A1.21 in Kramer & Lorimer (2004).
input:
------
obs_params: dict
Dictionary containing observatory parameters (Gain [K/Jy],
Bandwidth [MHz], Freq [MHz], T_sys [K], N_pol)
flux_density: list, floats
Pulsar flux_density [Jy]
period: list, floats
Pulsar period [s]
width: list, floats
Pulsar width -- W50 [s]
psr_Tsky: list, floats
Sky temperature at pulsar positions (K)
t_int: float, optional
Observation length in seconds (default = 300 seconds)
output:
-------
snr: float
Radiometer signal to noise ratio
"""
# System Equivalent Flux Density: Gain / T_sys
sefd = obs_params["Gain"] / (obs_params["T_sys"] + psr_Tsky)
# Pulsar duty cycle
duty_cycle = np.sqrt((period - width)/width)
# Signal to noise ratio
snr = flux_density * sefd * np.sqrt(obs_params["N_pol"] *
t_int * obs_params["Bandwidth"]*1e6) * duty_cycle
return snr
def Zenith_angle_correction(psr_DECJ, Latitude):
"""
Corrects the detected pulsar S/N based on the pulsar distance from zenith.
input:
------
psr_DECJ: float
Declination of the pulsar in fractional degrees.
Latitude: float
Latitude of the telescope in fractional degrees.
output:
------
zenith_corrected_snr: float
S/N correction for distance from zenith.
"""
zenith_corrected_snr = np.cos((psr_DECJ - Latitude) * np.pi/180.)
return zenith_corrected_snr
def ddmmss_to_deg(position):
"""
Converts positions in deg:min:sec format to fractional degrees.
input:
------
position: str
Position in deg:min:sec format.
output:
-------
position_deg: float
Position in fractional degrees.
"""
split_position = position.split(":")
# Check if positive or negative:
if float(split_position[0]) <= 0:
if len(split_position) == 3:
position_deg = float(split_position[0]) - (
float(split_position[1])/60. + float(split_position[2])/3600.)
else:
position_deg = float(split_position[0]) - (
float(split_position[1])/60.)
else:
if len(split_position) == 3:
position_deg = float(split_position[0]) + (
float(split_position[1])/60. + float(split_position[2])/3600.)
else:
position_deg = float(split_position[0]) + (
float(split_position[1])/60.)
return position_deg
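# Illustrative sketch, not part of the original module: ddmmss_to_deg converts a
# deg:min:sec string to fractional degrees, e.g. "-35:22:14.5" -> about -35.3707,
# which is close to the UTMOST_NS_module latitude defined above.
def _example_ddmmss_to_deg():
    lat = ddmmss_to_deg("-35:22:14.5")
    assert abs(lat - (-35.3707)) < 1e-3
    return lat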
def arrival_time_uncertainty(obs_params, flux_density, period, width, psr_DECJ,
n_cassette, t_int=300.):
"""
    Predicted pulse time of arrival (ToA) uncertainty: see Equation 8.2 in
Kramer & Lorimer (2004).
input:
------
obs_params: dict
Dictionary containing observatory parameters (Gain [K/Jy],
Bandwidth [MHz], Freq [MHz], T_sys [K], N_pol)
flux_density: list, floats
Pulsar flux_density [Jy]
period: list, floats
Pulsar period [s]
width: list, floats
Pulsar width -- W50 [s]
psr_DECJ: list, floats
Pulsar declination [deg]
n_cassette: scalar, optional
Number of UTMOST-NS cassettes (default = 1)
t_int: float, optional
Observation length in seconds (default = 300 seconds)
output:
-------
sigma_toa: list, floats
Estimated ToA uncertainty (us)
"""
# System Equivalent Flux Density: Gain / T_sys
sefd = obs_params["Gain"] / obs_params["T_sys"] * n_cassette
# Pulsar duty cycle
duty_cycle = np.sqrt((period - width)/width)
snr_corr = Zenith_angle_correction(psr_DECJ, obs_params["Latitude"])
sigma_toa = (width/flux_density) * (1/(sefd)*snr_corr) * (1/np.sqrt(
obs_params["N_pol"] * t_int * obs_params["Bandwidth"]*1e6)) * (
1/duty_cycle)
return sigma_toa
def get_extrapolated_flux(flux_ref, freq_ref, spectral_index):
"""
Computes the flux density at 843 MHz extrapolated from a higher/lower flux
density measurement & some assumed spectral index.
input:
------
flux_ref: float
Reference flux density, usually S400 or S1400 [mJy].
freq_ref: float
        Reference frequency, usually 400 or 1400 MHz.
output:
-------
S843: float
Extrapolated flux density at 843 MHz [mJy]
"""
S843 = flux_ref * (843.0 / freq_ref)**(spectral_index)
return S843
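# Illustrative sketch, not part of the original module: a pulsar with
# S1400 = 10 mJy and an assumed spectral index of -1.6 extrapolates to
# S843 = 10 * (843/1400)**(-1.6) ~ 22.5 mJy, i.e. steep-spectrum pulsars are
# brighter at 843 MHz. The numbers are arbitrary.
def _example_extrapolated_flux():
    s843 = get_extrapolated_flux(flux_ref=10.0, freq_ref=1400.0, spectral_index=-1.6)
    assert 22.0 < s843 < 23.0
    return s843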
|
[
"numpy.cos",
"numpy.sqrt"
] |
[((1611, 1644), 'numpy.sqrt', 'np.sqrt', (['((period - width) / width)'], {}), '((period - width) / width)\n', (1618, 1644), True, 'import numpy as np\n'), ((2264, 2309), 'numpy.cos', 'np.cos', (['((psr_DECJ - Latitude) * np.pi / 180.0)'], {}), '((psr_DECJ - Latitude) * np.pi / 180.0)\n', (2270, 2309), True, 'import numpy as np\n'), ((4411, 4444), 'numpy.sqrt', 'np.sqrt', (['((period - width) / width)'], {}), '((period - width) / width)\n', (4418, 4444), True, 'import numpy as np\n'), ((1704, 1778), 'numpy.sqrt', 'np.sqrt', (["(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)"], {}), "(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)\n", (1711, 1778), True, 'import numpy as np\n'), ((4582, 4656), 'numpy.sqrt', 'np.sqrt', (["(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)"], {}), "(obs_params['N_pol'] * t_int * obs_params['Bandwidth'] * 1000000.0)\n", (4589, 4656), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from pathlib import Path
import random
import numpy as np
import cv2
from cv2 import cv2 as cv
from keras.utils import Sequence
import os
def readTxt(txtpath):
filelist = []
with open(txtpath, 'r') as f:
for line in f.readlines():
filelist.append(line.strip())
return filelist
class NoisyImageGenerator(Sequence):
def __init__(self, source_image_txt, target_image_txt, batch_size, image_size):
#image_suffixes = (".jpeg", ".jpg", ".png", ".bmp")
# self.source_image_paths = [p for p in Path(source_image_dir).glob("**/*") if p.suffix.lower() in image_suffixes]
# self.target_image_paths = [p for p in Path(target_image_dir).glob("**/*") if p.suffix.lower() in image_suffixes]
self.source_image_paths = readTxt(source_image_txt)
self.target_image_paths = readTxt(target_image_txt)
self.target_image_txt = target_image_txt
#self.source_noise_model = source_noise_model
#self.target_noise_model = target_noise_model
self.source_image_num = len(self.source_image_paths)
self.target_image_num = len(self.target_image_paths)
self.batch_size = batch_size
self.image_size = image_size
#self.target_image_dir = target_image_dir
if self.source_image_num == 0:
raise ValueError("source image dir does not include any image")
if self.target_image_num == 0:
raise ValueError("target image dir does not include any image")
def __len__(self):
return self.source_image_num // self.batch_size
def __getitem__(self, idx):
batch_size = self.batch_size
image_size = self.image_size
#target_image_dir = self.target_image_dir
x = np.zeros((batch_size, image_size, image_size, 3), dtype=np.uint8)
y = np.zeros((batch_size, image_size, image_size, 3), dtype=np.uint8)
sample_id = 0
########
while True:
source_image_path = random.choice(self.source_image_paths)
#print(source_image_path)
name = os.path.basename(source_image_path)
source_image_path = source_image_path + "/" +name + ".jpg"
#print("source_image_path: ",source_image_path)
            #label_gt = os.path.basename(os.path.dirname(source_image_path))  # basename: returns the file name; dirname: strips the file name and returns the directory
#re_item = '.*/' + label_gt + '/.*'
#target_image_list = os.popen("grep %s %s | shuf -n 1" %(name, self.target_image_txt)).readlines()
target_image_list = os.popen("grep --word-regexp %s %s | shuf -n 1" %(name, self.target_image_txt)).readlines()
if len(target_image_list)== 0:
continue
target_image_path = target_image_list[0].strip()
#print("target_image_path: ",target_image_path)
#print(" ")
if ":" in target_image_path:
target_image_path = target_image_path.split(":")[-1]
# print('target_image_list',target_image_list)
if not os.path.exists(target_image_path) or not os.path.exists(source_image_path):
print(source_image_path, target_image_list)
print("Image NOT exists!")
continue
source_image = cv2.imread(source_image_path)
target_image = cv2.imread(target_image_path)
source_patch = cv2.resize(source_image,(image_size,image_size))
target_patch = cv2.resize(target_image,(image_size,image_size))
h, w, _ = source_image.shape
if h >= image_size and w >= image_size:
#h, w, _ = source_image.shape
i = np.random.randint(h - image_size + 1)
j = np.random.randint(w - image_size + 1)
source_patch = source_image[i:i + image_size, j:j + image_size]
target_patch = target_image[i:i + image_size, j:j + image_size]
h1, w1, _ = source_patch.shape
h2, w2, _ = target_patch.shape
#if(h1 != h2 | w1 != w2):
#print(source_image_path)
#print("h1,w1",h1," ",w1)
#print(target_image_path)
#print("h2,w2",h2," ",w2)
#cv2.imshow("source_patch", source_patch)
#cv2.imshow("target_patch", target_patch)
#cv2.waitKey()
x[sample_id] = source_patch
y[sample_id] = target_patch
sample_id += 1
if sample_id == batch_size:
return x, y
class ValGenerator(Sequence):
def __init__(self, source_image_txt, target_image_txt, image_size):
self.test_source_image_paths = readTxt(source_image_txt)
self.test_target_image_paths = readTxt(target_image_txt)
self.target_image_txt = target_image_txt
self.test_source_image_num = len(self.test_source_image_paths)
self.test_target_image_num = len(self.test_target_image_paths)
self.image_size = image_size
#self.test_target_dir = test_target_dir
self.data = []
if self.test_source_image_num == 0:
raise ValueError("test source image dir does not include any image")
if self.test_target_image_num == 0:
raise ValueError("test_target image dir does not include any image")
######
for test_source_image_path in self.test_source_image_paths:
name = os.path.basename(test_source_image_path)
test_source_image_path = test_source_image_path + "/" +name + ".jpg"
            #label_gt = os.path.basename(os.path.dirname(source_image_path))  # basename: returns the file name; dirname: strips the file name and returns the directory
#re_item = '.*/' + label_gt + '/.*'
target_image_list = os.popen("grep --word-regexp %s %s | shuf -n 1" %(name, self.target_image_txt)).readlines()
#filename = os.path.basename(test_source_image_path)
#label_gt = os.path.basename(os.path.dirname(test_source_image_path))
#re_item = '.*/' + label_gt + '/.*'
#target_image_list = os.popen("grep %s %s | shuf -n 1" %(re_item, self.target_image_txt)).readlines()
# print('1target_image_list',target_image_list)
if len(target_image_list) ==0:
continue
test_target_image_path = target_image_list[0].strip()
if ":" in test_target_image_path:
test_target_image_path = test_target_image_path.split(":")[-1]
#test_target_image_path = self.test_target_dir+'/'+real_fname
if not os.path.exists(test_target_image_path):
continue
x_source = cv2.imread(test_source_image_path)
y_target = cv2.imread(test_target_image_path)
h, w, _ = x_source.shape
if h >= image_size and w >= image_size:
i = np.random.randint(h - image_size + 1)
j = np.random.randint(w - image_size + 1)
x_source = x_source[i:i + image_size, j:j + image_size]
y_target = y_target[i:i + image_size, j:j + image_size]
#cv2.imshow("source_patch", x_source)
#cv2.imshow("target_patch", y_target)
#cv2.waitKey()
#x = cv2.resize(x_source,(self.image_size,self.image_size))
#print('test_target_image_path',test_target_image_path)
#print('y_target:',y_target.shape)
#y = cv2.resize(y_target,(self.image_size,self.image_size))
self.data.append([np.expand_dims(x_source, axis=0), np.expand_dims(y_target, axis=0)])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
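# Illustrative usage sketch, not part of the original script. The txt files are
# expected to list, one per line, directories of the form <dir>/<name> that
# contain <name>.jpg for the noisy source, and full image paths for the clean
# targets (paired by grepping for the same <name>, so a Unix-like shell is
# assumed). The file names below are placeholders; this only runs if such lists
# exist on disk.
if __name__ == "__main__":
    train_source_txt = "train_source_dirs.txt"   # placeholder path
    train_target_txt = "train_target_paths.txt"  # placeholder path
    generator = NoisyImageGenerator(train_source_txt, train_target_txt,
                                    batch_size=8, image_size=128)
    x_batch, y_batch = generator[0]
    print(x_batch.shape, y_batch.shape)  # (8, 128, 128, 3) uint8 arrays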
|
[
"os.path.basename",
"os.popen",
"numpy.zeros",
"random.choice",
"os.path.exists",
"numpy.expand_dims",
"cv2.imread",
"numpy.random.randint",
"cv2.resize"
] |
[((1788, 1853), 'numpy.zeros', 'np.zeros', (['(batch_size, image_size, image_size, 3)'], {'dtype': 'np.uint8'}), '((batch_size, image_size, image_size, 3), dtype=np.uint8)\n', (1796, 1853), True, 'import numpy as np\n'), ((1866, 1931), 'numpy.zeros', 'np.zeros', (['(batch_size, image_size, image_size, 3)'], {'dtype': 'np.uint8'}), '((batch_size, image_size, image_size, 3), dtype=np.uint8)\n', (1874, 1931), True, 'import numpy as np\n'), ((2024, 2062), 'random.choice', 'random.choice', (['self.source_image_paths'], {}), '(self.source_image_paths)\n', (2037, 2062), False, 'import random\n'), ((2133, 2168), 'os.path.basename', 'os.path.basename', (['source_image_path'], {}), '(source_image_path)\n', (2149, 2168), False, 'import os\n'), ((3383, 3412), 'cv2.imread', 'cv2.imread', (['source_image_path'], {}), '(source_image_path)\n', (3393, 3412), False, 'import cv2\n'), ((3440, 3469), 'cv2.imread', 'cv2.imread', (['target_image_path'], {}), '(target_image_path)\n', (3450, 3469), False, 'import cv2\n'), ((3498, 3548), 'cv2.resize', 'cv2.resize', (['source_image', '(image_size, image_size)'], {}), '(source_image, (image_size, image_size))\n', (3508, 3548), False, 'import cv2\n'), ((3574, 3624), 'cv2.resize', 'cv2.resize', (['target_image', '(image_size, image_size)'], {}), '(target_image, (image_size, image_size))\n', (3584, 3624), False, 'import cv2\n'), ((5644, 5684), 'os.path.basename', 'os.path.basename', (['test_source_image_path'], {}), '(test_source_image_path)\n', (5660, 5684), False, 'import os\n'), ((6877, 6911), 'cv2.imread', 'cv2.imread', (['test_source_image_path'], {}), '(test_source_image_path)\n', (6887, 6911), False, 'import cv2\n'), ((6948, 6982), 'cv2.imread', 'cv2.imread', (['test_target_image_path'], {}), '(test_target_image_path)\n', (6958, 6982), False, 'import cv2\n'), ((3795, 3832), 'numpy.random.randint', 'np.random.randint', (['(h - image_size + 1)'], {}), '(h - image_size + 1)\n', (3812, 3832), True, 'import numpy as np\n'), ((3853, 3890), 'numpy.random.randint', 'np.random.randint', (['(w - image_size + 1)'], {}), '(w - image_size + 1)\n', (3870, 3890), True, 'import numpy as np\n'), ((6789, 6827), 'os.path.exists', 'os.path.exists', (['test_target_image_path'], {}), '(test_target_image_path)\n', (6803, 6827), False, 'import os\n'), ((7118, 7155), 'numpy.random.randint', 'np.random.randint', (['(h - image_size + 1)'], {}), '(h - image_size + 1)\n', (7135, 7155), True, 'import numpy as np\n'), ((7176, 7213), 'numpy.random.randint', 'np.random.randint', (['(w - image_size + 1)'], {}), '(w - image_size + 1)\n', (7193, 7213), True, 'import numpy as np\n'), ((2643, 2728), 'os.popen', 'os.popen', (["('grep --word-regexp %s %s | shuf -n 1' % (name, self.target_image_txt))"], {}), "('grep --word-regexp %s %s | shuf -n 1' % (name, self.target_image_txt)\n )\n", (2651, 2728), False, 'import os\n'), ((3151, 3184), 'os.path.exists', 'os.path.exists', (['target_image_path'], {}), '(target_image_path)\n', (3165, 3184), False, 'import os\n'), ((3192, 3225), 'os.path.exists', 'os.path.exists', (['source_image_path'], {}), '(source_image_path)\n', (3206, 3225), False, 'import os\n'), ((5974, 6059), 'os.popen', 'os.popen', (["('grep --word-regexp %s %s | shuf -n 1' % (name, self.target_image_txt))"], {}), "('grep --word-regexp %s %s | shuf -n 1' % (name, self.target_image_txt)\n )\n", (5982, 6059), False, 'import os\n'), ((7834, 7866), 'numpy.expand_dims', 'np.expand_dims', (['x_source'], {'axis': '(0)'}), '(x_source, axis=0)\n', (7848, 7866), True, 'import numpy as np\n'), ((7868, 
7900), 'numpy.expand_dims', 'np.expand_dims', (['y_target'], {'axis': '(0)'}), '(y_target, axis=0)\n', (7882, 7900), True, 'import numpy as np\n')]
|
import sys
import os
import math
import cv2
import numpy as np
import pandas as pd
from skimage import io
from PIL import Image
from sklearn.model_selection import train_test_split
from skimage.color import gray2rgb
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
sys.path.append('../')
from config.cfg import cfg
class RafFaceDataset(Dataset):
"""
RAF-Face dataset for Face Expression Recognition
"""
def __init__(self, train=True, type='basic', transform=None):
manual_annotation_dir = os.path.join(cfg['root'], 'RAF-Face', '%s/Annotation/manual' % type)
emotion_label_txt_path = os.path.join(cfg['root'], 'RAF-Face', "%s/EmoLabel/list_patition_label.txt" % type)
emotion_dict = dict(np.loadtxt(emotion_label_txt_path, dtype=np.str))
if train:
face_files = []
genders = []
races = []
ages = []
emotions = []
ldmks = []
for _ in os.listdir(manual_annotation_dir):
if _.startswith('train_'):
face_fname = _.replace('_manu_attri', '_aligned').replace('.txt', '.jpg')
face_files.append(os.path.join(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname))
with open(os.path.join(manual_annotation_dir, _), mode='rt') as f:
manu_info_list = f.readlines()
genders.append(int(manu_info_list[5]))
races.append(int(manu_info_list[6]))
ages.append(int(manu_info_list[7]))
emotions.append(int(emotion_dict[face_fname.replace('_aligned', '')].strip()) - 1)
ldmks.append(np.array([[[float(_.replace('\n', ''))] for _ in line.split('\t')] for line in
manu_info_list[0:5]]).flatten().tolist())
else:
face_files = []
genders = []
races = []
ages = []
emotions = []
ldmks = []
for _ in os.listdir(manual_annotation_dir):
if _.startswith('test_'):
face_fname = _.replace('_manu_attri', '_aligned').replace('.txt', '.jpg')
face_files.append(os.path.join(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname))
with open(os.path.join(manual_annotation_dir, _), mode='rt') as f:
manu_info_list = f.readlines()
genders.append(int(manu_info_list[5]))
races.append(int(manu_info_list[6]))
ages.append(int(manu_info_list[7]))
emotions.append(int(emotion_dict[face_fname.replace('_aligned', '')].strip()) - 1)
ldmks.append(np.array([[[float(_.replace('\n', ''))] for _ in line.split('\t')] for line in
manu_info_list[0:5]]).flatten().tolist())
self.face_files = face_files
self.genders = genders
self.races = races
self.ages = ages
self.emotions = emotions
self.ldmks = ldmks
self.transform = transform
def __len__(self):
return len(self.face_files)
def __getitem__(self, idx):
image = io.imread(self.face_files[idx])
gender = self.genders[idx]
race = self.races[idx]
age = self.ages[idx]
emotion = self.emotions[idx]
ldmk = self.ldmks[idx]
sample = {'image': image, 'gender': gender, 'race': race, 'age': age, 'emotion': emotion,
'landmark': np.array(ldmk), 'filename': self.face_files[idx]}
if self.transform:
sample['image'] = self.transform(Image.fromarray(sample['image'].astype(np.uint8)))
return sample
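# Illustrative usage sketch (added annotation, not part of the original module; the transform
# and batch size below are assumptions for demonstration only):
#   loader = DataLoader(RafFaceDataset(train=True, type='basic',
#                                       transform=transforms.ToTensor()),
#                       batch_size=32, shuffle=True)
#   for batch in loader:
#       images, emotions = batch['image'], batch['emotion']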
class RafPartDataset(Dataset):
"""
RAF-Face dataset for Local Part
"""
def __init__(self, train=True, type='basic', part_name="Mouth", transform=None):
"""
:param train:
:param type:
:param part_name: Mouth, LeftEye, RightEye, Nose
:param transform:
"""
# manual_annotation_dir = os.path.join(cfg['root'], 'RAF-Face', '%s/Annotation/manual' % type)
emotion_label_txt_path = os.path.join(cfg['root'], 'RAF-Face', "%s/EmoLabel/list_patition_label.txt" % type)
local_part_img_dir = os.path.join(cfg['root'], 'RAF-Face', '{0}/LocalParts/{1}'.format(type, part_name))
emotion_dict = dict(np.loadtxt(emotion_label_txt_path, dtype=np.str))
if train:
local_part_imgs = []
emotions = []
for _ in os.listdir(local_part_img_dir):
if _.startswith('train_'):
local_part_imgs.append(os.path.join(local_part_img_dir, _))
emotions.append(int(emotion_dict[_.replace('_aligned', '')].strip()) - 1)
else:
local_part_imgs = []
emotions = []
for _ in os.listdir(local_part_img_dir):
if _.startswith('test_'):
local_part_imgs.append(os.path.join(local_part_img_dir, _))
emotions.append(int(emotion_dict[_.replace('_aligned', '')].strip()) - 1)
self.local_part_imgs = local_part_imgs
self.emotions = emotions
self.transform = transform
def __len__(self):
return len(self.local_part_imgs)
def __getitem__(self, idx):
image = io.imread(self.local_part_imgs[idx])
emotion = self.emotions[idx]
sample = {'image': image, 'emotion': emotion, 'filename': self.local_part_imgs[idx]}
if self.transform:
trans_image = self.transform(Image.fromarray(sample['image'].astype(np.uint8)))
sample['image'] = trans_image
return sample
class CelebADataset(Dataset):
"""
CelebA dataset
"""
def __init__(self, transform=None):
list_attr_celeba_txt = os.path.join(cfg['root'], 'CelebA', 'Anno', 'list_attr_celeba.txt')
df = pd.read_csv(list_attr_celeba_txt, delim_whitespace=True, header=None)
df.columns = ["File", "5_o_Clock_Shadow", "Arched_Eyebrows", "Attractive", "Bags_Under_Eyes", "Bald", "Bangs",
"Big_Lips", "Big_Nose", "Black_Hair", "Blond_Hair", "Blurry", "Brown_Hair", "Bushy_Eyebrows",
"Chubby",
"Double_Chin", "Eyeglasses", "Goatee", "Gray_Hair", "Heavy_Makeup", "High_Cheekbones", "Male",
"Mouth_Slightly_Open", "Mustache", "Narrow_Eyes", "No_Beard", "Oval_Face", "Pale_Skin",
"Pointy_Nose",
"Receding_Hairline", "Rosy_Cheeks", "Sideburns", "Smiling", "Straight_Hair", "Wavy_Hair",
"Wearing_Earrings", "Wearing_Hat", "Wearing_Lipstick", "Wearing_Necklace", "Wearing_Necktie",
"Young"]
self.file_list = df['File']
self.o_clock_shadow_list = df['5_o_Clock_Shadow']
self.arched_eyebrows_list = df['Arched_Eyebrows']
self.attractive_list = df['Attractive']
self.bags_under_eyes_list = df['Bags_Under_Eyes']
self.bald_list = df['Bald']
self.bangs_list = df['Bangs']
self.big_lips_list = df['Big_Lips']
self.big_nose_list = df['Big_Nose']
self.black_hair_list = df['Black_Hair']
self.blond_hair_list = df['Blond_Hair']
self.blurry_list = df['Blurry']
self.brown_hair_list = df['Brown_Hair']
self.bushy_eyebrows_list = df['Bushy_Eyebrows']
        self.chubby_list = df['Chubby']
        self.double_chin_list = df['Double_Chin']
self.eyeglasses_list = df['Eyeglasses']
self.goatee_list = df['Goatee']
self.gray_hair_list = df['Gray_Hair']
self.heavy_makeup_list = df['Heavy_Makeup']
self.high_cheekbones_list = df['High_Cheekbones']
self.male_list = df['Male']
self.mouth_slightly_open_list = df['Mouth_Slightly_Open']
self.mustache_list = df['Mustache']
self.narrow_eyes_list = df['Narrow_Eyes']
self.no_beard_list = df['No_Beard']
self.oval_face_list = df['Oval_Face']
self.pale_skin_list = df['Pale_Skin']
self.pointy_nose_list = df['Pointy_Nose']
self.receding_hairline_list = df['Receding_Hairline']
self.rosy_cheeks_list = df['Rosy_Cheeks']
self.sideburns_list = df['Sideburns']
self.smiling_list = df['Smiling']
self.straight_hair_list = df['Straight_Hair']
self.wavy_hair_list = df['Wavy_Hair']
self.wearing_earrings_list = df['Wearing_Earrings']
self.wearing_hat_list = df['Wearing_Hat']
self.wearing_lipstick_list = df['Wearing_Lipstick']
self.wearing_necklace_list = df['Wearing_Necklace']
self.wearing_necktie_list = df['Wearing_Necktie']
self.young_list = df['Young']
self.transform = transform
def __len__(self):
return len(self.file_list)
def __getitem__(self, idx):
        image = io.imread(os.path.join(cfg['root'], 'CelebA', 'Img', 'img_align_celeba', self.file_list[idx]))
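        # attribute columns in list_attr_celeba.txt take values -1/1; max(x, 0) below maps them to 0/1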
sample = {'image': image, '5_o_Clock_Shadow': max(self.o_clock_shadow_list[idx], 0),
'Arched_Eyebrows': max(self.arched_eyebrows_list[idx], 0),
'Attractive': max(self.attractive_list[idx], 0),
'Bags_Under_Eyes': max(self.bags_under_eyes_list[idx], 0),
'Bald': max(self.bald_list[idx], 0),
'Bangs': max(self.bangs_list[idx], 0), 'Big_Lips': max(self.big_lips_list[idx], 0),
'Big_Nose': max(self.big_nose_list[idx], 0), 'Black_Hair': max(self.black_hair_list[idx], 0),
'Blond_Hair': max(self.blond_hair_list[idx], 0), 'Blurry': max(self.blurry_list[idx], 0),
'Brown_Hair': max(self.brown_hair_list[idx], 0),
'Bushy_Eyebrows': max(self.bushy_eyebrows_list[idx], 0),
'Chubby': max(self.chubby_list[idx], 0), 'Double_Chin': max(self.double_chin_list[idx], 0),
'Eyeglasses': max(self.eyeglasses_list[idx], 0), 'Goatee': max(self.goatee_list[idx], 0),
'Gray_Hair': max(self.gray_hair_list[idx], 0), 'Heavy_Makeup': max(self.heavy_makeup_list[idx], 0),
'High_Cheekbones': max(self.high_cheekbones_list[idx], 0),
'Male': max(self.male_list[idx], 0),
'Mouth_Slightly_Open': max(self.mouth_slightly_open_list[idx], 0),
'Mustache': max(self.mustache_list[idx], 0),
'Narrow_Eyes': max(self.narrow_eyes_list[idx], 0), 'No_Beard': max(self.no_beard_list[idx], 0),
'Oval_Face': max(self.oval_face_list[idx], 0),
'Pale_Skin': max(self.pale_skin_list[idx], 0), 'Pointy_Nose': max(self.pointy_nose_list[idx], 0),
'Receding_Hairline': max(self.receding_hairline_list[idx], 0),
'Rosy_Cheeks': max(self.rosy_cheeks_list[idx], 0), 'Sideburns': max(self.sideburns_list[idx], 0),
'Smiling': max(self.smiling_list[idx], 0), 'Straight_Hair': max(self.straight_hair_list[idx], 0),
'Wavy_Hair': max(self.wavy_hair_list[idx], 0),
'Wearing_Earrings': max(self.wearing_earrings_list[idx], 0),
'Wearing_Hat': max(self.wearing_hat_list[idx], 0),
'Wearing_Lipstick': max(self.wearing_lipstick_list[idx], 0),
'Wearing_Necklace': max(self.wearing_necklace_list[idx], 0),
'Wearing_Necktie': max(self.wearing_necktie_list[idx], 0), 'Young': max(self.young_list[idx], 0)}
if self.transform:
sample['image'] = self.transform(Image.fromarray(sample['image'].astype(np.uint8)))
return sample
class UTKFaceDataset(Dataset):
"""
UTKFace dataset
"""
def __init__(self, train=True, transform=None):
files = os.listdir(os.path.join(cfg['root'], 'UTKFace'))
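        # UTKFace filenames encode labels as [age]_[gender]_[race]_[date&time].jpg,
        # which the split("_") parsing below relies on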
ages = [int(fname.split("_")[0]) for fname in files]
train_files, test_files, train_ages, test_ages = train_test_split(files, ages, test_size=0.2, random_state=42)
if train:
self.filelist = train_files
self.agelist = train_ages
self.genderlist = [int(fname.split("_")[1]) for fname in train_files]
self.racelist = [int(fname.split("_")[2]) if len(fname.split("_")[2]) == 1 else 4 for fname in train_files]
else:
self.filelist = test_files
self.agelist = test_ages
self.genderlist = [int(fname.split("_")[1]) for fname in test_files]
self.racelist = [int(fname.split("_")[2]) if len(fname.split("_")[2]) == 1 else 4 for fname in test_files]
self.transform = transform
def __len__(self):
return len(self.filelist)
def __getitem__(self, idx):
img_name = os.path.join(cfg['root'], 'UTKFace', self.filelist[idx])
image = io.imread(img_name)
sample = {'image': image, 'age': self.agelist[idx], "gender": self.genderlist[idx],
"race": self.racelist[idx]}
if self.transform:
sample['image'] = self.transform(Image.fromarray(sample['image'].astype(np.uint8)))
return sample
class FER2013Dataset(Dataset):
"""
FER2013 dataset
"""
def __init__(self, train=True, transform=None):
imgs = []
labels = []
type_ = 'train' if train else 'test'
for cat in os.listdir(os.path.join(cfg['root'], 'FER2013', type_)):
for img_f in os.listdir(os.path.join(cfg['root'], 'FER2013', type_, cat)):
imgs.append(os.path.join(cfg['root'], 'FER2013', type_, cat, img_f))
labels.append(int(cat))
self.imagefiles = imgs
self.labels = labels
self.transform = transform
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
sample = {'filename': self.imagefiles[idx], 'image': io.imread(self.imagefiles[idx]),
'emotion': self.labels[idx], "gender": 0, "race": 0, "age": 0}
if self.transform:
sample['image'] = self.transform(Image.fromarray(gray2rgb(sample['image']).astype(np.uint8)))
return sample
# class FER2013Dataset(Dataset):
# """
# FER2013 dataset
# """
#
# def __init__(self, train=True, fer2013_csv=os.path.join(cfg['root'], 'FER2013', 'fer2013.csv'), transform=None):
# df = pd.read_csv(fer2013_csv)
# train_imgs = []
# test_imgs = []
# train_labels = []
# test_labels = []
#
# for i in range(len(df['Usage'])):
# if df['Usage'][i] == 'Training':
# img_array = np.zeros((48, 48, 3))
# img_array[:, :, 0] = np.array(df['pixels'][i].split(" ")).reshape(48, 48).astype(np.float)
# img_array[:, :, 1] = np.array(df['pixels'][i].split(" ")).reshape(48, 48).astype(np.float)
# img_array[:, :, 2] = np.array(df['pixels'][i].split(" ")).reshape(48, 48).astype(np.float)
# test_imgs.append(img_array)
# train_imgs.append(img_array)
# train_labels.append(df['emotion'][i])
# elif df['Usage'][i] == 'PrivateTest':
# img_array = np.zeros((48, 48, 3))
# img_array[:, :, 0] = np.array(df['pixels'][i].split(" ")).reshape(48, 48).astype(np.float)
# img_array[:, :, 1] = np.array(df['pixels'][i].split(" ")).reshape(48, 48).astype(np.float)
# img_array[:, :, 2] = np.array(df['pixels'][i].split(" ")).reshape(48, 48).astype(np.float)
# test_imgs.append(img_array)
# test_labels.append(df['emotion'][i])
#
# if train:
# self.images = train_imgs
# self.labels = train_labels
# else:
# self.images = test_imgs
# self.labels = test_labels
#
# self.transform = transform
#
# def __len__(self):
# return len(self.labels)
#
# def __getitem__(self, idx):
# sample = {'image': self.images[idx], 'emotion': self.labels[idx], "gender": 0, "race": 0, "age": 0}
#
# if self.transform:
# sample['image'] = self.transform(Image.fromarray(sample['image'].astype(np.uint8)))
#
# return sample
|
[
"sys.path.append",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.array",
"numpy.loadtxt",
"skimage.color.gray2rgb",
"os.path.join",
"os.listdir",
"skimage.io.imread"
] |
[((379, 401), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (394, 401), False, 'import sys\n'), ((630, 698), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/Annotation/manual' % type)"], {}), "(cfg['root'], 'RAF-Face', '%s/Annotation/manual' % type)\n", (642, 698), False, 'import os\n'), ((732, 819), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/EmoLabel/list_patition_label.txt' % type)"], {}), "(cfg['root'], 'RAF-Face', '%s/EmoLabel/list_patition_label.txt' %\n type)\n", (744, 819), False, 'import os\n'), ((3393, 3424), 'skimage.io.imread', 'io.imread', (['self.face_files[idx]'], {}), '(self.face_files[idx])\n', (3402, 3424), False, 'from skimage import io\n'), ((4372, 4459), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/EmoLabel/list_patition_label.txt' % type)"], {}), "(cfg['root'], 'RAF-Face', '%s/EmoLabel/list_patition_label.txt' %\n type)\n", (4384, 4459), False, 'import os\n'), ((5570, 5606), 'skimage.io.imread', 'io.imread', (['self.local_part_imgs[idx]'], {}), '(self.local_part_imgs[idx])\n', (5579, 5606), False, 'from skimage import io\n'), ((6062, 6129), 'os.path.join', 'os.path.join', (["cfg['root']", '"""CelebA"""', '"""Anno"""', '"""list_attr_celeba.txt"""'], {}), "(cfg['root'], 'CelebA', 'Anno', 'list_attr_celeba.txt')\n", (6074, 6129), False, 'import os\n'), ((6143, 6212), 'pandas.read_csv', 'pd.read_csv', (['list_attr_celeba_txt'], {'delim_whitespace': '(True)', 'header': 'None'}), '(list_attr_celeba_txt, delim_whitespace=True, header=None)\n', (6154, 6212), True, 'import pandas as pd\n'), ((12238, 12299), 'sklearn.model_selection.train_test_split', 'train_test_split', (['files', 'ages'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(files, ages, test_size=0.2, random_state=42)\n', (12254, 12299), False, 'from sklearn.model_selection import train_test_split\n'), ((13035, 13091), 'os.path.join', 'os.path.join', (["cfg['root']", '"""UTKFace"""', 'self.filelist[idx]'], {}), "(cfg['root'], 'UTKFace', self.filelist[idx])\n", (13047, 13091), False, 'import os\n'), ((13109, 13128), 'skimage.io.imread', 'io.imread', (['img_name'], {}), '(img_name)\n', (13118, 13128), False, 'from skimage import io\n'), ((845, 893), 'numpy.loadtxt', 'np.loadtxt', (['emotion_label_txt_path'], {'dtype': 'np.str'}), '(emotion_label_txt_path, dtype=np.str)\n', (855, 893), True, 'import numpy as np\n'), ((1082, 1115), 'os.listdir', 'os.listdir', (['manual_annotation_dir'], {}), '(manual_annotation_dir)\n', (1092, 1115), False, 'import os\n'), ((2166, 2199), 'os.listdir', 'os.listdir', (['manual_annotation_dir'], {}), '(manual_annotation_dir)\n', (2176, 2199), False, 'import os\n'), ((3717, 3731), 'numpy.array', 'np.array', (['ldmk'], {}), '(ldmk)\n', (3725, 3731), True, 'import numpy as np\n'), ((4598, 4646), 'numpy.loadtxt', 'np.loadtxt', (['emotion_label_txt_path'], {'dtype': 'np.str'}), '(emotion_label_txt_path, dtype=np.str)\n', (4608, 4646), True, 'import numpy as np\n'), ((4747, 4777), 'os.listdir', 'os.listdir', (['local_part_img_dir'], {}), '(local_part_img_dir)\n', (4757, 4777), False, 'import os\n'), ((5091, 5121), 'os.listdir', 'os.listdir', (['local_part_img_dir'], {}), '(local_part_img_dir)\n', (5101, 5121), False, 'import os\n'), ((9156, 9244), 'os.path.join', 'os.path.join', (["cfg['coot', 'CelebA', 'Img', 'img_align_celeba', self.file_list[idx]]"], {}), "(cfg['coot', 'CelebA', 'Img', 'img_align_celeba', self.\n file_list[idx]])\n", (9168, 9244), False, 'import os\n'), ((12081, 
12117), 'os.path.join', 'os.path.join', (["cfg['root']", '"""UTKFace"""'], {}), "(cfg['root'], 'UTKFace')\n", (12093, 12117), False, 'import os\n'), ((13651, 13694), 'os.path.join', 'os.path.join', (["cfg['root']", '"""FER2013"""', 'type_'], {}), "(cfg['root'], 'FER2013', type_)\n", (13663, 13694), False, 'import os\n'), ((14156, 14187), 'skimage.io.imread', 'io.imread', (['self.imagefiles[idx]'], {}), '(self.imagefiles[idx])\n', (14165, 14187), False, 'from skimage import io\n'), ((13733, 13781), 'os.path.join', 'os.path.join', (["cfg['root']", '"""FER2013"""', 'type_', 'cat'], {}), "(cfg['root'], 'FER2013', type_, cat)\n", (13745, 13781), False, 'import os\n'), ((13812, 13867), 'os.path.join', 'os.path.join', (["cfg['root']", '"""FER2013"""', 'type_', 'cat', 'img_f'], {}), "(cfg['root'], 'FER2013', type_, cat, img_f)\n", (13824, 13867), False, 'import os\n'), ((1292, 1368), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/Image/aligned' % type)", 'face_fname'], {}), "(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)\n", (1304, 1368), False, 'import os\n'), ((2375, 2451), 'os.path.join', 'os.path.join', (["cfg['root']", '"""RAF-Face"""', "('%s/Image/aligned' % type)", 'face_fname'], {}), "(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)\n", (2387, 2451), False, 'import os\n'), ((4865, 4900), 'os.path.join', 'os.path.join', (['local_part_img_dir', '_'], {}), '(local_part_img_dir, _)\n', (4877, 4900), False, 'import os\n'), ((5208, 5243), 'os.path.join', 'os.path.join', (['local_part_img_dir', '_'], {}), '(local_part_img_dir, _)\n', (5220, 5243), False, 'import os\n'), ((1400, 1438), 'os.path.join', 'os.path.join', (['manual_annotation_dir', '_'], {}), '(manual_annotation_dir, _)\n', (1412, 1438), False, 'import os\n'), ((2483, 2521), 'os.path.join', 'os.path.join', (['manual_annotation_dir', '_'], {}), '(manual_annotation_dir, _)\n', (2495, 2521), False, 'import os\n'), ((14359, 14384), 'skimage.color.gray2rgb', 'gray2rgb', (["sample['image']"], {}), "(sample['image'])\n", (14367, 14384), False, 'from skimage.color import gray2rgb\n')]
|
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
import time
import numpy as np
from mpi4py import MPI
from nest_elephant_tvb.translation.science_tvb_to_nest import generate_data
def init(path_config, nb_spike_generator, id_first_spike_detector, param,
comm, comm_receiver, comm_sender, loggers):
'''
Initialize the transformation with MPI. This is the TVB to NEST direction.
NOTE: more information will be added with more changes. This is still the very first version!
TODO: Use RichEndPoints for communication encapsulation
    TODO: Separate 1)Receive 2)Analysis/Science and 3)Send. See also the many Todos in the code
TODO: Make use of / enable MPI parallelism! Solve hardcoded communication protocol first
TODO: This side mirrors the NEST to TVB side
-> TVB communicates on rank 0
        -> NEST communicates on rank 1
This is vice versa in the nest to tvb direction.
TODO: solve this together with the rest of the communication protocol.
'''
    # destructure logger list to individual variables
logger_master, logger_receive, logger_send = loggers
# science part, see import
# TODO: use os.path (or similar) for proper file handling.
# TODO: move this object creation to a proper place. They are passed through many functions.
generator = generate_data(path_config+'/../../log/',nb_spike_generator,param)
############ NEW Code:
# TODO: future work: mpi parallel, use rank 1-x for science and sending
# TODO: use this MPI intracommunicator, without receiving rank 0
# intracomm = comm.Create(comm.Get_group().Excl([0]))
# create the shared memory block / databuffer
databuffer = _shared_mem_buffer(comm)
############# NEW Code end
############ NEW Code: Receive/analyse/send
if comm.Get_rank() == 0: # Receiver from TVB
# All MPI communication is done with rank 0 from TVB side
# Make this (and the NEST side as well) scalable.
_receive(comm_receiver, databuffer, logger_receive)
else: # Science/generate and sender to NEST, rank 1-x
_send(comm_sender, databuffer, logger_send, generator, id_first_spike_detector)
############ NEW Code end
############ NEW Code: disconnect
# TODO: should this be done here?
logger_master.info('Disconnect communicators...')
comm_receiver.Disconnect()
comm_sender.Disconnect()
############ NEW Code end
def _shared_mem_buffer(comm):
'''
Create shared memory buffer. MPI One-sided-Communication.
:param comm: MPI intra communicator to create the buffer.
:return buffer: 1D shared memory buffer array
TODO: Buffersize/max. expected size of incoming data
-> free param, handle properly!
TODO: 2 doubles: [start_time,end_time] of simulation step
TODO: unknown number of doubles: array with rates
'''
datasize = MPI.DOUBLE.Get_size()
bufsize = 2 + 1000000 # NOTE: hardcoded (max.expected size of rate array)
if comm.Get_rank() == 0:
bufbytes = datasize * bufsize
else:
bufbytes= 0
# rank 0: create the shared block
# rank 1-x: get a handle to it
win = MPI.Win.Allocate_shared(bufbytes, datasize, comm=comm)
buf, datasize = win.Shared_query(0)
assert datasize == MPI.DOUBLE.Get_size()
# create a numpy array (buffer) whose data points to the shared mem
return np.ndarray(buffer=buf, dtype='d', shape=(bufsize,))
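# NOTE (added annotation, inferred from _receive/_send below): buffer layout convention
#   databuffer[0:2]          -> [start_time, end_time] of the simulation step
#   databuffer[2:2+size]     -> rate array received from TVB
#   databuffer[-2]           -> size of the rate array
#   databuffer[-1]           -> handshake flag: 1 = ready to receive from TVB, 0 = ready to analyse/send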
# See todo in the beginning, encapsulate I/O, transformer, science parts
def _receive(comm_receiver, databuffer, logger):
'''
Receive data on rank 0. Put it into the shared mem buffer.
Replaces the former 'receive' function.
NOTE: First refactored version -> not pretty, not final.
'''
status_ = MPI.Status()
num_sending = comm_receiver.Get_remote_size() # how many TVB ranks are sending?
# init placeholder for incoming data
time_step = np.empty(2, dtype='d') # two doubles with start and end time of the step
size = np.empty(1, dtype='i') # size of the rate-array
# TODO: the last two buffer entries are used for shared information
# --> they replace the status_data variable from previous version
# --> find more elegant solution?
databuffer[-1] = 1 # set buffer to 'ready to receive from tvb'
databuffer[-2] = 0 # marks the 'head' of the buffer
while True:
# TODO: NEST to TVB transformer: irecv
# TODO: TVB to NEST transformer (here): isend
# TODO: --> rework communication protocol between simulators and transformers!
requests=[]
logger.info(" TVB to Nest: wait receive ")
for rank in range(num_sending):
requests.append(comm_receiver.isend(True,dest=rank,tag=0))
MPI.Request.Waitall(requests)
logger.info(" TVB to Nest: receive all")
# TODO: works for now, needs rework if multiple ranks are used on TVB side
# TODO: we receive from "ANY_SOURCE", but only check the status_ of the last receive...
# get the starting and ending time of the simulation step
# NEW: receive directly into the buffer
comm_receiver.Recv([databuffer[0:], MPI.DOUBLE], source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status_)
logger.info(" TVB to Nest: get time_step "+str(time_step)+" status : " + str(status_.Get_tag()))
if status_.Get_tag() == 0:
# wait until ready to receive new data (i.e. the sender has cleared the buffer)
while databuffer[-1] != 1: # TODO: use MPI, remove the sleep
time.sleep(0.001)
pass
# Get the size of the data
comm_receiver.Recv([size, 1, MPI.INT], source=status_.Get_source(), tag=0, status=status_)
# NEW: receive directly into the buffer
# First two entries are the times, see above
comm_receiver.Recv([databuffer[2:], MPI.DOUBLE], source=status_.Get_source(), tag=0, status=status_)
# Mark as 'ready to do analysis'
databuffer[-1] = 0
databuffer[-2] = size # info about size of data array
logger.info(" TVB to Nest: update buffer")
elif status_.Get_tag() == 1:
logger.info('TVB: end simulation')
break
else:
raise Exception("bad mpi tag"+str(status_.Get_tag()))
logger.info('TVB_to_NEST: End of receive function')
# See todo in the beginning, encapsulate I/O, transformer, science parts
def _send(comm_sender, databuffer, logger, generator, id_first_spike_detector):
'''
Generator/Science on INTRAcommunicator (multiple MPI ranks possible).
TODO: not yet used.
Send data to NEST on INTERcommunicator comm_sender (multiple MPI ranks possible).
Replaces the former 'send' function.
NOTE: First refactored version -> not pretty, not final.
TODO: Discuss communication protocol of TVB<->transformer and transformer<->NEST
'''
status_ = MPI.Status()
num_sending = comm_sender.Get_remote_size() # how many TVB ranks are sending?
# init placeholder for incoming data
check = np.empty(1,dtype='b')
size_list = np.empty(1, dtype='i')
while(True):
# TODO: This is still not correct. We only check for the Tag of the last rank.
# TODO: IF all ranks send always the same tag in one iteration (simulation step)
# TODO: then this works. But it should be handled differently!!!!
for rank in range(num_sending):
comm_sender.Recv([check, 1, MPI.CXX_BOOL], source=rank, tag=MPI.ANY_TAG, status=status_)
logger.info("TVB to NEST : send data status : " +str(status_.Get_tag()))
# TODO: handle properly, all ranks send tag 0?
if status_.Get_tag() == 0:
# wait until the receiver has cleared the buffer, i.e. filled with new data
while databuffer[-1] != 0: # TODO: use MPI, remove the sleep
time.sleep(0.001)
pass
# TODO: All science/generate here. Move to a proper place.
# method: generate_spike(count,time_step,rate)
# NOTE: count is a hardcoded '0'. Why?
# NOTE: time_step are the first two doubles in the buffer
# NOTE: rate is a double array, which size is stored in the second to last index
spikes_times = generator.generate_spike(0,databuffer[:2],databuffer[2:int(databuffer[-2])])
logger.info(" TVB to Nest: spike time")
# Mark as 'ready to receive next simulation step'
databuffer[-1] = 1
###### OLD code, kept the communication and science as it is for now
### TODO: Receive from status_.Get_source() and rank
### TODO: Send to status_.Get_source() and rank
### TODO: why???
### TODO: a second status_ object is used, should not be named the same
for rank in range(num_sending):
# NOTE: in 'test_receive_tvb_to_nest.py': hardcoded 10
comm_sender.Recv([size_list, 1, MPI.INT], source=rank, tag=0, status=status_)
if size_list[0] != 0:
list_id = np.empty(size_list, dtype='i')
# NOTE: in 'test_receive_tvb_to_nest.py': hardcoded np.arange(0,10,1)
comm_sender.Recv([list_id, size_list, MPI.INT], source=status_.Get_source(), tag=0, status=status_)
# Select the good spike train and send it
# logger.info(" TVB to Nest:"+str(data))
logger.info("rank "+str(rank)+" list_id "+str(list_id))
# TODO: Creating empty lists and append to them in a loop, all inside a loop
# TODO: this is slow and will be a bottleneck when we scale up.
data = []
shape = []
for i in list_id:
shape += [spikes_times[i-id_first_spike_detector].shape[0]]
data += [spikes_times[i-id_first_spike_detector]]
send_shape = np.array(np.concatenate(([np.sum(shape)],shape)), dtype='i')
# firstly send the size of the spikes train
comm_sender.Send([send_shape, MPI.INT], dest=status_.Get_source(), tag=list_id[0])
# secondly send the spikes train
data = np.concatenate(data).astype('d')
comm_sender.Send([data, MPI.DOUBLE], dest=rank, tag=list_id[0])
logger.info(" end sending:")
###### OLD code end
elif status_.Get_tag() == 1:
logger.info(" TVB to Nest end sending") # NOTE: one sim step?
elif status_.Get_tag() == 2:
logger.info(" TVB to Nest end simulation ") # NOTE: end whole sim.
break
else:
raise Exception("bad mpi tag : "+str(status_.Get_tag()))
logger.info('TVB_to_NEST: End of send function')
|
[
"numpy.sum",
"nest_elephant_tvb.translation.science_tvb_to_nest.generate_data",
"mpi4py.MPI.Win.Allocate_shared",
"mpi4py.MPI.Status",
"numpy.empty",
"time.sleep",
"mpi4py.MPI.DOUBLE.Get_size",
"mpi4py.MPI.Request.Waitall",
"numpy.ndarray",
"numpy.concatenate"
] |
[((1502, 1571), 'nest_elephant_tvb.translation.science_tvb_to_nest.generate_data', 'generate_data', (["(path_config + '/../../log/')", 'nb_spike_generator', 'param'], {}), "(path_config + '/../../log/', nb_spike_generator, param)\n", (1515, 1571), False, 'from nest_elephant_tvb.translation.science_tvb_to_nest import generate_data\n'), ((3072, 3093), 'mpi4py.MPI.DOUBLE.Get_size', 'MPI.DOUBLE.Get_size', ([], {}), '()\n', (3091, 3093), False, 'from mpi4py import MPI\n'), ((3353, 3407), 'mpi4py.MPI.Win.Allocate_shared', 'MPI.Win.Allocate_shared', (['bufbytes', 'datasize'], {'comm': 'comm'}), '(bufbytes, datasize, comm=comm)\n', (3376, 3407), False, 'from mpi4py import MPI\n'), ((3576, 3627), 'numpy.ndarray', 'np.ndarray', ([], {'buffer': 'buf', 'dtype': '"""d"""', 'shape': '(bufsize,)'}), "(buffer=buf, dtype='d', shape=(bufsize,))\n", (3586, 3627), True, 'import numpy as np\n'), ((3951, 3963), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (3961, 3963), False, 'from mpi4py import MPI\n'), ((4105, 4127), 'numpy.empty', 'np.empty', (['(2)'], {'dtype': '"""d"""'}), "(2, dtype='d')\n", (4113, 4127), True, 'import numpy as np\n'), ((4189, 4211), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""i"""'}), "(1, dtype='i')\n", (4197, 4211), True, 'import numpy as np\n'), ((7159, 7171), 'mpi4py.MPI.Status', 'MPI.Status', ([], {}), '()\n', (7169, 7171), False, 'from mpi4py import MPI\n'), ((7307, 7329), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""b"""'}), "(1, dtype='b')\n", (7315, 7329), True, 'import numpy as np\n'), ((7345, 7367), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': '"""i"""'}), "(1, dtype='i')\n", (7353, 7367), True, 'import numpy as np\n'), ((3471, 3492), 'mpi4py.MPI.DOUBLE.Get_size', 'MPI.DOUBLE.Get_size', ([], {}), '()\n', (3490, 3492), False, 'from mpi4py import MPI\n'), ((4939, 4968), 'mpi4py.MPI.Request.Waitall', 'MPI.Request.Waitall', (['requests'], {}), '(requests)\n', (4958, 4968), False, 'from mpi4py import MPI\n'), ((5754, 5771), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (5764, 5771), False, 'import time\n'), ((8124, 8141), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (8134, 8141), False, 'import time\n'), ((9379, 9409), 'numpy.empty', 'np.empty', (['size_list'], {'dtype': '"""i"""'}), "(size_list, dtype='i')\n", (9387, 9409), True, 'import numpy as np\n'), ((10598, 10618), 'numpy.concatenate', 'np.concatenate', (['data'], {}), '(data)\n', (10612, 10618), True, 'import numpy as np\n'), ((10316, 10329), 'numpy.sum', 'np.sum', (['shape'], {}), '(shape)\n', (10322, 10329), True, 'import numpy as np\n')]
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
import numpy as np
import pytest
from utils import assert_eq
import nvcategory
import nvstrings
def test_size():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
assert strs.size() == cat.size()
def test_keys():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
cat = nvcategory.from_strings(strs1)
got = cat.keys()
expected = ["a", "b", "c", "f"]
assert_eq(got, expected)
def test_keys_size():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
cat = nvcategory.from_strings(strs1)
got = cat.keys_size()
assert got == 4
def test_values():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.values()
expected = [3, 0, 3, 2, 1, 1, 1, 3, 0]
assert_eq(got, expected)
def test_value_for_index():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.value_for_index(7)
expected = 3
assert got == expected
def test_value():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.value("ccc")
expected = 1
assert got == expected
def test_indexes_for_key():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.indexes_for_key("ccc")
expected = [4, 5, 6]
assert_eq(got, expected)
def test_to_strings():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.to_strings()
assert_eq(got, strs)
def test_add_strings():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.add_strings(strs)
expected_keys = ["aaa", "ccc", "ddd", "eee"]
expected_values = [3, 0, 3, 2, 1, 1, 1, 3, 0, 3, 0, 3, 2, 1, 1, 1, 3, 0]
assert_eq(got.keys(), expected_keys)
assert_eq(got.values(), expected_values)
def test_gather_strings():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
got = cat.gather_strings([0, 2, 0])
expected = ["aaa", "ddd", "aaa"]
assert_eq(got, expected)
@pytest.mark.parametrize(
"func",
[
lambda cat, indexes: cat.gather_strings(indexes),
lambda cat, indexes: cat.gather(indexes),
lambda cat, indexes: cat.gather_and_remap(indexes),
],
)
def test_gather_index_exception(func):
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
indexes = [0, 2, 0, 4]
with pytest.raises(Exception):
func(cat, indexes)
def test_remove_strings():
strs = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
cat = nvcategory.from_strings(strs)
removal_strings = nvstrings.to_device(["ccc", "aaa", "bbb"])
got = cat.remove_strings(removal_strings)
expected_keys = ["ddd", "eee"]
expected_values = [1, 1, 0, 1]
assert_eq(got.keys(), expected_keys)
assert_eq(got.values(), expected_values)
def test_from_strings():
strs1 = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
strs2 = nvstrings.to_device(
["ggg", "fff", "hhh", "aaa", "fff", "fff", "ggg", "hhh", "bbb"]
)
cat = nvcategory.from_strings(strs1, strs2)
expected_keys = ["aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh"]
expected_values = [4, 0, 4, 3, 2, 2, 2, 4, 0, 6, 5, 7, 0, 5, 5, 6, 7, 1]
assert_eq(cat.keys(), expected_keys)
assert_eq(cat.values(), expected_values)
def test_merge_category():
strs1 = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
strs2 = nvstrings.to_device(
["ggg", "fff", "hhh", "aaa", "fff", "fff", "ggg", "hhh", "bbb"]
)
cat1 = nvcategory.from_strings(strs1)
cat2 = nvcategory.from_strings(strs2)
ncat = cat1.merge_category(cat2)
expected_keys = ["<KEY>"]
expected_values = [3, 0, 3, 2, 1, 1, 1, 3, 0, 6, 5, 7, 0, 5, 5, 6, 7, 4]
assert_eq(ncat.keys(), expected_keys)
assert_eq(ncat.values(), expected_values)
def test_merge_and_remap():
strs1 = nvstrings.to_device(
["eee", "aaa", "eee", "ddd", "ccc", "ccc", "ccc", "eee", "aaa"]
)
strs2 = nvstrings.to_device(
["ggg", "fff", "hhh", "aaa", "fff", "fff", "ggg", "hhh", "bbb"]
)
cat1 = nvcategory.from_strings(strs1)
cat2 = nvcategory.from_strings(strs2)
ncat = cat1.merge_and_remap(cat2)
expected_keys = ["<KEY>"]
expected_values = [4, 0, 4, 3, 2, 2, 2, 4, 0, 6, 5, 7, 0, 5, 5, 6, 7, 1]
assert_eq(ncat.keys(), expected_keys)
assert_eq(ncat.values(), expected_values)
def test_add_keys():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
strs2 = nvstrings.to_device(["a", "b", "c", "d"])
cat = nvcategory.from_strings(strs1)
cat1 = cat.add_keys(strs2)
assert_eq(cat1.keys(), ["a", "b", "c", "d", "f"])
def test_remove_keys():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
strs2 = nvstrings.to_device(["b", "d"])
cat = nvcategory.from_strings(strs1)
cat1 = cat.remove_keys(strs2)
assert_eq(cat1.keys(), ["a", "c", "f"])
def test_set_keys():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
strs2 = nvstrings.to_device(["b", "c", "e", "d"])
cat = nvcategory.from_strings(strs1)
cat1 = cat.set_keys(strs2)
assert_eq(cat1.keys(), ["b", "c", "d", "e"])
def test_remove_unused_keys():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
strs2 = nvstrings.to_device(["b", "c", "e", "d"])
cat = nvcategory.from_strings(strs1)
cat1 = cat.set_keys(strs2)
cat1_unused_removed = cat1.remove_unused_keys()
assert_eq(cat1_unused_removed.keys(), ["b", "c"])
def test_gather():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
cat = nvcategory.from_strings(strs1)
cat1 = cat.gather([1, 3, 2, 3, 1, 2])
expected_keys = ["<KEY>"]
expected_values = [1, 3, 2, 3, 1, 2]
assert_eq(cat1.keys(), expected_keys)
assert_eq(cat1.values(), expected_values)
def test_gather_and_remap():
strs1 = nvstrings.to_device(["a", "b", "b", "f", "c", "f"])
cat = nvcategory.from_strings(strs1)
cat1 = cat.gather_and_remap([1, 3, 2, 3, 1, 2])
expected_keys = ["<KEY>"]
expected_values = [0, 2, 1, 2, 0, 1]
assert_eq(cat1.keys(), expected_keys)
assert_eq(cat1.values(), expected_values)
def test_from_offsets():
values = np.array([97, 112, 112, 108, 101], dtype=np.int8)
offsets = np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)
cat = nvcategory.from_offsets(values, offsets, 5)
expected_keys = ["<KEY>"]
expected_values = [0, 3, 3, 2, 1]
assert_eq(cat.keys(), expected_keys)
assert_eq(cat.values(), expected_values)
def test_from_strings_list():
s1 = nvstrings.to_device(["apple", "pear", "banana"])
s2 = nvstrings.to_device(["orange", "pear"])
cat = nvcategory.from_strings_list([s1, s2])
expected_keys = ["apple", "banana", "orange", "pear"]
expected_values = [0, 3, 1, 2, 3]
assert_eq(cat.keys(), expected_keys)
assert_eq(cat.values(), expected_values)
def test_to_device():
cat = nvcategory.to_device(["apple", "pear", "banana", "orange", "pear"])
expected_keys = ["apple", "banana", "orange", "pear"]
expected_values = [0, 3, 1, 2, 3]
assert_eq(cat.keys(), expected_keys)
assert_eq(cat.values(), expected_values)
|
[
"utils.assert_eq",
"nvcategory.to_device",
"pytest.raises",
"numpy.array",
"nvcategory.from_strings",
"nvcategory.from_strings_list",
"nvstrings.to_device",
"nvcategory.from_offsets"
] |
[((175, 263), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (194, 263), False, 'import nvstrings\n'), ((284, 313), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (307, 313), False, 'import nvcategory\n'), ((382, 433), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (401, 433), False, 'import nvstrings\n'), ((444, 474), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (467, 474), False, 'import nvcategory\n'), ((536, 560), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (545, 560), False, 'from utils import assert_eq\n'), ((597, 648), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (616, 648), False, 'import nvstrings\n'), ((659, 689), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (682, 689), False, 'import nvcategory\n'), ((768, 856), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (787, 856), False, 'import nvstrings\n'), ((877, 906), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (900, 906), False, 'import nvcategory\n'), ((977, 1001), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (986, 1001), False, 'from utils import assert_eq\n'), ((1043, 1131), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (1062, 1131), False, 'import nvstrings\n'), ((1152, 1181), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (1175, 1181), False, 'import nvcategory\n'), ((1290, 1378), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (1309, 1378), False, 'import nvstrings\n'), ((1399, 1428), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (1422, 1428), False, 'import nvcategory\n'), ((1541, 1629), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (1560, 1629), False, 'import nvstrings\n'), ((1650, 1679), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (1673, 1679), False, 'import nvcategory\n'), ((1746, 1770), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (1755, 1770), False, 'from utils import assert_eq\n'), ((1807, 1895), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (1826, 1895), False, 'import nvstrings\n'), ((1916, 1945), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (1939, 1945), False, 'import nvcategory\n'), ((1977, 1997), 'utils.assert_eq', 'assert_eq', (['got', 'strs'], {}), '(got, strs)\n', (1986, 1997), False, 'from 
utils import assert_eq\n'), ((2035, 2123), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (2054, 2123), False, 'import nvstrings\n'), ((2144, 2173), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (2167, 2173), False, 'import nvcategory\n'), ((2458, 2546), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (2477, 2546), False, 'import nvstrings\n'), ((2567, 2596), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (2590, 2596), False, 'import nvcategory\n'), ((2678, 2702), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (2687, 2702), False, 'from utils import assert_eq\n'), ((2976, 3064), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (2995, 3064), False, 'import nvstrings\n'), ((3085, 3114), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (3108, 3114), False, 'import nvcategory\n'), ((3244, 3332), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (3263, 3332), False, 'import nvstrings\n'), ((3353, 3382), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs'], {}), '(strs)\n', (3376, 3382), False, 'import nvcategory\n'), ((3405, 3447), 'nvstrings.to_device', 'nvstrings.to_device', (["['ccc', 'aaa', 'bbb']"], {}), "(['ccc', 'aaa', 'bbb'])\n", (3424, 3447), False, 'import nvstrings\n'), ((3690, 3778), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (3709, 3778), False, 'import nvstrings\n'), ((3801, 3889), 'nvstrings.to_device', 'nvstrings.to_device', (["['ggg', 'fff', 'hhh', 'aaa', 'fff', 'fff', 'ggg', 'hhh', 'bbb']"], {}), "(['ggg', 'fff', 'hhh', 'aaa', 'fff', 'fff', 'ggg', 'hhh',\n 'bbb'])\n", (3820, 3889), False, 'import nvstrings\n'), ((3910, 3947), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1', 'strs2'], {}), '(strs1, strs2)\n', (3933, 3947), False, 'import nvcategory\n'), ((4230, 4318), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (4249, 4318), False, 'import nvstrings\n'), ((4341, 4429), 'nvstrings.to_device', 'nvstrings.to_device', (["['ggg', 'fff', 'hhh', 'aaa', 'fff', 'fff', 'ggg', 'hhh', 'bbb']"], {}), "(['ggg', 'fff', 'hhh', 'aaa', 'fff', 'fff', 'ggg', 'hhh',\n 'bbb'])\n", (4360, 4429), False, 'import nvstrings\n'), ((4451, 4481), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (4474, 4481), False, 'import nvcategory\n'), ((4493, 4523), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs2'], {}), '(strs2)\n', (4516, 4523), False, 'import nvcategory\n'), ((4799, 4887), 'nvstrings.to_device', 'nvstrings.to_device', (["['eee', 'aaa', 'eee', 'ddd', 'ccc', 'ccc', 'ccc', 'eee', 'aaa']"], {}), "(['eee', 'aaa', 'eee', 'ddd', 
'ccc', 'ccc', 'ccc', 'eee',\n 'aaa'])\n", (4818, 4887), False, 'import nvstrings\n'), ((4910, 4998), 'nvstrings.to_device', 'nvstrings.to_device', (["['ggg', 'fff', 'hhh', 'aaa', 'fff', 'fff', 'ggg', 'hhh', 'bbb']"], {}), "(['ggg', 'fff', 'hhh', 'aaa', 'fff', 'fff', 'ggg', 'hhh',\n 'bbb'])\n", (4929, 4998), False, 'import nvstrings\n'), ((5020, 5050), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (5043, 5050), False, 'import nvcategory\n'), ((5062, 5092), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs2'], {}), '(strs2)\n', (5085, 5092), False, 'import nvcategory\n'), ((5362, 5413), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (5381, 5413), False, 'import nvstrings\n'), ((5426, 5467), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'c', 'd']"], {}), "(['a', 'b', 'c', 'd'])\n", (5445, 5467), False, 'import nvstrings\n'), ((5478, 5508), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (5501, 5508), False, 'import nvcategory\n'), ((5632, 5683), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (5651, 5683), False, 'import nvstrings\n'), ((5696, 5727), 'nvstrings.to_device', 'nvstrings.to_device', (["['b', 'd']"], {}), "(['b', 'd'])\n", (5715, 5727), False, 'import nvstrings\n'), ((5738, 5768), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (5761, 5768), False, 'import nvcategory\n'), ((5882, 5933), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (5901, 5933), False, 'import nvstrings\n'), ((5946, 5987), 'nvstrings.to_device', 'nvstrings.to_device', (["['b', 'c', 'e', 'd']"], {}), "(['b', 'c', 'e', 'd'])\n", (5965, 5987), False, 'import nvstrings\n'), ((5998, 6028), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (6021, 6028), False, 'import nvcategory\n'), ((6154, 6205), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (6173, 6205), False, 'import nvstrings\n'), ((6218, 6259), 'nvstrings.to_device', 'nvstrings.to_device', (["['b', 'c', 'e', 'd']"], {}), "(['b', 'c', 'e', 'd'])\n", (6237, 6259), False, 'import nvstrings\n'), ((6270, 6300), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (6293, 6300), False, 'import nvcategory\n'), ((6471, 6522), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (6490, 6522), False, 'import nvstrings\n'), ((6533, 6563), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (6556, 6563), False, 'import nvcategory\n'), ((6809, 6860), 'nvstrings.to_device', 'nvstrings.to_device', (["['a', 'b', 'b', 'f', 'c', 'f']"], {}), "(['a', 'b', 'b', 'f', 'c', 'f'])\n", (6828, 6860), False, 'import nvstrings\n'), ((6871, 6901), 'nvcategory.from_strings', 'nvcategory.from_strings', (['strs1'], {}), '(strs1)\n', (6894, 6901), False, 'import nvcategory\n'), ((7154, 7203), 'numpy.array', 'np.array', (['[97, 112, 112, 108, 101]'], {'dtype': 'np.int8'}), '([97, 112, 112, 108, 101], dtype=np.int8)\n', (7162, 7203), True, 'import numpy as np\n'), ((7218, 7262), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {'dtype': 'np.int32'}), '([0, 1, 2, 3, 4, 5], 
dtype=np.int32)\n', (7226, 7262), True, 'import numpy as np\n'), ((7273, 7316), 'nvcategory.from_offsets', 'nvcategory.from_offsets', (['values', 'offsets', '(5)'], {}), '(values, offsets, 5)\n', (7296, 7316), False, 'import nvcategory\n'), ((7512, 7560), 'nvstrings.to_device', 'nvstrings.to_device', (["['apple', 'pear', 'banana']"], {}), "(['apple', 'pear', 'banana'])\n", (7531, 7560), False, 'import nvstrings\n'), ((7570, 7609), 'nvstrings.to_device', 'nvstrings.to_device', (["['orange', 'pear']"], {}), "(['orange', 'pear'])\n", (7589, 7609), False, 'import nvstrings\n'), ((7620, 7658), 'nvcategory.from_strings_list', 'nvcategory.from_strings_list', (['[s1, s2]'], {}), '([s1, s2])\n', (7648, 7658), False, 'import nvcategory\n'), ((7876, 7943), 'nvcategory.to_device', 'nvcategory.to_device', (["['apple', 'pear', 'banana', 'orange', 'pear']"], {}), "(['apple', 'pear', 'banana', 'orange', 'pear'])\n", (7896, 7943), False, 'import nvcategory\n'), ((3151, 3175), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3164, 3175), False, 'import pytest\n')]
|
"""
Object to perform analysis and plotting on a given dataset
Methods for the measurement control software to analyse and plot data
@author: krolljg
"""
import matplotlib.pyplot as plt
import numpy as np
import colorsys
import qcodes
#from qcodes import Instrument # consider making a qcodes instrument in the future - not sure what the advantage is
import qtt
from qtt.utilities.tools import addPPTslide
import scipy.optimize as optimisation
class MeasurementAnalysis():
"""
Class that allows for analysis of measurement datasets. Can be initialised with a dataset for analysis.
dataset: target dataset
add_ppts: automatically loads plots into a powerpoint
prev_fig: can initialise with a figure in order to continue working on it
"""
def __init__(
self,
dataset=None,
add_ppts=True,
prev_fig=None,
verbose=True,
**kwargs
):
self.add_ppts = add_ppts
# used to keep working on the same figure if necessary
if prev_fig is None:
self.init_fig()
else:
self.fig = prev_fig
if dataset is not None:
self.load_data(dataset)
if len(self.setpoint_vars) == 1:
self.plot_1D()
if len(self.setpoint_vars) == 2:
self.plot_2D()
def load_data(self,dataset, xvar=None, yvar=None, zvar=None):
self.dataset = dataset
arrays = self.dataset.arrays
self.setpoint_vars = {key:value for (key,value) in arrays.items() if arrays.get(key).is_setpoint}
self.measured_vars = {key:value for (key,value) in arrays.items() if arrays.get(key).is_setpoint==False}
# determine dimensionality of dataset, load x, y and z variables appropriately
if len(self.setpoint_vars) == 1:
if xvar is None:
self.xvar = self.setpoint_vars.get(list(self.setpoint_vars)[0])
else:
self.xvar = self.setpoint_vars.get(xvar)
if yvar is None:
self.yvar = self.measured_vars.get(list(self.measured_vars)[0])
else:
self.yvar = self.measured_vars.get(yvar)
else:
if xvar is None:
self.xvar = self.setpoint_vars.get(list(self.setpoint_vars)[0])
else:
self.xvar = self.setpoint_vars.get(xvar)
if yvar is None:
self.yvar = self.setpoint_vars.get(list(self.setpoint_vars)[1])
else:
self.yvar = self.setpoint_vars.get(yvar)
if zvar is None:
self.zvar = self.measured_vars.get(list(self.measured_vars)[0])
else:
self.zvar = self.measured_vars.get(zvar)
def init_fig(self):
        ''' Initialise a new figure '''
self.fig = plt.figure()
def init_labels(self):
''' Used to generate a figure for 1D plots with axis labels and a title'''
ax = self.fig.add_subplot(111)
xvarlabel = self.xvar.label
xvarunit = self.xvar.unit
yvarlabel = self.yvar.label
yvarunit = self.yvar.unit
ax.set_xlabel(xvarlabel + ' (' + xvarunit + ')', fontsize=12)
ax.set_ylabel(yvarlabel + ' (' + yvarunit + ')', fontsize=12)
ax.set_title(str(self.dataset.location))
ax.ticklabel_format(style='sci', scilimits=(0, 0))
self.fig.tight_layout()
def add_linetrace(self, dataset=None, xvar=None, yvar=None, sub_fig=0, **kwargs):
''' Add linetrace to an existing figure '''
if dataset is not None: # reloads data if new dataset
self.load_data(dataset, xvar=xvar, yvar=yvar)
        ax = self.fig.axes[sub_fig]  # can address individual sub-figures
ax.plot(self.xvar, self.yvar, **kwargs)
def extract_gates(self):
''' Extract the gate values from the metadata '''
instruments = self.dataset.metadata.get('station').get('instruments')
instrument_list = list(instruments.keys())
ivvis = [inst for inst in instrument_list if inst[:4] == 'ivvi']
dac_list = []
dac_values = []
for ivvi in ivvis:
dac_list += instruments.get(ivvi).get('parameters')
dacs = [dac for dac in dac_list if dac[:3] == 'dac']
dac_values += [instruments.get(ivvi).get('parameters').get(dd).get('value') for dd in dacs]
        return dict(zip(dacs, dac_values))  # zip lists together
def add_ppt_slide(self,title=None,**kwargs):
        ''' Adds figure to a PPT, creates one if one is not open. '''
gatelist = self.extract_gates()
        if title is None:
            title = str(self.dataset.location)
addPPTslide(fig=self.fig.number,title=title,notes=str(gatelist),**kwargs)
def plot_1D(self, dataset=None, xvar=None, yvar=None, new_fig=True, **kwargs):
''' Generates a 1D plot from a dataset. x and y can be specified by name.'''
if dataset is not None:
if isinstance(dataset,list): # load first dataset
self.load_data(dataset[0], xvar, yvar)
else:
if isinstance(yvar,list): # load first yvar
self.load_data(dataset,xvar,yvar[0])
else: # load yvar
self.load_data(dataset, xvar, yvar)
if new_fig:
self.init_fig()
self.init_labels()
if isinstance(dataset,list): # plotting multiple datasets
# generating my own colormap
saturation = 0.8
lightness = 0.8
hue_range = np.linspace(0.0, 0.1, len(dataset))
color_list = [colorsys.hsv_to_rgb(hv, saturation, lightness) for hv in hue_range]
for custom_color, fd in zip(color_list, dataset):
                self.add_linetrace(dataset=fd, xvar=xvar, yvar=yvar, color=custom_color)
elif isinstance(yvar,list): # plotting multiple Yvars
for yy in yvar:
self.load_data(dataset, xvar, yy)
self.add_linetrace(**kwargs)
else: # plotting single dataset
self.add_linetrace(**kwargs)
if self.add_ppts:
self.add_ppt_slide()
def plot_2D(self, dataset=None, xvar=None, yvar=None, zvar=None, **kwargs):
''' Generates a 2D plot from a dataset. x y and z variables can be specified by name.'''
if dataset is not None:
self.load_data(dataset, xvar, yvar, zvar)
self.init_fig()
self.init_labels()
cb = self.fig.axes[0].pcolormesh(self.xvar, self.yvar, self.zvar)
self.fig.colorbar(cb)
if self.add_ppts:
self.add_ppt_slide()
def calculate_resistance(self,dataset):
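        ''' Fit a straight line to the 1D trace and report the resistance as R = 1/slope. '''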
self.plot_1D(dataset)
# in future, add routine to calculate rescaling due to axes units (mV->V etc)
fit = np.polyfit(self.xvar, self.yvar, 1)
x_fit = np.linspace(self.xvar[0], self.xvar[-1], 100)
y_fit = fit[0] * x_fit + fit[1]
G = fit[0]
R = (1 / G)
self.fig.axes[0].plot(x_fit,y_fit,'k--',label = 'Resistance: %d Ohm'%R)
self.fig.axes[0].legend()
if self.add_ppts:
self.add_ppt_slide()
def determine_turn_on(self, threshold_factor=0.1, step=3):
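        ''' Estimate the turn-on voltage of a 1D trace: points below threshold_factor*max(y) are
        discarded, the steepest rise over a window of `step` samples is located, and the local slope
        is extrapolated back to zero signal to give the turn-on value. '''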
self.plot_1D()
x = self.xvar
y = self.yvar
# check sweep direction and fix
if y[0] > y[-1]:
y = np.flip(y, 0)
x = np.flip(x, 0)
y_threshold = max(y) * threshold_factor
# first position in y vector above threshold value:
ind_start = np.argmax(np.asarray(y) > y_threshold)
y_clean = y[ind_start:]
x_clean = x[ind_start:]
diff_vector = y_clean[step:] - y_clean[:-step]
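        # locate the steepest rise over a window of `step` points and extrapolate
        # its tangent line down to zero current to estimate the turn-on voltage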
ind_diff_max = np.argmax(diff_vector)
diff_max_y = max(diff_vector)
diff_x = x_clean[ind_diff_max + step] - x_clean[ind_diff_max]
slope = diff_max_y / diff_x
pos_x = (x_clean[ind_diff_max + step] + x_clean[ind_diff_max]) / 2
pos_y = (y_clean[ind_diff_max + step] + y_clean[ind_diff_max]) / 2
offset_y = pos_y - pos_x * slope
turn_on_value = int(np.round(-offset_y / slope, 0))
y_fit = slope * np.asarray(x) + offset_y
self.fig.axes[0].plot(x,y_fit,'k--',label = 'Turn on: %d mV'%turn_on_value)
self.fig.axes[0].legend()
self.fig.axes[0].set_ylim(bottom=min(y),top=max(y))
if self.add_ppts:
self.add_ppt_slide()
def extract_mobility(self, dataset):
e = 1.60217662 * 10 ** -19
def unzip(iterable):
return list(zip(*iterable))[0], list(zip(*iterable))[1]
def linearmodel(x, m, c):
return x * m + c
def fit_gradient(x, y, intercept_error=100, plotting=False,
silent=True): # return gradient, intercept; error if intercept not at 0
popt = np.asarray([np.nan, np.nan])
# strip nans/infs if necessary
filtered = [(bb, rr) for (bb, rr) in zip(x, y) if (not np.isinf(rr)) and (not np.isnan(rr))]
filtered_array = np.asarray(filtered)
# if samples >= 2, fit:
if len(filtered) > 1:
x_filtered = filtered_array[:, 0]
y_filtered = filtered_array[:, 1]
popt, _ = optimisation.curve_fit(linearmodel, x_filtered, y_filtered, p0=[0, 0])
if (np.abs(popt[1]) > intercept_error) and not silent:
print('Fit intercept not at zero - check fits!')
if plotting:
plt.plot(x_filtered, y_filtered, '.')
plt.plot(x_filtered, linearmodel(x_filtered, popt[0], popt[1]), ':')
return popt
self.init_fig()
ax = self.fig.add_subplot(111)
ax.set_xlabel('n (cm$^{-2}$)', fontsize=12)
ax.set_ylabel('$\mu$ (cm$^{2}$/Vs)', fontsize=12)
ax.set_title(str(dataset.location))
ax.ticklabel_format(style='sci', scilimits=(0, 0))
self.fig.tight_layout()
Bs = dataset.B
rho_xx = dataset.Rho_xx
rho_xy = dataset.Rho_xy
rho_xy_dB_popts = np.vstack([fit_gradient(Bs, xys, plotting=True) for xys in np.transpose(rho_xy)])
drho_xy_dB = rho_xy_dB_popts[:, 0]
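        # Hall analysis (assuming a single carrier type): carrier density
        # n_s = 1 / (e * dRho_xy/dB); mobility mu = (dRho_xy/dB) / Rho_xx,
        # with Rho_xx taken at the first field point (B ~ 0)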
n_s = 1 / e / drho_xy_dB # in m^-2
mu = drho_xy_dB / rho_xx[0]
nan_inf_removal = [(bb, rr) for (bb, rr) in zip(n_s, mu) if (not np.isinf(rr)) and (not np.isnan(rr))]
negative_removal = [(bb, rr) for (bb, rr) in nan_inf_removal if (bb > 0) and (rr > 0)]
n_s_filt, mu_filt = unzip(negative_removal)
plt.plot(n_s_filt, mu_filt, '.')
def plot_multiple_scans(self, datasets, xvar=None, yvar=None, hue=0, label = None, new_fig=True, **kwargs):
self.load_data(datasets[0], xvar, yvar)
if new_fig:
self.init_fig()
else:
self.fig.clf()
self.init_labels()
# generating my own colormap
saturation = 0.8
lightness = 0.8
hue_range = np.linspace(hue, 0.1, len(datasets))
color_list = [colorsys.hsv_to_rgb(hv, saturation, lightness) for hv in hue_range]
for custom_color, fd in zip(color_list, datasets):
if custom_color == color_list[0]:
self.add_linetrace(dataset=fd, xvar=xvar, yvar=yvar, color=custom_color,label=label, **kwargs)
else:
self.add_linetrace(dataset=fd, xvar=xvar, yvar=yvar, color=custom_color,**kwargs)
if self.add_ppts:
self.add_ppt_slide()
def plot_drift_scans(self, forward_datasets, backward_datasets, xvar=None, yvar=None, new_fig=True):
'''self.add_ppts = False
self.plot_multiple_scans(forward_datasets, xvar=xvar, yvar=yvar, label='Forwards')
self.add_ppts = True
self.plot_multiple_scans(backward_datasets, new_fig=False, xvar=xvar, yvar=yvar, label='Backwards', linestyle='--')
'''
self.load_data(forward_datasets[0], xvar, yvar)
if new_fig:
self.init_fig()
else:
self.fig.clf()
self.init_labels()
# generating my own colormap
saturation = 0.8
lightness = 0.8
hue_range = np.linspace(0.0, 0.1, len(forward_datasets))
color_list = [colorsys.hsv_to_rgb(hv, saturation, lightness) for hv in hue_range]
for custom_color, fd, bd in zip(color_list, forward_datasets, backward_datasets):
if custom_color == color_list[0]:
self.add_linetrace(dataset=fd, xvar=xvar, yvar=yvar, color=custom_color, label='Forward')
self.add_linetrace(dataset=bd, xvar=xvar, yvar=yvar, color=custom_color, linestyle='--', label='Backward')
else:
self.add_linetrace(dataset=fd, xvar=xvar, yvar=yvar, color=custom_color)
self.add_linetrace(dataset=bd, xvar=xvar, yvar=yvar, color=custom_color, linestyle='--')
self.fig.axes[0].legend()
if self.add_ppts:
self.add_ppt_slide()
def analyse_drift_scans(self, forward_datasets, backward_datasets, xvar=None, yvar=None, new_fig=True):
# Written by Lucas (I think). Adapted with minimal changes.
def scans_diff(x1, y1, x2, y2): # ds1 should be shorter than ds2
# check
if len(x1) > len(x2):
print('Error: cannot process datasets in reversed order')
# sort both vectors in ascending order
if y1[0] > y1[-1]:
y1 = np.flip(y1, 0)
x1 = np.flip(x1, 0)
if y2[0] > y2[-1]:
y2 = np.flip(y2, 0)
x2 = np.flip(x2, 0)
# Only select comparable part
x2_trim = x2[:len(x1)]
y2_trim = y2[:len(x1)]
# check
if max(abs(x1 - x2_trim)) > 0.001:
print('Gate voltages are not comparable')
print(x1)
print(x2_trim)
for i in [1]:
break
# calculate sum of difference squared between both vectors
y1_np = np.array(y1)
y2_trim_np = np.array(y2_trim)
            try:
                y_diff_sq = sum((y1_np - y2_trim_np) ** 2)
            except Exception:
                print('Error in calculating difference between two consecutive datasets')
                y_diff_sq = np.nan
if (y_diff_sq / len(x1)) ** 0.5 < 0:
print('ERROR: difference between datasets smaller than zero while it should be larger')
return (y_diff_sq / len(x1)) ** 0.5
##############################################################################
self.load_data(forward_datasets[0], xvar, yvar)
forward_diff_list = []
backward_diff_list = []
peak_voltage_list = []
for i in range(len(forward_datasets) - 1):
# FORWARD
ds1 = forward_datasets[i]
ds2 = forward_datasets[i + 1]
self.load_data(ds1)
x1, y1 = self.xvar, self.yvar
self.load_data(ds2)
x2, y2 = self.xvar, self.yvar
rms_diff_FW = scans_diff(x1, y1, x2, y2)
forward_diff_list.append(rms_diff_FW)
# BACKWARD
ds1 = backward_datasets[i]
ds2 = backward_datasets[i + 1]
self.load_data(ds1)
x1, y1 = self.xvar, self.yvar
self.load_data(ds2)
x2, y2 = self.xvar, self.yvar
rms_diff_BW = scans_diff(x1, y1, x2, y2)
backward_diff_list.append(rms_diff_BW)
# PEAK VOLTAGE LIST
peak_voltage = max(x2)
peak_voltage_list.append(peak_voltage)
if new_fig:
self.init_fig()
else:
self.fig.clf()
ax = self.fig.add_subplot(111)
ax.plot(peak_voltage_list, forward_diff_list, '1r', label='Forward scans')
ax.plot(peak_voltage_list, backward_diff_list, '2b', label='Backward scans')
# plt.yscale("log") #log scale
plt.ylim(bottom=0)
x_title1 = self.xvar.label
plt.xlabel('Peak voltage on %s (mV)' % x_title1)
plt.ylabel('RMS difference (A)')
plt.legend()
plt.tight_layout()
if self.add_ppts:
self.add_ppt_slide(title='RMS difference of drift scan')
# saving diff lists for analysis
self.forward_diff_list = forward_diff_list
self.backward_diff_list = backward_diff_list
#return , backward_diff_list
|
[
"numpy.abs",
"numpy.argmax",
"numpy.polyfit",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.round",
"matplotlib.pyplot.tight_layout",
"numpy.transpose",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.asarray",
"numpy.isinf",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.ylabel",
"numpy.flip",
"matplotlib.pyplot.plot",
"colorsys.hsv_to_rgb",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((2870, 2882), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2880, 2882), True, 'import matplotlib.pyplot as plt\n'), ((7099, 7134), 'numpy.polyfit', 'np.polyfit', (['self.xvar', 'self.yvar', '(1)'], {}), '(self.xvar, self.yvar, 1)\n', (7109, 7134), True, 'import numpy as np\n'), ((7151, 7196), 'numpy.linspace', 'np.linspace', (['self.xvar[0]', 'self.xvar[-1]', '(100)'], {}), '(self.xvar[0], self.xvar[-1], 100)\n', (7162, 7196), True, 'import numpy as np\n'), ((8023, 8045), 'numpy.argmax', 'np.argmax', (['diff_vector'], {}), '(diff_vector)\n', (8032, 8045), True, 'import numpy as np\n'), ((10895, 10927), 'matplotlib.pyplot.plot', 'plt.plot', (['n_s_filt', 'mu_filt', '"""."""'], {}), "(n_s_filt, mu_filt, '.')\n", (10903, 10927), True, 'import matplotlib.pyplot as plt\n'), ((16318, 16336), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (16326, 16336), True, 'import matplotlib.pyplot as plt\n'), ((16380, 16428), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Peak voltage on %s (mV)' % x_title1)"], {}), "('Peak voltage on %s (mV)' % x_title1)\n", (16390, 16428), True, 'import matplotlib.pyplot as plt\n'), ((16437, 16469), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMS difference (A)"""'], {}), "('RMS difference (A)')\n", (16447, 16469), True, 'import matplotlib.pyplot as plt\n'), ((16478, 16490), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16488, 16490), True, 'import matplotlib.pyplot as plt\n'), ((16500, 16518), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (16516, 16518), True, 'import matplotlib.pyplot as plt\n'), ((7665, 7678), 'numpy.flip', 'np.flip', (['y', '(0)'], {}), '(y, 0)\n', (7672, 7678), True, 'import numpy as np\n'), ((7695, 7708), 'numpy.flip', 'np.flip', (['x', '(0)'], {}), '(x, 0)\n', (7702, 7708), True, 'import numpy as np\n'), ((8411, 8441), 'numpy.round', 'np.round', (['(-offset_y / slope)', '(0)'], {}), '(-offset_y / slope, 0)\n', (8419, 8441), True, 'import numpy as np\n'), ((9158, 9186), 'numpy.asarray', 'np.asarray', (['[np.nan, np.nan]'], {}), '([np.nan, np.nan])\n', (9168, 9186), True, 'import numpy as np\n'), ((9365, 9385), 'numpy.asarray', 'np.asarray', (['filtered'], {}), '(filtered)\n', (9375, 9385), True, 'import numpy as np\n'), ((11372, 11418), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hv', 'saturation', 'lightness'], {}), '(hv, saturation, lightness)\n', (11391, 11418), False, 'import colorsys\n'), ((12577, 12623), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hv', 'saturation', 'lightness'], {}), '(hv, saturation, lightness)\n', (12596, 12623), False, 'import colorsys\n'), ((14399, 14411), 'numpy.array', 'np.array', (['y1'], {}), '(y1)\n', (14407, 14411), True, 'import numpy as np\n'), ((14437, 14454), 'numpy.array', 'np.array', (['y2_trim'], {}), '(y2_trim)\n', (14445, 14454), True, 'import numpy as np\n'), ((5726, 5772), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hv', 'saturation', 'lightness'], {}), '(hv, saturation, lightness)\n', (5745, 5772), False, 'import colorsys\n'), ((7849, 7862), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (7859, 7862), True, 'import numpy as np\n'), ((8468, 8481), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (8478, 8481), True, 'import numpy as np\n'), ((9583, 9653), 'scipy.optimize.curve_fit', 'optimisation.curve_fit', (['linearmodel', 'x_filtered', 'y_filtered'], {'p0': '[0, 0]'}), '(linearmodel, x_filtered, y_filtered, p0=[0, 0])\n', (9605, 9653), True, 'import scipy.optimize as optimisation\n'), 
((13801, 13815), 'numpy.flip', 'np.flip', (['y1', '(0)'], {}), '(y1, 0)\n', (13808, 13815), True, 'import numpy as np\n'), ((13837, 13851), 'numpy.flip', 'np.flip', (['x1', '(0)'], {}), '(x1, 0)\n', (13844, 13851), True, 'import numpy as np\n'), ((13904, 13918), 'numpy.flip', 'np.flip', (['y2', '(0)'], {}), '(y2, 0)\n', (13911, 13918), True, 'import numpy as np\n'), ((13940, 13954), 'numpy.flip', 'np.flip', (['x2', '(0)'], {}), '(x2, 0)\n', (13947, 13954), True, 'import numpy as np\n'), ((9844, 9881), 'matplotlib.pyplot.plot', 'plt.plot', (['x_filtered', 'y_filtered', '"""."""'], {}), "(x_filtered, y_filtered, '.')\n", (9852, 9881), True, 'import matplotlib.pyplot as plt\n'), ((10480, 10500), 'numpy.transpose', 'np.transpose', (['rho_xy'], {}), '(rho_xy)\n', (10492, 10500), True, 'import numpy as np\n'), ((9674, 9689), 'numpy.abs', 'np.abs', (['popt[1]'], {}), '(popt[1])\n', (9680, 9689), True, 'import numpy as np\n'), ((10701, 10713), 'numpy.isinf', 'np.isinf', (['rr'], {}), '(rr)\n', (10709, 10713), True, 'import numpy as np\n'), ((10724, 10736), 'numpy.isnan', 'np.isnan', (['rr'], {}), '(rr)\n', (10732, 10736), True, 'import numpy as np\n'), ((9297, 9309), 'numpy.isinf', 'np.isinf', (['rr'], {}), '(rr)\n', (9305, 9309), True, 'import numpy as np\n'), ((9320, 9332), 'numpy.isnan', 'np.isnan', (['rr'], {}), '(rr)\n', (9328, 9332), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as pl
import h5py
import platform
import os
import pickle
import scipy.io as io
import seaborn as sns
from keras.models import model_from_json
import json
from ipdb import set_trace as stop
class plot_map(object):
    def __init__(self, root, noise=1e-4):
self.root = root
self.noise = noise
self.batch_size = 256
self.dataFile = "/net/duna/scratch1/aasensio/deepLearning/milne/database/database_6301_hinode_1component.h5"
f = h5py.File(self.dataFile, 'r')
self.pars = f.get("parameters")
self.lower = np.min(self.pars, axis=0)
self.upper = np.max(self.pars, axis=0)
f.close()
self.root_hinode = "/net/nas4/fis/aasensio/scratch/HINODE/SUNSPOT/"
self.label_files = ["sunspot_stokesI_512x512.sav", "sunspot_stokesQ_512x512.sav", "sunspot_stokesU_512x512.sav", "sunspot_stokesV_512x512.sav"]
self.std_values = np.load('{0}_normalization.npy'.format(self.root))
labels_data = ['data_ii', 'data_qq', 'data_uu', 'data_vv']
self.stokes = np.zeros((512,512,50,4))
for i in range(4):
print("Reading file {0}".format(self.label_files[i]))
stokes = io.readsav("/net/nas4/fis/aasensio/scratch/HINODE/SUNSPOT/{0}".format(self.label_files[i]))[labels_data[i]]
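            # all Stokes parameters are divided by the mean quiet-region Stokes I
            # continuum (computed once, for i == 0) and then by the per-wavelength
            # normalization saved at training time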
if (i == 0):
mean_stokesi = np.mean(stokes[400:500,0:100,0])
stokes = stokes[:,:,0:50] / mean_stokesi
self.stokes[:,:,:,i] = stokes / self.std_values[None,None,:,i]
self.stokes = self.stokes.reshape((512*512,50,4))
def read_network(self):
print("Reading previous network...")
f = open('{0}_model.json'.format(self.root), 'r')
json_string = f.read()
f.close()
self.model = model_from_json(json_string)
self.model.load_weights("{0}_weights.hdf5".format(self.root))
def forward_network(self):
inTest = []
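        # the network expects the four Stokes parameters as separate inputs,
        # each shaped (n_pixels, n_lambda, 1)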
for i in range(4):
inTest.append(np.atleast_3d(self.stokes[:,:,i]).astype('float32'))
self.prob = self.model.predict(inTest, batch_size=self.batch_size, verbose=1)
def plot(self):
pl.close('all')
f, ax = pl.subplots(nrows=3, ncols=3, figsize=(12,10))
ax = ax.flatten()
labels = ['B [G]', r'$\theta_B$', r'$\phi_B$', r'$v_\mathrm{mac}$', 'a', 'B$_0$', 'B$_1$', r'$\Delta \lambda_D$ [m$\AA$]', r'$\eta$']
for i in range(9):
n_pixel, n_classes = self.prob[i].shape
x = np.linspace(self.lower[i], self.upper[i], n_classes)
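            # the network outputs a probability over n_classes bins for each parameter;
            # the map shown is the expected value of the parameter under that distribution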
mean = np.sum(self.prob[i] * x[None,:], axis=1).reshape((512,512))
ax[i].imshow(mean, cmap=pl.cm.viridis)
ax[i].set_title(labels[i])
pl.tight_layout()
pl.show()
# pl.savefig("{0}_{1}_comparison.png".format(self.root, self.noise))
if (__name__ == '__main__'):
root = 'cnns/6301_hinode_1component'
noise = 1e-4
    out = plot_map(root, noise)
out.read_network()
out.forward_network()
out.plot()
|
[
"matplotlib.pyplot.tight_layout",
"h5py.File",
"numpy.atleast_3d",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.max",
"keras.models.model_from_json",
"numpy.min",
"numpy.mean",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((504, 533), 'h5py.File', 'h5py.File', (['self.dataFile', '"""r"""'], {}), "(self.dataFile, 'r')\n", (513, 533), False, 'import h5py\n'), ((611, 636), 'numpy.min', 'np.min', (['self.pars'], {'axis': '(0)'}), '(self.pars, axis=0)\n', (617, 636), True, 'import numpy as np\n'), ((658, 683), 'numpy.max', 'np.max', (['self.pars'], {'axis': '(0)'}), '(self.pars, axis=0)\n', (664, 683), True, 'import numpy as np\n'), ((1110, 1137), 'numpy.zeros', 'np.zeros', (['(512, 512, 50, 4)'], {}), '((512, 512, 50, 4))\n', (1118, 1137), True, 'import numpy as np\n'), ((1855, 1883), 'keras.models.model_from_json', 'model_from_json', (['json_string'], {}), '(json_string)\n', (1870, 1883), False, 'from keras.models import model_from_json\n'), ((2237, 2252), 'matplotlib.pyplot.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (2245, 2252), True, 'import matplotlib.pyplot as pl\n'), ((2269, 2316), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'nrows': '(3)', 'ncols': '(3)', 'figsize': '(12, 10)'}), '(nrows=3, ncols=3, figsize=(12, 10))\n', (2280, 2316), True, 'import matplotlib.pyplot as pl\n'), ((2813, 2830), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (2828, 2830), True, 'import matplotlib.pyplot as pl\n'), ((2839, 2848), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2846, 2848), True, 'import matplotlib.pyplot as pl\n'), ((2581, 2633), 'numpy.linspace', 'np.linspace', (['self.lower[i]', 'self.upper[i]', 'n_classes'], {}), '(self.lower[i], self.upper[i], n_classes)\n', (2592, 2633), True, 'import numpy as np\n'), ((1414, 1448), 'numpy.mean', 'np.mean', (['stokes[400:500, 0:100, 0]'], {}), '(stokes[400:500, 0:100, 0])\n', (1421, 1448), True, 'import numpy as np\n'), ((2653, 2694), 'numpy.sum', 'np.sum', (['(self.prob[i] * x[None, :])'], {'axis': '(1)'}), '(self.prob[i] * x[None, :], axis=1)\n', (2659, 2694), True, 'import numpy as np\n'), ((2059, 2094), 'numpy.atleast_3d', 'np.atleast_3d', (['self.stokes[:, :, i]'], {}), '(self.stokes[:, :, i])\n', (2072, 2094), True, 'import numpy as np\n')]
|
# Copyright 2018 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import serial
import numpy as np
class ActonSp300i(object):
def __init__(self, port='/dev/tty', sensor=None, debug=False):
"""
A class to interface to Princeton Instruments SpectraPro 300i
        Monochromator via a serial interface, using the protocol specified
in ftp://ftp.princetoninstruments.com/public/manuals/Acton/Sp-300i.pdf
"""
self._port = port
self._sensor = sensor
self._debug = debug
try:
self._connection = serial.Serial(self._port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
timeout=5)
except serial.SerialException:
print('Unable to find or configure a serial connection to device %s' %(self._port))
return None
def _send_command(self, command):
# should be modified with something healtier
cmd_string = "%s\r" %command
self._connection.write(cmd_string.encode())
ret_string = self._connection.read_until()
return ret_string.decode().strip()
def close(self):
self._connection.close()
def set_sensor(self, sensor):
self._sensor = sensor
def get_current_position(self):
"""
This method returns the current position of the grating, i.e. the currently
selected wavelength. The value is a float representing the wavelength in
nanometers. On error, the method raises a serial.SerialException.
"""
ret_str = self._send_command('?NM')
if self._debug:
print(ret_str)
return 0.0
ret_elements = ret_str.split()
if (len(ret_elements) == 4) and (ret_elements[-1] == 'ok'):
return float(ret_elements[1])
else:
raise serial.SerialException
def init_scan(self, wavelength):
line = self._send_command('%f GOTO' %(float(wavelength)))
print(line)
def move_to(self, wavelength):
"""
Move the grating to the given wavelength
"""
line = self._send_command('%f NM' %(float(wavelength)))
print(line)
# if line.decode().strip() != 'ok':
# raise serial.SerialException
def scan(self, wavelength_range=(400, 800), n_repetitions=30, n_integrations=1):
"""
        Performs a wavelength scan over 'wavelength_range' (defaults to 400-800 nm
        with a 1 nm step), repeating the measurement n_repetitions times (defaults
        to 30) and summing the values over n_integrations integrations (defaults
        to 1, i.e. no summation).
        The method returns a numpy array with one row per wavelength, each row
        being [wavelength, mean measure, standard deviation].
"""
measures = []
self.init_scan(wavelength_range[0]-0.1)
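        # start slightly below the first wavelength, presumably so every point is
        # approached from the same direction (avoids grating backlash)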
for l in range(wavelength_range[0], wavelength_range[1]+1):
current_measure = []
self.move_to(l)
for _ in range(n_integrations):
tmp_measure = []
for _ in range(n_repetitions):
tmp_measure.append(self._sensor.measure())
if n_integrations == 1:
current_measure = tmp_measure
else:
current_measure.append(np.array(tmp_measure).sum())
current_measure = np.array(current_measure)
measures.append([l, current_measure.mean(), current_measure.std()])
return np.array(measures)
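# Minimal usage sketch (hypothetical port name and sensor object exposing measure()):
#   mono = ActonSp300i(port='/dev/ttyUSB0', sensor=my_sensor)
#   data = mono.scan(wavelength_range=(500, 600), n_repetitions=10)
#   mono.close()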
|
[
"serial.Serial",
"numpy.array"
] |
[((4568, 4586), 'numpy.array', 'np.array', (['measures'], {}), '(measures)\n', (4576, 4586), True, 'import numpy as np\n'), ((1144, 1284), 'serial.Serial', 'serial.Serial', (['self._port'], {'baudrate': '(9600)', 'bytesize': 'serial.EIGHTBITS', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'timeout': '(5)'}), '(self._port, baudrate=9600, bytesize=serial.EIGHTBITS, parity=\n serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=5)\n', (1157, 1284), False, 'import serial\n'), ((4443, 4468), 'numpy.array', 'np.array', (['current_measure'], {}), '(current_measure)\n', (4451, 4468), True, 'import numpy as np\n'), ((4381, 4402), 'numpy.array', 'np.array', (['tmp_measure'], {}), '(tmp_measure)\n', (4389, 4402), True, 'import numpy as np\n')]
|
import os
import unittest
import logging
import shutil
import numpy as np
from smac.configspace import Configuration
from smac.scenario.scenario import Scenario
from smac.stats.stats import Stats
from smac.tae.execute_ta_run import StatusType
from smac.tae.execute_ta_run_old import ExecuteTARunOld
from smac.runhistory.runhistory import RunHistory
from smac.utils.io.traj_logging import TrajLogger
from smac.utils.validate import Validator, _Run
from unittest import mock
class ValidationTest(unittest.TestCase):
def setUp(self):
base_directory = os.path.split(__file__)[0]
base_directory = os.path.abspath(
os.path.join(base_directory, '..', '..'))
self.current_dir = os.getcwd()
os.chdir(base_directory)
logging.basicConfig()
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
self.rng = np.random.RandomState(seed=42)
self.scen_fn = 'test/test_files/validation/scenario.txt'
self.train_insts = ['0', '1', '2']
self.test_insts = ['3', '4', '5']
self.inst_specs = {'0': 'null', '1': 'one', '2': 'two',
'3': 'three', '4': 'four', '5': 'five'}
self.feature_dict = {'0': np.array((1, 2, 3)),
'1': np.array((1, 2, 3)),
'2': np.array((1, 2, 3)),
'3': np.array((1, 2, 3)),
'4': np.array((1, 2, 3)),
'5': np.array((1, 2, 3))}
self.output_rh = 'test/test_files/validation/'
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
self.stats = Stats(scen)
self.trajectory = TrajLogger.read_traj_aclib_format(
fn='test/test_files/validation/test_validation_traj.json', cs=scen.cs)
self.output_dirs = [self.output_rh + 'test']
self.output_files = [self.output_rh + 'validated_runhistory_EPM.json',
self.output_rh + 'validated_runhistory.json']
self.maxDiff = None
def tearDown(self):
for output_dir in self.output_dirs:
if output_dir:
shutil.rmtree(output_dir, ignore_errors=True)
for output_file in self.output_files:
if output_file:
try:
os.remove(output_file)
except FileNotFoundError:
pass
os.chdir(self.current_dir)
def test_rng(self):
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
validator = Validator(scen, self.trajectory, 42)
self.assertTrue(isinstance(validator.rng, np.random.RandomState))
validator = Validator(scen, self.trajectory)
self.assertTrue(isinstance(validator.rng, np.random.RandomState))
validator = Validator(scen, self.trajectory, np.random.RandomState())
self.assertTrue(isinstance(validator.rng, np.random.RandomState))
def test_nonexisting_output(self):
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
validator = Validator(scen, self.trajectory)
path = "test/test_files/validation/test/nonexisting/output"
validator.validate(output_fn=path)
self.assertTrue(os.path.exists(path))
def test_pass_tae(self):
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
tae = ExecuteTARunOld(ta=scen.ta, stats=self.stats)
validator = Validator(scen, self.trajectory)
rh_mock = mock.Mock()
with mock.patch.object(
Validator,
"_validate_parallel",
return_value=[mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock.MagicMock()],
) as validate_parallel_mock:
with mock.patch.object(
Validator,
"_get_runs",
return_value=[[mock.Mock(), mock.Mock(), mock.Mock(), mock.Mock()], rh_mock],
):
validator.validate(tae=tae)
self.assertIs(validate_parallel_mock.call_args[0][0], tae)
self.assertEqual(rh_mock.add.call_count, 4)
def test_no_rh_epm(self):
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
scen.feature_array = None
validator = Validator(scen, self.trajectory)
self.assertRaises(ValueError, validator.validate_epm)
def test_epm_reuse_rf(self):
""" if no runhistory is passed to epm, but there was a model trained
before, that model should be reused! (if reuse_epm flag is set) """
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
scen.feature_array = None
validator = Validator(scen, self.trajectory)
old_rh = RunHistory()
for config in [e["incumbent"] for e in self.trajectory]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
seed=127)
self.assertTrue(isinstance(validator.validate_epm(runhistory=old_rh),
RunHistory))
self.assertTrue(isinstance(validator.validate_epm(
output_fn="test/test_files/validation/"),
RunHistory))
self.assertRaises(ValueError, validator.validate_epm, reuse_epm=False)
def test_no_feature_dict(self):
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
scen.feature_array = None
validator = Validator(scen, self.trajectory)
old_rh = RunHistory()
for config in [e["incumbent"] for e in self.trajectory]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
seed=127)
validator.validate_epm(runhistory=old_rh)
def test_get_configs(self):
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
validator = Validator(scen, self.trajectory, self.rng)
self.assertEqual(1, len(validator._get_configs("def")))
self.assertEqual(1, len(validator._get_configs("inc")))
self.assertEqual(2, len(validator._get_configs("def+inc")))
self.assertEqual(7, len(validator._get_configs("wallclock_time")))
self.assertEqual(8, len(validator._get_configs("cpu_time")))
self.assertEqual(10, len(validator._get_configs("all")))
# Using maxtime
validator.scen.wallclock_limit = 65
validator.scen.algo_runs_timelimit = 33
self.assertEqual(8, len(validator._get_configs("wallclock_time")))
self.assertEqual(9, len(validator._get_configs("cpu_time")))
# Exceptions
self.assertRaises(ValueError, validator._get_configs, "notanoption")
self.assertRaises(ValueError, validator._get_instances, "notanoption")
def test_get_runs_capped(self):
''' test if capped, crashed and aborted runs are ignored
during rh-recovery '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'instances': ['0']})
validator = Validator(scen, self.trajectory, self.rng)
# Get runhistory
old_configs = [Configuration(scen.cs, values={'x1': i, 'x2': i}) for i in range(1, 7)]
old_rh = RunHistory()
old_rh.add(old_configs[0], 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
old_rh.add(old_configs[1], 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
old_rh.add(old_configs[2], 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
old_rh.add(old_configs[3], 1, 1, StatusType.ABORT, instance_id='0', seed=0)
old_rh.add(old_configs[4], 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
old_rh.add(old_configs[5], 1, 1, StatusType.CAPPED, instance_id='0', seed=0)
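        # runs with CRASHED, ABORT or CAPPED status are not reused from the runhistory,
        # so exactly those configs are expected to show up as runs still to validate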
# Get multiple configs
expected = [_Run(inst_specs='0', seed=0, inst='0', config=old_configs[2]),
_Run(inst_specs='0', seed=0, inst='0', config=old_configs[3]),
_Run(inst_specs='0', seed=0, inst='0', config=old_configs[5])]
runs = validator._get_runs(old_configs, ['0'], repetitions=1, runhistory=old_rh)
self.assertEqual(runs[0], expected)
def test_get_runs(self):
''' test if the runs are generated as expected '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'test_insts': self.test_insts})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Get multiple configs
self.maxDiff = None
expected = [_Run(config='config1', inst='3', seed=1608637542, inst_specs='three'),
_Run(config='config2', inst='3', seed=1608637542, inst_specs='three'),
_Run(config='config1', inst='3', seed=1273642419, inst_specs='three'),
_Run(config='config2', inst='3', seed=1273642419, inst_specs='three'),
_Run(config='config1', inst='4', seed=1935803228, inst_specs='four'),
_Run(config='config2', inst='4', seed=1935803228, inst_specs='four'),
_Run(config='config1', inst='4', seed=787846414, inst_specs='four'),
_Run(config='config2', inst='4', seed=787846414, inst_specs='four'),
_Run(config='config1', inst='5', seed=996406378, inst_specs='five'),
_Run(config='config2', inst='5', seed=996406378, inst_specs='five'),
_Run(config='config1', inst='5', seed=1201263687, inst_specs='five'),
_Run(config='config2', inst='5', seed=1201263687, inst_specs='five')]
runs = validator._get_runs(['config1', 'config2'], scen.test_insts, repetitions=2)
self.assertEqual(runs[0], expected)
# Only train
expected = [_Run(config='config1', inst='0', seed=423734972, inst_specs='null'),
_Run(config='config1', inst='0', seed=415968276, inst_specs='null'),
_Run(config='config1', inst='1', seed=670094950, inst_specs='one'),
_Run(config='config1', inst='1', seed=1914837113, inst_specs='one'),
_Run(config='config1', inst='2', seed=669991378, inst_specs='two'),
_Run(config='config1', inst='2', seed=429389014, inst_specs='two')]
runs = validator._get_runs(['config1'], scen.train_insts, repetitions=2)
self.assertEqual(runs[0], expected)
# Test and train
expected = [_Run(config='config1', inst='0', seed=249467210, inst_specs='null'),
_Run(config='config1', inst='1', seed=1972458954, inst_specs='one'),
_Run(config='config1', inst='2', seed=1572714583, inst_specs='two'),
_Run(config='config1', inst='3', seed=1433267572, inst_specs='three'),
_Run(config='config1', inst='4', seed=434285667, inst_specs='four'),
_Run(config='config1', inst='5', seed=613608295, inst_specs='five')]
insts = self.train_insts
insts.extend(self.test_insts)
runs = validator._get_runs(['config1'], insts, repetitions=1)
self.assertEqual(runs[0], expected)
def test_validate(self):
''' test validation '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'test_insts': self.test_insts})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Test basic usage
rh = validator.validate(config_mode='def', instance_mode='test',
repetitions=3)
self.assertEqual(len(rh.get_all_configs()), 1)
self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0], only_max_observed_budget=True)), 9)
rh = validator.validate(config_mode='inc', instance_mode='train+test')
self.assertEqual(len(rh.get_all_configs()), 1)
self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0], only_max_observed_budget=True)), 6)
rh = validator.validate(config_mode='wallclock_time', instance_mode='train')
self.assertEqual(len(rh.get_all_configs()), 7)
self.assertEqual(sum([len(rh.get_runs_for_config(c, only_max_observed_budget=True)) for c in
rh.get_all_configs()]), 21)
# Test with backend multiprocessing
rh = validator.validate(config_mode='def', instance_mode='test',
repetitions=3, backend='multiprocessing')
self.assertEqual(len(rh.get_all_configs()), 1)
self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0], only_max_observed_budget=True)), 9)
def test_validate_no_insts(self):
''' no instances '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality'})
validator = Validator(scen, self.trajectory, self.rng)
rh = validator.validate(config_mode='def+inc', instance_mode='train',
repetitions=3, output_fn=self.output_rh)
self.assertEqual(len(rh.get_all_configs()), 2)
self.assertEqual(sum([len(rh.get_runs_for_config(c, only_max_observed_budget=True)) for c in
rh.get_all_configs()]), 6)
def test_validate_deterministic(self):
''' deterministic ta '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'deterministic': True})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
rh = validator.validate(config_mode='def+inc',
instance_mode='train', repetitions=3)
self.assertEqual(len(rh.get_all_configs()), 2)
self.assertEqual(sum([len(rh.get_runs_for_config(c, only_max_observed_budget=True)) for c in
rh.get_all_configs()]), 6)
def test_parallel(self):
''' test parallel '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality'})
validator = Validator(scen, self.trajectory, self.rng)
validator.validate(config_mode='all', instance_mode='train+test', n_jobs=-1)
def test_passed_runhistory(self):
''' test if passed runhistory is in resulting runhistory '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'test_insts': self.test_insts})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Add a few runs and check, if they are correctly processed
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory()
seeds = [127 for i in range(int(len(old_configs) / 2))]
seeds[-1] = 126 # Test instance_seed-structure in validation
for config in old_configs[:int(len(old_configs) / 2)]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
seed=seeds[old_configs.index(config)])
configs = validator._get_configs('all')
insts = validator._get_instances('train')
runs_w_rh = validator._get_runs(configs, insts, repetitions=2,
runhistory=old_rh)
runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
self.assertEqual(len(runs_w_rh[1].data), 4)
self.assertEqual(len(runs_wo_rh[1].data), 0)
def test_passed_runhistory_deterministic(self):
''' test if passed runhistory is in resulting runhistory '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'deterministic': True})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Add a few runs and check, if they are correctly processed
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory()
for config in old_configs[:int(len(old_configs) / 2)]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')
configs = validator._get_configs('all')
insts = validator._get_instances('train')
runs_w_rh = validator._get_runs(configs, insts, repetitions=2,
runhistory=old_rh)
runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
self.assertEqual(len(runs_w_rh[1].data), 4)
self.assertEqual(len(runs_wo_rh[1].data), 0)
def test_passed_runhistory_no_insts(self):
''' test passed runhistory, without instances '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality'})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Add a few runs and check, if they are correctly processed
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory()
for config in old_configs[:int(len(old_configs) / 2)]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, seed=127)
configs = validator._get_configs('all')
insts = validator._get_instances('train')
runs_w_rh = validator._get_runs(configs, insts, repetitions=2,
runhistory=old_rh)
runs_wo_rh = validator._get_runs(configs, insts, repetitions=2)
self.assertEqual(len(runs_w_rh[0]), len(runs_wo_rh[0]) - 4)
self.assertEqual(len(runs_w_rh[1].data), 4)
self.assertEqual(len(runs_wo_rh[1].data), 0)
def test_validate_epm(self):
''' test using epm to validate '''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'test_insts': self.test_insts,
'features': self.feature_dict})
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Add a few runs and check, if they are correctly processed
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory()
for config in old_configs[:int(len(old_configs) / 2)]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
seed=127)
validator.validate_epm('all', 'train', 1, old_rh)
def test_objective_runtime(self):
''' test if everything is ok with objective runtime (imputing!) '''
scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'runtime',
'cutoff_time': 5})
validator = Validator(scen, self.trajectory, self.rng)
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory()
for config in old_configs[:int(len(old_configs) / 2)]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')
validator.validate_epm('all', 'train', 1, old_rh)
def test_inst_no_feat(self):
''' test if scenarios are treated correctly if no features are
specified.'''
scen = Scenario(self.scen_fn,
cmd_options={'run_obj': 'quality',
'train_insts': self.train_insts,
'test_insts': self.test_insts})
self.assertTrue(scen.feature_array is None)
self.assertEqual(len(scen.feature_dict), 0)
scen.instance_specific = self.inst_specs
validator = Validator(scen, self.trajectory, self.rng)
# Add a few runs and check, if they are correctly processed
old_configs = [entry["incumbent"] for entry in self.trajectory]
old_rh = RunHistory()
for config in old_configs[:int(len(old_configs) / 2)]:
old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
seed=127)
rh = validator.validate_epm('all', 'train+test', 1, old_rh)
self.assertEqual(len(old_rh.get_all_configs()), 4)
self.assertEqual(len(rh.get_all_configs()), 10)
|
[
"os.remove",
"smac.utils.validate._Run",
"smac.runhistory.runhistory.RunHistory",
"smac.utils.io.traj_logging.TrajLogger.read_traj_aclib_format",
"shutil.rmtree",
"smac.tae.execute_ta_run_old.ExecuteTARunOld",
"os.path.join",
"os.chdir",
"unittest.mock.MagicMock",
"smac.utils.validate.Validator",
"os.path.exists",
"numpy.random.RandomState",
"smac.stats.stats.Stats",
"logging.basicConfig",
"os.getcwd",
"unittest.mock.Mock",
"smac.scenario.scenario.Scenario",
"numpy.array",
"smac.configspace.Configuration",
"os.path.split",
"logging.getLogger"
] |
[((716, 727), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (725, 727), False, 'import os\n'), ((736, 760), 'os.chdir', 'os.chdir', (['base_directory'], {}), '(base_directory)\n', (744, 760), False, 'import os\n'), ((770, 791), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (789, 791), False, 'import logging\n'), ((814, 880), 'logging.getLogger', 'logging.getLogger', (["(self.__module__ + '.' + self.__class__.__name__)"], {}), "(self.__module__ + '.' + self.__class__.__name__)\n", (831, 880), False, 'import logging\n'), ((944, 974), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (965, 974), True, 'import numpy as np\n'), ((1656, 1714), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (1664, 1714), False, 'from smac.scenario.scenario import Scenario\n'), ((1736, 1747), 'smac.stats.stats.Stats', 'Stats', (['scen'], {}), '(scen)\n', (1741, 1747), False, 'from smac.stats.stats import Stats\n'), ((1774, 1883), 'smac.utils.io.traj_logging.TrajLogger.read_traj_aclib_format', 'TrajLogger.read_traj_aclib_format', ([], {'fn': '"""test/test_files/validation/test_validation_traj.json"""', 'cs': 'scen.cs'}), "(fn=\n 'test/test_files/validation/test_validation_traj.json', cs=scen.cs)\n", (1807, 1883), False, 'from smac.utils.io.traj_logging import TrajLogger\n'), ((2499, 2525), 'os.chdir', 'os.chdir', (['self.current_dir'], {}), '(self.current_dir)\n', (2507, 2525), False, 'import os\n'), ((2566, 2624), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (2574, 2624), False, 'from smac.scenario.scenario import Scenario\n'), ((2645, 2681), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', '(42)'], {}), '(scen, self.trajectory, 42)\n', (2654, 2681), False, 'from smac.utils.validate import Validator, _Run\n'), ((2776, 2808), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory'], {}), '(scen, self.trajectory)\n', (2785, 2808), False, 'from smac.utils.validate import Validator, _Run\n'), ((3090, 3148), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (3098, 3148), False, 'from smac.scenario.scenario import Scenario\n'), ((3169, 3201), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory'], {}), '(scen, self.trajectory)\n', (3178, 3201), False, 'from smac.utils.validate import Validator, _Run\n'), ((3404, 3462), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (3412, 3462), False, 'from smac.scenario.scenario import Scenario\n'), ((3477, 3522), 'smac.tae.execute_ta_run_old.ExecuteTARunOld', 'ExecuteTARunOld', ([], {'ta': 'scen.ta', 'stats': 'self.stats'}), '(ta=scen.ta, stats=self.stats)\n', (3492, 3522), False, 'from smac.tae.execute_ta_run_old import ExecuteTARunOld\n'), ((3543, 3575), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory'], {}), '(scen, self.trajectory)\n', (3552, 3575), False, 'from smac.utils.validate import Validator, _Run\n'), ((3594, 3605), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3603, 3605), False, 'from unittest import mock\n'), ((4257, 4315), 
'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (4265, 4315), False, 'from smac.scenario.scenario import Scenario\n'), ((4370, 4402), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory'], {}), '(scen, self.trajectory)\n', (4379, 4402), False, 'from smac.utils.validate import Validator, _Run\n'), ((4667, 4725), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (4675, 4725), False, 'from smac.scenario.scenario import Scenario\n'), ((4780, 4812), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory'], {}), '(scen, self.trajectory)\n', (4789, 4812), False, 'from smac.utils.validate import Validator, _Run\n'), ((4830, 4842), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (4840, 4842), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((5410, 5468), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (5418, 5468), False, 'from smac.scenario.scenario import Scenario\n'), ((5523, 5555), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory'], {}), '(scen, self.trajectory)\n', (5532, 5555), False, 'from smac.utils.validate import Validator, _Run\n'), ((5573, 5585), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (5583, 5585), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((5856, 5914), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (5864, 5914), False, 'from smac.scenario.scenario import Scenario\n'), ((5935, 5977), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (5944, 5977), False, 'from smac.utils.validate import Validator, _Run\n'), ((6972, 7050), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'instances': ['0']}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'instances': ['0']})\n", (6980, 7050), False, 'from smac.scenario.scenario import Scenario\n'), ((7133, 7175), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (7142, 7175), False, 'from smac.utils.validate import Validator, _Run\n'), ((7314, 7326), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (7324, 7326), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((8358, 8484), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'train_insts': self.train_insts, 'test_insts': self.\n test_insts}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'test_insts': self.test_insts})\n", (8366, 8484), False, 'from smac.scenario.scenario import Scenario\n'), ((8649, 8691), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (8658, 8691), False, 'from smac.utils.validate import Validator, _Run\n'), ((11471, 11597), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 
'train_insts': self.train_insts, 'test_insts': self.\n test_insts}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'test_insts': self.test_insts})\n", (11479, 11597), False, 'from smac.scenario.scenario import Scenario\n'), ((11761, 11803), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (11770, 11803), False, 'from smac.utils.validate import Validator, _Run\n'), ((13110, 13168), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (13118, 13168), False, 'from smac.scenario.scenario import Scenario\n'), ((13213, 13255), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (13222, 13255), False, 'from smac.utils.validate import Validator, _Run\n'), ((13712, 13830), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'train_insts': self.train_insts, 'deterministic': True}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'deterministic': True})\n", (13720, 13830), False, 'from smac.scenario.scenario import Scenario\n'), ((13994, 14036), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (14003, 14036), False, 'from smac.utils.validate import Validator, _Run\n'), ((14450, 14508), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (14458, 14508), False, 'from smac.scenario.scenario import Scenario\n'), ((14553, 14595), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (14562, 14595), False, 'from smac.utils.validate import Validator, _Run\n'), ((14804, 14930), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'train_insts': self.train_insts, 'test_insts': self.\n test_insts}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'test_insts': self.test_insts})\n", (14812, 14930), False, 'from smac.scenario.scenario import Scenario\n'), ((15094, 15136), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (15103, 15136), False, 'from smac.utils.validate import Validator, _Run\n'), ((15294, 15306), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (15304, 15306), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((16251, 16369), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'train_insts': self.train_insts, 'deterministic': True}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'deterministic': True})\n", (16259, 16369), False, 'from smac.scenario.scenario import Scenario\n'), ((16533, 16575), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (16542, 16575), False, 'from smac.utils.validate import Validator, _Run\n'), ((16733, 16745), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (16743, 16745), False, 'from 
smac.runhistory.runhistory import RunHistory\n'), ((17478, 17536), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality'}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality'})\n", (17486, 17536), False, 'from smac.scenario.scenario import Scenario\n'), ((17630, 17672), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (17639, 17672), False, 'from smac.utils.validate import Validator, _Run\n'), ((17830, 17842), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (17840, 17842), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((18539, 18701), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'train_insts': self.train_insts, 'test_insts': self.\n test_insts, 'features': self.feature_dict}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'test_insts': self.test_insts, 'features': self.\n feature_dict})\n", (18547, 18701), False, 'from smac.scenario.scenario import Scenario\n'), ((18897, 18939), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (18906, 18939), False, 'from smac.utils.validate import Validator, _Run\n'), ((19097, 19109), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (19107, 19109), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((19468, 19544), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'runtime', 'cutoff_time': 5}"}), "(self.scen_fn, cmd_options={'run_obj': 'runtime', 'cutoff_time': 5})\n", (19476, 19544), False, 'from smac.scenario.scenario import Scenario\n'), ((19616, 19658), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (19625, 19658), False, 'from smac.utils.validate import Validator, _Run\n'), ((19748, 19760), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (19758, 19760), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((20098, 20224), 'smac.scenario.scenario.Scenario', 'Scenario', (['self.scen_fn'], {'cmd_options': "{'run_obj': 'quality', 'train_insts': self.train_insts, 'test_insts': self.\n test_insts}"}), "(self.scen_fn, cmd_options={'run_obj': 'quality', 'train_insts':\n self.train_insts, 'test_insts': self.test_insts})\n", (20106, 20224), False, 'from smac.scenario.scenario import Scenario\n'), ((20493, 20535), 'smac.utils.validate.Validator', 'Validator', (['scen', 'self.trajectory', 'self.rng'], {}), '(scen, self.trajectory, self.rng)\n', (20502, 20535), False, 'from smac.utils.validate import Validator, _Run\n'), ((20693, 20705), 'smac.runhistory.runhistory.RunHistory', 'RunHistory', ([], {}), '()\n', (20703, 20705), False, 'from smac.runhistory.runhistory import RunHistory\n'), ((566, 589), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (579, 589), False, 'import os\n'), ((647, 687), 'os.path.join', 'os.path.join', (['base_directory', '""".."""', '""".."""'], {}), "(base_directory, '..', '..')\n", (659, 687), False, 'import os\n'), ((1290, 1309), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1298, 1309), True, 'import numpy as np\n'), ((1345, 1364), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1353, 1364), True, 'import numpy as np\n'), 
((1400, 1419), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1408, 1419), True, 'import numpy as np\n'), ((1455, 1474), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1463, 1474), True, 'import numpy as np\n'), ((1510, 1529), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1518, 1529), True, 'import numpy as np\n'), ((1565, 1584), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (1573, 1584), True, 'import numpy as np\n'), ((2936, 2959), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2957, 2959), True, 'import numpy as np\n'), ((3337, 3357), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3351, 3357), False, 'import os\n'), ((7225, 7274), 'smac.configspace.Configuration', 'Configuration', (['scen.cs'], {'values': "{'x1': i, 'x2': i}"}), "(scen.cs, values={'x1': i, 'x2': i})\n", (7238, 7274), False, 'from smac.configspace import Configuration\n'), ((7891, 7952), 'smac.utils.validate._Run', '_Run', ([], {'inst_specs': '"""0"""', 'seed': '(0)', 'inst': '"""0"""', 'config': 'old_configs[2]'}), "(inst_specs='0', seed=0, inst='0', config=old_configs[2])\n", (7895, 7952), False, 'from smac.utils.validate import Validator, _Run\n'), ((7974, 8035), 'smac.utils.validate._Run', '_Run', ([], {'inst_specs': '"""0"""', 'seed': '(0)', 'inst': '"""0"""', 'config': 'old_configs[3]'}), "(inst_specs='0', seed=0, inst='0', config=old_configs[3])\n", (7978, 8035), False, 'from smac.utils.validate import Validator, _Run\n'), ((8057, 8118), 'smac.utils.validate._Run', '_Run', ([], {'inst_specs': '"""0"""', 'seed': '(0)', 'inst': '"""0"""', 'config': 'old_configs[5]'}), "(inst_specs='0', seed=0, inst='0', config=old_configs[5])\n", (8061, 8118), False, 'from smac.utils.validate import Validator, _Run\n'), ((8771, 8840), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""3"""', 'seed': '(1608637542)', 'inst_specs': '"""three"""'}), "(config='config1', inst='3', seed=1608637542, inst_specs='three')\n", (8775, 8840), False, 'from smac.utils.validate import Validator, _Run\n'), ((8862, 8931), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config2"""', 'inst': '"""3"""', 'seed': '(1608637542)', 'inst_specs': '"""three"""'}), "(config='config2', inst='3', seed=1608637542, inst_specs='three')\n", (8866, 8931), False, 'from smac.utils.validate import Validator, _Run\n'), ((8953, 9022), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""3"""', 'seed': '(1273642419)', 'inst_specs': '"""three"""'}), "(config='config1', inst='3', seed=1273642419, inst_specs='three')\n", (8957, 9022), False, 'from smac.utils.validate import Validator, _Run\n'), ((9044, 9113), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config2"""', 'inst': '"""3"""', 'seed': '(1273642419)', 'inst_specs': '"""three"""'}), "(config='config2', inst='3', seed=1273642419, inst_specs='three')\n", (9048, 9113), False, 'from smac.utils.validate import Validator, _Run\n'), ((9135, 9203), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""4"""', 'seed': '(1935803228)', 'inst_specs': '"""four"""'}), "(config='config1', inst='4', seed=1935803228, inst_specs='four')\n", (9139, 9203), False, 'from smac.utils.validate import Validator, _Run\n'), ((9225, 9293), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config2"""', 'inst': '"""4"""', 'seed': '(1935803228)', 'inst_specs': '"""four"""'}), "(config='config2', inst='4', seed=1935803228, 
inst_specs='four')\n", (9229, 9293), False, 'from smac.utils.validate import Validator, _Run\n'), ((9315, 9382), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""4"""', 'seed': '(787846414)', 'inst_specs': '"""four"""'}), "(config='config1', inst='4', seed=787846414, inst_specs='four')\n", (9319, 9382), False, 'from smac.utils.validate import Validator, _Run\n'), ((9404, 9471), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config2"""', 'inst': '"""4"""', 'seed': '(787846414)', 'inst_specs': '"""four"""'}), "(config='config2', inst='4', seed=787846414, inst_specs='four')\n", (9408, 9471), False, 'from smac.utils.validate import Validator, _Run\n'), ((9493, 9560), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""5"""', 'seed': '(996406378)', 'inst_specs': '"""five"""'}), "(config='config1', inst='5', seed=996406378, inst_specs='five')\n", (9497, 9560), False, 'from smac.utils.validate import Validator, _Run\n'), ((9582, 9649), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config2"""', 'inst': '"""5"""', 'seed': '(996406378)', 'inst_specs': '"""five"""'}), "(config='config2', inst='5', seed=996406378, inst_specs='five')\n", (9586, 9649), False, 'from smac.utils.validate import Validator, _Run\n'), ((9671, 9739), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""5"""', 'seed': '(1201263687)', 'inst_specs': '"""five"""'}), "(config='config1', inst='5', seed=1201263687, inst_specs='five')\n", (9675, 9739), False, 'from smac.utils.validate import Validator, _Run\n'), ((9761, 9829), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config2"""', 'inst': '"""5"""', 'seed': '(1201263687)', 'inst_specs': '"""five"""'}), "(config='config2', inst='5', seed=1201263687, inst_specs='five')\n", (9765, 9829), False, 'from smac.utils.validate import Validator, _Run\n'), ((10009, 10076), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""0"""', 'seed': '(423734972)', 'inst_specs': '"""null"""'}), "(config='config1', inst='0', seed=423734972, inst_specs='null')\n", (10013, 10076), False, 'from smac.utils.validate import Validator, _Run\n'), ((10098, 10165), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""0"""', 'seed': '(415968276)', 'inst_specs': '"""null"""'}), "(config='config1', inst='0', seed=415968276, inst_specs='null')\n", (10102, 10165), False, 'from smac.utils.validate import Validator, _Run\n'), ((10187, 10253), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""1"""', 'seed': '(670094950)', 'inst_specs': '"""one"""'}), "(config='config1', inst='1', seed=670094950, inst_specs='one')\n", (10191, 10253), False, 'from smac.utils.validate import Validator, _Run\n'), ((10275, 10342), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""1"""', 'seed': '(1914837113)', 'inst_specs': '"""one"""'}), "(config='config1', inst='1', seed=1914837113, inst_specs='one')\n", (10279, 10342), False, 'from smac.utils.validate import Validator, _Run\n'), ((10364, 10430), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""2"""', 'seed': '(669991378)', 'inst_specs': '"""two"""'}), "(config='config1', inst='2', seed=669991378, inst_specs='two')\n", (10368, 10430), False, 'from smac.utils.validate import Validator, _Run\n'), ((10452, 10518), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""2"""', 'seed': '(429389014)', 
'inst_specs': '"""two"""'}), "(config='config1', inst='2', seed=429389014, inst_specs='two')\n", (10456, 10518), False, 'from smac.utils.validate import Validator, _Run\n'), ((10692, 10759), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""0"""', 'seed': '(249467210)', 'inst_specs': '"""null"""'}), "(config='config1', inst='0', seed=249467210, inst_specs='null')\n", (10696, 10759), False, 'from smac.utils.validate import Validator, _Run\n'), ((10781, 10848), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""1"""', 'seed': '(1972458954)', 'inst_specs': '"""one"""'}), "(config='config1', inst='1', seed=1972458954, inst_specs='one')\n", (10785, 10848), False, 'from smac.utils.validate import Validator, _Run\n'), ((10870, 10937), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""2"""', 'seed': '(1572714583)', 'inst_specs': '"""two"""'}), "(config='config1', inst='2', seed=1572714583, inst_specs='two')\n", (10874, 10937), False, 'from smac.utils.validate import Validator, _Run\n'), ((10959, 11028), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""3"""', 'seed': '(1433267572)', 'inst_specs': '"""three"""'}), "(config='config1', inst='3', seed=1433267572, inst_specs='three')\n", (10963, 11028), False, 'from smac.utils.validate import Validator, _Run\n'), ((11050, 11117), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""4"""', 'seed': '(434285667)', 'inst_specs': '"""four"""'}), "(config='config1', inst='4', seed=434285667, inst_specs='four')\n", (11054, 11117), False, 'from smac.utils.validate import Validator, _Run\n'), ((11139, 11206), 'smac.utils.validate._Run', '_Run', ([], {'config': '"""config1"""', 'inst': '"""5"""', 'seed': '(613608295)', 'inst_specs': '"""five"""'}), "(config='config1', inst='5', seed=613608295, inst_specs='five')\n", (11143, 11206), False, 'from smac.utils.validate import Validator, _Run\n'), ((2240, 2285), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {'ignore_errors': '(True)'}), '(output_dir, ignore_errors=True)\n', (2253, 2285), False, 'import shutil\n'), ((2401, 2423), 'os.remove', 'os.remove', (['output_file'], {}), '(output_file)\n', (2410, 2423), False, 'import os\n'), ((3721, 3737), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3735, 3737), False, 'from unittest import mock\n'), ((3739, 3755), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3753, 3755), False, 'from unittest import mock\n'), ((3757, 3773), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3771, 3773), False, 'from unittest import mock\n'), ((3775, 3791), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (3789, 3791), False, 'from unittest import mock\n'), ((3954, 3965), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3963, 3965), False, 'from unittest import mock\n'), ((3967, 3978), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3976, 3978), False, 'from unittest import mock\n'), ((3980, 3991), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3989, 3991), False, 'from unittest import mock\n'), ((3993, 4004), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (4002, 4004), False, 'from unittest import mock\n')]
|
'''
This script, whose helper functions live in foo_vb_lib.py, is based on
https://github.com/chenzeno/FOO-VB/blob/ebc14a930ba9d1c1dadc8e835f746c567c253946/main.py
For more information, please see the original paper https://arxiv.org/abs/2010.00373 .
Author: <NAME>(@karalleyna)
'''
import numpy as np
from time import time
from jax import random, value_and_grad, tree_map, vmap, lax
import jax.numpy as jnp
from functools import partial
import foo_vb_lib
def scan(f, init, xs, length=None):
if xs is None:
xs = [None] * length
carry = init
ys = []
for x in xs:
carry, y = f(carry, x)
ys.append(y)
return carry, jnp.stack(ys)
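# Hedged note: this Python-level scan mirrors the interface of jax.lax.scan but
# runs eagerly, trading speed for freedom from jit/tracing constraints.
# Tiny illustration (made-up values, not part of the original script):
#   cumsum = lambda carry, x: (carry + x, carry + x)
#   scan(cumsum, 0, [1, 2, 3])   # -> (6, Array([1, 3, 6]))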
def init_step(key, model, image_size, config):
model_key, param_key = random.split(key)
variables = model.init(model_key, jnp.zeros((config.batch_size, image_size)))
params = tree_map(jnp.transpose, variables)
pytrees = foo_vb_lib.init_param(param_key, params, config.s_init, True, config.alpha)
return pytrees
def train_step(key, pytrees, data, target, value_and_grad_fn, train_mc_iters, eta, diagonal):
weights, m, a, b, avg_psi, e_a, e_b = pytrees
def monte_carlo_step(aggregated_params, key):
# Phi ~ MN(0,I,I)
avg_psi, e_a, e_b = aggregated_params
phi_key, key = random.split(key)
phi = foo_vb_lib.gen_phi(phi_key, weights)
# W = M +B*Phi*A^t
params = foo_vb_lib.randomize_weights(m, a, b, phi)
loss, grads = value_and_grad_fn(tree_map(jnp.transpose, params), data, target)
grads = foo_vb_lib.weight_grad(grads)
avg_psi = foo_vb_lib.aggregate_grads(avg_psi, grads, train_mc_iters)
e_a = foo_vb_lib.aggregate_e_a(e_a, grads, b,
phi, train_mc_iters)
e_b = foo_vb_lib.aggregate_e_b(e_b, grads, a,
phi, train_mc_iters)
return (avg_psi, e_a, e_b), loss
# M = M - B*B^t*avg_Phi*A*A^t
keys = random.split(key, train_mc_iters)
(avg_psi, e_a, e_b), losses = scan(monte_carlo_step,
(avg_psi, e_a, e_b), keys)
print("Loss :", losses.mean())
m = foo_vb_lib.update_m(m, a, b, avg_psi, eta, diagonal=diagonal)
a, b = foo_vb_lib.update_a_b(a, b, e_a, e_b)
avg_psi, e_a, e_b = foo_vb_lib.zero_matrix(avg_psi, e_a, e_b)
pytrees = weights, m, a, b, avg_psi, e_a, e_b
return pytrees, losses
def eval_step(model, pytrees, data, target, train_mc_iters):
weights, m, a, b, avg_psi, e_a, e_b = pytrees
def monte_carlo_step(weights, phi_key):
phi = foo_vb_lib.gen_phi(phi_key, weights)
params = foo_vb_lib.randomize_weights(m, a, b, phi)
output = model.apply(tree_map(jnp.transpose, params), data)
# get the index of the max log-probability
pred = jnp.argmax(output, axis=1)
return weights, jnp.sum(pred == target)
keys = random.split(random.PRNGKey(0), train_mc_iters)
_, correct_per_iter = scan(monte_carlo_step, weights, keys)
n_correct = jnp.sum(correct_per_iter)
return n_correct
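# Hedged note: eval_step sums correct predictions over train_mc_iters Monte Carlo
# weight draws, which is why the accuracies below divide by
# len(dataset) * train_mc_iters rather than by len(dataset) alone.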
def train_continuous_mnist(key, model, train_loader,
test_loader, image_size, num_classes, config):
init_key, key = random.split(key)
pytrees = init_step(key, model, image_size, config)
criterion = partial(foo_vb_lib.cross_entropy_loss,
num_classes=num_classes,
predict_fn=model.apply)
grad_fn = value_and_grad(criterion)
ava_test = []
for task in range(len(test_loader)):
for epoch in range(1, config.epochs + 1):
start_time = time()
for batch_idx, (data, target) in enumerate(train_loader[0]):
data, target = jnp.array(data.view(-1, image_size).numpy()), jnp.array(target.numpy())
train_key, key = random.split(key)
pytrees, losses = train_step(train_key, pytrees, data, target, grad_fn,
config.train_mc_iters, config.eta, config.diagonal)
print("Time : ", time() - start_time)
total = 0
for data, target in test_loader[task]:
data, target = jnp.array(data.numpy().reshape((-1, image_size))), jnp.array(target.numpy())
n_correct = eval_step(model, pytrees, data, target, config.train_mc_iters)
total += n_correct
test_acc = 100. * total / (len(test_loader[task].dataset) * config.train_mc_iters)
print('\nTask num {}, Epoch num {} Test Accuracy: {:.2f}%\n'.format(
task, epoch, test_acc))
test_accuracies = []
for i in range(task + 1):
total = 0
for data, target in test_loader[i]:
data, target = jnp.array(data.numpy().reshape((-1, image_size))), jnp.array(target.numpy())
n_correct = eval_step(model, pytrees, data, target, config.train_mc_iters)
total += n_correct
test_acc = 100. * total / (len(test_loader[task].dataset) * config.train_mc_iters)
test_accuracies.append(test_acc)
            print('\nTraining task Num: {} Test Accuracy of task {}: {:.2f}%\n'.format(
task, i, test_acc))
ava_test.append(jnp.mean(np.array(test_accuracies)))
return ava_test
def train_multiple_tasks(key, model, train_loader,
test_loader, num_classes,
permutations, image_size, config):
init_key, key = random.split(key)
    pytrees = init_step(key, model, image_size, config)
criterion = partial(foo_vb_lib.cross_entropy_loss,
num_classes=num_classes, predict_fn=model.apply)
grad_fn = value_and_grad(criterion)
ava_test = []
for task in range(len(permutations)):
for epoch in range(1, config.epochs + 1):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = jnp.array(data.detach().numpy().reshape((-1, image_size))), jnp.array(
target.detach().numpy())
data = data[:, permutations[task]]
train_key, key = random.split(key)
                start_time = time()
pytrees, losses = train_step(train_key, pytrees, data, target, grad_fn,
config.train_mc_iters, config.eta, config.diagonal)
print("Time : ", start_time - time.time())
total = 0
for data, target in train_loader:
data, target = jnp.array(data.numpy().reshape((-1, image_size))), jnp.array(target.numpy())
data = data[:, permutations[task]]
n_correct = eval_step(model, pytrees, data, target, config.train_mc_iters)
total += n_correct
train_acc = 100. * total / (len(train_loader.dataset) * config.train_mc_iters)
total = 0
for data, target in test_loader:
data, target = jnp.array(data.numpy().reshape((-1, image_size))), jnp.array(target.numpy())
data = data[:, permutations[task]]
n_correct = eval_step(model, pytrees, data, target, config.train_mc_iters)
total += n_correct
test_acc = 100. * total / (len(test_loader.dataset) * config.train_mc_iters)
print('\nTask num {}, Epoch num {}, Train Accuracy: {:.2f}% Test Accuracy: {:.2f}%\n'.format(
task, epoch, train_acc, test_acc))
test_accuracies = []
for i in range(task + 1):
total = 0
for data, target in test_loader:
data, target = jnp.array(data.numpy().reshape((-1, image_size))), jnp.array(target.numpy())
data = data[:, permutations[i]]
n_correct = eval_step(model, pytrees, data, target, config.train_mc_iters)
total += n_correct
test_acc = 100. * total / (len(test_loader.dataset) * config.train_mc_iters)
test_accuracies.append(test_acc)
            print('\nTraining task Num: {} Test Accuracy of task {}: {:.2f}%\n'.format(
task, i, test_acc))
print(test_accuracies)
ava_test.append(jnp.mean(np.array(test_accuracies)))
return ava_test
|
[
"jax.random.PRNGKey",
"foo_vb_lib.aggregate_e_b",
"foo_vb_lib.update_m",
"foo_vb_lib.weight_grad",
"jax.numpy.argmax",
"foo_vb_lib.aggregate_e_a",
"foo_vb_lib.init_param",
"functools.partial",
"foo_vb_lib.aggregate_grads",
"jax.numpy.sum",
"foo_vb_lib.gen_phi",
"foo_vb_lib.zero_matrix",
"jax.numpy.zeros",
"jax.random.split",
"time.time",
"numpy.array",
"jax.value_and_grad",
"foo_vb_lib.update_a_b",
"foo_vb_lib.randomize_weights",
"time.time.time",
"jax.numpy.stack",
"jax.tree_map"
] |
[((752, 769), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (764, 769), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((865, 899), 'jax.tree_map', 'tree_map', (['jnp.transpose', 'variables'], {}), '(jnp.transpose, variables)\n', (873, 899), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((914, 989), 'foo_vb_lib.init_param', 'foo_vb_lib.init_param', (['param_key', 'params', 'config.s_init', '(True)', 'config.alpha'], {}), '(param_key, params, config.s_init, True, config.alpha)\n', (935, 989), False, 'import foo_vb_lib\n'), ((1985, 2018), 'jax.random.split', 'random.split', (['key', 'train_mc_iters'], {}), '(key, train_mc_iters)\n', (1997, 2018), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((2187, 2248), 'foo_vb_lib.update_m', 'foo_vb_lib.update_m', (['m', 'a', 'b', 'avg_psi', 'eta'], {'diagonal': 'diagonal'}), '(m, a, b, avg_psi, eta, diagonal=diagonal)\n', (2206, 2248), False, 'import foo_vb_lib\n'), ((2260, 2297), 'foo_vb_lib.update_a_b', 'foo_vb_lib.update_a_b', (['a', 'b', 'e_a', 'e_b'], {}), '(a, b, e_a, e_b)\n', (2281, 2297), False, 'import foo_vb_lib\n'), ((2322, 2363), 'foo_vb_lib.zero_matrix', 'foo_vb_lib.zero_matrix', (['avg_psi', 'e_a', 'e_b'], {}), '(avg_psi, e_a, e_b)\n', (2344, 2363), False, 'import foo_vb_lib\n'), ((3061, 3086), 'jax.numpy.sum', 'jnp.sum', (['correct_per_iter'], {}), '(correct_per_iter)\n', (3068, 3086), True, 'import jax.numpy as jnp\n'), ((3258, 3275), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (3270, 3275), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((3348, 3440), 'functools.partial', 'partial', (['foo_vb_lib.cross_entropy_loss'], {'num_classes': 'num_classes', 'predict_fn': 'model.apply'}), '(foo_vb_lib.cross_entropy_loss, num_classes=num_classes, predict_fn=\n model.apply)\n', (3355, 3440), False, 'from functools import partial\n'), ((3499, 3524), 'jax.value_and_grad', 'value_and_grad', (['criterion'], {}), '(criterion)\n', (3513, 3524), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((5557, 5574), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (5569, 5574), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((5635, 5727), 'functools.partial', 'partial', (['foo_vb_lib.cross_entropy_loss'], {'num_classes': 'num_classes', 'predict_fn': 'model.apply'}), '(foo_vb_lib.cross_entropy_loss, num_classes=num_classes, predict_fn=\n model.apply)\n', (5642, 5727), False, 'from functools import partial\n'), ((5762, 5787), 'jax.value_and_grad', 'value_and_grad', (['criterion'], {}), '(criterion)\n', (5776, 5787), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((662, 675), 'jax.numpy.stack', 'jnp.stack', (['ys'], {}), '(ys)\n', (671, 675), True, 'import jax.numpy as jnp\n'), ((808, 850), 'jax.numpy.zeros', 'jnp.zeros', (['(config.batch_size, image_size)'], {}), '((config.batch_size, image_size))\n', (817, 850), True, 'import jax.numpy as jnp\n'), ((1301, 1318), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (1313, 1318), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((1333, 1369), 'foo_vb_lib.gen_phi', 'foo_vb_lib.gen_phi', (['phi_key', 'weights'], {}), '(phi_key, weights)\n', (1351, 1369), False, 'import foo_vb_lib\n'), ((1415, 1457), 'foo_vb_lib.randomize_weights', 'foo_vb_lib.randomize_weights', (['m', 'a', 'b', 'phi'], {}), '(m, a, b, phi)\n', (1443, 1457), False, 'import foo_vb_lib\n'), 
((1561, 1590), 'foo_vb_lib.weight_grad', 'foo_vb_lib.weight_grad', (['grads'], {}), '(grads)\n', (1583, 1590), False, 'import foo_vb_lib\n'), ((1609, 1667), 'foo_vb_lib.aggregate_grads', 'foo_vb_lib.aggregate_grads', (['avg_psi', 'grads', 'train_mc_iters'], {}), '(avg_psi, grads, train_mc_iters)\n', (1635, 1667), False, 'import foo_vb_lib\n'), ((1682, 1742), 'foo_vb_lib.aggregate_e_a', 'foo_vb_lib.aggregate_e_a', (['e_a', 'grads', 'b', 'phi', 'train_mc_iters'], {}), '(e_a, grads, b, phi, train_mc_iters)\n', (1706, 1742), False, 'import foo_vb_lib\n'), ((1797, 1857), 'foo_vb_lib.aggregate_e_b', 'foo_vb_lib.aggregate_e_b', (['e_b', 'grads', 'a', 'phi', 'train_mc_iters'], {}), '(e_b, grads, a, phi, train_mc_iters)\n', (1821, 1857), False, 'import foo_vb_lib\n'), ((2615, 2651), 'foo_vb_lib.gen_phi', 'foo_vb_lib.gen_phi', (['phi_key', 'weights'], {}), '(phi_key, weights)\n', (2633, 2651), False, 'import foo_vb_lib\n'), ((2669, 2711), 'foo_vb_lib.randomize_weights', 'foo_vb_lib.randomize_weights', (['m', 'a', 'b', 'phi'], {}), '(m, a, b, phi)\n', (2697, 2711), False, 'import foo_vb_lib\n'), ((2846, 2872), 'jax.numpy.argmax', 'jnp.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (2856, 2872), True, 'import jax.numpy as jnp\n'), ((2946, 2963), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (2960, 2963), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((1498, 1529), 'jax.tree_map', 'tree_map', (['jnp.transpose', 'params'], {}), '(jnp.transpose, params)\n', (1506, 1529), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((2741, 2772), 'jax.tree_map', 'tree_map', (['jnp.transpose', 'params'], {}), '(jnp.transpose, params)\n', (2749, 2772), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((2897, 2920), 'jax.numpy.sum', 'jnp.sum', (['(pred == target)'], {}), '(pred == target)\n', (2904, 2920), True, 'import jax.numpy as jnp\n'), ((3661, 3667), 'time.time', 'time', ([], {}), '()\n', (3665, 3667), False, 'from time import time\n'), ((3878, 3895), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (3890, 3895), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((5324, 5349), 'numpy.array', 'np.array', (['test_accuracies'], {}), '(test_accuracies)\n', (5332, 5349), True, 'import numpy as np\n'), ((6202, 6219), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (6214, 6219), False, 'from jax import random, value_and_grad, tree_map, vmap, lax\n'), ((6249, 6260), 'time.time.time', 'time.time', ([], {}), '()\n', (6258, 6260), False, 'from time import time\n'), ((8292, 8317), 'numpy.array', 'np.array', (['test_accuracies'], {}), '(test_accuracies)\n', (8300, 8317), True, 'import numpy as np\n'), ((4111, 4117), 'time.time', 'time', ([], {}), '()\n', (4115, 4117), False, 'from time import time\n'), ((6492, 6503), 'time.time.time', 'time.time', ([], {}), '()\n', (6501, 6503), False, 'from time import time\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
preprocess.
"""
import os
import numpy as np
from src.dataset import create_ocr_val_dataset
from src.model_utils.config import config
def get_bin():
'''generate bin files.'''
prefix = "fsns.mindrecord"
if config.enable_modelarts:
mindrecord_file = os.path.join(config.data_path, prefix + "0")
else:
mindrecord_file = os.path.join(config.test_data_dir, prefix + "0")
print("mindrecord_file", mindrecord_file)
dataset = create_ocr_val_dataset(mindrecord_file, config.eval_batch_size)
data_loader = dataset.create_dict_iterator(num_epochs=1, output_numpy=True)
print("Dataset creation Done!")
sos_id = config.characters_dictionary.go_id
images_path = os.path.join(config.pre_result_path, "00_images")
decoder_input_path = os.path.join(config.pre_result_path, "01_decoder_input")
decoder_hidden_path = os.path.join(config.pre_result_path, "02_decoder_hidden")
annotation_path = os.path.join(config.pre_result_path, "annotation")
os.makedirs(images_path)
os.makedirs(decoder_input_path)
os.makedirs(decoder_hidden_path)
os.makedirs(annotation_path)
for i, data in enumerate(data_loader):
annotation = data["annotation"]
images = data["image"].astype(np.float32)
decoder_hidden = np.zeros((1, config.eval_batch_size, config.decoder_hidden_size),
dtype=np.float16)
decoder_input = (np.ones((config.eval_batch_size, 1)) * sos_id).astype(np.int32)
file_name = "ocr_bs" + str(config.eval_batch_size) + "_" + str(i) + ".bin"
images.tofile(os.path.join(images_path, file_name))
decoder_input.tofile(os.path.join(decoder_input_path, file_name))
decoder_hidden.tofile(os.path.join(decoder_hidden_path, file_name))
file_name = "ocr_bs" + str(config.eval_batch_size) + "_" + str(i) + ".npy"
np.save(os.path.join(annotation_path, file_name), annotation)
print("=" * 10, "export bin files finished.", "=" * 10)
if __name__ == '__main__':
get_bin()
|
[
"os.makedirs",
"src.dataset.create_ocr_val_dataset",
"numpy.zeros",
"numpy.ones",
"os.path.join"
] |
[((1133, 1196), 'src.dataset.create_ocr_val_dataset', 'create_ocr_val_dataset', (['mindrecord_file', 'config.eval_batch_size'], {}), '(mindrecord_file, config.eval_batch_size)\n', (1155, 1196), False, 'from src.dataset import create_ocr_val_dataset\n'), ((1381, 1430), 'os.path.join', 'os.path.join', (['config.pre_result_path', '"""00_images"""'], {}), "(config.pre_result_path, '00_images')\n", (1393, 1430), False, 'import os\n'), ((1456, 1512), 'os.path.join', 'os.path.join', (['config.pre_result_path', '"""01_decoder_input"""'], {}), "(config.pre_result_path, '01_decoder_input')\n", (1468, 1512), False, 'import os\n'), ((1539, 1596), 'os.path.join', 'os.path.join', (['config.pre_result_path', '"""02_decoder_hidden"""'], {}), "(config.pre_result_path, '02_decoder_hidden')\n", (1551, 1596), False, 'import os\n'), ((1619, 1669), 'os.path.join', 'os.path.join', (['config.pre_result_path', '"""annotation"""'], {}), "(config.pre_result_path, 'annotation')\n", (1631, 1669), False, 'import os\n'), ((1674, 1698), 'os.makedirs', 'os.makedirs', (['images_path'], {}), '(images_path)\n', (1685, 1698), False, 'import os\n'), ((1703, 1734), 'os.makedirs', 'os.makedirs', (['decoder_input_path'], {}), '(decoder_input_path)\n', (1714, 1734), False, 'import os\n'), ((1739, 1771), 'os.makedirs', 'os.makedirs', (['decoder_hidden_path'], {}), '(decoder_hidden_path)\n', (1750, 1771), False, 'import os\n'), ((1776, 1804), 'os.makedirs', 'os.makedirs', (['annotation_path'], {}), '(annotation_path)\n', (1787, 1804), False, 'import os\n'), ((943, 987), 'os.path.join', 'os.path.join', (['config.data_path', "(prefix + '0')"], {}), "(config.data_path, prefix + '0')\n", (955, 987), False, 'import os\n'), ((1024, 1072), 'os.path.join', 'os.path.join', (['config.test_data_dir', "(prefix + '0')"], {}), "(config.test_data_dir, prefix + '0')\n", (1036, 1072), False, 'import os\n'), ((1964, 2052), 'numpy.zeros', 'np.zeros', (['(1, config.eval_batch_size, config.decoder_hidden_size)'], {'dtype': 'np.float16'}), '((1, config.eval_batch_size, config.decoder_hidden_size), dtype=np.\n float16)\n', (1972, 2052), True, 'import numpy as np\n'), ((2277, 2313), 'os.path.join', 'os.path.join', (['images_path', 'file_name'], {}), '(images_path, file_name)\n', (2289, 2313), False, 'import os\n'), ((2344, 2387), 'os.path.join', 'os.path.join', (['decoder_input_path', 'file_name'], {}), '(decoder_input_path, file_name)\n', (2356, 2387), False, 'import os\n'), ((2419, 2463), 'os.path.join', 'os.path.join', (['decoder_hidden_path', 'file_name'], {}), '(decoder_hidden_path, file_name)\n', (2431, 2463), False, 'import os\n'), ((2565, 2605), 'os.path.join', 'os.path.join', (['annotation_path', 'file_name'], {}), '(annotation_path, file_name)\n', (2577, 2605), False, 'import os\n'), ((2107, 2143), 'numpy.ones', 'np.ones', (['(config.eval_batch_size, 1)'], {}), '((config.eval_batch_size, 1))\n', (2114, 2143), True, 'import numpy as np\n')]
|
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tcn import TCN
# if you increase the sequence length make sure the receptive field of the TCN is big enough.
MAX_TIME_STEP = 30
"""
Input: sequence of length 7
Input: sequence of length 25
Input: sequence of length 29
Input: sequence of length 21
Input: sequence of length 20
Input: sequence of length 13
Input: sequence of length 9
Input: sequence of length 7
Input: sequence of length 4
Input: sequence of length 14
Input: sequence of length 10
Input: sequence of length 11
...
"""
def get_x_y(max_time_steps):
for k in range(int(1e9)):
time_steps = np.random.choice(range(1, max_time_steps), size=1)[0]
if k % 2 == 0:
x_train = np.expand_dims([np.insert(np.zeros(shape=(time_steps, 1)), 0, 1)], axis=-1)
y_train = [1]
else:
x_train = np.array([np.zeros(shape=(time_steps, 1))])
y_train = [0]
if k % 100 == 0:
print(f'({k}) Input: sequence of length {time_steps}.')
yield x_train, np.expand_dims(y_train, axis=-1)
m = Sequential([
TCN(input_shape=(None, 1)),
Dense(1, activation='sigmoid')
])
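# Hedged sanity check (assumes the keras-tcn TCN layer exposes a `receptive_field`
# attribute, as recent releases do); the receptive field should cover
# MAX_TIME_STEP, otherwise early time steps cannot influence the prediction:
# print('receptive field:', m.layers[0].receptive_field)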
m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
gen = get_x_y(max_time_steps=MAX_TIME_STEP)
m.fit(gen, epochs=1, steps_per_epoch=1000, max_queue_size=1, verbose=2)
|
[
"tensorflow.keras.layers.Dense",
"numpy.zeros",
"numpy.expand_dims",
"tcn.TCN"
] |
[((1154, 1180), 'tcn.TCN', 'TCN', ([], {'input_shape': '(None, 1)'}), '(input_shape=(None, 1))\n', (1157, 1180), False, 'from tcn import TCN\n'), ((1186, 1216), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1191, 1216), False, 'from tensorflow.keras.layers import Dense\n'), ((1098, 1130), 'numpy.expand_dims', 'np.expand_dims', (['y_train'], {'axis': '(-1)'}), '(y_train, axis=-1)\n', (1112, 1130), True, 'import numpy as np\n'), ((922, 953), 'numpy.zeros', 'np.zeros', ([], {'shape': '(time_steps, 1)'}), '(shape=(time_steps, 1))\n', (930, 953), True, 'import numpy as np\n'), ((800, 831), 'numpy.zeros', 'np.zeros', ([], {'shape': '(time_steps, 1)'}), '(shape=(time_steps, 1))\n', (808, 831), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 11:25:16 2017
@author: flwe6397
"""
import scipy.stats
import statsmodels.api as sm
import matplotlib
matplotlib.rcParams.update({'font.size': 12})
from matplotlib import pyplot
import numpy as np
from pylab import rcParams
rcParams['figure.figsize'] = 16/2,12/2
import statistics
def CalcNonParametric_MannWhitneyWithShapiro(list1, list2, my_alternative='two-sided', printHint=False, printNorm=False):
if (False):
print ('\t\tnormality list1 (Shapiro) = ' + str(scipy.stats.shapiro(list1)))
print ('\t\tnormality list2 (Shapiro) = ' + str(scipy.stats.shapiro(list2)))
print('\t\t' + str(scipy.stats.mannwhitneyu(list1, list2, alternative=my_alternative)))
prefix = ''
if (True):
val1= statistics.median(list1)
val2 = statistics.median(list2)
prefix = 'Median'
else:
val1= statistics.mean(list1)
val2 = statistics.mean(list2)
prefix = 'Mean'
stdevl1 = statistics.stdev(list1)
stdevl2 = statistics.stdev(list2)
print ('\t\t'+prefix+' l1 : ' + str(val1) + '\tStDev l1: ' + str(statistics.stdev(list1)) + "\tN = "+str(len(list1)) + ' [' +str(list1[np.argmin(list1)]) + ';' + str(list1[np.argmax(list1)]) + ']')
print ('\t\t'+prefix+' l2 : ' + str(val2) + '\tStDev l2: ' + str(statistics.stdev(list2)) + "\tN = "+str(len(list2)) + ' [' +str(list2[np.argmin(list2)]) + ';' + str(list2[np.argmax(list2)]) + ']')
if (printNorm):
print (str(len(list1)))
print (str(len(list2)))
if(printHint):
print('\t\tMann-Whitney:' + 'If P <= 0.05, we are confident that the distributions significantly differ')
print('\t\tShapiro :' + 'If P > 0.05, it may be assumed that the data have a normal distribution.')
#http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/
return val1, stdevl1, val2, stdevl2
def CalcParametric_WelshWithShapiroAndLevene(list1, list2, printHint=False, printNorm=False):
if (True):
print ('\t\tnormality list1 (Shapiro) = ' + str(scipy.stats.shapiro(list1)))
print ('\t\tnormality list2 (Shapiro) = ' + str(scipy.stats.shapiro(list2)) )
#print (str(len(list1)))
#print (str(len(list2)))
equalvar = scipy.stats.levene(list1, list2, center='mean')
if (equalvar[1] < 0.05):
print ('\t\t' + str(scipy.stats.ttest_ind(list1, list2, equal_var=False)) + '; Welch: '+ str(equalvar[1]) ) #Welch
else:
print ('\t\t' + str(scipy.stats.ttest_ind(list1, list2, equal_var=True))+ '; t-test: '+str(equalvar[1]) )
    # a negative sign implies that the sample mean is less than the hypothesized mean
meanl1 = statistics.mean(list1)
meanl2 = statistics.mean(list2)
stdevl1 = statistics.stdev(list1)
stdevl2 = statistics.stdev(list2)
print ('\t\tmean l1 : ' + str(statistics.mean(list1)) + '\tStDev l1: ' + str(statistics.stdev(list1))+ "\tN = "+str(len(list1)))
print ('\t\tmean l2 : ' + str(statistics.mean(list2))+ '\tStDev l2: ' + str(statistics.stdev(list2))+ "\tN = "+str(len(list2)))
if (printHint):
print ('\t\tLevene : If p < 0.05 indicates a violation of the assumption that variance is equal across groups. ')
print ('\t\tT-Test : If p < 0.05, then we can reject the null hypothesis of identical average scores. (they differ)')
print ('\t\tShapiro : If P > 0.05, it may be assumed that the data have a normal distribution.')
return meanl1, stdevl1, meanl2, stdevl2
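# Hedged usage sketch (synthetic samples, not data from the study):
# a = list(np.random.normal(10.0, 2.0, 30))
# b = list(np.random.normal(11.0, 2.0, 30))
# CalcParametric_WelshWithShapiroAndLevene(a, b, printHint=True)
# CalcNonParametric_MannWhitneyWithShapiro(a, b, my_alternative='two-sided')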
def PrintQQPlot(list1):
res = np.array(list1)
fig = sm.qqplot(res)
pyplot.show()
def plotBarChartWithStDev(means, stdev):
ind = np.arange(len(means))
width = 0.35
colours = ['red','blue','green','yellow', 'orange']
pyplot.figure()
#pyplot.title('Average Age')
for i in range(len(means)):
pyplot.bar(ind[i],means[i],width,color=colours[i],align='center',yerr=stdev[i],ecolor='k')
pyplot.ylabel('bla')
pyplot.xticks(ind,('ul','uc','ur','dr','t'))
def plotBarChartWithStdDevDouble(n, means1, means2, stdev1, stdev2, axislist, axistitle = '', newwidth=.4, vlabeloffset=2, x=16, y = 9, bLog = False, pos='best', dolabel=False):
N = n
ind = np.arange(N) # the x locations for the groups
width = newwidth # the width of the bars
rcParams['figure.figsize'] = x/2,y/2
fig = pyplot.figure()
ax = fig.add_subplot(111)
yvals = means1#[4, 9,6,9,2]
rects1 = ax.bar(left=ind+width, height=yvals, width=newwidth, ecolor='black', error_kw=dict(lw=1, capsize=2, capthick=1), color='#4472c4',edgecolor='none',)
zvals = means2#[1,2,21,1,2]
rects2 = ax.bar(left=ind+width*2, height=zvals, width=newwidth, ecolor='black',error_kw=dict(lw=1, capsize=3, capthick=1), color = '#ed7d31', edgecolor='none')#color='#D3D3D3')#, hatch='..')
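    # Hedged compatibility note: newer matplotlib releases renamed the first bar()
    # argument from `left` to `x`, so these keyword calls may need
    # ax.bar(x=ind + width, ...) on recent versions.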
ax.set_ylabel(axistitle)
ax.set_xticks(ind+width*vlabeloffset)
ax.set_xticklabels( axislist )
ax.legend( (rects1[0], rects2[0]), ('2D', 'S3D') ,loc=pos )
if (bLog):
ax.set_yscale('symlog')
#pyplot.yscale('log',nonposx='clip')
#pyplot.ylim( (pow(-10,1),pow(10,2)) )
def autolabel(rects):
for rect in rects:
h = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 2.05*h, '%d'%int(h),
ha='center', va='bottom')
if (dolabel):
autolabel(rects1)
autolabel(rects2)
#fig.autofmt_xdate()
# ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45)
|
[
"matplotlib.pyplot.show",
"statistics.median",
"scipy.stats.shapiro",
"numpy.argmax",
"scipy.stats.mannwhitneyu",
"statistics.stdev",
"matplotlib.rcParams.update",
"matplotlib.pyplot.bar",
"scipy.stats.levene",
"scipy.stats.ttest_ind",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.array",
"statistics.mean",
"statsmodels.api.qqplot",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks"
] |
[((153, 198), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (179, 198), False, 'import matplotlib\n'), ((1008, 1031), 'statistics.stdev', 'statistics.stdev', (['list1'], {}), '(list1)\n', (1024, 1031), False, 'import statistics\n'), ((1046, 1069), 'statistics.stdev', 'statistics.stdev', (['list2'], {}), '(list2)\n', (1062, 1069), False, 'import statistics\n'), ((2320, 2367), 'scipy.stats.levene', 'scipy.stats.levene', (['list1', 'list2'], {'center': '"""mean"""'}), "(list1, list2, center='mean')\n", (2338, 2367), False, 'import scipy\n'), ((2763, 2785), 'statistics.mean', 'statistics.mean', (['list1'], {}), '(list1)\n', (2778, 2785), False, 'import statistics\n'), ((2799, 2821), 'statistics.mean', 'statistics.mean', (['list2'], {}), '(list2)\n', (2814, 2821), False, 'import statistics\n'), ((2841, 2864), 'statistics.stdev', 'statistics.stdev', (['list1'], {}), '(list1)\n', (2857, 2864), False, 'import statistics\n'), ((2879, 2902), 'statistics.stdev', 'statistics.stdev', (['list2'], {}), '(list2)\n', (2895, 2902), False, 'import statistics\n'), ((3660, 3675), 'numpy.array', 'np.array', (['list1'], {}), '(list1)\n', (3668, 3675), True, 'import numpy as np\n'), ((3686, 3700), 'statsmodels.api.qqplot', 'sm.qqplot', (['res'], {}), '(res)\n', (3695, 3700), True, 'import statsmodels.api as sm\n'), ((3705, 3718), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (3716, 3718), False, 'from matplotlib import pyplot\n'), ((3879, 3894), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (3892, 3894), False, 'from matplotlib import pyplot\n'), ((4063, 4083), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""bla"""'], {}), "('bla')\n", (4076, 4083), False, 'from matplotlib import pyplot\n'), ((4088, 4137), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['ind', "('ul', 'uc', 'ur', 'dr', 't')"], {}), "(ind, ('ul', 'uc', 'ur', 'dr', 't'))\n", (4101, 4137), False, 'from matplotlib import pyplot\n'), ((4332, 4344), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4341, 4344), True, 'import numpy as np\n'), ((4483, 4498), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (4496, 4498), False, 'from matplotlib import pyplot\n'), ((789, 813), 'statistics.median', 'statistics.median', (['list1'], {}), '(list1)\n', (806, 813), False, 'import statistics\n'), ((829, 853), 'statistics.median', 'statistics.median', (['list2'], {}), '(list2)\n', (846, 853), False, 'import statistics\n'), ((904, 926), 'statistics.mean', 'statistics.mean', (['list1'], {}), '(list1)\n', (919, 926), False, 'import statistics\n'), ((942, 964), 'statistics.mean', 'statistics.mean', (['list2'], {}), '(list2)\n', (957, 964), False, 'import statistics\n'), ((3968, 4069), 'matplotlib.pyplot.bar', 'pyplot.bar', (['ind[i]', 'means[i]', 'width'], {'color': 'colours[i]', 'align': '"""center"""', 'yerr': 'stdev[i]', 'ecolor': '"""k"""'}), "(ind[i], means[i], width, color=colours[i], align='center', yerr=\n stdev[i], ecolor='k')\n", (3978, 4069), False, 'from matplotlib import pyplot\n'), ((675, 741), 'scipy.stats.mannwhitneyu', 'scipy.stats.mannwhitneyu', (['list1', 'list2'], {'alternative': 'my_alternative'}), '(list1, list2, alternative=my_alternative)\n', (699, 741), False, 'import scipy\n'), ((533, 559), 'scipy.stats.shapiro', 'scipy.stats.shapiro', (['list1'], {}), '(list1)\n', (552, 559), False, 'import scipy\n'), ((618, 644), 'scipy.stats.shapiro', 'scipy.stats.shapiro', (['list2'], {}), '(list2)\n', (637, 644), False, 'import scipy\n'), 
((2124, 2150), 'scipy.stats.shapiro', 'scipy.stats.shapiro', (['list1'], {}), '(list1)\n', (2143, 2150), False, 'import scipy\n'), ((2209, 2235), 'scipy.stats.shapiro', 'scipy.stats.shapiro', (['list2'], {}), '(list2)\n', (2228, 2235), False, 'import scipy\n'), ((1262, 1278), 'numpy.argmax', 'np.argmax', (['list1'], {}), '(list1)\n', (1271, 1278), True, 'import numpy as np\n'), ((1464, 1480), 'numpy.argmax', 'np.argmax', (['list2'], {}), '(list2)\n', (1473, 1480), True, 'import numpy as np\n'), ((2996, 3019), 'statistics.stdev', 'statistics.stdev', (['list1'], {}), '(list1)\n', (3012, 3019), False, 'import statistics\n'), ((3128, 3151), 'statistics.stdev', 'statistics.stdev', (['list2'], {}), '(list2)\n', (3144, 3151), False, 'import statistics\n'), ((2432, 2484), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['list1', 'list2'], {'equal_var': '(False)'}), '(list1, list2, equal_var=False)\n', (2453, 2484), False, 'import scipy\n'), ((2567, 2618), 'scipy.stats.ttest_ind', 'scipy.stats.ttest_ind', (['list1', 'list2'], {'equal_var': '(True)'}), '(list1, list2, equal_var=True)\n', (2588, 2618), False, 'import scipy\n'), ((1225, 1241), 'numpy.argmin', 'np.argmin', (['list1'], {}), '(list1)\n', (1234, 1241), True, 'import numpy as np\n'), ((1427, 1443), 'numpy.argmin', 'np.argmin', (['list2'], {}), '(list2)\n', (1436, 1443), True, 'import numpy as np\n'), ((2949, 2971), 'statistics.mean', 'statistics.mean', (['list1'], {}), '(list1)\n', (2964, 2971), False, 'import statistics\n'), ((3082, 3104), 'statistics.mean', 'statistics.mean', (['list2'], {}), '(list2)\n', (3097, 3104), False, 'import statistics\n'), ((1155, 1178), 'statistics.stdev', 'statistics.stdev', (['list1'], {}), '(list1)\n', (1171, 1178), False, 'import statistics\n'), ((1357, 1380), 'statistics.stdev', 'statistics.stdev', (['list2'], {}), '(list2)\n', (1373, 1380), False, 'import statistics\n')]
|
import os
import numpy as np
import logging
from pystella.model.sn_eve import PreSN
from pystella.util.phys_var import phys
logger = logging.getLogger(__name__)
try:
import matplotlib.pyplot as plt
from matplotlib import gridspec
is_matplotlib = True
except ImportError:
logging.debug('matplotlib failed to import', exc_info=True)
is_matplotlib = False
pass
# logger.setLevel(logging.INFO)
# logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
__author__ = 'bakl'
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# 681 15
# 1.0d0 1.0d0 4.0d0 12.0d0 16.0d0 20.0d0 24.0d0 28.0d0 32.0d0 36.0d0 40.0d0 44.0d0 48.0d0 52.0d0 56.0d0
# 0.0d0 1.0d0 2.0d0 6.0d0 8.0d0 10.0d0 12.0d0 14.0d0 16.0d0 18.0d0 20.0d0 22.0d0 24.0d0 26.0d0 28.0d0
snec_elements = "NN H He C O Ne Mg Si S Ar Ca Ti Cr Fe Ni".split()
snec_elements_Z_str = "0.0 1.0 2.0 6.0 8.0 10.0 12.0 14.0 16.0 18.0 20.0 22.0 24.0 26.0 28.0"
snec_elements_Z = [float(s) for s in snec_elements_Z_str.split()]
snec_elements_A_str = "1.0 1.0 4.0 12.0 16.0 20.0 24.0 28.0 32.0 36.0 40.0 44.0 48.0 52.0 56.0"
snec_elements_A = [float(s) for s in snec_elements_A_str.split()]
snec_el_colors = dict(NN="yellow", H="blue", He="cyan", C="darkorange",
O="violet", Ne="green", Mg="skyblue", Si="olive",
S="indigo", Ar="brown", Ca="purple", Ti="hotpink",
Cr="m", Fe='maroon', Ni='magenta')
snec_el_lntypes = dict((k, '--') for k, v in snec_el_colors.items()) # no y-shift
snec_el_lntypes['H'] = '-'
snec_el_lntypes['He'] = '-'
snec_el_lntypes['O'] = '-'
snec_el_lntypes['C'] = '-'
snec_el_lntypes['Ni56'] = '-'
snec_profile_cols = "i M R T Rho V".split()
class Snec:
def __init__(self, name):
"""Creates a Problem instance. It's initial conditions for SNEC. Required parameters: name."""
self._name = name
self._chem_file = None
self._chem = None
self._profile_file = None
self._profile = None
@property
def Name(self):
return self._name
@property
def chem_file(self):
return self._chem_file
@property
def r(self):
"""radius"""
return self._chem[PreSN.sR]
@property
def nzon(self):
"""Number of zones"""
return len(self.r)
@property
def m(self):
"""Mass"""
return self._chem[PreSN.sM]
@property
def is_chem_load(self):
"""Check if data has been loaded."""
return self._chem is not None
@property
def chem(self):
"""Full data"""
return self._chem
# Profile structure
@property
def profile_file(self):
return self._profile_file
@property
def profile(self):
"""Full data"""
return self._profile
@property
def is_profile_load(self):
"""Check if data has been loaded."""
return self._profile is not None
@property
def pmass(self):
"""Mass"""
return self.hyd(PreSN.sM)
@property
def pradius(self):
"""Radius"""
return self.hyd(PreSN.sR)
@property
def ptemp(self):
"""Temperature"""
return self.hyd(PreSN.sT)
@property
def prho(self):
"""Density"""
return self.hyd(PreSN.sRho)
@property
def pvel(self):
"""Velocity"""
return self.hyd(PreSN.sV)
@property
def Elements(self):
"""Elements"""
els = []
keys = self.chem.dtype.names
for el in snec_elements:
if el in keys:
els.append(el)
return els
def hyd(self, v):
"""Hydro data"""
if v not in snec_profile_cols:
raise ValueError("There is no information about the parameter [%s]. You should set it." % v)
return self._profile[v]
def load_profile(self, fname):
if not os.path.isfile(fname):
logger.error(' No snec profile-data for %s' % fname)
return None
self._profile_file = fname
logger.info('Load profile data from %s' % self.profile_file)
use_cols = list(range(0, len(snec_profile_cols)))
dtype = np.dtype({'names': snec_profile_cols, 'formats': [np.float64] * len(snec_profile_cols)})
self._profile = np.loadtxt(self.profile_file, skiprows=1, dtype=dtype, usecols=use_cols)
return self
def write_profile(self, fname):
"""
Write profile to file
Format:
# ibuffer, pmass(i), pradius(i), ptemp(i), prho(i), pvel(i)
# 1 1.04019E+31 7.94499E+06 1.00140E+10 4.91485E+09 -1.21857E+07 4.57036E-01 0.00000E+00
:return: True if fname exists
"""
# dum = np.zeros(self.nzon)
logger.info(' Write profile-data to %s' % fname)
zones = range(1, self.nzon + 1)
with open(fname, 'w') as f:
f.write('{:6d}\n'.format(self.nzon))
for _ in zip(zones, self.pmass, self.pradius, self.ptemp, self.prho, self.pvel):
f.write('%4d %12.5e %12.5e %12.5e %12.5e %12.5e\n' % _)
return os.path.isfile(fname)
def el(self, el):
if el not in snec_elements:
raise ValueError("There is no such element [%s]." % el)
if not self.is_chem_load:
raise Exception("SNEC chem-data has not been loaded. Check and load from %s" % self._chem_file)
return self._chem[el]
def set_el(self, el, data):
if el not in snec_elements:
raise ValueError("There is no such element [%s]." % el)
if not self.is_chem_load:
raise Exception("SNEC chem-data has not been created.")
if self.nzon != len(data):
raise ValueError("The data(len={}) should be have the same nzon={} as SNEC. ".format(len(data), self.nzon))
self._chem[el] = data
def load_chem(self, fname):
if not os.path.isfile(fname):
logger.error(' No snec chem-data for %s' % fname)
return None
self._chem_file = fname
logger.info('Load chemical data from %s' % self.chem_file)
names = [PreSN.sM, PreSN.sR] + snec_elements
print("Names: %s" % ' '.join(names))
dtype = np.dtype({'names': names, 'formats': [np.float64] * len(names)})
self._chem = np.loadtxt(fname, skiprows=3, dtype=dtype, comments='#')
return self
def write_chem(self, fname, is_header=True):
"""
Write data to file in iso.dat format
:return:
"""
logger.info(' Write chem-data to %s' % fname)
with open(fname, 'w') as f:
# write nzon nElements
if is_header:
f.write('{:d} {:d}\n'.format(self.nzon, len(snec_elements)))
f.write('{}\n'.format(snec_elements_A_str))
f.write('{}\n'.format(snec_elements_Z_str))
for i in range(self.nzon):
s = '{:.5e} {:.5e}'.format(self.pmass[i], self.pradius[i])
for el in snec_elements:
s += ' {:.5e}'.format(self.el(el)[i])
f.write('{}\n'.format(s))
return os.path.isfile(fname)
# Plotting
def plot_chem(self, x='m', ax=None, xlim=None, ylim=None, **kwargs):
elements = kwargs.get('elements', snec_elements)
lntypes = kwargs.get('lntypes', snec_el_lntypes)
if isinstance(lntypes, str):
lntypes = {el: lntypes for el in elements}
colors = kwargs.get('colors', snec_el_colors)
loc = kwargs.get('leg_loc', 3)
font_size = kwargs.get('font_size', 14)
leg_ncol = kwargs.get('leg_ncol', 4)
lw = kwargs.get('lw', 2)
is_save = kwargs.get('is_save', False)
alpha = kwargs.get('alpha', 1.)
is_new_plot = ax is None
# setup figure
if is_new_plot:
plt.matplotlib.rcParams.update({'font.size': font_size})
fig = plt.figure(num=None, figsize=(12, 12), dpi=100, facecolor='w', edgecolor='k')
gs1 = gridspec.GridSpec(1, 1)
gs1.update(wspace=0.1, hspace=0.1, top=None, left=0.1, right=0.98)
ax = fig.add_subplot(gs1[0, 0])
if x == 'r':
ax.set_xlabel(r'R [cm]')
elif x == 'm':
ax.set_xlabel(r'M [$M_\odot$]')
else:
ax.set_xlabel(r'R [cm]')
ax.set_xscale('log')
is_x_lim = xlim is not None
is_y_lim = ylim is not None
if x == 'r':
x = self.r
elif x == 'm':
x = self.m / phys.M_sun
else:
x = self.r
y_min = []
y_max = []
for el in elements:
y = self.el(el)
# y = np.log10(self.el(el))
ax.semilogy(x, y, label='%s' % el, color=colors[el], ls=lntypes[el], linewidth=lw, alpha=alpha)
# ax.plot(x, y, label='%s' % el, color=colors[el], marker='o', ls=':', markersize=3)
if not is_y_lim:
y_min.append(np.min(y))
y_max.append(np.max(y))
if not is_y_lim:
            ylim = [np.min(y_min), np.max(y_max)]
if not is_x_lim:
xlim = np.min(x), np.max(x)
ax.set_xlim(xlim)
# ax.set_yscale('log')
# ax.set_ylim(ylim)
# ax.set_ylabel(r'$log10(X_i)$')
ax.set_ylim(ylim)
ax.set_ylabel(r'$X_i$')
if is_new_plot:
ax.legend(prop={'size': 9}, loc=loc, ncol=leg_ncol, fancybox=False, frameon=True)
if is_save:
fsave = os.path.join(os.path.expanduser('~/'), 'chem_%s.pdf' % self._name)
logger.info(" Save plot to %s " % fsave)
ax.get_figure().savefig(fsave, bbox_inches='tight', format='pdf')
return ax
@staticmethod
def presn2snec(presn):
snec = Snec(presn.Name)
# Create profile
dtype = [('i', '<f8'), (PreSN.sM, '<f8'), (PreSN.sR, '<f8'), (PreSN.sT, '<f8'),
(PreSN.sRho, '<f8'), (PreSN.sV, '<f8')]
aprofile = np.zeros((presn.nzon,), dtype=dtype)
# Fill profile
aprofile[PreSN.sM] = presn.m
aprofile[PreSN.sR] = presn.r
aprofile[PreSN.sT] = presn.T
aprofile[PreSN.sRho] = presn.rho
aprofile[PreSN.sV] = presn.V
snec._profile = aprofile
# Create chemical composition
dtype = [(PreSN.sM, '<f8'), (PreSN.sR, '<f8'), ('NN', '<f8'), ('H', '<f8'), ('He', '<f8'),
('C', '<f8'), ('O', '<f8'), ('Ne', '<f8'), ('Mg', '<f8'), ('Si', '<f8'),
('S', '<f8'), ('Ar', '<f8'), ('Ca', '<f8'), ('Ti', '<f8'), ('Cr', '<f8'),
('Fe', '<f8'), ('Ni', '<f8')]
achem = np.zeros((presn.nzon,), dtype=dtype)
# Fill
achem[PreSN.sM] = presn.m
achem[PreSN.sR] = presn.r
for e in presn.Elements:
if e in snec_elements:
achem[e] = presn.el(e)
snec._chem = achem
return snec
class ParserXg:
pass
def to_presn(p):
if not p.is_profile_load:
raise ValueError("There are no data in SNEC problem. "
"Probably, You should run: load_profile and load_chem.")
presn = PreSN(p.Name, p.nzon)
col_map = {'R', 'M', 'T', 'Rho', 'V'}
for v in col_map:
presn.set_hyd(v, p.hyd(v))
for e in presn.Elements:
if e in snec_elements:
presn.set_chem(e, p.el(e))
else:
presn.set_chem(e, np.zeros(presn.nzon))
    # todo check with Viktoriya: in SNEC, Ni is used as Ni56
presn.set_chem('Ni56', presn.el('Ni'))
return presn
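# Hedged usage sketch (file names are placeholders, not paths from the repository):
# snec = Snec('m15').load_profile('profile.short')
# snec.load_chem('iso.dat')
# presn = to_presn(snec)               # SNEC problem -> pystella PreSN
# snec_again = Snec.presn2snec(presn)  # and back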
|
[
"os.path.expanduser",
"logging.debug",
"numpy.zeros",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.loadtxt",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.matplotlib.rcParams.update",
"pystella.model.sn_eve.PreSN",
"logging.getLogger"
] |
[((136, 163), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (153, 163), False, 'import logging\n'), ((513, 540), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (530, 540), False, 'import logging\n'), ((11263, 11284), 'pystella.model.sn_eve.PreSN', 'PreSN', (['p.Name', 'p.nzon'], {}), '(p.Name, p.nzon)\n', (11268, 11284), False, 'from pystella.model.sn_eve import PreSN\n'), ((292, 351), 'logging.debug', 'logging.debug', (['"""matplotlib failed to import"""'], {'exc_info': '(True)'}), "('matplotlib failed to import', exc_info=True)\n", (305, 351), False, 'import logging\n'), ((4329, 4401), 'numpy.loadtxt', 'np.loadtxt', (['self.profile_file'], {'skiprows': '(1)', 'dtype': 'dtype', 'usecols': 'use_cols'}), '(self.profile_file, skiprows=1, dtype=dtype, usecols=use_cols)\n', (4339, 4401), True, 'import numpy as np\n'), ((5143, 5164), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (5157, 5164), False, 'import os\n'), ((6346, 6402), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'skiprows': '(3)', 'dtype': 'dtype', 'comments': '"""#"""'}), "(fname, skiprows=3, dtype=dtype, comments='#')\n", (6356, 6402), True, 'import numpy as np\n'), ((7178, 7199), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (7192, 7199), False, 'import os\n'), ((10088, 10124), 'numpy.zeros', 'np.zeros', (['(presn.nzon,)'], {'dtype': 'dtype'}), '((presn.nzon,), dtype=dtype)\n', (10096, 10124), True, 'import numpy as np\n'), ((10754, 10790), 'numpy.zeros', 'np.zeros', (['(presn.nzon,)'], {'dtype': 'dtype'}), '((presn.nzon,), dtype=dtype)\n', (10762, 10790), True, 'import numpy as np\n'), ((3924, 3945), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (3938, 3945), False, 'import os\n'), ((5936, 5957), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (5950, 5957), False, 'import os\n'), ((7894, 7950), 'matplotlib.pyplot.matplotlib.rcParams.update', 'plt.matplotlib.rcParams.update', (["{'font.size': font_size}"], {}), "({'font.size': font_size})\n", (7924, 7950), True, 'import matplotlib.pyplot as plt\n'), ((7969, 8046), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(12, 12)', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(12, 12), dpi=100, facecolor='w', edgecolor='k')\n", (7979, 8046), True, 'import matplotlib.pyplot as plt\n'), ((8066, 8089), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (8083, 8089), False, 'from matplotlib import gridspec\n'), ((9161, 9174), 'numpy.min', 'np.min', (['y_min'], {}), '(y_min)\n', (9167, 9174), True, 'import numpy as np\n'), ((9176, 9189), 'numpy.max', 'np.max', (['y_min'], {}), '(y_min)\n', (9182, 9189), True, 'import numpy as np\n'), ((9236, 9245), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (9242, 9245), True, 'import numpy as np\n'), ((9247, 9256), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (9253, 9256), True, 'import numpy as np\n'), ((9616, 9640), 'os.path.expanduser', 'os.path.expanduser', (['"""~/"""'], {}), "('~/')\n", (9634, 9640), False, 'import os\n'), ((11528, 11548), 'numpy.zeros', 'np.zeros', (['presn.nzon'], {}), '(presn.nzon)\n', (11536, 11548), True, 'import numpy as np\n'), ((9064, 9073), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (9070, 9073), True, 'import numpy as np\n'), ((9104, 9113), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (9110, 9113), True, 'import numpy as np\n')]
|
import os
from typing import List
from enum import IntEnum
import cv2 as cv
import numpy as np
from pydicom import dcmread
from pydicom.dataset import Dataset
from pydicom.sequence import Sequence
from rt_utils.utils import ROIData, SOPClassUID
def load_sorted_image_series(dicom_series_path: str):
"""
File contains helper methods for loading / formatting DICOM images and contours
"""
series_data = load_dcm_images_from_path(dicom_series_path)
if len(series_data) == 0:
raise Exception("No DICOM Images found in input path")
# Sort slices in ascending order
series_data.sort(key=lambda ds: ds.ImagePositionPatient[2], reverse=False)
return series_data
def load_dcm_images_from_path(dicom_series_path: str) -> List[Dataset]:
series_data = []
for root, _, files in os.walk(dicom_series_path):
for file in files:
try:
ds = dcmread(os.path.join(root, file))
if hasattr(ds, 'pixel_array'):
series_data.append(ds)
except Exception:
# Not a valid DICOM file
continue
return series_data
def get_contours_coords(mask_slice: np.ndarray, series_slice: Dataset, roi_data: ROIData):
# Create pin hole mask if specified
if roi_data.use_pin_hole:
mask_slice = create_pin_hole_mask(mask_slice, roi_data.approximate_contours)
# Get contours from mask
contours, _ = find_mask_contours(mask_slice, roi_data.approximate_contours)
validate_contours(contours)
# Format for DICOM
formatted_contours = []
for contour in contours:
contour = np.array(contour) # Type cannot be a list
translated_contour = translate_contour_to_data_coordinants(contour, series_slice)
dicom_formatted_contour = format_contour_for_dicom(translated_contour, series_slice)
formatted_contours.append(dicom_formatted_contour)
return formatted_contours
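# Hedged usage sketch (the path is a placeholder and `roi_data` is assumed to be an
# rt_utils ROIData instance prepared elsewhere):
# series = load_sorted_image_series('/path/to/dicom/series')
# contours = get_contours_coords(mask_3d[:, :, 0], series[0], roi_data)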
def find_mask_contours(mask: np.ndarray, approximate_contours: bool):
approximation_method = cv.CHAIN_APPROX_SIMPLE if approximate_contours else cv.CHAIN_APPROX_NONE
contours, hierarchy = cv.findContours(mask.astype(np.uint8), cv.RETR_TREE, approximation_method)
# Format extra array out of data
for i, contour in enumerate(contours):
contours[i] = [[pos[0][0], pos[0][1]] for pos in contour]
hierarchy = hierarchy[0] # Format extra array out of data
return contours, hierarchy
def create_pin_hole_mask(mask: np.ndarray, approximate_contours: bool):
"""
Creates masks with pin holes added to contour regions with holes.
This is done so that a given region can be represented by a single contour.
"""
contours, hierarchy = find_mask_contours(mask, approximate_contours)
pin_hole_mask = mask.copy()
# Iterate through the hierarchy, for child nodes, draw a line upwards from the first point
for i, array in enumerate(hierarchy):
parent_contour_index = array[Hierarchy.parent_node]
if parent_contour_index == -1: continue # Contour is not a child
child_contour = contours[i]
line_start = tuple(child_contour[0])
pin_hole_mask = draw_line_upwards_from_point(pin_hole_mask, line_start, fill_value=0)
return pin_hole_mask
def draw_line_upwards_from_point(mask: np.ndarray, start, fill_value: int) -> np.ndarray:
line_width = 2
end = (start[0], start[1] - 1)
mask = mask.astype(np.uint8) # Type that OpenCV expects
# Draw one point at a time until we hit a point that already has the desired value
while mask[end] != fill_value:
cv.line(mask, start, end, fill_value, line_width)
# Update start and end to the next positions
start = end
end = (start[0], start[1] - line_width)
return mask.astype(bool)
def validate_contours(contours: list):
if len(contours) == 0:
raise Exception("Unable to find contour in non empty mask, please check your mask formatting")
def translate_contour_to_data_coordinants(contour, series_slice: Dataset):
offset = series_slice.ImagePositionPatient
spacing = series_slice.PixelSpacing
contour[:, 0] = (contour[:, 0]) * spacing[0] + offset[0]
contour[:, 1] = (contour[:, 1]) * spacing[1] + offset[1]
return contour
def translate_contour_to_pixel_coordinants(contour, series_slice: Dataset):
offset = series_slice.ImagePositionPatient
spacing = series_slice.PixelSpacing
contour[:, 0] = (contour[:, 0] - offset[0]) / spacing[0]
    contour[:, 1] = (contour[:, 1] - offset[1]) / spacing[1]
return contour
def format_contour_for_dicom(contour, series_slice: Dataset):
# DICOM uses a 1d array of x, y, z coords
z_indicies = np.ones((contour.shape[0], 1)) * series_slice.SliceLocation
contour = np.concatenate((contour, z_indicies), axis = 1)
contour = np.ravel(contour)
contour = contour.tolist()
return contour
def create_series_mask_from_contour_sequence(series_data, contour_sequence: Sequence):
mask = create_empty_series_mask(series_data)
# Iterate through each slice of the series, If it is a part of the contour, add the contour mask
for i, series_slice in enumerate(series_data):
slice_contour_data = get_slice_contour_data(series_slice, contour_sequence)
if len(slice_contour_data):
mask[:, :, i] = get_slice_mask_from_slice_contour_data(series_slice, slice_contour_data)
return mask
def get_slice_contour_data(series_slice: Dataset, contour_sequence: Sequence):
slice_contour_data = []
# Traverse through sequence data and get all contour data pertaining to the given slice
for contour in contour_sequence:
for contour_image in contour.ContourImageSequence:
if contour_image.ReferencedSOPInstanceUID == series_slice.SOPInstanceUID:
slice_contour_data.append(contour.ContourData)
return slice_contour_data
def get_slice_mask_from_slice_contour_data(series_slice: Dataset, slice_contour_data):
slice_mask = create_empty_slice_mask(series_slice)
for contour_coords in slice_contour_data:
fill_mask = get_contour_fill_mask(series_slice, contour_coords)
# Invert values in the region to be filled. This will create holes where needed if contours are stacked on top of each other
slice_mask[fill_mask == 1] = np.invert(slice_mask[fill_mask == 1])
return slice_mask
def get_contour_fill_mask(series_slice: Dataset, contour_coords):
# Format data
reshaped_contour_data = np.reshape(contour_coords, [len(contour_coords) // 3, 3])
translated_contour_data = translate_contour_to_pixel_coordinants(reshaped_contour_data, series_slice)
translated_contour_data = np.around(translated_contour_data)
polygon = [np.array([translated_contour_data[:, :2]], dtype=np.int32)]
# Create mask for the region. Fill with 1 for ROI
fill_mask = create_empty_slice_mask(series_slice).astype(np.uint8)
cv.fillPoly(img=fill_mask, pts=polygon, color=1)
return fill_mask
def create_empty_series_mask(series_data):
ref_dicom_image = series_data[0]
mask_dims = (int(ref_dicom_image.Columns), int(ref_dicom_image.Rows), len(series_data))
mask = np.zeros(mask_dims).astype(bool)
return mask
def create_empty_slice_mask(series_slice):
mask_dims = (int(series_slice.Columns), int(series_slice.Rows))
mask = np.zeros(mask_dims).astype(bool)
return mask
class Hierarchy(IntEnum):
"""
Enum class for what the positions in the OpenCV hierarchy array mean
"""
next_node = 0
previous_node = 1
first_child = 2
parent_node = 3
|
[
"cv2.line",
"numpy.invert",
"numpy.ravel",
"os.walk",
"numpy.zeros",
"numpy.ones",
"cv2.fillPoly",
"numpy.around",
"numpy.array",
"os.path.join",
"numpy.concatenate"
] |
[((823, 849), 'os.walk', 'os.walk', (['dicom_series_path'], {}), '(dicom_series_path)\n', (830, 849), False, 'import os\n'), ((4837, 4882), 'numpy.concatenate', 'np.concatenate', (['(contour, z_indicies)'], {'axis': '(1)'}), '((contour, z_indicies), axis=1)\n', (4851, 4882), True, 'import numpy as np\n'), ((4899, 4916), 'numpy.ravel', 'np.ravel', (['contour'], {}), '(contour)\n', (4907, 4916), True, 'import numpy as np\n'), ((6781, 6815), 'numpy.around', 'np.around', (['translated_contour_data'], {}), '(translated_contour_data)\n', (6790, 6815), True, 'import numpy as np\n'), ((7021, 7069), 'cv2.fillPoly', 'cv.fillPoly', ([], {'img': 'fill_mask', 'pts': 'polygon', 'color': '(1)'}), '(img=fill_mask, pts=polygon, color=1)\n', (7032, 7069), True, 'import cv2 as cv\n'), ((1654, 1671), 'numpy.array', 'np.array', (['contour'], {}), '(contour)\n', (1662, 1671), True, 'import numpy as np\n'), ((3649, 3698), 'cv2.line', 'cv.line', (['mask', 'start', 'end', 'fill_value', 'line_width'], {}), '(mask, start, end, fill_value, line_width)\n', (3656, 3698), True, 'import cv2 as cv\n'), ((4763, 4793), 'numpy.ones', 'np.ones', (['(contour.shape[0], 1)'], {}), '((contour.shape[0], 1))\n', (4770, 4793), True, 'import numpy as np\n'), ((6413, 6450), 'numpy.invert', 'np.invert', (['slice_mask[fill_mask == 1]'], {}), '(slice_mask[fill_mask == 1])\n', (6422, 6450), True, 'import numpy as np\n'), ((6831, 6889), 'numpy.array', 'np.array', (['[translated_contour_data[:, :2]]'], {'dtype': 'np.int32'}), '([translated_contour_data[:, :2]], dtype=np.int32)\n', (6839, 6889), True, 'import numpy as np\n'), ((7276, 7295), 'numpy.zeros', 'np.zeros', (['mask_dims'], {}), '(mask_dims)\n', (7284, 7295), True, 'import numpy as np\n'), ((7449, 7468), 'numpy.zeros', 'np.zeros', (['mask_dims'], {}), '(mask_dims)\n', (7457, 7468), True, 'import numpy as np\n'), ((924, 948), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (936, 948), False, 'import os\n')]
|
import os
import json
import pickle
from datetime import datetime
import torch
import numpy as np
from pathlib import Path
from mushroom_rl.core import Serializable
from mushroom_rl.core.logger import ConsoleLogger
class BenchmarkLogger(ConsoleLogger):
"""
Class to handle all interactions with the log directory.
"""
def __init__(self, log_dir=None, log_id=None, use_timestamp=True):
"""
Constructor.
Args:
log_dir (str, None): path to the log directory, if not specified defaults to ./logs or to
/work/scratch/$USER if the second directory exists;
log_id (str, None): log id, if not specified defaults to: benchmark[_YY-mm-ddTHH:MM:SS.zzz]);
use_timestamp (bool, True): select if a timestamp should be appended to the log id.
"""
self._file_J = 'J.pkl'
self._file_R = 'R.pkl'
self._file_V = 'V.pkl'
self._file_entropy = 'entropy.pkl'
self._file_best_agent = 'best_agent.msh'
self._file_last_agent = 'last_agent.msh'
self._file_env_builder = 'environment_builder.pkl'
self._file_agent_builder = 'agent_builder.pkl'
self._file_config = 'config.json'
self._file_stats = 'stats.json'
self._log_dir = ''
self._log_id = ''
# Set and create log directories
self.set_log_dir(log_dir)
self.set_log_id(log_id, use_timestamp=use_timestamp)
super().__init__(self._log_id, Path(self.get_path()), log_file_name='console')
def set_log_dir(self, log_dir):
if log_dir is None:
default_dir = './logs'
scratch_dir = os.path.join('/work', 'scratch', os.getenv('USER'))
if Path(scratch_dir).is_dir():
log_dir = os.path.join(scratch_dir, 'logs')
else:
log_dir = default_dir
if not os.path.exists(log_dir):
Path(log_dir).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(log_dir):
raise NotADirectoryError("Path to save builders is not valid")
self._log_dir = log_dir
def get_log_dir(self):
return self._log_dir
def set_log_id(self, log_id, use_timestamp=True):
if log_id is None:
log_id = 'benchmark'
if use_timestamp:
log_id += '_{}'.format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
path = os.path.join(self._log_dir, log_id, '')
if not os.path.exists(path):
Path(path).mkdir(parents=True, exist_ok=True)
if not os.path.isdir(path):
raise NotADirectoryError("Path to save builders is not valid")
self._log_id = log_id
def get_log_id(self):
return self._log_id
def get_path(self, filename=''):
return os.path.join(self._log_dir, self._log_id, filename)
def get_figure_path(self, filename='', subfolder=None):
figure_dir = Path(self._log_dir) / self._log_id / 'plots'
if subfolder is not None:
figure_dir = figure_dir / subfolder
if not figure_dir.exists():
figure_dir.mkdir(parents=True, exist_ok=True)
return str(figure_dir / filename)
def save_J(self, J):
self._save_pickle(self.get_path(self._file_J), J)
def load_J(self):
return self._load_pickle(self.get_path(self._file_J))
def save_R(self, R):
self._save_pickle(self.get_path(self._file_R), R)
def load_R(self):
return self._load_pickle(self.get_path(self._file_R))
def save_V(self, V):
self._save_pickle(self.get_path(self._file_V), V)
def load_V(self):
return self._load_pickle(self.get_path(self._file_V))
def save_entropy(self, entropy):
self._save_pickle(self.get_path(self._file_entropy), entropy)
def load_entropy(self):
path = self.get_path(self._file_entropy)
if os.path.exists(path):
return self._load_pickle(path)
else:
return None
def exists_policy_entropy(self):
return Path(self.get_path(self._file_entropy)).exists()
def save_best_agent(self, agent):
agent.save(self.get_path(self._file_best_agent))
def save_last_agent(self, agent):
agent.save(self.get_path(self._file_last_agent))
def load_best_agent(self):
return Serializable.load(self.get_path(self._file_best_agent))
def load_last_agent(self):
return Serializable.load(self.get_path(self._file_last_agent))
def save_environment_builder(self, env_builder):
self._save_pickle(self.get_path(self._file_env_builder), env_builder)
def load_environment_builder(self):
return self._load_pickle(self.get_path(self._file_env_builder))
def save_agent_builder(self, agent_builder):
self._save_pickle(self.get_path(self._file_agent_builder), agent_builder)
def load_agent_builder(self):
return self._load_pickle(self.get_path(self._file_agent_builder))
def save_config(self, config):
self._save_json(self.get_path(self._file_config), config)
def load_config(self):
return self._load_json(self.get_path(self._file_config))
def save_stats(self, stats):
self._save_json(self.get_path(self._file_stats), stats)
def load_stats(self):
return self._load_json(self.get_path(self._file_stats))
def save_figure(self, figure, figname, subfolder=None):
figure.savefig(self.get_figure_path(figname + ".pdf", subfolder))
@staticmethod
def _save_pickle(path, obj):
with Path(path).open('wb') as f:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
def _save_numpy(path, obj):
with Path(path).open('wb') as f:
np.save(f, obj)
@staticmethod
def _save_torch(path, obj):
torch.save(obj, path)
@staticmethod
def _save_json(path, obj):
with Path(path).open('w') as f:
json.dump(obj, f, indent=2)
@staticmethod
def _load_pickle(path):
with Path(path).open('rb') as f:
return pickle.load(f)
@staticmethod
def _load_numpy(path):
with Path(path).open('rb') as f:
return np.load(f)
@staticmethod
def _load_torch(path):
return torch.load(path)
@staticmethod
def _load_json(path):
with Path(path).open('r') as f:
return json.load(f)
@classmethod
def from_path(cls, path):
"""
Method to create a BenchmarkLogger from a path.
"""
path = Path(path)
return cls(path.parent, path.name, False)
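# Hypothetical usage sketch (added for illustration, not part of the original file): a simple
# save/load round trip for the J values; the log directory and id below are placeholders.
if __name__ == '__main__':
    demo_logger = BenchmarkLogger(log_dir='./logs', log_id='demo', use_timestamp=False)
    demo_logger.save_J([[1.0, 2.0], [1.5, 2.5]])
    print(demo_logger.load_J())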
|
[
"json.dump",
"pickle.dump",
"numpy.save",
"numpy.load",
"json.load",
"os.path.isdir",
"torch.load",
"os.path.exists",
"datetime.datetime.now",
"torch.save",
"pathlib.Path",
"pickle.load",
"os.path.join",
"os.getenv"
] |
[((2434, 2473), 'os.path.join', 'os.path.join', (['self._log_dir', 'log_id', '""""""'], {}), "(self._log_dir, log_id, '')\n", (2446, 2473), False, 'import os\n'), ((2818, 2869), 'os.path.join', 'os.path.join', (['self._log_dir', 'self._log_id', 'filename'], {}), '(self._log_dir, self._log_id, filename)\n', (2830, 2869), False, 'import os\n'), ((3920, 3940), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3934, 3940), False, 'import os\n'), ((5874, 5895), 'torch.save', 'torch.save', (['obj', 'path'], {}), '(obj, path)\n', (5884, 5895), False, 'import torch\n'), ((6338, 6354), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (6348, 6354), False, 'import torch\n'), ((6620, 6630), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6624, 6630), False, 'from pathlib import Path\n'), ((1908, 1931), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (1922, 1931), False, 'import os\n'), ((2009, 2031), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (2022, 2031), False, 'import os\n'), ((2489, 2509), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2503, 2509), False, 'import os\n'), ((2584, 2603), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2597, 2603), False, 'import os\n'), ((5633, 5686), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(obj, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (5644, 5686), False, 'import pickle\n'), ((5795, 5810), 'numpy.save', 'np.save', (['f', 'obj'], {}), '(f, obj)\n', (5802, 5810), True, 'import numpy as np\n'), ((6002, 6029), 'json.dump', 'json.dump', (['obj', 'f'], {'indent': '(2)'}), '(obj, f, indent=2)\n', (6011, 6029), False, 'import json\n'), ((6137, 6151), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6148, 6151), False, 'import pickle\n'), ((6262, 6272), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (6269, 6272), True, 'import numpy as np\n'), ((6463, 6475), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6472, 6475), False, 'import json\n'), ((1715, 1732), 'os.getenv', 'os.getenv', (['"""USER"""'], {}), "('USER')\n", (1724, 1732), False, 'import os\n'), ((1803, 1836), 'os.path.join', 'os.path.join', (['scratch_dir', '"""logs"""'], {}), "(scratch_dir, 'logs')\n", (1815, 1836), False, 'import os\n'), ((2952, 2971), 'pathlib.Path', 'Path', (['self._log_dir'], {}), '(self._log_dir)\n', (2956, 2971), False, 'from pathlib import Path\n'), ((1749, 1766), 'pathlib.Path', 'Path', (['scratch_dir'], {}), '(scratch_dir)\n', (1753, 1766), False, 'from pathlib import Path\n'), ((1945, 1958), 'pathlib.Path', 'Path', (['log_dir'], {}), '(log_dir)\n', (1949, 1958), False, 'from pathlib import Path\n'), ((2523, 2533), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2527, 2533), False, 'from pathlib import Path\n'), ((5593, 5603), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (5597, 5603), False, 'from pathlib import Path\n'), ((5755, 5765), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (5759, 5765), False, 'from pathlib import Path\n'), ((5963, 5973), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (5967, 5973), False, 'from pathlib import Path\n'), ((6090, 6100), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6094, 6100), False, 'from pathlib import Path\n'), ((6215, 6225), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6219, 6225), False, 'from pathlib import Path\n'), ((6417, 6427), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6421, 6427), False, 'from pathlib import Path\n'), ((2373, 
2387), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2385, 2387), False, 'from datetime import datetime\n')]
|
import math
import numpy as np
def _is_in_china(func):
def wrapper(cls, lnglat):
if 72.004 < lnglat[0] < 137.8347 and .8293 < lnglat[1] < 55.8271:
return func(cls, lnglat)
return lnglat
return wrapper
class Convert:
_XPI = math.pi * 3000 / 180
_PI = math.pi
_A = 6378245
_EE = .00669342162296594323
_MERCATOR = 20037508.34 / 180
_SIZE = 78271516
@classmethod
def _transform_lng(cls, lng: float, lat: float) -> float:
ret = 300 + lng + 2 * lat + .1 * lng * lng + \
.1 * lng * lat + .1 * math.sqrt(math.fabs(lng))
ret += (20 * math.sin(6.0 * lng * cls._PI) + 20 *
math.sin(2 * lng * cls._PI)) * 2 / 3
ret += (20 * math.sin(lng * cls._PI) + 40 *
math.sin(lng / 3 * cls._PI)) * 2 / 3
ret += (150 * math.sin(lng / 12 * cls._PI) + 300 *
math.sin(lng / 30 * cls._PI)) * 2 / 3
return ret
@classmethod
def _transform_lat(cls, lng: float, lat: float) -> float:
ret = -100 + 2 * lng + 3 * lat + .2 * lat * lat + \
.1 * lng * lat + .2 * math.sqrt(math.fabs(lng))
ret += (20 * math.sin(6.0 * lng * cls._PI) + 20 *
math.sin(2 * lng * cls._PI)) * 2 / 3
ret += (20 * math.sin(lat * cls._PI) + 40 *
math.sin(lat / 3 * cls._PI)) * 2 / 3
ret += (160 * math.sin(lat / 12 * cls._PI) + 320 *
math.sin(lat * cls._PI / 30)) * 2 / 3
return ret
@classmethod
@_is_in_china
def wgs84togcj02(cls, lnglat: list) -> list:
"""
        Convert WGS84 coordinates to GCJ-02 (Mars) coordinates
        :param lnglat: list[float] longitude/latitude pair
        :return: list[float] longitude/latitude pair
"""
dlng = cls._transform_lng(lnglat[0] - 105, lnglat[1] - 35)
dlat = cls._transform_lat(lnglat[0] - 105, lnglat[1] - 35)
radlat = lnglat[1] / 180 * cls._PI
magic = math.sin(radlat)
magic = 1 - cls._EE * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180) / ((cls._A * (1 - cls._EE)) / (magic * sqrtmagic) * cls._PI)
dlng = (dlng * 180) / (cls._A / sqrtmagic * math.cos(radlat) * cls._PI)
mglat = lnglat[1] + dlat
mglng = lnglat[0] + dlng
return [mglng, mglat]
@classmethod
@_is_in_china
def wgs84tobd09(cls, lnglat: list) -> list:
"""
        Convert WGS84 coordinates to BD-09 (Baidu) coordinates
        :param lnglat: list[float] longitude/latitude pair
        :return: list[float] longitude/latitude pair
"""
lnglat = cls.wgs84togcj02(lnglat)
return cls.gcj02tobd09(lnglat)
@classmethod
@_is_in_china
def gcj02towgs84(cls, lnglat: list) -> list:
"""
        Convert GCJ-02 (Mars) coordinates to WGS84 coordinates
        :param lnglat: list[float] longitude/latitude pair
        :return: list[float] longitude/latitude pair
"""
dlat = cls._transform_lat(lnglat[0] - 105, lnglat[1] - 35)
dlng = cls._transform_lng(lnglat[0] - 105, lnglat[1] - 35)
radlat = lnglat[1] / 180.0 * cls._PI
magic = math.sin(radlat)
magic = 1 - cls._EE * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180) / ((cls._A * (1 - cls._EE)) / (magic * sqrtmagic) * cls._PI)
dlng = (dlng * 180) / (cls._A / sqrtmagic * math.cos(radlat) * cls._PI)
mglat = lnglat[1] + dlat
mglng = lnglat[0] + dlng
return [lnglat[0] * 2 - mglng, lnglat[1] * 2 - mglat]
@classmethod
@_is_in_china
def gcj02tobd09(cls, lnglat: list) -> list:
"""
        Convert GCJ-02 (Mars) coordinates to BD-09 (Baidu) coordinates
        :param lnglat: list[float] longitude/latitude pair
        :return: list[float] longitude/latitude pair
"""
z = math.sqrt(lnglat[0] * lnglat[0] + lnglat[1] * lnglat[1]) + .00002 * math.sin(lnglat[1] * cls._XPI)
theta = math.atan2(lnglat[1], lnglat[0]) + .000003 * math.cos(lnglat[0] * cls._XPI)
bd_lng = z * math.cos(theta) + .0065
bd_lat = z * math.sin(theta) + .006
return [bd_lng, bd_lat]
@classmethod
@_is_in_china
def bd09towgs84(cls, lnglat: list) -> list:
"""
        Convert BD-09 (Baidu) coordinates to WGS84 coordinates
        :param lnglat: list[float] longitude/latitude pair
        :return: list[float] longitude/latitude pair
"""
lnglat = cls.bd09togcj02(lnglat)
return cls.gcj02towgs84(lnglat)
@classmethod
def bd09togcj02(cls, lnglat: list) -> list:
"""
        Convert BD-09 (Baidu) coordinates to GCJ-02 (Mars) coordinates
        :param lnglat: list[float] longitude/latitude pair
        :return: list[float] longitude/latitude pair
"""
x = lnglat[0] - .0065
y = lnglat[1] - .006
z = math.sqrt(x * x + y * y) - .00002 * math.sin(y * cls._XPI)
theta = math.atan2(y, x) - .000003 * math.cos(x * cls._XPI)
gcj_lng = z * math.cos(theta)
gcj_lat = z * math.sin(theta)
return [gcj_lng, gcj_lat]
@classmethod
def lnglat_to_mercator(
cls,
lnglat: list,
reference_position=(0, 0),
convert_rate=(1, 1),
unit='mm'
) -> list:
"""
        Project longitude/latitude coordinates onto a planar (Mercator) coordinate system
        :param lnglat: list[float] longitude/latitude pair
        :param reference_position: list, reference origin in longitude/latitude, e.g. a city or project center
        :param convert_rate: list, scaling ratios applied to the projected coordinates
        :return: list, projected planar coordinates
"""
x = lnglat[0] - reference_position[0]
y = lnglat[1] - reference_position[1]
x = x * cls._MERCATOR
y = math.log(math.tan((90 + y) * cls._PI / 360)) / (cls._PI / 180)
y = y * cls._MERCATOR
if unit == 'mm':
x *= 1000
y *= 1000
return [x * convert_rate[0], y * convert_rate[1]]
@classmethod
def mercator_to_lnglat(
cls,
mercator,
reference_position=(0, 0),
convert_rate=(1, 1)
) -> list:
"""
        Convert planar (Mercator) coordinates back to longitude/latitude
        :param mercator: list[float] Mercator x/y coordinates
        :param reference_position: list, reference origin in longitude/latitude, e.g. a city or project center
        :param convert_rate: list, scaling ratios applied before the conversion
        :return: list, recovered longitude/latitude pair
"""
x, y = mercator[0] / convert_rate[0], mercator[1] / convert_rate[1]
x, y = x / cls._MERCATOR, y / cls._MERCATOR
y = 180 / cls._PI * (2 * math.atan(math.exp(y * cls._PI / 180)) - cls._PI / 2)
x += reference_position[0]
y += reference_position[1]
return [x, y]
@classmethod
def lnglat_to_tile_index(cls, lnglat: list, level: int) -> list:
n = 2 ** level
x = int((lnglat[0] + 180.0) / 360.0 * n)
lat_rad = math.radians(lnglat[1])
y = int((1.0 - math.asinh(math.tan(lat_rad)) / cls._PI) / 2.0 * n)
return [x, y, level]
@staticmethod
def tile_index_to_lnglat(tiles) -> list:
n = 2 ** tiles[2]
lng = tiles[0] / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * tiles[1] / n)))
lat = math.degrees(lat_rad)
return [lng, lat]
@classmethod
def tile_size_by_zoom(cls, level: int, unit='mm'):
"""
        Get the standard size of a single tile at the given zoom level
        :param level: int, tile zoom level
        :return: tile size at that level (multiplied by 1000 when unit == 'mm')
"""
a = cls._SIZE * 2 ** (- level - 1)
return a * 1000 if unit == 'mm' else a
@staticmethod
def rgb_to_hex(rgb: tuple) -> str:
return '#%02x%02x%02x' % tuple(rgb)
@staticmethod
def hex_to_rgb(hex: str) -> tuple:
return tuple(int(hex[i:i+2], 16) for i in (0, 2, 4))
@staticmethod
def to_list(location: str) -> tuple:
"""
        Convert a string-formatted coordinate into a numeric list for calculation
        :param location: str, e.g. '123.456, 123.456'
        :return: list, e.g. [123.456, 123.456]
"""
        # Assumes location is formatted like '123.456, 123.456'
return list(eval(location))
@staticmethod
def to_string(location: tuple) -> str:
"""
        Convert a numeric-list coordinate into a string-formatted coordinate for requests
        :param location: list, e.g. [123.456, 123.456]
        :return: str, e.g. '123.456,123.456'
"""
        # Assumes location is like [123.456, 123.456]
        # Outputs '123.456,123.456'
return ','.join(list(map(str, location)))
@staticmethod
def stringtolist(string, reverse=False):
"""
        string = "113.52546031343,22.129509715856;113.52673029534,22.12949968767;113.52803031317,22.129279677622;113.52832026393,22.129219617399;113.52899033426,22.12907959059;113.53028032877,22.128819536949;113.53039032742,22.128789572229;113.5322202692,22.12864953033;113.53390023979,22.128729548104;113.53566024254,22.128759520287;113.53599023855,22.128759507971;113.53607024919,22.128759566279;113.53644027672,22.128759511018;113.53818016921,22.128749559991;113.53828022438,22.128749595569;113.5385101591,22.12874961982;113.53944022455,22.128739604046"
"""
ls = string.split(';')
c = list(map(eval, ls))
d = np.array(c)
if reverse:
d = np.flip(d, axis=1)
return d.tolist()
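# Hypothetical usage sketch (added for illustration, not part of the original file): converting a
# WGS84 coordinate near Beijing to GCJ-02 and BD-09 and locating its map tile; the coordinate
# and zoom level below are placeholders.
if __name__ == '__main__':
    demo_lnglat = [116.397, 39.909]
    print(Convert.wgs84togcj02(demo_lnglat))
    print(Convert.wgs84tobd09(demo_lnglat))
    print(Convert.lnglat_to_tile_index(demo_lnglat, level=12))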
|
[
"math.exp",
"numpy.flip",
"math.sqrt",
"math.atan2",
"math.radians",
"math.tan",
"math.fabs",
"math.sin",
"numpy.array",
"math.cos",
"math.sinh",
"math.degrees"
] |
[((1975, 1991), 'math.sin', 'math.sin', (['radlat'], {}), '(radlat)\n', (1983, 1991), False, 'import math\n'), ((2056, 2072), 'math.sqrt', 'math.sqrt', (['magic'], {}), '(magic)\n', (2065, 2072), False, 'import math\n'), ((3034, 3050), 'math.sin', 'math.sin', (['radlat'], {}), '(radlat)\n', (3042, 3050), False, 'import math\n'), ((3115, 3131), 'math.sqrt', 'math.sqrt', (['magic'], {}), '(magic)\n', (3124, 3131), False, 'import math\n'), ((6360, 6383), 'math.radians', 'math.radians', (['lnglat[1]'], {}), '(lnglat[1])\n', (6372, 6383), False, 'import math\n'), ((6708, 6729), 'math.degrees', 'math.degrees', (['lat_rad'], {}), '(lat_rad)\n', (6720, 6729), False, 'import math\n'), ((8600, 8611), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (8608, 8611), True, 'import numpy as np\n'), ((3648, 3704), 'math.sqrt', 'math.sqrt', (['(lnglat[0] * lnglat[0] + lnglat[1] * lnglat[1])'], {}), '(lnglat[0] * lnglat[0] + lnglat[1] * lnglat[1])\n', (3657, 3704), False, 'import math\n'), ((3763, 3795), 'math.atan2', 'math.atan2', (['lnglat[1]', 'lnglat[0]'], {}), '(lnglat[1], lnglat[0])\n', (3773, 3795), False, 'import math\n'), ((4507, 4531), 'math.sqrt', 'math.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (4516, 4531), False, 'import math\n'), ((4582, 4598), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (4592, 4598), False, 'import math\n'), ((4656, 4671), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (4664, 4671), False, 'import math\n'), ((4694, 4709), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (4702, 4709), False, 'import math\n'), ((6649, 6692), 'math.sinh', 'math.sinh', (['(math.pi * (1 - 2 * tiles[1] / n))'], {}), '(math.pi * (1 - 2 * tiles[1] / n))\n', (6658, 6692), False, 'import math\n'), ((8649, 8667), 'numpy.flip', 'np.flip', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (8656, 8667), True, 'import numpy as np\n'), ((3716, 3746), 'math.sin', 'math.sin', (['(lnglat[1] * cls._XPI)'], {}), '(lnglat[1] * cls._XPI)\n', (3724, 3746), False, 'import math\n'), ((3808, 3838), 'math.cos', 'math.cos', (['(lnglat[0] * cls._XPI)'], {}), '(lnglat[0] * cls._XPI)\n', (3816, 3838), False, 'import math\n'), ((3860, 3875), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3868, 3875), False, 'import math\n'), ((3905, 3920), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3913, 3920), False, 'import math\n'), ((4543, 4565), 'math.sin', 'math.sin', (['(y * cls._XPI)'], {}), '(y * cls._XPI)\n', (4551, 4565), False, 'import math\n'), ((4611, 4633), 'math.cos', 'math.cos', (['(x * cls._XPI)'], {}), '(x * cls._XPI)\n', (4619, 4633), False, 'import math\n'), ((5285, 5319), 'math.tan', 'math.tan', (['((90 + y) * cls._PI / 360)'], {}), '((90 + y) * cls._PI / 360)\n', (5293, 5319), False, 'import math\n'), ((597, 611), 'math.fabs', 'math.fabs', (['lng'], {}), '(lng)\n', (606, 611), False, 'import math\n'), ((1181, 1195), 'math.fabs', 'math.fabs', (['lng'], {}), '(lng)\n', (1190, 1195), False, 'import math\n'), ((2215, 2231), 'math.cos', 'math.cos', (['radlat'], {}), '(radlat)\n', (2223, 2231), False, 'import math\n'), ((3274, 3290), 'math.cos', 'math.cos', (['radlat'], {}), '(radlat)\n', (3282, 3290), False, 'import math\n'), ((638, 667), 'math.sin', 'math.sin', (['(6.0 * lng * cls._PI)'], {}), '(6.0 * lng * cls._PI)\n', (646, 667), False, 'import math\n'), ((695, 722), 'math.sin', 'math.sin', (['(2 * lng * cls._PI)'], {}), '(2 * lng * cls._PI)\n', (703, 722), False, 'import math\n'), ((757, 780), 'math.sin', 'math.sin', (['(lng * cls._PI)'], {}), '(lng * cls._PI)\n', (765, 
780), False, 'import math\n'), ((808, 835), 'math.sin', 'math.sin', (['(lng / 3 * cls._PI)'], {}), '(lng / 3 * cls._PI)\n', (816, 835), False, 'import math\n'), ((871, 899), 'math.sin', 'math.sin', (['(lng / 12 * cls._PI)'], {}), '(lng / 12 * cls._PI)\n', (879, 899), False, 'import math\n'), ((928, 956), 'math.sin', 'math.sin', (['(lng / 30 * cls._PI)'], {}), '(lng / 30 * cls._PI)\n', (936, 956), False, 'import math\n'), ((1222, 1251), 'math.sin', 'math.sin', (['(6.0 * lng * cls._PI)'], {}), '(6.0 * lng * cls._PI)\n', (1230, 1251), False, 'import math\n'), ((1279, 1306), 'math.sin', 'math.sin', (['(2 * lng * cls._PI)'], {}), '(2 * lng * cls._PI)\n', (1287, 1306), False, 'import math\n'), ((1341, 1364), 'math.sin', 'math.sin', (['(lat * cls._PI)'], {}), '(lat * cls._PI)\n', (1349, 1364), False, 'import math\n'), ((1392, 1419), 'math.sin', 'math.sin', (['(lat / 3 * cls._PI)'], {}), '(lat / 3 * cls._PI)\n', (1400, 1419), False, 'import math\n'), ((1455, 1483), 'math.sin', 'math.sin', (['(lat / 12 * cls._PI)'], {}), '(lat / 12 * cls._PI)\n', (1463, 1483), False, 'import math\n'), ((1512, 1540), 'math.sin', 'math.sin', (['(lat * cls._PI / 30)'], {}), '(lat * cls._PI / 30)\n', (1520, 1540), False, 'import math\n'), ((6046, 6073), 'math.exp', 'math.exp', (['(y * cls._PI / 180)'], {}), '(y * cls._PI / 180)\n', (6054, 6073), False, 'import math\n'), ((6418, 6435), 'math.tan', 'math.tan', (['lat_rad'], {}), '(lat_rad)\n', (6426, 6435), False, 'import math\n')]
|
"""
Some code from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
import os
from time import gmtime, strftime
#pp = pprint.PrettyPrinter()
#get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def load_data(image_path, flip=False, is_test=False, image_size = 128):
img = load_image(image_path)
img = preprocess_img(img, img_size=image_size, flip=flip, is_test=is_test)
img = img/127.5 - 1.
if len(img.shape)<3:
img = np.expand_dims(img, axis=2)
return img
def load_image(image_path):
img = imread(image_path)
return img
def preprocess_img(img, img_size=128, flip=False, is_test=False):
img = scipy.misc.imresize(img, [img_size, img_size])
if (not is_test) and flip and np.random.random() > 0.5:
img = np.fliplr(img)
return img
def get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale = False):
return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w)
def save_images(images, size, image_path):
dir = os.path.dirname(image_path)
if not os.path.exists(dir):
os.makedirs(dir)
return imsave(inverse_transform(images), size, image_path)
def imread(path, is_grayscale = False):
if (is_grayscale):
return scipy.misc.imread(path, flatten = True)#.astype(np.float)
else:
return scipy.misc.imread(path)#.astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if len(images.shape) < 4:
img = np.zeros((h * size[0], w * size[1], 1))
images = np.expand_dims(images, axis = 3)
else:
img = np.zeros((h * size[0], w * size[1], images.shape[3]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = image
if images.shape[3] ==1:
return np.concatenate([img,img,img],axis=2)
else:
return img.astype(np.uint8)
def imsave(images, size, path):
return scipy.misc.imsave(path, merge(images, size))
def transform(image, npx=64, is_crop=True, resize_w=64):
# npx : # of pixels width/height of image
if is_crop:
cropped_image = center_crop(image, npx, resize_w=resize_w)
else:
cropped_image = image
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return ((images+1.)*127.5)
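# Hypothetical usage sketch (added for illustration, not part of the original file): tiling a
# batch of four 8x8 RGB images into a 2x2 grid with merge(); the random batch is a placeholder.
if __name__ == '__main__':
    demo_batch = np.random.randint(0, 256, size=(4, 8, 8, 3))
    demo_grid = merge(demo_batch, (2, 2))
    print(demo_grid.shape)  # expected: (16, 16, 3)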
|
[
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"numpy.expand_dims",
"numpy.zeros",
"numpy.fliplr",
"numpy.random.random",
"numpy.array",
"numpy.concatenate"
] |
[((1157, 1184), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (1172, 1184), False, 'import os\n'), ((584, 611), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (598, 611), True, 'import numpy as np\n'), ((898, 912), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (907, 912), True, 'import numpy as np\n'), ((1196, 1215), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1210, 1215), False, 'import os\n'), ((1225, 1241), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (1236, 1241), False, 'import os\n'), ((1693, 1732), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1], 1)'], {}), '((h * size[0], w * size[1], 1))\n', (1701, 1732), True, 'import numpy as np\n'), ((1750, 1780), 'numpy.expand_dims', 'np.expand_dims', (['images'], {'axis': '(3)'}), '(images, axis=3)\n', (1764, 1780), True, 'import numpy as np\n'), ((1807, 1860), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1], images.shape[3])'], {}), '((h * size[0], w * size[1], images.shape[3]))\n', (1815, 1860), True, 'import numpy as np\n'), ((2043, 2082), 'numpy.concatenate', 'np.concatenate', (['[img, img, img]'], {'axis': '(2)'}), '([img, img, img], axis=2)\n', (2057, 2082), True, 'import numpy as np\n'), ((858, 876), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (874, 876), True, 'import numpy as np\n'), ((2453, 2476), 'numpy.array', 'np.array', (['cropped_image'], {}), '(cropped_image)\n', (2461, 2476), True, 'import numpy as np\n')]
|
import numpy as np
import yaml, pickle, os, librosa, argparse
from concurrent.futures import ThreadPoolExecutor as PE
from collections import deque
from threading import Thread
from tqdm import tqdm
from Audio import Audio_Prep, Mel_Generate
from yin import pitch_calc
with open('Hyper_Parameters.yaml') as f:
hp_Dict = yaml.load(f, Loader=yaml.Loader)
using_Extension = [x.upper() for x in ['.wav', '.m4a', '.flac']]
def Pitch_Generate(audio):
pitch = pitch_calc(
sig= audio,
sr= hp_Dict['Sound']['Sample_Rate'],
w_len= hp_Dict['Sound']['Frame_Length'],
w_step= hp_Dict['Sound']['Frame_Shift'],
confidence_threshold= hp_Dict['Sound']['Confidence_Threshold'],
gaussian_smoothing_sigma = hp_Dict['Sound']['Gaussian_Smoothing_Sigma']
)
return (pitch - np.min(pitch)) / (np.max(pitch) - np.min(pitch) + 1e-7)
def Pattern_Generate(audio= None, path= None, keyword_Index_Dict= None, top_db= 60, reverse= False, invert= False):
audio = audio if not audio is None else Audio_Prep(path, hp_Dict['Sound']['Sample_Rate'], top_db)
if reverse:
audio = audio[::-1]
if invert:
audio = -audio
mel = Mel_Generate(
audio= audio,
sample_rate= hp_Dict['Sound']['Sample_Rate'],
num_frequency= hp_Dict['Sound']['Spectrogram_Dim'],
num_mel= hp_Dict['Sound']['Mel_Dim'],
window_length= hp_Dict['Sound']['Frame_Length'],
hop_length= hp_Dict['Sound']['Frame_Shift'],
mel_fmin= hp_Dict['Sound']['Mel_F_Min'],
mel_fmax= hp_Dict['Sound']['Mel_F_Max'],
max_abs_value= hp_Dict['Sound']['Max_Abs_Mel']
)
pitch = Pitch_Generate(audio)
singer_ID = None
if not keyword_Index_Dict is None:
for keyword, index in keyword_Index_Dict.items():
if keyword in path:
singer_ID = index
break
if singer_ID is None:
raise ValueError('No keyword in keyword_Index_Dict.')
return audio, mel, pitch, singer_ID
def Pattern_File_Generate(path, keyword_Index_Dict, dataset, file_Prefix='', top_db= 60):
for reverse in [False, True]:
for invert in [False, True]:
sig, mel, pitch, singer_ID = Pattern_Generate(
path= path,
keyword_Index_Dict= keyword_Index_Dict,
top_db= top_db,
reverse= reverse,
invert= invert
)
new_Pattern_Dict = {
'Signal': sig.astype(np.float32),
'Mel': mel.astype(np.float32),
'Pitch': pitch.astype(np.float32),
'Singer_ID': singer_ID,
'Dataset': dataset,
}
pickle_File_Name = '{}.{}{}{}{}.PICKLE'.format(
dataset,
file_Prefix,
os.path.splitext(os.path.basename(path))[0],
'.REV' if reverse else '',
'.INV' if invert else '',
).upper()
with open(os.path.join(hp_Dict['Train']['Train_Pattern']['Path'], pickle_File_Name).replace("\\", "/"), 'wb') as f:
pickle.dump(new_Pattern_Dict, f, protocol=4)
def NUS48E_Info_Load(nus48e_Path, sex_Type):
wav_Path_List = []
singer_Dict = {}
sex_Dict = {
'ADIZ': 'F',
'JLEE': 'M',
'JTAN': 'M',
'KENN': 'M',
'MCUR': 'F',
'MPOL': 'F',
'MPUR': 'F',
'NJAT': 'F',
'PMAR': 'F',
'SAMF': 'M',
'VKOW': 'M',
'ZHIY': 'M',
}
sex_Type = sex_Type.upper()
for root, _, files in os.walk(nus48e_Path):
root = root.replace('\\', '/')
for file in files:
if root.strip().split('/')[-1].upper() != 'sing'.upper():
continue
elif not os.path.splitext(file)[1].upper() in using_Extension:
continue
path = os.path.join(root, file).replace('\\', '/')
singer = root.strip().split('/')[-2]
if sex_Type != 'B' and sex_Dict[singer] != sex_Type:
continue
wav_Path_List.append(path)
singer_Dict[path] = singer
print('NUS-48E info generated: {}'.format(len(wav_Path_List)))
return wav_Path_List, singer_Dict, list(sorted(list(set(singer_Dict.values()))))
def Metadata_Generate(keyword_Index_Dict):
new_Metadata_Dict = {
'Sample_Rate': hp_Dict['Sound']['Sample_Rate'],
'Confidence_Threshold': hp_Dict['Sound']['Confidence_Threshold'],
'Gaussian_Smoothing_Sigma': hp_Dict['Sound']['Gaussian_Smoothing_Sigma'],
'Keyword_Index_Dict': keyword_Index_Dict,
'File_List': [],
'Sig_Length_Dict': {},
'Pitch_Length_Dict': {},
'Singer_Index_Dict': {},
'Dataset_Dict': {},
}
files_TQDM = tqdm(
total= sum([len(files) for root, _, files in os.walk(hp_Dict['Train']['Train_Pattern']['Path'])]),
desc= 'Metadata'
)
for root, _, files in os.walk(hp_Dict['Train']['Train_Pattern']['Path']):
for file in files:
with open(os.path.join(root, file).replace("\\", "/"), "rb") as f:
pattern_Dict = pickle.load(f)
try:
new_Metadata_Dict['Sig_Length_Dict'][file] = pattern_Dict['Signal'].shape[0]
new_Metadata_Dict['Pitch_Length_Dict'][file] = pattern_Dict['Pitch'].shape[0]
new_Metadata_Dict['Singer_Index_Dict'][file] = pattern_Dict['Singer_ID']
new_Metadata_Dict['Dataset_Dict'][file] = pattern_Dict['Dataset']
new_Metadata_Dict['File_List'].append(file)
except:
print('File \'{}\' is not correct pattern file. This file is ignored.'.format(file))
files_TQDM.update(1)
with open(os.path.join(hp_Dict['Train']['Train_Pattern']['Path'], hp_Dict['Train']['Train_Pattern']['Metadata_File'].upper()).replace("\\", "/"), 'wb') as f:
pickle.dump(new_Metadata_Dict, f, protocol=2)
print('Metadata generate done.')
if __name__ == "__main__":
argParser = argparse.ArgumentParser()
argParser.add_argument('-nus48e', '--nus48e_path', required=False)
argParser.add_argument('-sex', '--sex_type', required= False, default= 'B')
args = argParser.parse_args()
if not args.sex_type in ['M', 'F', 'B']:
raise ValueError('Unsupported sex type. Only M, F, or B is supported')
total_Pattern_Count = 0
keyword_Index_Dict = {}
if not args.nus48e_path is None:
nus48e_File_Path_List, nus48e_Singer_Dict, nus48e_Keyword_List = NUS48E_Info_Load(
nus48e_Path= args.nus48e_path,
sex_Type= args.sex_type
)
total_Pattern_Count += len(nus48e_File_Path_List)
for index, keyword in enumerate(nus48e_Keyword_List, len(keyword_Index_Dict)):
if keyword in keyword_Index_Dict.keys():
raise ValueError('There is an overlapped keyword: \'{}\'.'.format(keyword))
keyword_Index_Dict[keyword] = index
if total_Pattern_Count == 0:
raise ValueError('Total pattern count is zero.')
os.makedirs(hp_Dict['Train']['Train_Pattern']['Path'], exist_ok= True)
if not args.nus48e_path is None:
for index, file_Path in tqdm(
enumerate(nus48e_File_Path_List),
desc= 'Pattern',
total= len(nus48e_File_Path_List)
):
Pattern_File_Generate(
file_Path,
keyword_Index_Dict,
'NUS48E',
nus48e_Singer_Dict[file_Path],
20
)
Metadata_Generate(keyword_Index_Dict)
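    # Example invocation (assumption: this script is saved as, e.g., Pattern_Generator.py):
    #   python Pattern_Generator.py -nus48e /path/to/NUS48E -sex B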
|
[
"yaml.load",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.basename",
"os.walk",
"Audio.Audio_Prep",
"Audio.Mel_Generate",
"numpy.min",
"numpy.max",
"pickle.load",
"os.path.splitext",
"yin.pitch_calc"
] |
[((326, 358), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.Loader'}), '(f, Loader=yaml.Loader)\n', (335, 358), False, 'import yaml, pickle, os, librosa, argparse\n'), ((465, 749), 'yin.pitch_calc', 'pitch_calc', ([], {'sig': 'audio', 'sr': "hp_Dict['Sound']['Sample_Rate']", 'w_len': "hp_Dict['Sound']['Frame_Length']", 'w_step': "hp_Dict['Sound']['Frame_Shift']", 'confidence_threshold': "hp_Dict['Sound']['Confidence_Threshold']", 'gaussian_smoothing_sigma': "hp_Dict['Sound']['Gaussian_Smoothing_Sigma']"}), "(sig=audio, sr=hp_Dict['Sound']['Sample_Rate'], w_len=hp_Dict[\n 'Sound']['Frame_Length'], w_step=hp_Dict['Sound']['Frame_Shift'],\n confidence_threshold=hp_Dict['Sound']['Confidence_Threshold'],\n gaussian_smoothing_sigma=hp_Dict['Sound']['Gaussian_Smoothing_Sigma'])\n", (475, 749), False, 'from yin import pitch_calc\n'), ((1190, 1590), 'Audio.Mel_Generate', 'Mel_Generate', ([], {'audio': 'audio', 'sample_rate': "hp_Dict['Sound']['Sample_Rate']", 'num_frequency': "hp_Dict['Sound']['Spectrogram_Dim']", 'num_mel': "hp_Dict['Sound']['Mel_Dim']", 'window_length': "hp_Dict['Sound']['Frame_Length']", 'hop_length': "hp_Dict['Sound']['Frame_Shift']", 'mel_fmin': "hp_Dict['Sound']['Mel_F_Min']", 'mel_fmax': "hp_Dict['Sound']['Mel_F_Max']", 'max_abs_value': "hp_Dict['Sound']['Max_Abs_Mel']"}), "(audio=audio, sample_rate=hp_Dict['Sound']['Sample_Rate'],\n num_frequency=hp_Dict['Sound']['Spectrogram_Dim'], num_mel=hp_Dict[\n 'Sound']['Mel_Dim'], window_length=hp_Dict['Sound']['Frame_Length'],\n hop_length=hp_Dict['Sound']['Frame_Shift'], mel_fmin=hp_Dict['Sound'][\n 'Mel_F_Min'], mel_fmax=hp_Dict['Sound']['Mel_F_Max'], max_abs_value=\n hp_Dict['Sound']['Max_Abs_Mel'])\n", (1202, 1590), False, 'from Audio import Audio_Prep, Mel_Generate\n'), ((3652, 3672), 'os.walk', 'os.walk', (['nus48e_Path'], {}), '(nus48e_Path)\n', (3659, 3672), False, 'import yaml, pickle, os, librosa, argparse\n'), ((5056, 5106), 'os.walk', 'os.walk', (["hp_Dict['Train']['Train_Pattern']['Path']"], {}), "(hp_Dict['Train']['Train_Pattern']['Path'])\n", (5063, 5106), False, 'import yaml, pickle, os, librosa, argparse\n'), ((6201, 6226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6224, 6226), False, 'import yaml, pickle, os, librosa, argparse\n'), ((7263, 7332), 'os.makedirs', 'os.makedirs', (["hp_Dict['Train']['Train_Pattern']['Path']"], {'exist_ok': '(True)'}), "(hp_Dict['Train']['Train_Pattern']['Path'], exist_ok=True)\n", (7274, 7332), False, 'import yaml, pickle, os, librosa, argparse\n'), ((1039, 1096), 'Audio.Audio_Prep', 'Audio_Prep', (['path', "hp_Dict['Sound']['Sample_Rate']", 'top_db'], {}), "(path, hp_Dict['Sound']['Sample_Rate'], top_db)\n", (1049, 1096), False, 'from Audio import Audio_Prep, Mel_Generate\n'), ((6072, 6117), 'pickle.dump', 'pickle.dump', (['new_Metadata_Dict', 'f'], {'protocol': '(2)'}), '(new_Metadata_Dict, f, protocol=2)\n', (6083, 6117), False, 'import yaml, pickle, os, librosa, argparse\n'), ((822, 835), 'numpy.min', 'np.min', (['pitch'], {}), '(pitch)\n', (828, 835), True, 'import numpy as np\n'), ((840, 853), 'numpy.max', 'np.max', (['pitch'], {}), '(pitch)\n', (846, 853), True, 'import numpy as np\n'), ((856, 869), 'numpy.min', 'np.min', (['pitch'], {}), '(pitch)\n', (862, 869), True, 'import numpy as np\n'), ((3177, 3221), 'pickle.dump', 'pickle.dump', (['new_Pattern_Dict', 'f'], {'protocol': '(4)'}), '(new_Pattern_Dict, f, protocol=4)\n', (3188, 3221), False, 'import yaml, pickle, os, librosa, argparse\n'), ((5245, 5259), 'pickle.load', 'pickle.load', 
(['f'], {}), '(f)\n', (5256, 5259), False, 'import yaml, pickle, os, librosa, argparse\n'), ((3954, 3978), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3966, 3978), False, 'import yaml, pickle, os, librosa, argparse\n'), ((4940, 4990), 'os.walk', 'os.walk', (["hp_Dict['Train']['Train_Pattern']['Path']"], {}), "(hp_Dict['Train']['Train_Pattern']['Path'])\n", (4947, 4990), False, 'import yaml, pickle, os, librosa, argparse\n'), ((3055, 3128), 'os.path.join', 'os.path.join', (["hp_Dict['Train']['Train_Pattern']['Path']", 'pickle_File_Name'], {}), "(hp_Dict['Train']['Train_Pattern']['Path'], pickle_File_Name)\n", (3067, 3128), False, 'import yaml, pickle, os, librosa, argparse\n'), ((5157, 5181), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (5169, 5181), False, 'import yaml, pickle, os, librosa, argparse\n'), ((2893, 2915), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2909, 2915), False, 'import yaml, pickle, os, librosa, argparse\n'), ((3856, 3878), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (3872, 3878), False, 'import yaml, pickle, os, librosa, argparse\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
assemble.py
This module finds and forms essential structure components, which are the
smallest building blocks that form every repeat in the song.
These functions ensure that each time step of a song is contained in at most
one of the song's essential structure components by checking that there are no
overlapping repeats in time. When repeats overlap, they undergo a process
where they are divided until there are only non-overlapping pieces left.
The module contains the following functions:
* breakup_overlaps_by_intersect
        Extracts the repeats encoded in input_pattern_obj (which holds the
        starting indices of the repeats) into the essential structure
        components, using bw_vec (which holds the length of each repeat).
* check_overlaps
Compares every pair of groups, determining if there are any repeats
in any pairs of the groups that overlap.
* __compare_and_cut
Compares two rows of repeats labeled RED and BLUE, and determines if
there are any overlaps in time between them. If there are overlaps,
we cut the repeats in RED and BLUE into up to 3 pieces.
* __num_of_parts
Determines the number of blocks of consecutive time steps in a list
of time steps. A block of consecutive time steps represents a
distilled section of a repeat.
* __inds_to_rows
Expands a vector containing the starting indices of a piece or two
of a repeat into a matrix representation recording when these pieces
occur in the song with 1's. All remaining entries are marked with
0's.
* __merge_based_on_length
Merges repeats that are the same length, as set by full_bandwidth,
and are repeats of the same piece of structure.
* __merge_rows
Merges rows that have at least one common repeat. These common
repeat(s) must occur at the same time step and be of a common length.
* hierarchical_structure
Distills the repeats encoded in matrix_no_overlaps (and key_no_overlaps)
to the essential structure components and then builds the hierarchical
representation. Optionally outputs visualizations of the hierarchical
representations.
"""
import numpy as np
from inspect import signature
from .search import find_all_repeats, find_complete_list_anno_only
from .utilities import reconstruct_full_block, get_annotation_lst, get_y_labels
from .transform import remove_overlaps
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
def breakup_overlaps_by_intersect(input_pattern_obj, bw_vec, thresh_bw):
"""
    Extracts the repeats encoded in input_pattern_obj (which holds the starting
    indices of the repeats) into the essential structure components, using
    bw_vec (which holds the length of each repeat). The essential structure components are the
smallest building blocks that form every repeat in the song.
Args
----
input_pattern_obj : np.ndarray
Binary matrix with 1's where repeats begin
and 0's otherwise.
bw_vec : np.ndarray
Vector containing the lengths of the repeats
encoded in input_pattern_obj.
thresh_bw : int
Smallest allowable repeat length.
Returns
-------
pattern_no_overlaps : np.ndrray
Binary matrix with 1's where repeats of
essential structure components begin.
pattern_no_overlaps_key : np.ndarray
Vector containing the lengths of the repeats
of essential structure components in
pattern_no_overlaps.
"""
sig = signature(breakup_overlaps_by_intersect)
params = sig.parameters
if len(params) < 3:
T = 0
else:
T = thresh_bw
if bw_vec.ndim == 1:
# Convert a 1D array into 2D vector
bw_vec = bw_vec[None, :].reshape(-1, 1)
# Initialize input_pattern_obj
pno = input_pattern_obj
# Sort bw_vec and pattern_no_overlaps (pno) so that we process the
# biggest pieces first
# Part 1: Sort the lengths in bw_vec in descending order
desc_bw_vec = np.sort(bw_vec)[::-1] # [::-1] reverses order
# Part 2: Sort the indices of bw_vec in descending order
bw_inds = np.flip(np.argsort(bw_vec, axis=0))
row_bw_inds = np.transpose(bw_inds).flatten()
pno = pno[row_bw_inds, :]
T_inds = np.nonzero(bw_vec == T)
T_inds = np.array(T_inds) - 1
if T_inds.size == 0:
T_inds = max(bw_vec.shape)
pno_block = reconstruct_full_block(pno, desc_bw_vec)
# Check stopping condition -- Are there overlaps?
while np.sum(np.sum(pno_block[:T_inds, :], axis=0) > 1) > 0:
# Find all overlaps by comparing the rows of repeats pairwise
overlaps_pno_block = check_overlaps(pno_block)
# Remove the rows with bandwidth T or less from consideration
overlaps_pno_block[T_inds:, ] = 0
overlaps_pno_block[:, T_inds:] = 0
# Find the first two groups of repeats that overlap, calling one group
# RED and the other group BLUE
[ri, bi] = overlaps_pno_block.nonzero()
ri = ri[0]
bi = bi[0]
# RED overlap
red = pno[ri, :]
RL = desc_bw_vec[ri, :]
# BLUE overlap
blue = pno[bi, :]
BL = desc_bw_vec[bi, :]
# Compare the repeats in RED and BLUE, cutting the repeats in those
# groups into non-overlapping pieces
union_mat, union_length = __compare_and_cut(red, RL, blue, BL)
pno = np.delete(pno, [ri, bi], axis=0)
bw_vec = np.delete(desc_bw_vec, [ri, bi], axis=0)
# Stack the new repeats
if union_mat.size != 0:
pno = np.vstack((pno, union_mat))
bw_vec = np.vstack((bw_vec, union_length))
# Check there are any repeats of length 1 that should be merged into
# other groups of repeats of length 1 and merge them if necessary
if sum(union_length == 1) > 0:
pno, bw_vec = __merge_based_on_length(pno, bw_vec, 1)
# AGAIN, Sort bw_vec and pno so that we process the biggest
# pieces first
# Part 1: Sort the lengths in bw_vec and indices in descending order
desc_bw_vec = np.sort(bw_vec, axis=0)[::-1]
bw_inds = np.flip(np.argsort(bw_vec, axis=0))
row_bw_inds = np.transpose(bw_inds).flatten()
pno = pno[row_bw_inds, :]
# Find the first row that contains repeats of length less than T and
# remove these rows from consideration during the next check of the
# stopping condition
T_inds = np.amin(desc_bw_vec == T) - 1
if T_inds < 0:
T_inds = np.array([])
else:
T_inds = np.array(T_inds) # T_inds is converted into an array
if T_inds.size == 0:
T_inds = max(desc_bw_vec.shape)
pno_block = reconstruct_full_block(pno, desc_bw_vec)
# Sort the lengths in bw_vec in ascending order
bw_vec = np.sort(desc_bw_vec, axis=0)
# Sort the indices of bw_vec in ascending order
bw_inds = np.argsort(desc_bw_vec, axis=0)
pattern_no_overlaps = pno[bw_inds, :].reshape((pno.shape[0], -1))
pattern_no_overlaps_key = bw_vec
output = (pattern_no_overlaps, pattern_no_overlaps_key)
return output
def check_overlaps(input_mat):
"""
Compares every pair of groups and determines if there are any repeats in
any pairs of the groups that overlap.
Args
----
input_mat : np.array[int]
Matrix to be checked for overlaps.
Returns
-------
overlaps_yn : np.array[bool]
Logical array where (i,j) = 1 if row i of input matrix and row j
of input matrix overlap and (i,j) = 0 elsewhere.
"""
# Get the number of rows and columns
rs = input_mat.shape[0]
ws = input_mat.shape[1]
# compare_left -- Every row of input_mat is repeated rs times to create
# a sub-matrix. We stack these sub-matrices on top of each other.
compare_left = np.zeros(((rs * rs), ws))
for i in range(rs):
compare_add = input_mat[i, :]
compare_add_mat = np.tile(compare_add, (rs, 1))
a = i * rs
b = (i + 1) * rs
compare_left[a:b, :] = compare_add_mat
# compare_right -- Stack rs copies of input_mat on top of itself
compare_right = np.tile(input_mat, (rs, 1))
# If input_mat is not binary, create binary temporary objects
compare_left = compare_left > 0
compare_right = compare_right > 0
# Empty matrix to store overlaps
compare_all = np.zeros((compare_left.shape[0], 1))
# For each row
for i in range(compare_left.shape[0]):
# Create new counter
num_overlaps = 0
for j in range(compare_left.shape[1]):
if compare_left[i, j] == 1 and compare_right[i, j] == 1:
# inc count
num_overlaps = num_overlaps + 1
# Append num_overlaps to matrix
compare_all[i, 0] = num_overlaps
compare_all = compare_all > 0
overlap_mat = np.reshape(compare_all, (rs, rs))
# If overlap_mat is symmetric, only keep the upper-triangular portion.
# If not, keep all of overlap_mat.
check_mat = np.allclose(overlap_mat, overlap_mat.T)
if check_mat:
overlap_mat = np.triu(overlap_mat, 1)
overlaps_yn = overlap_mat
return overlaps_yn
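# Illustrative example (added for clarity, not part of the original module): rows 0 and 1
# below both start a repeat at time step 0, so only the upper-triangular entry (0, 1) of
# the output is True.
#   >>> check_overlaps(np.array([[1, 0, 0], [1, 0, 0], [0, 0, 1]]))
#   array([[False,  True, False],
#          [False, False, False],
#          [False, False, False]])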
def __compare_and_cut(red, red_len, blue, blue_len):
"""
Compares two rows of repeats labeled RED and BLUE, and determines if there
    are any overlaps in time between them. If there are, then we cut the
repeats in RED and BLUE into up to 3 pieces.
Args
----
red : np.ndarray
Binary row vector encoding a set of repeats with 1's where each
repeat starts and 0's otherwise.
red_len : int
Length of repeats encoded in red.
blue : np.ndarray
Binary row vector encoding a set of repeats with 1's where each
repeat starts and 0's otherwise.
blue_len : int
Length of repeats encoded in blue.
Returns
-------
union_mat : np.ndarray
Binary matrix representation of up to three rows encoding
non-overlapping repeats cut from red and blue.
union_length : np.ndarray
Vector containing the lengths of the repeats encoded in union_mat.
"""
# Find the total time steps in red
sn = red.shape[0]
assert sn == blue.shape[0]
# Find all starting indices in red and store them as a 2d array
start_red = np.flatnonzero(red)
start_red = start_red[None, :]
# Find all starting indices in blue and store them as a 2d array
start_blue = np.flatnonzero(blue)
start_blue = start_blue[None, :]
# Determine if the rows have any intersections
red_block = reconstruct_full_block(red, red_len)
blue_block = reconstruct_full_block(blue, blue_len)
# Find the intersection of red and blue
red_block = red_block > 0
blue_block = blue_block > 0
purple_block = np.logical_and(red_block, blue_block)
# If there is any intersection between the rows, then start comparing one
# repeat in red to one repeat in blue
if purple_block.sum() > 0:
# Find the number of blocks in red and in blue
lsr = max(start_red.shape)
lsb = max(start_blue.shape)
# Build the pairs of starting indices to search, where each pair
# contains a starting index in red and a starting index in blue
red_inds = np.tile(start_red.transpose(), (lsb, 1))
blue_inds = np.tile(start_blue, (lsr, 1))
tem_blue = blue_inds[0][0]
for i in range(0, blue_inds.shape[1]):
for j in range(0, blue_inds.shape[0]):
tem_blue = np.vstack((tem_blue, blue_inds[j][i]))
tem_blue = np.delete(tem_blue, 1, 0)
compare_inds = np.concatenate((tem_blue, red_inds), axis=1)
# Initialize the output variables union_mat and union_length
union_mat = np.array([])
union_length = np.array([])
# Loop over all pairs of starting indices
for start_ind in range(0, lsr * lsb):
# Isolate one repeat in red and one repeat in blue
ri = compare_inds[start_ind, 1]
bi = compare_inds[start_ind, 0]
red_ri = np.arange(ri, ri + red_len)
blue_bi = np.arange(bi, bi + blue_len)
# Determine if the blocks intersect and call the intersection
# purple
purple = np.intersect1d(red_ri, blue_bi)
if purple.size != 0:
# Remove purple from red_ri, call it red_minus_purple
red_minus_purple = np.setdiff1d(red_ri, purple)
# If red_minus_purple is not empty, then see if there are one
# or two parts in red_minus_purple.
# Then cut purple out of all of the repeats in red.
if red_minus_purple.size != 0:
# red_length_vec will have the length(s) of the parts in
# new_red
red_start_mat, red_length_vec = __num_of_parts(
red_minus_purple, ri, start_red
)
# If there are two parts left in red_minus_purple, then
# the new variable new_red, which holds the part(s) of
# red_minus_purple, should have two rows with 1's for the
# starting indices of the resulting pieces and 0's
# elsewhere.
new_red = __inds_to_rows(red_start_mat, sn)
else:
# If red_minus_purple is empty, then set new_red and
# red_length_vec to empty
new_red = np.array([])
red_length_vec = np.array([])
# Noting that purple is only one part and in both red_ri and
# blue_bi, then we need to find where the purple starting
# indices are in all the red_ri
purple_in_red_mat, purple_length_vec = __num_of_parts(
purple, ri, start_red
)
blue_minus_purple = np.setdiff1d(blue_bi, purple)
# If blue_minus_purple is not empty, then see if there are one
# or two parts in blue_minus_purple. Then cut purple out of
# all of the repeats in blue.
if blue_minus_purple.size != 0:
blue_start_mat, blue_length_vec = __num_of_parts(
blue_minus_purple, bi, start_blue
)
new_blue = __inds_to_rows(blue_start_mat, sn)
# If there are two parts left in blue_minus_purple, then the
# new variable new_blue, which holds the part(s) of
# blue_minus_purple, should have two rows with 1's for the
# starting indices of the resulting pieces and 0's elsewhere.
else:
# If blue_minus_purple is empty, then set new_blue and
# blue_length_vec to empty
new_blue = np.array([])
# Also blue_length_vec will have the length(s) of the
# parts in new_blue.
blue_length_vec = np.array([])
# Recalling that purple is only one part and in both red_rd
# and blue_bi, then we need to find where the purple starting
# indices are in all the blue_ri
purple_in_blue_mat, purple_length = __num_of_parts(
purple, bi, start_blue
)
# Union purple_in_red_mat and purple_in_blue_mat to get
# purple_start, which stores all the purple indices
purple_start = np.union1d(purple_in_red_mat[0],
purple_in_blue_mat[0])
# Use purple_start to get new_purple with 1's where the repeats
# in the purple rows start and 0 otherwise.
new_purple = __inds_to_rows(purple_start, sn)
if new_red.size != 0 or new_blue.size != 0:
# Form the outputs
# Use the condition check to avoid errors when stacking
# an empty array
if new_red.size != 0 and new_blue.size == 0:
union_mat = np.vstack((new_red, new_purple))
union_length = np.vstack((red_length_vec,
purple_length))
elif new_red.size == 0 and new_blue.size != 0:
union_mat = np.vstack((new_blue, new_purple))
union_length = np.vstack((blue_length_vec,
purple_length))
else:
union_mat = np.vstack((new_red, new_blue, new_purple))
union_length = np.vstack(
(red_length_vec, blue_length_vec, purple_length)
)
# Merge repeats that are the same length
union_mat, union_length = __merge_based_on_length(
union_mat, union_length, union_length
)
# When we find union_mat and union_length in this group,
# we break out of the for loop to add them to our final
# output
break
elif new_red.size == 0 and new_blue.size == 0:
new_purple_block = reconstruct_full_block(
new_purple, np.array([purple_length])
)
# Only add the new repeat which has no overlaps
if max(new_purple_block[0]) < 2:
union_mat = new_purple
union_length = np.array([purple_length])
break
# Check that there are no overlaps in each row of union_mat
union_mat_add = np.empty((0, sn), int)
union_mat_add_length = np.empty((0, 1), int)
union_mat_rminds = np.empty((0, 1), int)
# Isolate one row at a time, call it union_row
for i in range(0, union_mat.shape[0]):
union_row = union_mat[i, :]
union_row_width = np.array([union_length[i]])
union_row_block = reconstruct_full_block(union_row, union_row_width)
# If there is at least one overlap, then compare and cut that row
# until there are no overlaps
if (np.sum(union_row_block[0] > 1)) > 0:
union_mat_rminds = np.vstack((union_mat_rminds, i))
union_row_new, union_row_new_length = __compare_and_cut(
union_row, union_row_width, union_row, union_row_width
)
# Add union_row_new and union_row_new_length to union_mat_add and
# union_mat_add_length, respectively
union_mat_add = np.vstack((union_mat_add, union_row_new))
union_mat_add_length = np.vstack(
(union_mat_add_length, union_row_new_length)
)
# Remove the old rows from union_mat (as well as the old lengths from
# union_length)
if union_mat_rminds.size != 0:
union_mat = np.delete(union_mat, union_mat_rminds, axis=0)
union_length = np.delete(union_length, union_mat_rminds)
# Add union_row_new and union_row_new_length to union_mat and
# union_length, respectively, such that union_mat is in order by
# lengths in union_length
if union_mat_add.size != 0:
union_mat = np.vstack((union_mat, union_mat_add))
if union_mat_add_length.size != 0:
union_length = np.vstack((np.array([union_length]).T,
union_mat_add_length))
# Make sure union_length is a 2d vector
if union_length.ndim == 1:
union_length = np.array([union_length]).T
if union_mat.size != 0:
total_array = np.hstack((union_mat, union_length))
# Sort the total_array and form the final output
total_array = total_array[np.argsort(total_array[:, -1])]
union_mat = total_array[:, 0:sn]
union_length = np.array([total_array[:, -1]]).T
output = (union_mat, union_length)
return output
def __num_of_parts(input_vec, input_start, input_all_starts):
"""
Determines the number of blocks of consecutive
time steps in a list of time steps. A block of consecutive time steps
represents a distilled section of a repeat. This distilled section will be
replicated and the starting indices of the repeats within it will be
returned.
Args
----
input_vec : np.ndarray
Vector that contains one or two parts of a repeat that are
overlap(s) in time that may need to be replicated
input_start : np.ndarray
Starting index for the part to be replicated.
input_all_starts : np.ndarray
Starting indices for replication.
Returns
-------
start_mat : np.ndarray
Array of one or two rows, containing the starting indices of the
replicated repeats.
length_vec : np.ndarray
Column vector containing the lengths of the replicated parts.
"""
# Determine where input_vec has a break
diff_vec = np.subtract(input_vec[1:], input_vec[:-1])
diff_vec = np.insert(diff_vec, 0, 1)
break_mark = np.where(diff_vec > 1)[0]
# If input_vec is consecutive
if sum(break_mark) == 0:
# Initialize start_vec and end_vec
start_vec = input_vec[0]
end_vec = input_vec[-1]
# Find the difference between the starts
add_vec = start_vec - input_start
# Find the new start of the distilled section
start_mat = input_all_starts + add_vec
# Else if input_vec has a break
else:
# Initialize start_vec and end_vec
start_vec = np.zeros((2, 1))
end_vec = np.zeros((2, 1))
# Find the start and end time step of the first part
start_vec[0] = input_vec[0]
end_vec[0] = input_vec[break_mark - 1]
# Find the start and end time step of the second part
start_vec[1] = input_vec[break_mark]
end_vec[1] = input_vec[-1]
# Find the difference between the starts
add_vec = np.array(start_vec - input_start).astype(int)
# Make sure input_all_starts contains only integers
input_all_starts = np.array(input_all_starts).astype(int)
# Create start_mat with two parts
start_mat = np.vstack(
(input_all_starts + add_vec[0], input_all_starts + add_vec[1])
)
# Get the length of the new repeats
length_vec = (end_vec - start_vec + 1).astype(int)
# Create output
output = (start_mat, length_vec)
return output
def __inds_to_rows(start_mat, row_length):
"""
Expands a vector containing the starting indices of a piece or two of a
repeat into a matrix representation recording when these pieces occur in
the song with 1's. All remaining entries are marked with 0's.
Args
----
start_mat : np.ndarray
Matrix of one or two rows, containing the starting indices.
row_length : int
Length of the rows.
Returns
-------
new_mat : np.ndarray
Matrix of one or two rows, with 1's where the starting indices
and 0's otherwise.
"""
if start_mat.ndim == 1:
# Convert a 1D array into 2D array
start_mat = start_mat[None, :]
# Initialize mat_rows and new_mat
mat_rows = start_mat.shape[0]
new_mat = np.zeros((mat_rows, row_length))
for i in range(0, mat_rows):
inds = start_mat[i, :]
# Let the starting indices be 1
new_mat[i, inds] = 1
return new_mat.astype(int)
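# Illustrative example (added for clarity, not part of the original module): two starting
# indices expanded into a single binary row of length 6.
#   >>> __inds_to_rows(np.array([0, 3]), 6)
#   array([[1, 0, 0, 1, 0, 0]])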
def __merge_based_on_length(full_mat, full_bw, target_bw):
"""
Merges repeats that are the same length, as set by full_bandwidth,
and are repeats of the same piece of structure.
Args
----
full_mat : np.ndarray
Binary matrix with ones where repeats start and zeroes otherwise.
full_bw : np.ndarray
Length of repeats encoded in input_mat.
target_bw : np.ndarray
Lengths of repeats that we seek to merge.
Returns
-------
out_mat : np.ndarray
Binary matrix with ones where repeats start and zeros otherwise
with rows of full_mat merged if appropriate.
one_length_vec : np.ndarray
Length of the repeats encoded in out_mat.
"""
# Sort the elements of full_bandwidth
temp_bandwidth = np.sort(full_bw, axis=None)
# Return the indices that would sort full_bandwidth
bnds = np.argsort(full_bw, axis=None)
temp_mat = full_mat[bnds, :]
# Find the unique elements of target_bandwidth
target_bandwidth = np.unique(target_bw)
# Number of columns
target_size = target_bandwidth.shape[0]
for i in range(1, target_size + 1):
test_bandwidth = target_bandwidth[i - 1]
# Check if temp_bandwidth is equal to test_bandwidth
inds = (temp_bandwidth == test_bandwidth)
# If the sum of all inds elements is greater than 1, then execute this
# if statement
if inds.sum() > 1:
# Isolate rows that correspond to test_bandwidth and merge them
merge_bw = temp_mat[inds, :]
merged_mat = __merge_rows(merge_bw, np.array([test_bandwidth]))
# Number of columns
bandwidth_add_size = merged_mat.shape[0]
bandwidth_add = test_bandwidth * np.ones((bandwidth_add_size,
1)).astype(int)
if np.any(inds):
# Convert the boolean array inds into an array of integers
inds = np.array(inds).astype(int)
remove_inds = np.where(inds == 1)
# Delete the rows that meet the condition set by remove_inds
temp_mat = np.delete(temp_mat, remove_inds, axis=0)
temp_bandwidth = np.delete(temp_bandwidth, remove_inds, axis=0)
# Combine rows into a single matrix
temp_mat = np.vstack((temp_mat, merged_mat))
# Indicates temp_bandwidth is an empty array
if temp_bandwidth.size == 0:
temp_bandwidth = np.concatenate(bandwidth_add)
# Indicates temp_bandwidth is not an empty array
elif temp_bandwidth.size > 0:
temp_bandwidth = np.concatenate(
(temp_bandwidth, bandwidth_add.flatten())
)
# Return the indices that would sort temp_bandwidth
bnds = np.argsort(temp_bandwidth)
# Sort the elements of temp_bandwidth
temp_bandwidth = np.sort(temp_bandwidth)
    temp_mat = temp_mat[bnds, :]
# Create output
out_mat = temp_mat
out_length_vec = temp_bandwidth
if out_length_vec.size != 1:
out_length_vec = out_length_vec.reshape(-1, 1)
output = (out_mat, out_length_vec)
return output
def __merge_rows(input_mat, input_width):
"""
Merges rows that have at least one common repeat; said common repeat(s)
must occur at the same time step and be of common length.
Args
----
input_mat : np.ndarray
Binary matrix with ones where repeats start and zeroes otherwise.
input_width : int
Length of repeats encoded in input_mat.
Returns
-------
merge_mat : np.ndarray
Binary matrix with ones where repeats start and zeroes otherwise.
"""
# Step 0: initialize temporary variables
not_merge = input_mat # Everything must be checked
merge_mat = np.empty((0, input_mat.shape[1]), int) # Nothing has been merged
merge_key = np.empty(1, int)
rows = input_mat.shape[0] # How many rows to merge?
# Step 1: has every row been checked?
while rows > 0:
# Step 2: start merge process
# Step 2a: choose first unmerged row
row2check = not_merge[0, :]
# Create a comparison matrix
# with copies of row2check stacked
# so that r2c_mat is the same
# size as the set of rows waiting
# to be merged
r2c_mat = np.kron(np.ones((rows, 1)), row2check)
# Step 2b: find indices of unmerged overlapping rows
merge_inds = np.sum(((r2c_mat + not_merge) == 2), axis=1) > 0
# Step 2c: union rows with starting indices in common with row2check
# and remove those rows from input_mat
union_merge = np.sum(not_merge[merge_inds, :], axis=0) > 0
union_merge = union_merge.astype(int)
not_merge = np.delete(not_merge, np.where(merge_inds == 1), 0)
# Step 2d: check that newly merged rows do not cause overlaps within
# row
# If there are conflicts, rerun compare_and_cut
merge_block = reconstruct_full_block(union_merge, input_width)
if np.max(merge_block) > 1:
(union_merge, union_merge_key) = __compare_and_cut(
union_merge, input_width, union_merge, input_width
)
else:
union_merge_key = input_width
# Step 2e: add unions to merge_mat and merge_key
merge_mat = np.vstack((merge_mat, union_merge))
merge_key = np.vstack((merge_key, union_merge_key))
# Step 3: reinitialize rs for stopping condition
rows = not_merge.shape[0]
if np.ndim(merge_mat) == 1:
# Make sure the output is a 2d array
merge_mat = np.array([merge_mat])
return merge_mat.astype(int)
def hierarchical_structure(matrix_no_overlaps, key_no_overlaps, sn, vis=False):
"""
Distills the repeats encoded in matrix_no_overlaps (and key_no_overlaps)
to the essential structure components and then builds the hierarchical
representation. Optionally shows visualizations of the hierarchical structure
via the vis argument.
Args
-----
matrix_no_overlaps : np.ndarray[int]
Binary matrix with 1's where repeats begin and 0's otherwise.
key_no_overlaps : np.ndarray[int]
Vector containing the lengths of the repeats encoded in matrix_no_overlaps.
sn : int
Song length, which is the number of audio shingles.
vis : bool
Shows visualizations if True (default = False).
Returns
-----
full_visualization : np.ndarray[int]
Binary matrix representation for full_matrix_no_overlaps
with blocks of 1's equal to the length's prescribed
in full_key.
full_key : np.ndarray[int]
Vector containing the lengths of the hierarchical
structure encoded in full_matrix_no_overlaps.
full_matrix_no_overlaps : np.ndarray[int]
Binary matrix with 1's where hierarchical
structure begins and 0's otherwise.
full_anno_lst : np.ndarray[int]
Vector containing the annotation markers of the
hierarchical structure encoded in each row of
full_matrix_no_overlaps.
"""
breakup_tuple = breakup_overlaps_by_intersect(matrix_no_overlaps, key_no_overlaps, 0)
# Using pno and pno_key, we build a vector that tells us the order of the
# repeats of the essential structure components
pno = breakup_tuple[0]
pno_key = breakup_tuple[1]
# Get the block representation for pno, called pno_block
pno_block = reconstruct_full_block(pno, pno_key)
if vis:
# IMAGE 1 construction
pno_anno = get_annotation_lst(pno_key)
pno_y_labels = get_y_labels(pno_key, pno_anno)
num_pno_rows = np.size(pno, axis=0)
twos = np.full((num_pno_rows, sn), 2, dtype=int)
vis_array = twos - (pno_block + pno)
fig, ax = plt.subplots(1, 1)
sdm = ax.imshow(vis_array, cmap="gray", aspect=10)
plt.title("Essential Structure Components")
# Set the number of ticks and set tick intervals to be equal
ax.set_yticks(np.arange(0,np.size(pno_y_labels)-1))
        # Set the ticklabels along the y axis and remove 0 in pno_y_labels
ax.set_yticklabels(pno_y_labels[1:])
plt.show()
# Assign a unique (nonzero) number for each row in PNO. We refer these
# unique numbers COLORS.
num_colors = pno.shape[0]
num_timesteps = pno.shape[1]
# Create unique color identifier for num_colors
color_lst = np.arange(1, num_colors + 1)
# Turn it into a column
color_lst = color_lst.reshape(np.size(color_lst), 1)
color_mat = np.tile(color_lst, (1, num_timesteps))
# For each time step in row i that equals 1, change the value at that time
# step to i
pno_color = color_mat * pno
pno_color_vec = pno_color.sum(axis=0)
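    # pno_color_vec[t] holds the color of the component whose repeat starts at
    # time step t, and 0 if no repeat starts there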
# Find where repeats exist in time, paying special attention to the starts
# and ends of each repeat of an essential structure component
# take sums down columns --- conv to logical
pno_block_vec = (np.sum(pno_block, axis=0)) > 0
pno_block_vec = pno_block_vec.astype(np.float32)
one_vec = pno_block_vec[0 : sn - 1] - pno_block_vec[1:sn]
# Find all the blocks of consecutive time steps that are not contained in
# any of the essential structure components
# We call these blocks zero blocks
# Shift pno_block_vec so that the zero blocks are marked at the correct
# time steps with 1's
if pno_block_vec[0] == 0:
one_vec = np.insert(one_vec, 0, 1)
elif pno_block_vec[0] == 1:
one_vec = np.insert(one_vec, 0, 0)
# Assign one new unique number to all the zero blocks
pno_color_vec[one_vec == 1] = num_colors + 1
# We are only concerned with the order that repeats of the essential
# structure components occur in. So we create a vector that only contains
# the starting indices for each repeat of the essential structure
# components.
# We isolate the starting index of each repeat of the essential structure
# components and save a binary vector with 1 at a time step if a repeat of
# any essential structure component occurs there
non_zero_inds = (pno_color_vec > 0)
num_nzi = non_zero_inds.sum(axis=0)
pno_color_inds_only = pno_color_vec[non_zero_inds]
# For indices that signals the start of a zero block, turn those indices
# back to 0
zero_inds_short = (pno_color_inds_only == (num_colors + 1))
pno_color_inds_only[zero_inds_short] = 0
# Create a binary matrix symm_pno_inds_only such that the (i,j) entry is 1
# if the following three conditions are true:
# 1) a repeat of an essential structure component is the i-th thing in
# the ordering
# 2) a repeat of an essential structure component is the j-th thing in
# the ordering
# 3) the repeat occurring in the i-th place of the ordering and the
# one occurring in the j-th place of the ordering are repeats of the
# same essential structure component.
# If any of the above conditions are not true, then the (i,j) entry of
# symm_pno_inds_only is 0.
# Turn our pattern row into a square matrix by stacking that row the
# number of times equal to the columns in that row
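    # Worked example (hypothetical values): if pno_color_inds_only were
    # [1, 2, 1, 0], the only off-diagonal 1's in symm_pno_inds_only would be at
    # (0, 2) and (2, 0), since those two positions hold repeats of the same
    # component, while every entry in row/column 3 is 0 because that index
    # marks a zero block.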
pno_io_mat = np.tile(pno_color_inds_only, (num_nzi, 1))
pno_io_mat = pno_io_mat.astype(np.float32)
pno_io_mask = (
(pno_io_mat > 0).astype(np.float32)
+ (pno_io_mat.transpose() > 0).astype(np.float32)
) == 2
symm_pno_inds_only = (
pno_io_mat.astype(np.float32) == pno_io_mat.transpose(
).astype(np.float32)
) * pno_io_mask
if vis:
# IMAGE 2
fig, ax = plt.subplots(1, 1)
sdm = ax.imshow(symm_pno_inds_only, cmap="binary", aspect=0.8)
        plt.title(
            "Threshold Self-dissimilarity matrix of " +
            "the ordering of Essential Structure Components"
        )
# this locator puts ticks at regular intervals
loc = plticker.MultipleLocator(base=1.0)
ax.yaxis.set_major_locator(loc)
ax.xaxis.set_major_locator(loc)
plt.show()
# Extract all the diagonals in symm_pno_inds_only and get pairs of
# repeated sublists in the order that repeats of essential structure
# components.
# These pairs of repeated sublists are the basis of our hierarchical
# representation.
nzi_lst = find_all_repeats(symm_pno_inds_only, np.arange(1, num_nzi + 1))
remove_inds = (nzi_lst[:, 0] == nzi_lst[:, 2])
# Remove any pairs of repeats that are two copies of the same repeat (i.e.
# a pair (A,B) where A == B)
if np.any(remove_inds):
remove_inds = np.array(remove_inds).astype(int)
remove = np.where(remove_inds == 1)
nzi_lst = np.delete(nzi_lst, remove, axis=0)
# Add the annotation markers to the pairs in nzi_lst
nzi_lst_anno = find_complete_list_anno_only(nzi_lst, num_nzi)
# Remove the overlaps
output_tuple = remove_overlaps(nzi_lst_anno, num_nzi)
(nzi_matrix_no_overlaps, nzi_key_no_overlaps) = output_tuple[1:3]
# Reconstruct full block
nzi_pattern_block = reconstruct_full_block(nzi_matrix_no_overlaps, nzi_key_no_overlaps)
nzi_rows = nzi_pattern_block.shape[0]
if vis:
# IMAGE 3
fig, ax = plt.subplots(1, 1)
sdm = ax.imshow(nzi_pattern_block, cmap="binary", aspect=0.8)
        plt.title(
            "Repeated ordered sublists of the " +
            "Essential Structure Components"
        )
# This locator puts ticks at regular intervals
loc = plticker.MultipleLocator(base=1.0)
ax.yaxis.set_major_locator(loc)
ax.xaxis.set_major_locator(loc)
plt.show()
# IMAGE 4
fig, ax = plt.subplots(1, 1)
sdm = ax.imshow((nzi_pattern_block + nzi_matrix_no_overlaps), cmap="binary",
aspect=0.8)
        plt.title(
            "Repeated ordered sublists of the " +
            "Essential Structure Components " +
            "with leading index highlighted"
        )
loc = plticker.MultipleLocator(
base=1.0
) # This locator puts ticks at regular intervals
ax.yaxis.set_major_locator(loc)
ax.xaxis.set_major_locator(loc)
plt.show()
nzi_rows = nzi_pattern_block.shape[0]
# Find where all blocks start and end
pattern_starts = np.nonzero(non_zero_inds)[0]
pattern_ends = np.array([pattern_starts[1:] - 1])
pattern_ends = np.insert(pattern_ends, np.shape(pattern_ends)[1], sn - 1)
pattern_lengths = np.array(pattern_ends - pattern_starts + 1)
full_visualization = np.zeros((nzi_rows, sn), dtype=int)
full_matrix_no_overlaps = np.zeros((nzi_rows, sn), dtype=int)
for i in range(0, num_nzi):
repeated_sect = nzi_pattern_block[:, i].reshape(
np.shape(nzi_pattern_block)[0], 1
)
full_visualization[:,
pattern_starts[i]: pattern_ends[i] + 1] = np.tile(
repeated_sect, (1, pattern_lengths[i])
)
full_matrix_no_overlaps[:, pattern_starts[i]] = nzi_matrix_no_overlaps[:, i]
# Get full_key, the matching bandwidth key for full_matrix_no_overlaps
full_key = np.zeros((nzi_rows, 1), dtype=int)
find_key_mat = full_visualization + full_matrix_no_overlaps
for i in range(0, nzi_rows):
one_start = np.where(find_key_mat[i, :] == 2)[0][0]
temp_row = find_key_mat[i, :]
temp_row[0 : one_start + 1] = 1
        # Guard against rows with no remaining 0 (or 2): indexing [0][0] on an
        # empty result would raise an IndexError, so fall back to sn instead
        zero_inds = np.where(temp_row == 0)[0]
        find_zero = zero_inds[0] if zero_inds.size > 0 else sn
        two_inds = np.where(temp_row == 2)[0]
        find_two = two_inds[0] if two_inds.size > 0 else sn
one_end = np.minimum(find_zero, find_two)
full_key[i] = one_end - one_start
full_key_inds = np.argsort(full_key, axis=0)
# Switch to row
full_key_inds = full_key_inds[:, 0]
full_key = np.sort(full_key, axis=0)
full_visualization = full_visualization[full_key_inds, :]
full_matrix_no_overlaps = full_matrix_no_overlaps[full_key_inds, :]
# Remove rows of our hierarchical representation that contain only one
# repeat
inds_remove = np.where(np.sum(full_matrix_no_overlaps, 1) <= 1)
full_key = np.delete(full_key, inds_remove, axis=0)
full_matrix_no_overlaps = np.delete(full_matrix_no_overlaps, inds_remove, axis=0)
full_visualization = np.delete(full_visualization, inds_remove, axis=0)
full_anno_lst = get_annotation_lst(full_key)
output = (full_visualization, full_key, full_matrix_no_overlaps, full_anno_lst)
if vis:
# IMAGE 5
full_anno_lst = get_annotation_lst(full_key)
vis_y_labels = get_y_labels(full_key, full_anno_lst)
num_vis_rows = np.size(full_visualization, axis=0)
twos = np.full((num_vis_rows, sn), 2, dtype=int)
vis_array = twos - (full_visualization + full_matrix_no_overlaps)
fig, ax = plt.subplots(1, 1)
sdm = ax.imshow(vis_array, cmap="gray", aspect=5)
plt.title("Complete Aligned Hierarchies")
# Set the number of ticks and set tick intervals to be equal
ax.set_yticks(np.arange(0,np.size(vis_y_labels)-1))
# Set the ticklabels along the y axis and remove 0 in vis_y_labels
ax.set_yticklabels(vis_y_labels[1:])
plt.show()
return output
|
[
"matplotlib.pyplot.title",
"numpy.triu",
"numpy.sum",
"numpy.amin",
"numpy.empty",
"numpy.allclose",
"numpy.ones",
"numpy.argsort",
"numpy.shape",
"numpy.arange",
"numpy.tile",
"numpy.unique",
"numpy.full",
"numpy.ndim",
"numpy.transpose",
"numpy.insert",
"numpy.max",
"inspect.signature",
"numpy.reshape",
"matplotlib.ticker.MultipleLocator",
"numpy.intersect1d",
"matplotlib.pyplot.subplots",
"numpy.union1d",
"numpy.size",
"numpy.minimum",
"matplotlib.pyplot.show",
"numpy.hstack",
"numpy.sort",
"numpy.delete",
"numpy.concatenate",
"numpy.vstack",
"numpy.subtract",
"numpy.logical_and",
"numpy.flatnonzero",
"numpy.zeros",
"numpy.setdiff1d",
"numpy.nonzero",
"numpy.any",
"numpy.where",
"numpy.array"
] |
[((3662, 3702), 'inspect.signature', 'signature', (['breakup_overlaps_by_intersect'], {}), '(breakup_overlaps_by_intersect)\n', (3671, 3702), False, 'from inspect import signature\n'), ((4414, 4437), 'numpy.nonzero', 'np.nonzero', (['(bw_vec == T)'], {}), '(bw_vec == T)\n', (4424, 4437), True, 'import numpy as np\n'), ((7022, 7050), 'numpy.sort', 'np.sort', (['desc_bw_vec'], {'axis': '(0)'}), '(desc_bw_vec, axis=0)\n', (7029, 7050), True, 'import numpy as np\n'), ((7117, 7148), 'numpy.argsort', 'np.argsort', (['desc_bw_vec'], {'axis': '(0)'}), '(desc_bw_vec, axis=0)\n', (7127, 7148), True, 'import numpy as np\n'), ((8062, 8085), 'numpy.zeros', 'np.zeros', (['(rs * rs, ws)'], {}), '((rs * rs, ws))\n', (8070, 8085), True, 'import numpy as np\n'), ((8388, 8415), 'numpy.tile', 'np.tile', (['input_mat', '(rs, 1)'], {}), '(input_mat, (rs, 1))\n', (8395, 8415), True, 'import numpy as np\n'), ((8613, 8649), 'numpy.zeros', 'np.zeros', (['(compare_left.shape[0], 1)'], {}), '((compare_left.shape[0], 1))\n', (8621, 8649), True, 'import numpy as np\n'), ((9094, 9127), 'numpy.reshape', 'np.reshape', (['compare_all', '(rs, rs)'], {}), '(compare_all, (rs, rs))\n', (9104, 9127), True, 'import numpy as np\n'), ((9260, 9299), 'numpy.allclose', 'np.allclose', (['overlap_mat', 'overlap_mat.T'], {}), '(overlap_mat, overlap_mat.T)\n', (9271, 9299), True, 'import numpy as np\n'), ((10574, 10593), 'numpy.flatnonzero', 'np.flatnonzero', (['red'], {}), '(red)\n', (10588, 10593), True, 'import numpy as np\n'), ((10716, 10736), 'numpy.flatnonzero', 'np.flatnonzero', (['blue'], {}), '(blue)\n', (10730, 10736), True, 'import numpy as np\n'), ((11061, 11098), 'numpy.logical_and', 'np.logical_and', (['red_block', 'blue_block'], {}), '(red_block, blue_block)\n', (11075, 11098), True, 'import numpy as np\n'), ((18202, 18224), 'numpy.empty', 'np.empty', (['(0, sn)', 'int'], {}), '((0, sn), int)\n', (18210, 18224), True, 'import numpy as np\n'), ((18252, 18273), 'numpy.empty', 'np.empty', (['(0, 1)', 'int'], {}), '((0, 1), int)\n', (18260, 18273), True, 'import numpy as np\n'), ((18297, 18318), 'numpy.empty', 'np.empty', (['(0, 1)', 'int'], {}), '((0, 1), int)\n', (18305, 18318), True, 'import numpy as np\n'), ((21476, 21518), 'numpy.subtract', 'np.subtract', (['input_vec[1:]', 'input_vec[:-1]'], {}), '(input_vec[1:], input_vec[:-1])\n', (21487, 21518), True, 'import numpy as np\n'), ((21534, 21559), 'numpy.insert', 'np.insert', (['diff_vec', '(0)', '(1)'], {}), '(diff_vec, 0, 1)\n', (21543, 21559), True, 'import numpy as np\n'), ((23795, 23827), 'numpy.zeros', 'np.zeros', (['(mat_rows, row_length)'], {}), '((mat_rows, row_length))\n', (23803, 23827), True, 'import numpy as np\n'), ((24796, 24823), 'numpy.sort', 'np.sort', (['full_bw'], {'axis': 'None'}), '(full_bw, axis=None)\n', (24803, 24823), True, 'import numpy as np\n'), ((24892, 24922), 'numpy.argsort', 'np.argsort', (['full_bw'], {'axis': 'None'}), '(full_bw, axis=None)\n', (24902, 24922), True, 'import numpy as np\n'), ((25031, 25051), 'numpy.unique', 'np.unique', (['target_bw'], {}), '(target_bw)\n', (25040, 25051), True, 'import numpy as np\n'), ((27930, 27968), 'numpy.empty', 'np.empty', (['(0, input_mat.shape[1])', 'int'], {}), '((0, input_mat.shape[1]), int)\n', (27938, 27968), True, 'import numpy as np\n'), ((28012, 28028), 'numpy.empty', 'np.empty', (['(1)', 'int'], {}), '(1, int)\n', (28020, 28028), True, 'import numpy as np\n'), ((32635, 32663), 'numpy.arange', 'np.arange', (['(1)', '(num_colors + 1)'], {}), '(1, num_colors + 1)\n', (32644, 32663), True, 
'import numpy as np\n'), ((32766, 32804), 'numpy.tile', 'np.tile', (['color_lst', '(1, num_timesteps)'], {}), '(color_lst, (1, num_timesteps))\n', (32773, 32804), True, 'import numpy as np\n'), ((35452, 35494), 'numpy.tile', 'np.tile', (['pno_color_inds_only', '(num_nzi, 1)'], {}), '(pno_color_inds_only, (num_nzi, 1))\n', (35459, 35494), True, 'import numpy as np\n'), ((36814, 36833), 'numpy.any', 'np.any', (['remove_inds'], {}), '(remove_inds)\n', (36820, 36833), True, 'import numpy as np\n'), ((38619, 38653), 'numpy.array', 'np.array', (['[pattern_starts[1:] - 1]'], {}), '([pattern_starts[1:] - 1])\n', (38627, 38653), True, 'import numpy as np\n'), ((38754, 38797), 'numpy.array', 'np.array', (['(pattern_ends - pattern_starts + 1)'], {}), '(pattern_ends - pattern_starts + 1)\n', (38762, 38797), True, 'import numpy as np\n'), ((38824, 38859), 'numpy.zeros', 'np.zeros', (['(nzi_rows, sn)'], {'dtype': 'int'}), '((nzi_rows, sn), dtype=int)\n', (38832, 38859), True, 'import numpy as np\n'), ((38890, 38925), 'numpy.zeros', 'np.zeros', (['(nzi_rows, sn)'], {'dtype': 'int'}), '((nzi_rows, sn), dtype=int)\n', (38898, 38925), True, 'import numpy as np\n'), ((39432, 39466), 'numpy.zeros', 'np.zeros', (['(nzi_rows, 1)'], {'dtype': 'int'}), '((nzi_rows, 1), dtype=int)\n', (39440, 39466), True, 'import numpy as np\n'), ((40043, 40071), 'numpy.argsort', 'np.argsort', (['full_key'], {'axis': '(0)'}), '(full_key, axis=0)\n', (40053, 40071), True, 'import numpy as np\n'), ((40148, 40173), 'numpy.sort', 'np.sort', (['full_key'], {'axis': '(0)'}), '(full_key, axis=0)\n', (40155, 40173), True, 'import numpy as np\n'), ((40484, 40524), 'numpy.delete', 'np.delete', (['full_key', 'inds_remove'], {'axis': '(0)'}), '(full_key, inds_remove, axis=0)\n', (40493, 40524), True, 'import numpy as np\n'), ((40556, 40611), 'numpy.delete', 'np.delete', (['full_matrix_no_overlaps', 'inds_remove'], {'axis': '(0)'}), '(full_matrix_no_overlaps, inds_remove, axis=0)\n', (40565, 40611), True, 'import numpy as np\n'), ((40637, 40687), 'numpy.delete', 'np.delete', (['full_visualization', 'inds_remove'], {'axis': '(0)'}), '(full_visualization, inds_remove, axis=0)\n', (40646, 40687), True, 'import numpy as np\n'), ((4162, 4177), 'numpy.sort', 'np.sort', (['bw_vec'], {}), '(bw_vec)\n', (4169, 4177), True, 'import numpy as np\n'), ((4293, 4319), 'numpy.argsort', 'np.argsort', (['bw_vec'], {'axis': '(0)'}), '(bw_vec, axis=0)\n', (4303, 4319), True, 'import numpy as np\n'), ((4451, 4467), 'numpy.array', 'np.array', (['T_inds'], {}), '(T_inds)\n', (4459, 4467), True, 'import numpy as np\n'), ((5567, 5599), 'numpy.delete', 'np.delete', (['pno', '[ri, bi]'], {'axis': '(0)'}), '(pno, [ri, bi], axis=0)\n', (5576, 5599), True, 'import numpy as np\n'), ((5617, 5657), 'numpy.delete', 'np.delete', (['desc_bw_vec', '[ri, bi]'], {'axis': '(0)'}), '(desc_bw_vec, [ri, bi], axis=0)\n', (5626, 5657), True, 'import numpy as np\n'), ((8177, 8206), 'numpy.tile', 'np.tile', (['compare_add', '(rs, 1)'], {}), '(compare_add, (rs, 1))\n', (8184, 8206), True, 'import numpy as np\n'), ((9341, 9364), 'numpy.triu', 'np.triu', (['overlap_mat', '(1)'], {}), '(overlap_mat, 1)\n', (9348, 9364), True, 'import numpy as np\n'), ((11604, 11633), 'numpy.tile', 'np.tile', (['start_blue', '(lsr, 1)'], {}), '(start_blue, (lsr, 1))\n', (11611, 11633), True, 'import numpy as np\n'), ((11852, 11877), 'numpy.delete', 'np.delete', (['tem_blue', '(1)', '(0)'], {}), '(tem_blue, 1, 0)\n', (11861, 11877), True, 'import numpy as np\n'), ((11901, 11945), 'numpy.concatenate', 
'np.concatenate', (['(tem_blue, red_inds)'], {'axis': '(1)'}), '((tem_blue, red_inds), axis=1)\n', (11915, 11945), True, 'import numpy as np\n'), ((12036, 12048), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12044, 12048), True, 'import numpy as np\n'), ((12072, 12084), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12080, 12084), True, 'import numpy as np\n'), ((18476, 18503), 'numpy.array', 'np.array', (['[union_length[i]]'], {}), '([union_length[i]])\n', (18484, 18503), True, 'import numpy as np\n'), ((19430, 19476), 'numpy.delete', 'np.delete', (['union_mat', 'union_mat_rminds'], {'axis': '(0)'}), '(union_mat, union_mat_rminds, axis=0)\n', (19439, 19476), True, 'import numpy as np\n'), ((19500, 19541), 'numpy.delete', 'np.delete', (['union_length', 'union_mat_rminds'], {}), '(union_length, union_mat_rminds)\n', (19509, 19541), True, 'import numpy as np\n'), ((19760, 19797), 'numpy.vstack', 'np.vstack', (['(union_mat, union_mat_add)'], {}), '((union_mat, union_mat_add))\n', (19769, 19797), True, 'import numpy as np\n'), ((20133, 20169), 'numpy.hstack', 'np.hstack', (['(union_mat, union_length)'], {}), '((union_mat, union_length))\n', (20142, 20169), True, 'import numpy as np\n'), ((21577, 21599), 'numpy.where', 'np.where', (['(diff_vec > 1)'], {}), '(diff_vec > 1)\n', (21585, 21599), True, 'import numpy as np\n'), ((22079, 22095), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (22087, 22095), True, 'import numpy as np\n'), ((22114, 22130), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (22122, 22130), True, 'import numpy as np\n'), ((22723, 22796), 'numpy.vstack', 'np.vstack', (['(input_all_starts + add_vec[0], input_all_starts + add_vec[1])'], {}), '((input_all_starts + add_vec[0], input_all_starts + add_vec[1]))\n', (22732, 22796), True, 'import numpy as np\n'), ((29485, 29520), 'numpy.vstack', 'np.vstack', (['(merge_mat, union_merge)'], {}), '((merge_mat, union_merge))\n', (29494, 29520), True, 'import numpy as np\n'), ((29541, 29580), 'numpy.vstack', 'np.vstack', (['(merge_key, union_merge_key)'], {}), '((merge_key, union_merge_key))\n', (29550, 29580), True, 'import numpy as np\n'), ((29681, 29699), 'numpy.ndim', 'np.ndim', (['merge_mat'], {}), '(merge_mat)\n', (29688, 29699), True, 'import numpy as np\n'), ((29771, 29792), 'numpy.array', 'np.array', (['[merge_mat]'], {}), '([merge_mat])\n', (29779, 29792), True, 'import numpy as np\n'), ((31850, 31870), 'numpy.size', 'np.size', (['pno'], {'axis': '(0)'}), '(pno, axis=0)\n', (31857, 31870), True, 'import numpy as np\n'), ((31886, 31927), 'numpy.full', 'np.full', (['(num_pno_rows, sn)', '(2)'], {'dtype': 'int'}), '((num_pno_rows, sn), 2, dtype=int)\n', (31893, 31927), True, 'import numpy as np\n'), ((31991, 32009), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (32003, 32009), True, 'import matplotlib.pyplot as plt\n'), ((32077, 32120), 'matplotlib.pyplot.title', 'plt.title', (['"""Essential Structure Components"""'], {}), "('Essential Structure Components')\n", (32086, 32120), True, 'import matplotlib.pyplot as plt\n'), ((32379, 32389), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32387, 32389), True, 'import matplotlib.pyplot as plt\n'), ((32727, 32745), 'numpy.size', 'np.size', (['color_lst'], {}), '(color_lst)\n', (32734, 32745), True, 'import numpy as np\n'), ((33191, 33216), 'numpy.sum', 'np.sum', (['pno_block'], {'axis': '(0)'}), '(pno_block, axis=0)\n', (33197, 33216), True, 'import numpy as np\n'), ((33657, 33681), 'numpy.insert', 'np.insert', 
(['one_vec', '(0)', '(1)'], {}), '(one_vec, 0, 1)\n', (33666, 33681), True, 'import numpy as np\n'), ((35868, 35886), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (35880, 35886), True, 'import matplotlib.pyplot as plt\n'), ((35966, 36069), 'matplotlib.pyplot.title', 'plt.title', (["('Threshold Self-dissimilarity matrix of' +\n 'the ordering Essential Structure Components')"], {}), "('Threshold Self-dissimilarity matrix of' +\n 'the ordering Essential Structure Components')\n", (35975, 36069), True, 'import matplotlib.pyplot as plt\n'), ((36169, 36203), 'matplotlib.ticker.MultipleLocator', 'plticker.MultipleLocator', ([], {'base': '(1.0)'}), '(base=1.0)\n', (36193, 36203), True, 'import matplotlib.ticker as plticker\n'), ((36294, 36304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36302, 36304), True, 'import matplotlib.pyplot as plt\n'), ((36616, 36641), 'numpy.arange', 'np.arange', (['(1)', '(num_nzi + 1)'], {}), '(1, num_nzi + 1)\n', (36625, 36641), True, 'import numpy as np\n'), ((36908, 36934), 'numpy.where', 'np.where', (['(remove_inds == 1)'], {}), '(remove_inds == 1)\n', (36916, 36934), True, 'import numpy as np\n'), ((36953, 36987), 'numpy.delete', 'np.delete', (['nzi_lst', 'remove'], {'axis': '(0)'}), '(nzi_lst, remove, axis=0)\n', (36962, 36987), True, 'import numpy as np\n'), ((37480, 37498), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (37492, 37498), True, 'import matplotlib.pyplot as plt\n'), ((37577, 37662), 'matplotlib.pyplot.title', 'plt.title', (["('Repeated ordered sublists of the' + 'Essential Structure Components')"], {}), "('Repeated ordered sublists of the' + 'Essential Structure Components'\n )\n", (37586, 37662), True, 'import matplotlib.pyplot as plt\n'), ((37761, 37795), 'matplotlib.ticker.MultipleLocator', 'plticker.MultipleLocator', ([], {'base': '(1.0)'}), '(base=1.0)\n', (37785, 37795), True, 'import matplotlib.ticker as plticker\n'), ((37886, 37896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37894, 37896), True, 'import matplotlib.pyplot as plt\n'), ((37934, 37952), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (37946, 37952), True, 'import matplotlib.pyplot as plt\n'), ((38084, 38203), 'matplotlib.pyplot.title', 'plt.title', (["('Repeated ordered sublists of the' + 'Essential Structure Components' +\n 'with leading index highlighted')"], {}), "('Repeated ordered sublists of the' +\n 'Essential Structure Components' + 'with leading index highlighted')\n", (38093, 38203), True, 'import matplotlib.pyplot as plt\n'), ((38260, 38294), 'matplotlib.ticker.MultipleLocator', 'plticker.MultipleLocator', ([], {'base': '(1.0)'}), '(base=1.0)\n', (38284, 38294), True, 'import matplotlib.ticker as plticker\n'), ((38453, 38463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38461, 38463), True, 'import matplotlib.pyplot as plt\n'), ((38571, 38596), 'numpy.nonzero', 'np.nonzero', (['non_zero_inds'], {}), '(non_zero_inds)\n', (38581, 38596), True, 'import numpy as np\n'), ((39170, 39217), 'numpy.tile', 'np.tile', (['repeated_sect', '(1, pattern_lengths[i])'], {}), '(repeated_sect, (1, pattern_lengths[i]))\n', (39177, 39217), True, 'import numpy as np\n'), ((39948, 39979), 'numpy.minimum', 'np.minimum', (['find_zero', 'find_two'], {}), '(find_zero, find_two)\n', (39958, 39979), True, 'import numpy as np\n'), ((40991, 41026), 'numpy.size', 'np.size', (['full_visualization'], {'axis': '(0)'}), '(full_visualization, axis=0)\n', (40998, 
41026), True, 'import numpy as np\n'), ((41042, 41083), 'numpy.full', 'np.full', (['(num_vis_rows, sn)', '(2)'], {'dtype': 'int'}), '((num_vis_rows, sn), 2, dtype=int)\n', (41049, 41083), True, 'import numpy as np\n'), ((41176, 41194), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (41188, 41194), True, 'import matplotlib.pyplot as plt\n'), ((41261, 41302), 'matplotlib.pyplot.title', 'plt.title', (['"""Complete Aligned Hierarchies"""'], {}), "('Complete Aligned Hierarchies')\n", (41270, 41302), True, 'import matplotlib.pyplot as plt\n'), ((41561, 41571), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (41569, 41571), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4360), 'numpy.transpose', 'np.transpose', (['bw_inds'], {}), '(bw_inds)\n', (4351, 4360), True, 'import numpy as np\n'), ((5741, 5768), 'numpy.vstack', 'np.vstack', (['(pno, union_mat)'], {}), '((pno, union_mat))\n', (5750, 5768), True, 'import numpy as np\n'), ((5790, 5823), 'numpy.vstack', 'np.vstack', (['(bw_vec, union_length)'], {}), '((bw_vec, union_length))\n', (5799, 5823), True, 'import numpy as np\n'), ((6272, 6295), 'numpy.sort', 'np.sort', (['bw_vec'], {'axis': '(0)'}), '(bw_vec, axis=0)\n', (6279, 6295), True, 'import numpy as np\n'), ((6328, 6354), 'numpy.argsort', 'np.argsort', (['bw_vec'], {'axis': '(0)'}), '(bw_vec, axis=0)\n', (6338, 6354), True, 'import numpy as np\n'), ((6644, 6669), 'numpy.amin', 'np.amin', (['(desc_bw_vec == T)'], {}), '(desc_bw_vec == T)\n', (6651, 6669), True, 'import numpy as np\n'), ((6718, 6730), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6726, 6730), True, 'import numpy as np\n'), ((6766, 6782), 'numpy.array', 'np.array', (['T_inds'], {}), '(T_inds)\n', (6774, 6782), True, 'import numpy as np\n'), ((12356, 12383), 'numpy.arange', 'np.arange', (['ri', '(ri + red_len)'], {}), '(ri, ri + red_len)\n', (12365, 12383), True, 'import numpy as np\n'), ((12406, 12434), 'numpy.arange', 'np.arange', (['bi', '(bi + blue_len)'], {}), '(bi, bi + blue_len)\n', (12415, 12434), True, 'import numpy as np\n'), ((12552, 12583), 'numpy.intersect1d', 'np.intersect1d', (['red_ri', 'blue_bi'], {}), '(red_ri, blue_bi)\n', (12566, 12583), True, 'import numpy as np\n'), ((18706, 18736), 'numpy.sum', 'np.sum', (['(union_row_block[0] > 1)'], {}), '(union_row_block[0] > 1)\n', (18712, 18736), True, 'import numpy as np\n'), ((18774, 18806), 'numpy.vstack', 'np.vstack', (['(union_mat_rminds, i)'], {}), '((union_mat_rminds, i))\n', (18783, 18806), True, 'import numpy as np\n'), ((19117, 19158), 'numpy.vstack', 'np.vstack', (['(union_mat_add, union_row_new)'], {}), '((union_mat_add, union_row_new))\n', (19126, 19158), True, 'import numpy as np\n'), ((19194, 19249), 'numpy.vstack', 'np.vstack', (['(union_mat_add_length, union_row_new_length)'], {}), '((union_mat_add_length, union_row_new_length))\n', (19203, 19249), True, 'import numpy as np\n'), ((20056, 20080), 'numpy.array', 'np.array', (['[union_length]'], {}), '([union_length])\n', (20064, 20080), True, 'import numpy as np\n'), ((20261, 20291), 'numpy.argsort', 'np.argsort', (['total_array[:, -1]'], {}), '(total_array[:, -1])\n', (20271, 20291), True, 'import numpy as np\n'), ((20357, 20387), 'numpy.array', 'np.array', (['[total_array[:, -1]]'], {}), '([total_array[:, -1]])\n', (20365, 20387), True, 'import numpy as np\n'), ((25893, 25905), 'numpy.any', 'np.any', (['inds'], {}), '(inds)\n', (25899, 25905), True, 'import numpy as np\n'), ((26380, 26413), 'numpy.vstack', 'np.vstack', (['(temp_mat, merged_mat)'], 
{}), '((temp_mat, merged_mat))\n', (26389, 26413), True, 'import numpy as np\n'), ((26892, 26918), 'numpy.argsort', 'np.argsort', (['temp_bandwidth'], {}), '(temp_bandwidth)\n', (26902, 26918), True, 'import numpy as np\n'), ((26999, 27022), 'numpy.sort', 'np.sort', (['temp_bandwidth'], {}), '(temp_bandwidth)\n', (27006, 27022), True, 'import numpy as np\n'), ((28478, 28496), 'numpy.ones', 'np.ones', (['(rows, 1)'], {}), '((rows, 1))\n', (28485, 28496), True, 'import numpy as np\n'), ((28592, 28632), 'numpy.sum', 'np.sum', (['(r2c_mat + not_merge == 2)'], {'axis': '(1)'}), '(r2c_mat + not_merge == 2, axis=1)\n', (28598, 28632), True, 'import numpy as np\n'), ((28788, 28828), 'numpy.sum', 'np.sum', (['not_merge[merge_inds, :]'], {'axis': '(0)'}), '(not_merge[merge_inds, :], axis=0)\n', (28794, 28828), True, 'import numpy as np\n'), ((28920, 28945), 'numpy.where', 'np.where', (['(merge_inds == 1)'], {}), '(merge_inds == 1)\n', (28928, 28945), True, 'import numpy as np\n'), ((29181, 29200), 'numpy.max', 'np.max', (['merge_block'], {}), '(merge_block)\n', (29187, 29200), True, 'import numpy as np\n'), ((33732, 33756), 'numpy.insert', 'np.insert', (['one_vec', '(0)', '(0)'], {}), '(one_vec, 0, 0)\n', (33741, 33756), True, 'import numpy as np\n'), ((38697, 38719), 'numpy.shape', 'np.shape', (['pattern_ends'], {}), '(pattern_ends)\n', (38705, 38719), True, 'import numpy as np\n'), ((39765, 39783), 'numpy.size', 'np.size', (['find_zero'], {}), '(find_zero)\n', (39772, 39783), True, 'import numpy as np\n'), ((39879, 39896), 'numpy.size', 'np.size', (['find_two'], {}), '(find_two)\n', (39886, 39896), True, 'import numpy as np\n'), ((40428, 40462), 'numpy.sum', 'np.sum', (['full_matrix_no_overlaps', '(1)'], {}), '(full_matrix_no_overlaps, 1)\n', (40434, 40462), True, 'import numpy as np\n'), ((4663, 4700), 'numpy.sum', 'np.sum', (['pno_block[:T_inds, :]'], {'axis': '(0)'}), '(pno_block[:T_inds, :], axis=0)\n', (4669, 4700), True, 'import numpy as np\n'), ((6378, 6399), 'numpy.transpose', 'np.transpose', (['bw_inds'], {}), '(bw_inds)\n', (6390, 6399), True, 'import numpy as np\n'), ((11794, 11832), 'numpy.vstack', 'np.vstack', (['(tem_blue, blue_inds[j][i])'], {}), '((tem_blue, blue_inds[j][i]))\n', (11803, 11832), True, 'import numpy as np\n'), ((12723, 12751), 'numpy.setdiff1d', 'np.setdiff1d', (['red_ri', 'purple'], {}), '(red_ri, purple)\n', (12735, 12751), True, 'import numpy as np\n'), ((14251, 14280), 'numpy.setdiff1d', 'np.setdiff1d', (['blue_bi', 'purple'], {}), '(blue_bi, purple)\n', (14263, 14280), True, 'import numpy as np\n'), ((15906, 15961), 'numpy.union1d', 'np.union1d', (['purple_in_red_mat[0]', 'purple_in_blue_mat[0]'], {}), '(purple_in_red_mat[0], purple_in_blue_mat[0])\n', (15916, 15961), True, 'import numpy as np\n'), ((22487, 22520), 'numpy.array', 'np.array', (['(start_vec - input_start)'], {}), '(start_vec - input_start)\n', (22495, 22520), True, 'import numpy as np\n'), ((22621, 22647), 'numpy.array', 'np.array', (['input_all_starts'], {}), '(input_all_starts)\n', (22629, 22647), True, 'import numpy as np\n'), ((25618, 25644), 'numpy.array', 'np.array', (['[test_bandwidth]'], {}), '([test_bandwidth])\n', (25626, 25644), True, 'import numpy as np\n'), ((26062, 26081), 'numpy.where', 'np.where', (['(inds == 1)'], {}), '(inds == 1)\n', (26070, 26081), True, 'import numpy as np\n'), ((26187, 26227), 'numpy.delete', 'np.delete', (['temp_mat', 'remove_inds'], {'axis': '(0)'}), '(temp_mat, remove_inds, axis=0)\n', (26196, 26227), True, 'import numpy as np\n'), ((26261, 26307), 
'numpy.delete', 'np.delete', (['temp_bandwidth', 'remove_inds'], {'axis': '(0)'}), '(temp_bandwidth, remove_inds, axis=0)\n', (26270, 26307), True, 'import numpy as np\n'), ((26546, 26575), 'numpy.concatenate', 'np.concatenate', (['bandwidth_add'], {}), '(bandwidth_add)\n', (26560, 26575), True, 'import numpy as np\n'), ((36857, 36878), 'numpy.array', 'np.array', (['remove_inds'], {}), '(remove_inds)\n', (36865, 36878), True, 'import numpy as np\n'), ((39032, 39059), 'numpy.shape', 'np.shape', (['nzi_pattern_block'], {}), '(nzi_pattern_block)\n', (39040, 39059), True, 'import numpy as np\n'), ((39585, 39618), 'numpy.where', 'np.where', (['(find_key_mat[i, :] == 2)'], {}), '(find_key_mat[i, :] == 2)\n', (39593, 39618), True, 'import numpy as np\n'), ((39723, 39746), 'numpy.where', 'np.where', (['(temp_row == 0)'], {}), '(temp_row == 0)\n', (39731, 39746), True, 'import numpy as np\n'), ((39837, 39860), 'numpy.where', 'np.where', (['(temp_row == 2)'], {}), '(temp_row == 2)\n', (39845, 39860), True, 'import numpy as np\n'), ((13821, 13833), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13829, 13833), True, 'import numpy as np\n'), ((13871, 13883), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13879, 13883), True, 'import numpy as np\n'), ((15222, 15234), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15230, 15234), True, 'import numpy as np\n'), ((15388, 15400), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (15396, 15400), True, 'import numpy as np\n'), ((19871, 19895), 'numpy.array', 'np.array', (['[union_length]'], {}), '([union_length])\n', (19879, 19895), True, 'import numpy as np\n'), ((32225, 32246), 'numpy.size', 'np.size', (['pno_y_labels'], {}), '(pno_y_labels)\n', (32232, 32246), True, 'import numpy as np\n'), ((41407, 41428), 'numpy.size', 'np.size', (['vis_y_labels'], {}), '(vis_y_labels)\n', (41414, 41428), True, 'import numpy as np\n'), ((16522, 16554), 'numpy.vstack', 'np.vstack', (['(new_red, new_purple)'], {}), '((new_red, new_purple))\n', (16531, 16554), True, 'import numpy as np\n'), ((16594, 16636), 'numpy.vstack', 'np.vstack', (['(red_length_vec, purple_length)'], {}), '((red_length_vec, purple_length))\n', (16603, 16636), True, 'import numpy as np\n'), ((25777, 25809), 'numpy.ones', 'np.ones', (['(bandwidth_add_size, 1)'], {}), '((bandwidth_add_size, 1))\n', (25784, 25809), True, 'import numpy as np\n'), ((26005, 26019), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (26013, 26019), True, 'import numpy as np\n'), ((16791, 16824), 'numpy.vstack', 'np.vstack', (['(new_blue, new_purple)'], {}), '((new_blue, new_purple))\n', (16800, 16824), True, 'import numpy as np\n'), ((16864, 16907), 'numpy.vstack', 'np.vstack', (['(blue_length_vec, purple_length)'], {}), '((blue_length_vec, purple_length))\n', (16873, 16907), True, 'import numpy as np\n'), ((17021, 17063), 'numpy.vstack', 'np.vstack', (['(new_red, new_blue, new_purple)'], {}), '((new_red, new_blue, new_purple))\n', (17030, 17063), True, 'import numpy as np\n'), ((17103, 17162), 'numpy.vstack', 'np.vstack', (['(red_length_vec, blue_length_vec, purple_length)'], {}), '((red_length_vec, blue_length_vec, purple_length))\n', (17112, 17162), True, 'import numpy as np\n'), ((17806, 17831), 'numpy.array', 'np.array', (['[purple_length]'], {}), '([purple_length])\n', (17814, 17831), True, 'import numpy as np\n'), ((18061, 18086), 'numpy.array', 'np.array', (['[purple_length]'], {}), '([purple_length])\n', (18069, 18086), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import numpy as np
import healpy as hp
from matplotlib import pyplot as plt
import geometry
# given nside | number of pixels | resolution (pixel size in degree) | Maximum angular distance (degree) | pixel area (in square degrees)
# 1 | 12 | 58.6323 | 48.1897 | 3437.746771
# 2 | 48 | 29.3162 | 27.5857 | 859.436693
# 4 | 192 | 14.6581 | 14.5722 | 214.859173
# 8 | 768 | 7.3290 | 7.4728 | 53.714793
# 16 | 3072 | 3.6645 | 3.7824 | 13.428698
# 32 | 12288 | 1.8323 | 1.9026 | 3.357175
# 64 | 49152 | 0.9161 | 0.9541 | 0.839294
# 128 | 196608 | 0.4581 | 0.4778 | 0.209823
# 256 | 786432 | 0.2290 | 0.2391 | 0.052456
# 512 | 3145728 | 0.1145 | 0.1196 | 0.013114
# 1024 | 12582912 | 0.0573 | 0.0598 | 0.003278
def calculate_nside_resolution():
NSIDE = [2**i for i in range(11)]
print('given nside | number of pixels | resolution (pixel size in degree) | Maximum angular distance (degree) | pixel area (in square degrees)')
for nside in NSIDE:
npix = hp.nside2npix(nside)
resol = np.rad2deg(hp.nside2resol(nside))
maxrad = np.rad2deg(hp.max_pixrad(nside))
pixarea = hp.nside2pixarea(nside, degrees=True)
print('{0:^11} | {1:^16} | {2:^33.4f} | {3:^33.4f} | {4:^30.6f}'.format(nside, npix, resol, maxrad, pixarea))
if __name__ == '__main__':
calculate_nside_resolution()
# generate random distribution of Euler angles
v = np.random.randn(100,3)
v = v / np.linalg.norm(v, axis=1).repeat(3).reshape(-1,3)
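    # normalize each row of v to unit length before converting to Euler angles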
EA = geometry.genEA(v)
phi = EA[:, 0]
# phi += 2 * np.pi
theta = EA[:, 1]
    # visualization
hp.mollview()
hp.visufunc.projscatter(theta, phi, 'r.')
hp.graticule()
plt.show()
|
[
"geometry.genEA",
"healpy.max_pixrad",
"healpy.visufunc.projscatter",
"matplotlib.pyplot.show",
"healpy.mollview",
"numpy.random.randn",
"healpy.graticule",
"healpy.nside2pixarea",
"healpy.nside2npix",
"numpy.linalg.norm",
"healpy.nside2resol"
] |
[((2358, 2381), 'numpy.random.randn', 'np.random.randn', (['(100)', '(3)'], {}), '(100, 3)\n', (2373, 2381), True, 'import numpy as np\n'), ((2453, 2470), 'geometry.genEA', 'geometry.genEA', (['v'], {}), '(v)\n', (2467, 2470), False, 'import geometry\n'), ((2557, 2570), 'healpy.mollview', 'hp.mollview', ([], {}), '()\n', (2568, 2570), True, 'import healpy as hp\n'), ((2575, 2616), 'healpy.visufunc.projscatter', 'hp.visufunc.projscatter', (['theta', 'phi', '"""r."""'], {}), "(theta, phi, 'r.')\n", (2598, 2616), True, 'import healpy as hp\n'), ((2621, 2635), 'healpy.graticule', 'hp.graticule', ([], {}), '()\n', (2633, 2635), True, 'import healpy as hp\n'), ((2640, 2650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2648, 2650), True, 'from matplotlib import pyplot as plt\n'), ((1942, 1962), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (1955, 1962), True, 'import healpy as hp\n'), ((2081, 2118), 'healpy.nside2pixarea', 'hp.nside2pixarea', (['nside'], {'degrees': '(True)'}), '(nside, degrees=True)\n', (2097, 2118), True, 'import healpy as hp\n'), ((1990, 2011), 'healpy.nside2resol', 'hp.nside2resol', (['nside'], {}), '(nside)\n', (2004, 2011), True, 'import healpy as hp\n'), ((2041, 2061), 'healpy.max_pixrad', 'hp.max_pixrad', (['nside'], {}), '(nside)\n', (2054, 2061), True, 'import healpy as hp\n'), ((2394, 2419), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (2408, 2419), True, 'import numpy as np\n')]
|
# Helpful classes
import numpy as np
# Helper function for calculating the Euclidean length of each edge
def dists(array):
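    # Each element of `array` is assumed to be a pair of endpoints; the
    # Euclidean distance between them gives the edge length.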
lens = []
for i in range(len(array)):
lens.append(np.linalg.norm(np.array(array[i][0])-
np.array(array[i][1])))
return lens
# This is for the original shape you want to cut
class Shape:
def __init__(self, ls):
self.edges = np.array(ls)
self.lengths = dists(ls)
self.vertices = self.edges[:,0]
# For the circles that represent the points (center vertex plus radius)
class Circle:
def __init__(self, vert, rad):
self.vert = vert
self.rad = rad
|
[
"numpy.array"
] |
[((344, 356), 'numpy.array', 'np.array', (['ls'], {}), '(ls)\n', (352, 356), True, 'import numpy as np\n'), ((170, 191), 'numpy.array', 'np.array', (['array[i][0]'], {}), '(array[i][0])\n', (178, 191), True, 'import numpy as np\n'), ((199, 220), 'numpy.array', 'np.array', (['array[i][1]'], {}), '(array[i][1])\n', (207, 220), True, 'import numpy as np\n')]
|
import io
import cv2
import numpy as np
import requests
def predict(image):
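    # Decode the raw image bytes into a uint8 array (np.fromstring is
    # deprecated for binary input; np.frombuffer is the modern equivalent),
    # convert the decoded image to grayscale, and re-encode it as PNG bytes.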
nparr = np.fromstring(image, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
res, im_png = cv2.imencode(".png", gray_image)
return im_png
def details():
details = {
"doi": "10.1371/journal.pone.0029740",
"example_figure": "https://camo.githubusercontent.com/5eb8b4f1f63dbdbb5c30afb10575d6ebe24bb0a156e6b81296c8191183f33edf/68747470733a2f2f692e6962622e636f2f3559304d3258622f6578616d706c652e706e67",
"description": "Image Uncolorization will vintage your picture to turn them into black and white style.",
}
    # A dict does not support "+="; store the Crossref response under its own
    # key instead (the key name "crossref_metadata" is an assumption)
    details["crossref_metadata"] = get_doi(details["doi"])
return details
def get_doi(doi):
crossref_url = f"http://api.crossref.org/works/{doi}"
req = requests.get(crossref_url)
return req.content
|
[
"cv2.cvtColor",
"cv2.imdecode",
"numpy.fromstring",
"cv2.imencode"
] |
[((74, 104), 'numpy.fromstring', 'np.fromstring', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (87, 104), True, 'import numpy as np\n'), ((115, 152), 'cv2.imdecode', 'cv2.imdecode', (['nparr', 'cv2.IMREAD_COLOR'], {}), '(nparr, cv2.IMREAD_COLOR)\n', (127, 152), False, 'import cv2\n'), ((170, 207), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (182, 207), False, 'import cv2\n'), ((226, 258), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'gray_image'], {}), "('.png', gray_image)\n", (238, 258), False, 'import cv2\n')]
|
import numpy as np
from numpy.random import randn
from numpy.linalg import norm
from numpy.random import permutation
from numpy.testing import assert_array_almost_equal, assert_array_equal
import tensor.utils as tu
from tensor.tensor_train import ttsvd, tt_product
# np.random.seed(20)
shape_A = (3, 4, 5, 6, 7)
A = randn(*shape_A)
A = A / norm(A)
# higher tolerance means worse approximation, but more compression
tol = 0
dim_order = permutation(np.arange(len(shape_A)))
G, ranks = ttsvd(A, tol, dim_order=dim_order, ranks=None)
Ak = tt_product(G, shape_A, dim_order=dim_order)
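# relative reconstruction error of the tensor-train approximation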
err = norm(A - Ak) / norm(A)
print('dim order: ', dim_order)
print('shape: ', shape_A)
print('ranks: ', ranks)
print('ttsvd: error = %0.6e' % err)
print('tol: tol = %0.2e' % tol)
print('check tolerance: %d' % (err < tol))
|
[
"tensor.tensor_train.ttsvd",
"numpy.linalg.norm",
"tensor.tensor_train.tt_product",
"numpy.random.randn"
] |
[((318, 333), 'numpy.random.randn', 'randn', (['*shape_A'], {}), '(*shape_A)\n', (323, 333), False, 'from numpy.random import randn\n'), ((487, 533), 'tensor.tensor_train.ttsvd', 'ttsvd', (['A', 'tol'], {'dim_order': 'dim_order', 'ranks': 'None'}), '(A, tol, dim_order=dim_order, ranks=None)\n', (492, 533), False, 'from tensor.tensor_train import ttsvd, tt_product\n'), ((540, 583), 'tensor.tensor_train.tt_product', 'tt_product', (['G', 'shape_A'], {'dim_order': 'dim_order'}), '(G, shape_A, dim_order=dim_order)\n', (550, 583), False, 'from tensor.tensor_train import ttsvd, tt_product\n'), ((342, 349), 'numpy.linalg.norm', 'norm', (['A'], {}), '(A)\n', (346, 349), False, 'from numpy.linalg import norm\n'), ((592, 604), 'numpy.linalg.norm', 'norm', (['(A - Ak)'], {}), '(A - Ak)\n', (596, 604), False, 'from numpy.linalg import norm\n'), ((607, 614), 'numpy.linalg.norm', 'norm', (['A'], {}), '(A)\n', (611, 614), False, 'from numpy.linalg import norm\n')]
|
''' Visualization code for point clouds and 3D bounding boxes with mayavi.
Modified by <NAME>
Date: September 2017
Ref: https://github.com/hengck23/didi-udacity-2017/blob/master/baseline-04/kitti_data/draw.py
'''
import warnings
import numpy as np
try:
import mayavi.mlab as mlab
except ImportError:
warnings.warn("mayavi is not installed")
import pandas as pd
from dataset.prepare_lyft_data import parse_string_to_box, transform_box_from_world_to_sensor_coordinates, \
get_sensor_to_world_transform_matrix_from_sample_data_token
from dataset.prepare_lyft_data_v2 import transform_pc_to_camera_coord
from lyft_dataset_sdk.lyftdataset import LyftDataset
from lyft_dataset_sdk.utils.data_classes import LidarPointCloud
from lyft_dataset_sdk.utils.geometry_utils import box_in_image,BoxVisibility
from skimage.io import imread
import matplotlib.pyplot as plt
class PredViewer(object):
def __init__(self, pred_file, lyftd: LyftDataset):
self.pred_pd = pd.read_csv(pred_file, index_col="Id")
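        # prediction CSV is indexed by sample token ("Id") and stores the
        # encoded boxes for each sample in a "PredictionString" column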
self.lyftd = lyftd
def get_boxes_from_token(self, sample_token):
boxes_str = self.pred_pd.loc[sample_token, 'PredictionString']
        boxes = parse_string_to_box(boxes_str, sample_token=sample_token)
return boxes
def get_sample_record_from_token(self, sample_token):
pass
def render_camera_image(self, ax, sample_token, cam_key='CAM_FRONT', prob_threshold=0.7):
sample_record = self.lyftd.get('sample', sample_token)
camera_token = sample_record['data'][cam_key]
camera_image_path, _, cam_intrinsic = self.lyftd.get_sample_data(camera_token)
boxes = self.get_boxes_from_token(sample_token)
image_array = imread(camera_image_path)
intrinsic = np.identity(3)
ax.imshow(image_array)
for pred_box in boxes:
if pred_box.score > prob_threshold :
box_in_camera_coord = transform_box_from_world_to_sensor_coordinates(pred_box, camera_token, self.lyftd)
if box_in_camera_coord.center[2] > 0:
box_in_camera_coord.render(ax, view=cam_intrinsic, normalize=True, linewidth=2.0)
ax.set_xlim([0, image_array.shape[1]])
ax.set_ylim([image_array.shape[0], 0])
def render_lidar_points(self, ax, sample_token, lidar_key='LIDAR_TOP', prob_threshold=0):
lidar_top_token, lpc = self.get_lidar_points(lidar_key, sample_token)
boxes = self.get_boxes_from_token(sample_token)
for pred_box in boxes:
if pred_box.score > prob_threshold:
box_in_lidar_coord = transform_box_from_world_to_sensor_coordinates(pred_box, lidar_top_token,
self.lyftd)
pts = lpc.points
ax.scatter(pts[0, :], pts[1, :], s=0.05)
ax.set_xlim([-50, 50])
ax.set_ylim([-50, 50])
view_mtx = np.eye(2)
box_in_lidar_coord.render(ax, view=view_mtx)
def get_lidar_points(self, lidar_key, sample_token):
sample_record = self.lyftd.get('sample', sample_token)
lidar_top_token = sample_record['data'][lidar_key]
lidar_path = self.lyftd.get_sample_data_path(lidar_top_token)
lpc = LidarPointCloud.from_file(lidar_path)
return lidar_top_token, lpc
def render_3d_lidar_points(self, sample_token, lidar_key='LIDAR_TOP', prob_threshold=0):
lidar_token, lpc = self.get_lidar_points(lidar_key=lidar_key, sample_token=sample_token)
fig = draw_lidar_simple(np.transpose(lpc.points))
boxes = self.get_boxes_from_token(sample_token)
box_pts = []
for pred_box in boxes:
if pred_box.score > prob_threshold:
box_in_lidar_coord = transform_box_from_world_to_sensor_coordinates(pred_box, lidar_token,
self.lyftd)
box_3d_pts = np.transpose(box_in_lidar_coord.corners())
box_pts.append(box_3d_pts)
draw_gt_boxes3d(box_pts, fig)
def render_3d_lidar_points_to_camera_coordinates(self, sample_token, lidar_key="LIDAR_TOP",
cam_key="CAM_FRONT", prob_threshold=0):
lidar_token, lpc = self.get_lidar_points(lidar_key=lidar_key, sample_token=sample_token)
# Get camera coordiate calibration information
sample_record = self.lyftd.get('sample', sample_token)
camera_token = sample_record['data'][cam_key]
camera_data = self.lyftd.get('sample_data', camera_token)
lidar_record = self.lyftd.get('sample_data', lidar_token)
lpc, _ = transform_pc_to_camera_coord(camera_data, lidar_record, lpc, self.lyftd)
# Transform lidar points
fig = draw_lidar_simple(np.transpose(lpc.points))
boxes = self.get_boxes_from_token(sample_token)
box_pts = []
for pred_box in boxes:
if pred_box.score > prob_threshold:
box_in_lidar_coord = transform_box_from_world_to_sensor_coordinates(pred_box, camera_token,
self.lyftd)
box_3d_pts = np.transpose(box_in_lidar_coord.corners())
box_pts.append(box_3d_pts)
draw_gt_boxes3d(box_pts, fig)
# mlab.view(azimuth=270, elevation=150,
# focalpoint=[0, 0, 0], distance=62.0, figure=fig)
return fig
def draw_lidar_simple(pc, color=None):
''' Draw lidar points. simplest set up. '''
fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1600, 1000))
if color is None: color = pc[:, 2]
# draw points
mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color, color=None, mode='point', colormap='cool', scale_factor=1,
figure=fig)
# draw origin
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
# draw axis
axes = np.array([
[2., 0., 0., 0.],
[0., 2., 0., 0.],
[0., 0., 2., 0.],
], dtype=np.float64)
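    # x axis drawn in red, y axis in green, z axis in blue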
mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)
mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=62.0, figure=fig)
return fig
def draw_lidar(pc, color=None, fig=None, bgcolor=(0, 0, 0), pts_scale=1, pts_mode='point', pts_color=None):
''' Draw lidar points
Args:
pc: numpy array (n,3) of XYZ
color: numpy array (n) of intensity or whatever
fig: mayavi figure handler, if None create new one otherwise will use it
Returns:
fig: created or used fig
'''
if fig is None: fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000))
if color is None: color = pc[2, :]
mlab.points3d(pc[0, :], pc[1, :], pc[2, :], color, color=pts_color, mode=pts_mode, colormap='gnuplot',
scale_factor=pts_scale, figure=fig)
# draw origin
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)
# draw axis
axes = np.array([
[2., 0., 0., 0.],
[0., 2., 0., 0.],
[0., 0., 2., 0.],
], dtype=np.float64)
mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)
mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)
# draw fov (todo: update to real sensor spec.)
fov = np.array([ # 45 degree
[20., 20., 0., 0.],
[20., -20., 0., 0.],
], dtype=np.float64)
mlab.plot3d([0, fov[0, 0]], [0, fov[0, 1]], [0, fov[0, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,
figure=fig)
mlab.plot3d([0, fov[1, 0]], [0, fov[1, 1]], [0, fov[1, 2]], color=(1, 1, 1), tube_radius=None, line_width=1,
figure=fig)
# draw square region
TOP_Y_MIN = -20
TOP_Y_MAX = 20
TOP_X_MIN = 0
TOP_X_MAX = 40
TOP_Z_MIN = -2.0
TOP_Z_MAX = 0.4
x1 = TOP_X_MIN
x2 = TOP_X_MAX
y1 = TOP_Y_MIN
y2 = TOP_Y_MAX
mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=0.1, line_width=1, figure=fig)
# mlab.orientation_axes()
mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=62.0, figure=fig)
return fig
def draw_gt_boxes3d(gt_boxes3d, fig, color=(1, 1, 1), line_width=1, draw_text=True, text_scale=(1, 1, 1),
color_list=None):
''' Draw 3D bounding boxes
Args:
gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners
fig: mayavi figure handler
color: RGB value tuple in range (0,1), box line color
line_width: box line width
draw_text: boolean, if true, write box indices beside boxes
text_scale: three number tuple
color_list: a list of RGB tuple, if not None, overwrite color.
Returns:
fig: updated fig
'''
num = len(gt_boxes3d)
for n in range(num):
b = gt_boxes3d[n]
if color_list is not None:
color = color_list[n]
if draw_text: mlab.text3d(b[4, 0], b[4, 1], b[4, 2], '%d' % n, scale=text_scale, color=color, figure=fig)
for k in range(0, 4):
# http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i, j = k, (k + 1) % 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=None,
line_width=line_width, figure=fig)
i, j = k + 4, (k + 1) % 4 + 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=None,
line_width=line_width, figure=fig)
i, j = k, k + 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=None,
line_width=line_width, figure=fig)
# mlab.show(1)
# mlab.view(azimuth=180, elevation=70, focalpoint=[ 12.0909996 , -1.04700089, -2.03249991], distance=62.0, figure=fig)
return fig
if __name__ == '__main__':
import pickle
pfile = "/Users/kanhua/Downloads/3d-object-detection-for-autonomous-vehicles/artifacts/val_pc.pickle"
with open(pfile, 'rb') as fp:
item = pickle.load(fp)
print(type(item))
# point_cloud_3d = np.loadtxt('mayavi/kitti_sample_scan.txt')
fig = draw_lidar_simple(item['pcl'][3])
mlab.savefig('pc_view.jpg', figure=fig)
input()
|
[
"lyft_dataset_sdk.utils.data_classes.LidarPointCloud.from_file",
"dataset.prepare_lyft_data_v2.transform_pc_to_camera_coord",
"mayavi.mlab.text3d",
"mayavi.mlab.figure",
"numpy.eye",
"pandas.read_csv",
"dataset.prepare_lyft_data.transform_box_from_world_to_sensor_coordinates",
"mayavi.mlab.view",
"numpy.identity",
"mayavi.mlab.points3d",
"dataset.prepare_lyft_data.parse_string_to_box",
"numpy.transpose",
"pickle.load",
"numpy.array",
"mayavi.mlab.savefig",
"mayavi.mlab.plot3d",
"warnings.warn",
"skimage.io.imread"
] |
[((5711, 5805), 'mayavi.mlab.figure', 'mlab.figure', ([], {'figure': 'None', 'bgcolor': '(0, 0, 0)', 'fgcolor': 'None', 'engine': 'None', 'size': '(1600, 1000)'}), '(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size\n =(1600, 1000))\n', (5722, 5805), True, 'import mayavi.mlab as mlab\n'), ((5862, 5987), 'mayavi.mlab.points3d', 'mlab.points3d', (['pc[:, 0]', 'pc[:, 1]', 'pc[:, 2]', 'color'], {'color': 'None', 'mode': '"""point"""', 'colormap': '"""cool"""', 'scale_factor': '(1)', 'figure': 'fig'}), "(pc[:, 0], pc[:, 1], pc[:, 2], color, color=None, mode='point',\n colormap='cool', scale_factor=1, figure=fig)\n", (5875, 5987), True, 'import mayavi.mlab as mlab\n'), ((6024, 6096), 'mayavi.mlab.points3d', 'mlab.points3d', (['(0)', '(0)', '(0)'], {'color': '(1, 1, 1)', 'mode': '"""sphere"""', 'scale_factor': '(0.2)'}), "(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)\n", (6037, 6096), True, 'import mayavi.mlab as mlab\n'), ((6124, 6222), 'numpy.array', 'np.array', (['[[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]]'], {'dtype': 'np.float64'}), '([[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]],\n dtype=np.float64)\n', (6132, 6222), True, 'import numpy as np\n'), ((6242, 6355), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[0, 0]]', '[0, axes[0, 1]]', '[0, axes[0, 2]]'], {'color': '(1, 0, 0)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0,\n 0), tube_radius=None, figure=fig)\n', (6253, 6355), True, 'import mayavi.mlab as mlab\n'), ((6356, 6469), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[1, 0]]', '[0, axes[1, 1]]', '[0, axes[1, 2]]'], {'color': '(0, 1, 0)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1,\n 0), tube_radius=None, figure=fig)\n', (6367, 6469), True, 'import mayavi.mlab as mlab\n'), ((6470, 6583), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[2, 0]]', '[0, axes[2, 1]]', '[0, axes[2, 2]]'], {'color': '(0, 0, 1)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0,\n 1), tube_radius=None, figure=fig)\n', (6481, 6583), True, 'import mayavi.mlab as mlab\n'), ((6584, 6703), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(180)', 'elevation': '(70)', 'focalpoint': '[12.0909996, -1.04700089, -2.03249991]', 'distance': '(62.0)', 'figure': 'fig'}), '(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -\n 2.03249991], distance=62.0, figure=fig)\n', (6593, 6703), True, 'import mayavi.mlab as mlab\n'), ((7245, 7388), 'mayavi.mlab.points3d', 'mlab.points3d', (['pc[0, :]', 'pc[1, :]', 'pc[2, :]', 'color'], {'color': 'pts_color', 'mode': 'pts_mode', 'colormap': '"""gnuplot"""', 'scale_factor': 'pts_scale', 'figure': 'fig'}), "(pc[0, :], pc[1, :], pc[2, :], color, color=pts_color, mode=\n pts_mode, colormap='gnuplot', scale_factor=pts_scale, figure=fig)\n", (7258, 7388), True, 'import mayavi.mlab as mlab\n'), ((7425, 7497), 'mayavi.mlab.points3d', 'mlab.points3d', (['(0)', '(0)', '(0)'], {'color': '(1, 1, 1)', 'mode': '"""sphere"""', 'scale_factor': '(0.2)'}), "(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2)\n", (7438, 7497), True, 'import mayavi.mlab as mlab\n'), ((7526, 7624), 'numpy.array', 'np.array', (['[[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]]'], {'dtype': 'np.float64'}), '([[2.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 2.0, 0.0]],\n dtype=np.float64)\n', (7534, 7624), True, 'import 
numpy as np\n'), ((7644, 7757), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[0, 0]]', '[0, axes[0, 1]]', '[0, axes[0, 2]]'], {'color': '(1, 0, 0)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0,\n 0), tube_radius=None, figure=fig)\n', (7655, 7757), True, 'import mayavi.mlab as mlab\n'), ((7758, 7871), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[1, 0]]', '[0, axes[1, 1]]', '[0, axes[1, 2]]'], {'color': '(0, 1, 0)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1,\n 0), tube_radius=None, figure=fig)\n', (7769, 7871), True, 'import mayavi.mlab as mlab\n'), ((7872, 7985), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, axes[2, 0]]', '[0, axes[2, 1]]', '[0, axes[2, 2]]'], {'color': '(0, 0, 1)', 'tube_radius': 'None', 'figure': 'fig'}), '([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0,\n 1), tube_radius=None, figure=fig)\n', (7883, 7985), True, 'import mayavi.mlab as mlab\n'), ((8044, 8121), 'numpy.array', 'np.array', (['[[20.0, 20.0, 0.0, 0.0], [20.0, -20.0, 0.0, 0.0]]'], {'dtype': 'np.float64'}), '([[20.0, 20.0, 0.0, 0.0], [20.0, -20.0, 0.0, 0.0]], dtype=np.float64)\n', (8052, 8121), True, 'import numpy as np\n'), ((8155, 8279), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, fov[0, 0]]', '[0, fov[0, 1]]', '[0, fov[0, 2]]'], {'color': '(1, 1, 1)', 'tube_radius': 'None', 'line_width': '(1)', 'figure': 'fig'}), '([0, fov[0, 0]], [0, fov[0, 1]], [0, fov[0, 2]], color=(1, 1, 1),\n tube_radius=None, line_width=1, figure=fig)\n', (8166, 8279), True, 'import mayavi.mlab as mlab\n'), ((8296, 8420), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[0, fov[1, 0]]', '[0, fov[1, 1]]', '[0, fov[1, 2]]'], {'color': '(1, 1, 1)', 'tube_radius': 'None', 'line_width': '(1)', 'figure': 'fig'}), '([0, fov[1, 0]], [0, fov[1, 1]], [0, fov[1, 2]], color=(1, 1, 1),\n tube_radius=None, line_width=1, figure=fig)\n', (8307, 8420), True, 'import mayavi.mlab as mlab\n'), ((8657, 8767), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[x1, x1]', '[y1, y2]', '[0, 0]'], {'color': '(0.5, 0.5, 0.5)', 'tube_radius': '(0.1)', 'line_width': '(1)', 'figure': 'fig'}), '([x1, x1], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=\n 0.1, line_width=1, figure=fig)\n', (8668, 8767), True, 'import mayavi.mlab as mlab\n'), ((8767, 8877), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[x2, x2]', '[y1, y2]', '[0, 0]'], {'color': '(0.5, 0.5, 0.5)', 'tube_radius': '(0.1)', 'line_width': '(1)', 'figure': 'fig'}), '([x2, x2], [y1, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=\n 0.1, line_width=1, figure=fig)\n', (8778, 8877), True, 'import mayavi.mlab as mlab\n'), ((8877, 8987), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[x1, x2]', '[y1, y1]', '[0, 0]'], {'color': '(0.5, 0.5, 0.5)', 'tube_radius': '(0.1)', 'line_width': '(1)', 'figure': 'fig'}), '([x1, x2], [y1, y1], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=\n 0.1, line_width=1, figure=fig)\n', (8888, 8987), True, 'import mayavi.mlab as mlab\n'), ((8987, 9097), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[x1, x2]', '[y2, y2]', '[0, 0]'], {'color': '(0.5, 0.5, 0.5)', 'tube_radius': '(0.1)', 'line_width': '(1)', 'figure': 'fig'}), '([x1, x2], [y2, y2], [0, 0], color=(0.5, 0.5, 0.5), tube_radius=\n 0.1, line_width=1, figure=fig)\n', (8998, 9097), True, 'import mayavi.mlab as mlab\n'), ((9128, 9247), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(180)', 'elevation': '(70)', 'focalpoint': '[12.0909996, -1.04700089, -2.03249991]', 'distance': '(62.0)', 'figure': 'fig'}), 
'(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -\n 2.03249991], distance=62.0, figure=fig)\n', (9137, 9247), True, 'import mayavi.mlab as mlab\n'), ((11391, 11430), 'mayavi.mlab.savefig', 'mlab.savefig', (['"""pc_view.jpg"""'], {'figure': 'fig'}), "('pc_view.jpg', figure=fig)\n", (11403, 11430), True, 'import mayavi.mlab as mlab\n'), ((312, 352), 'warnings.warn', 'warnings.warn', (['"""mayavi is not installed"""'], {}), "('mayavi is not installed')\n", (325, 352), False, 'import warnings\n'), ((979, 1017), 'pandas.read_csv', 'pd.read_csv', (['pred_file'], {'index_col': '"""Id"""'}), "(pred_file, index_col='Id')\n", (990, 1017), True, 'import pandas as pd\n'), ((1217, 1274), 'dataset.prepare_lyft_data.parse_string_to_box', 'parse_string_to_box', (['boxes_str'], {'sample_token': 'sample_token'}), '(boxes_str, sample_token=sample_token)\n', (1236, 1274), False, 'from dataset.prepare_lyft_data import parse_string_to_box, transform_box_from_world_to_sensor_coordinates, get_sensor_to_world_transform_matrix_from_sample_data_token\n'), ((1746, 1771), 'skimage.io.imread', 'imread', (['camera_image_path'], {}), '(camera_image_path)\n', (1752, 1771), False, 'from skimage.io import imread\n'), ((1792, 1806), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (1803, 1806), True, 'import numpy as np\n'), ((3340, 3377), 'lyft_dataset_sdk.utils.data_classes.LidarPointCloud.from_file', 'LidarPointCloud.from_file', (['lidar_path'], {}), '(lidar_path)\n', (3365, 3377), False, 'from lyft_dataset_sdk.utils.data_classes import LidarPointCloud\n'), ((4793, 4865), 'dataset.prepare_lyft_data_v2.transform_pc_to_camera_coord', 'transform_pc_to_camera_coord', (['camera_data', 'lidar_record', 'lpc', 'self.lyftd'], {}), '(camera_data, lidar_record, lpc, self.lyftd)\n', (4821, 4865), False, 'from dataset.prepare_lyft_data_v2 import transform_pc_to_camera_coord\n'), ((7114, 7206), 'mayavi.mlab.figure', 'mlab.figure', ([], {'figure': 'None', 'bgcolor': 'bgcolor', 'fgcolor': 'None', 'engine': 'None', 'size': '(1600, 1000)'}), '(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(\n 1600, 1000))\n', (7125, 7206), True, 'import mayavi.mlab as mlab\n'), ((11234, 11249), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (11245, 11249), False, 'import pickle\n'), ((3639, 3663), 'numpy.transpose', 'np.transpose', (['lpc.points'], {}), '(lpc.points)\n', (3651, 3663), True, 'import numpy as np\n'), ((4932, 4956), 'numpy.transpose', 'np.transpose', (['lpc.points'], {}), '(lpc.points)\n', (4944, 4956), True, 'import numpy as np\n'), ((10037, 10133), 'mayavi.mlab.text3d', 'mlab.text3d', (['b[4, 0]', 'b[4, 1]', 'b[4, 2]', "('%d' % n)"], {'scale': 'text_scale', 'color': 'color', 'figure': 'fig'}), "(b[4, 0], b[4, 1], b[4, 2], '%d' % n, scale=text_scale, color=\n color, figure=fig)\n", (10048, 10133), True, 'import mayavi.mlab as mlab\n'), ((10291, 10432), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[b[i, 0], b[j, 0]]', '[b[i, 1], b[j, 1]]', '[b[i, 2], b[j, 2]]'], {'color': 'color', 'tube_radius': 'None', 'line_width': 'line_width', 'figure': 'fig'}), '([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]],\n color=color, tube_radius=None, line_width=line_width, figure=fig)\n', (10302, 10432), True, 'import mayavi.mlab as mlab\n'), ((10508, 10649), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[b[i, 0], b[j, 0]]', '[b[i, 1], b[j, 1]]', '[b[i, 2], b[j, 2]]'], {'color': 'color', 'tube_radius': 'None', 'line_width': 'line_width', 'figure': 'fig'}), '([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 
2]],\n color=color, tube_radius=None, line_width=line_width, figure=fig)\n', (10519, 10649), True, 'import mayavi.mlab as mlab\n'), ((10711, 10852), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[b[i, 0], b[j, 0]]', '[b[i, 1], b[j, 1]]', '[b[i, 2], b[j, 2]]'], {'color': 'color', 'tube_radius': 'None', 'line_width': 'line_width', 'figure': 'fig'}), '([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]],\n color=color, tube_radius=None, line_width=line_width, figure=fig)\n', (10722, 10852), True, 'import mayavi.mlab as mlab\n'), ((1957, 2044), 'dataset.prepare_lyft_data.transform_box_from_world_to_sensor_coordinates', 'transform_box_from_world_to_sensor_coordinates', (['pred_box', 'camera_token', 'self.lyftd'], {}), '(pred_box, camera_token, self\n .lyftd)\n', (2003, 2044), False, 'from dataset.prepare_lyft_data import parse_string_to_box, transform_box_from_world_to_sensor_coordinates, get_sensor_to_world_transform_matrix_from_sample_data_token\n'), ((2639, 2728), 'dataset.prepare_lyft_data.transform_box_from_world_to_sensor_coordinates', 'transform_box_from_world_to_sensor_coordinates', (['pred_box', 'lidar_top_token', 'self.lyftd'], {}), '(pred_box, lidar_top_token,\n self.lyftd)\n', (2685, 2728), False, 'from dataset.prepare_lyft_data import parse_string_to_box, transform_box_from_world_to_sensor_coordinates, get_sensor_to_world_transform_matrix_from_sample_data_token\n'), ((3005, 3014), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3011, 3014), True, 'import numpy as np\n'), ((3860, 3946), 'dataset.prepare_lyft_data.transform_box_from_world_to_sensor_coordinates', 'transform_box_from_world_to_sensor_coordinates', (['pred_box', 'lidar_token', 'self.lyftd'], {}), '(pred_box, lidar_token, self.\n lyftd)\n', (3906, 3946), False, 'from dataset.prepare_lyft_data import parse_string_to_box, transform_box_from_world_to_sensor_coordinates, get_sensor_to_world_transform_matrix_from_sample_data_token\n'), ((5153, 5240), 'dataset.prepare_lyft_data.transform_box_from_world_to_sensor_coordinates', 'transform_box_from_world_to_sensor_coordinates', (['pred_box', 'camera_token', 'self.lyftd'], {}), '(pred_box, camera_token, self\n .lyftd)\n', (5199, 5240), False, 'from dataset.prepare_lyft_data import parse_string_to_box, transform_box_from_world_to_sensor_coordinates, get_sensor_to_world_transform_matrix_from_sample_data_token\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
manipulated bfgs method from scipy.optimize (V 1.5.2)
"""
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by <NAME>
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
# Minimization routines
__all__ = ['fmin_bfgs', 'line_search', 'OptimizeResult',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
from numpy import (asarray, sqrt, Inf, isinf)
import numpy as np
from scipy.optimize.linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.',
'nan': 'NaN result encountered.',
'out_of_bounds': 'The result is outside of the provided '
'bounds.'}
class MemoizeJac(object):
""" Decorator that caches the return values of a function returning `(fun, grad)`
each time it is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self._value = None
self.x = None
def _compute_if_needed(self, x, *args):
if not np.all(x == self.x) or self._value is None or self.jac is None:
self.x = np.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
self._value = fg[0]
def __call__(self, x, *args):
""" returns the the function value """
self._compute_if_needed(x, *args)
return self._value
def derivative(self, x, *args):
self._compute_if_needed(x, *args)
return self.jac
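# Usage sketch (illustrative, not part of the original module): wrap an objective
# that returns (value, gradient) so value and gradient can be queried separately
# without recomputing, e.g.
#     fg = MemoizeJac(lambda x: (x @ x, 2 * x))
#     fmin_bfgs(fg, x0, fprime=fg.derivative)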
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
There may be additional attributes not listed above depending of the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in SciPy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(np.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return np.amax(np.abs(x))
elif ord == -Inf:
return np.amin(np.abs(x))
else:
return np.sum(np.abs(x)**ord, axis=0)**(1.0 / ord)
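# e.g. vecnorm(g, ord=Inf) == max(|g_i|), while ord=2 gives the Euclidean norm;
# _minimize_bfgs below uses this for its gradient-norm convergence test.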
def _prepare_scalar_function(fun, x0, jac=None, args=(), bounds=None,
epsilon=None, finite_diff_rel_step=None,
hess=None):
"""
Creates a ScalarFunction object for use with scalar minimizers
(BFGS/LBFGSB/SLSQP/TNC/CG/etc).
Parameters
----------
fun : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (n,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (n,)
Initial guess. Array of real elements of size (n,),
where 'n' is the number of independent variables.
jac : {callable, '2-point', '3-point', 'cs', None}, optional
Method for computing the gradient vector. If it is a callable, it
should be a function that returns the gradient vector:
``jac(x, *args) -> array_like, shape (n,)``
If one of `{'2-point', '3-point', 'cs'}` is selected then the gradient
is calculated with a relative step for finite differences. If `None`,
then two-point finite differences with an absolute step is used.
args : tuple, optional
Extra arguments passed to the objective function and its
derivatives (`fun`, `jac` functions).
bounds : sequence, optional
Bounds on variables. 'new-style' bounds are required.
eps : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
hess : {callable, '2-point', '3-point', 'cs', None}
Computes the Hessian matrix. If it is callable, it should return the
Hessian matrix:
``hess(x, *args) -> {LinearOperator, spmatrix, array}, (n, n)``
Alternatively, the keywords {'2-point', '3-point', 'cs'} select a
finite difference scheme for numerical estimation.
Whenever the gradient is estimated via finite-differences, the Hessian
cannot be estimated with options {'2-point', '3-point', 'cs'} and needs
to be estimated using one of the quasi-Newton strategies.
Returns
-------
sf : ScalarFunction
"""
if callable(jac):
grad = jac
elif jac in FD_METHODS:
# epsilon is set to None so that ScalarFunction is made to use
# rel_step
epsilon = None
grad = jac
else:
# default (jac is None) is to do 2-point finite differences with
# absolute step size. ScalarFunction has to be provided an
# epsilon value that is not None to use absolute steps. This is
# normally the case from most _minimize* methods.
grad = '2-point'
epsilon = epsilon
if hess is None:
# ScalarFunction requires something for hess, so we give a dummy
# implementation here if nothing is provided, return a value of None
# so that downstream minimisers halt. The results of `fun.hess`
# should not be used.
def hess(x, *args):
return None
if bounds is None:
bounds = (-np.inf, np.inf)
# ScalarFunction caches. Reuse of fun(x) during grad
# calculation reduces overall function evaluations.
sf = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, bounds, epsilon=epsilon)
return sf
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
suitable step length is not found, and raise an exception if a
suitable step length is not found.
Raises
------
_LineSearchError
If no suitable step size is found
Returns
-------
    alpha : float
        Computed step size.
    fc : int
        Total number of function evaluations.
    gc : int
        Total number of gradient evaluations.
    new_fval : float
        New function value at xk + alpha * pk.
    old_fval : float
        Old function value.
    new_slope : float
        Local slope <fprime(x_new), pk> along the search direction.
"""
extra_condition = kwargs.pop('extra_condition', None)
ret1 = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret1[0] is not None and extra_condition is not None:
xp1 = xk + ret1[0] * pk
if not extra_condition(ret1[0], xp1, ret1[3], ret1[5]):
# Reject step if extra_condition fails
            ret1 = (None,) + ret1[1:]
if ret1[0] is None:
# line search failed: try different one.
with warnings.catch_warnings():
warnings.simplefilter('ignore', LineSearchWarning)
kwargs2 = {}
for key in ('c1', 'c2', 'amax'):
if key in kwargs:
kwargs2[key] = kwargs[key]
ret2 = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
extra_condition=extra_condition,
**kwargs2)
            if ret2[0] is None:
                raise _LineSearchError()
        # sum up the number of function and gradient calls from both attempts
        return (ret2[0], ret1[1] + ret2[1], ret1[2] + ret2[2]) + ret2[3:]
    return ret1
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, H0 = None, callback=None, self_scaling = False):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Gradient norm must be less than gtol before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If fprime is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True,return fopt, func_calls, grad_calls, and warnflag
in addition to xopt.
disp : bool, optional
Print convergence message if True.
retall : bool, optional
Return a list of results at each iteration if True.
H0 : ndarray, optional
Initialization of inverse of Hessian approximation.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e., f(xopt) == fopt.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e., the inverse Hessian matrix.
func_calls : int
Number of function_calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
3 : NaN result encountered.
allvecs : list
The value of xopt at each iteration. Only returned if retall is True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'BFGS' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS)
References
----------
Wright, and Nocedal 'Numerical Optimization', 1999, p. 198.
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall,
'H0': H0,
'self_scaling': self_scaling}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False, H0 = None,
finite_diff_rel_step=None,self_scaling = False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac is None` the absolute step size used for numerical
approximation of the jacobian via forward differences.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
H0 : ndarray, optional
Initialization of inverse of Hessian approximation.
finite_diff_rel_step : None or array_like, optional
If `jac in ['2-point', '3-point', 'cs']` the relative step size to
use for numerical approximation of the jacobian. The absolute step
size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``,
possibly adjusted to fit into the bounds. For ``method='3-point'``
the sign of `h` is ignored. If None (default) then step is selected
automatically.
self_scaling : bool, optional
whether to use a self-scaling method for updating the matrix
"""
_check_unknown_options(unknown_options)
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0) * 200
sf = _prepare_scalar_function(fun, x0, jac, args=args, epsilon=eps,
finite_diff_rel_step=finite_diff_rel_step)
f = sf.fun
myfprime = sf.grad
old_fval = f(x0)
gfk = myfprime(x0)
if not np.isscalar(old_fval):
try:
old_fval = old_fval.item()
except (ValueError, AttributeError):
raise ValueError("The user-provided "
"objective function must "
"return a scalar value.")
k = 0
N = len(x0)
I = np.eye(N, dtype=int)
# initialize Hk with given initial value
if H0 is None:
Hk = I
else:
Hk = H0
# Sets the initial step guess to dx ~ 1
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
xk = x0
if retall:
allvecs = [x0]
allHs = [Hk]
allrhos = [0]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = -np.dot(Hk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval, amin=1e-100, amax=1e100)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
k += 1
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not np.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
        try:  # this was handled in numeric, let it remain for more safety
rhok = 1.0 / (np.dot(yk, sk))
except ZeroDivisionError:
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
if isinf(rhok): # this is patch for NumPy
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
if rhok < 0: # no update
rhok = 0
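        # BFGS inverse-Hessian update (cf. Nocedal & Wright, 'Numerical Optimization'):
        #   H_{k+1} = (I - rho_k s_k y_k^T) H_k (I - rho_k y_k s_k^T) + rho_k s_k s_k^T
        # with rho_k = 1 / (y_k^T s_k). The self-scaling variant additionally divides
        # the first term by gamma_k = rho_k * s_k^T H_k s_k.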
A1 = I - sk[:, np.newaxis] * yk[np.newaxis, :] * rhok
A2 = I - yk[:, np.newaxis] * sk[np.newaxis, :] * rhok
if self_scaling:
gammak = np.inner(sk,Hk @ sk) * rhok
Hk = 1/gammak * np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] *
sk[np.newaxis, :])
else:
Hk = np.dot(A1, np.dot(Hk, A2)) + (rhok * sk[:, np.newaxis] *
sk[np.newaxis, :])
if retall:
allHs.append(Hk)
allrhos.append(rhok)
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
elif np.isnan(gnorm) or np.isnan(fval) or np.isnan(xk).any():
warnflag = 3
msg = _status_message['nan']
else:
msg = _status_message['success']
if disp:
print("%s%s" % ("Warning: " if warnflag != 0 else "", msg))
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % sf.nfev)
print(" Gradient evaluations: %d" % sf.ngev)
result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=sf.nfev,
njev=sf.ngev, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = [allvecs,allHs,allrhos]
return result
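# Hedged usage sketch (not part of the original module): exercises the modified
# fmin_bfgs on a small illustrative problem; the objective and starting point below
# are made up for demonstration and rely on finite-difference gradients.
if __name__ == "__main__":
    def _demo_objective(x):
        # convex quadratic with minimum at (1, 2)
        return (x[0] - 1.0) ** 2 + 2.0 * (x[1] - 2.0) ** 2

    _xopt = fmin_bfgs(_demo_objective, np.array([0.0, 0.0]), gtol=1e-8, disp=0)
    print("demo minimum found at:", _xopt)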
|
[
"numpy.abs",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.inner",
"warnings.simplefilter",
"numpy.isfinite",
"numpy.finfo",
"warnings.catch_warnings",
"numpy.size",
"numpy.asarray",
"numpy.isinf",
"scipy.optimize._differentiable_functions.ScalarFunction",
"numpy.dot",
"numpy.all",
"numpy.isscalar",
"scipy.optimize.linesearch.line_search_wolfe1",
"scipy.optimize.linesearch.line_search_wolfe2",
"numpy.eye",
"warnings.warn"
] |
[((9140, 9232), 'scipy.optimize._differentiable_functions.ScalarFunction', 'ScalarFunction', (['fun', 'x0', 'args', 'grad', 'hess', 'finite_diff_rel_step', 'bounds'], {'epsilon': 'epsilon'}), '(fun, x0, args, grad, hess, finite_diff_rel_step, bounds,\n epsilon=epsilon)\n', (9154, 9232), False, 'from scipy.optimize._differentiable_functions import ScalarFunction, FD_METHODS\n'), ((10202, 10278), 'scipy.optimize.linesearch.line_search_wolfe1', 'line_search_wolfe1', (['f', 'fprime', 'xk', 'pk', 'gfk', 'old_fval', 'old_old_fval'], {}), '(f, fprime, xk, pk, gfk, old_fval, old_old_fval, **kwargs)\n', (10220, 10278), False, 'from scipy.optimize.linesearch import line_search_wolfe1, line_search_wolfe2, line_search_wolfe2 as line_search, LineSearchWarning\n'), ((17091, 17111), 'numpy.eye', 'np.eye', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (17097, 17111), True, 'import numpy as np\n'), ((4947, 5016), 'warnings.warn', 'warnings.warn', (["('Unknown solver options: %s' % msg)", 'OptimizeWarning', '(4)'], {}), "('Unknown solver options: %s' % msg, OptimizeWarning, 4)\n", (4960, 5016), False, 'import warnings\n'), ((5125, 5135), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (5132, 5135), True, 'import numpy as np\n'), ((5159, 5174), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5167, 5174), True, 'import numpy as np\n'), ((16775, 16796), 'numpy.isscalar', 'np.isscalar', (['old_fval'], {}), '(old_fval)\n', (16786, 16796), True, 'import numpy as np\n'), ((18764, 18775), 'numpy.isinf', 'isinf', (['rhok'], {}), '(rhok)\n', (18769, 18775), False, 'from numpy import asarray, sqrt, Inf, isinf\n'), ((5247, 5256), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5253, 5256), True, 'import numpy as np\n'), ((10692, 10717), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10715, 10717), False, 'import warnings\n'), ((10731, 10781), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'LineSearchWarning'], {}), "('ignore', LineSearchWarning)\n", (10752, 10781), False, 'import warnings\n'), ((10952, 11066), 'scipy.optimize.linesearch.line_search_wolfe2', 'line_search_wolfe2', (['f', 'fprime', 'xk', 'pk', 'gfk', 'old_fval', 'old_old_fval'], {'extra_condition': 'extra_condition'}), '(f, fprime, xk, pk, gfk, old_fval, old_old_fval,\n extra_condition=extra_condition, **kwargs2)\n', (10970, 11066), False, 'from scipy.optimize.linesearch import line_search_wolfe1, line_search_wolfe2, line_search_wolfe2 as line_search, LineSearchWarning\n'), ((16406, 16417), 'numpy.asarray', 'asarray', (['x0'], {}), '(x0)\n', (16413, 16417), False, 'from numpy import asarray, sqrt, Inf, isinf\n'), ((17295, 17314), 'numpy.linalg.norm', 'np.linalg.norm', (['gfk'], {}), '(gfk)\n', (17309, 17314), True, 'import numpy as np\n'), ((17523, 17538), 'numpy.dot', 'np.dot', (['Hk', 'gfk'], {}), '(Hk, gfk)\n', (17529, 17538), True, 'import numpy as np\n'), ((18318, 18339), 'numpy.isfinite', 'np.isfinite', (['old_fval'], {}), '(old_fval)\n', (18329, 18339), True, 'import numpy as np\n'), ((2140, 2159), 'numpy.all', 'np.all', (['(x == self.x)'], {}), '(x == self.x)\n', (2146, 2159), True, 'import numpy as np\n'), ((5303, 5312), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5309, 5312), True, 'import numpy as np\n'), ((18584, 18598), 'numpy.dot', 'np.dot', (['yk', 'sk'], {}), '(yk, sk)\n', (18590, 18598), True, 'import numpy as np\n'), ((19154, 19175), 'numpy.inner', 'np.inner', (['sk', '(Hk @ sk)'], {}), '(sk, Hk @ sk)\n', (19162, 19175), True, 'import numpy as np\n'), ((19755, 19770), 
'numpy.isnan', 'np.isnan', (['gnorm'], {}), '(gnorm)\n', (19763, 19770), True, 'import numpy as np\n'), ((19774, 19788), 'numpy.isnan', 'np.isnan', (['fval'], {}), '(fval)\n', (19782, 19788), True, 'import numpy as np\n'), ((2225, 2238), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2235, 2238), True, 'import numpy as np\n'), ((19381, 19395), 'numpy.dot', 'np.dot', (['Hk', 'A2'], {}), '(Hk, A2)\n', (19387, 19395), True, 'import numpy as np\n'), ((5346, 5355), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5352, 5355), True, 'import numpy as np\n'), ((19221, 19235), 'numpy.dot', 'np.dot', (['Hk', 'A2'], {}), '(Hk, A2)\n', (19227, 19235), True, 'import numpy as np\n'), ((19792, 19804), 'numpy.isnan', 'np.isnan', (['xk'], {}), '(xk)\n', (19800, 19804), True, 'import numpy as np\n')]
|
from numpy.random import seed
seed(42)
from tensorflow import set_random_seed
set_random_seed(42)
import nltk
from nltk.corpus import stopwords
from xml.dom.minidom import parse
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import keras as k
from numpy.random import seed
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
from nltk.tokenize import word_tokenize
from os import listdir
import string, sys
import numpy as np
import pickle
from keras.models import load_model
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
import matplotlib.pyplot as plt
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
stopwords_ = set(stopwords.words('english'))
from keras.models import Model, Input
from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy
sys.path.append("../")
import evaluator
class Learner():
def __init__(self):
print("[WELCOME]... Init learning progress")
def tokenize(self, sentence):
'''
        Task:
        Given a sentence, calls nltk.tokenize to split it into
        tokens, and adds to each token its start/end offset
        in the original sentence.
'''
tokens = []
offset = 0
words = word_tokenize(sentence)
for w in words:
if (w in stopwords_) or (w in string.punctuation):
continue
offset = sentence.find(w, offset)
tokens.append((w, offset, offset + len(w) - 1))
offset += len(w) +1
return tokens
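    # Example (illustrative): tokenize("Aspirin reduces fever") ->
    #   [("Aspirin", 0, 6), ("reduces", 8, 14), ("fever", 16, 20)]
    # stopwords and punctuation tokens are dropped before offsets are recorded.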
def get_tag(self,token, gold):
'''
        Task:
        Given a token and a list of ground truth entities in a sentence, decide
        which B-I-O tag the token should receive.
'''
(form, start, end) = token
for (gold_start, gold_end, gold_type) in gold:
if start == gold_start and end <= gold_end:
return "B-" + gold_type
elif start >= gold_start and end <= gold_end:
return "I-" + gold_type
return "O"
def load_data(self, datadir):
'''
        Load XML files in the given directory, tokenize each sentence, and extract ground truth BIO labels for each token.
'''
result = {}
# process each file in directory
for f in listdir(datadir):
# parse XML file , obtaining a DOM tree
tree = parse(datadir + "/" + f)
# process each sentence in the file
sentences = tree.getElementsByTagName("sentence")
for s in sentences:
sid = s.attributes["id"].value # get sentence id
stext = s.attributes["text"].value # get sentence text
# load ground truth entities .
gold = []
entities = s.getElementsByTagName("entity")
for e in entities:
# for discontinuous entities , we only get the first span
offset = e.attributes["charOffset"].value
(start, end) = offset.split(";")[0].split("-")
gold.append((int(start), int(end), e.attributes["type"].value))
# tokenize text
tokens = self.tokenize(stext)
info_ = []
for tok_ in tokens:
tag_ = self.get_tag(tok_, gold)
n, i1, i2 = tok_
info_.append((n, i1, i2, tag_))
result[sid] = info_
return result
def create_indexs(self, dataset, max_length):
'''
        Create index dictionaries both for input (words) and output (labels) from the given dataset.
'''
words = ['<PAD>', '<UNK>']
prefixes = ['<PAD>', '<UNK>']
suffixes = ['<PAD>', '<UNK>']
labels = ['<PAD>']
positions = ['<PAD>','<UNK>']
prevword = ['<PAD>','<UNK>']
nextword = ['<PAD>','<UNK>']
class_rules = ['<PAD>', 'brand', 'drug', 'drug_n', 'group', 'none']
for data in list(dataset.values()):
pos = 0
w_pack_prev = '<START>'
for w_pack in data:
if w_pack[0] not in words:
words.append(w_pack[0])
if w_pack[3] not in labels:
labels.append(w_pack[3])
if w_pack[0][:3] not in prefixes:
prefixes.append(w_pack[0][:3])
if w_pack[0][-3:] not in suffixes:
suffixes.append(w_pack[0][-3:])
if pos not in positions:
positions.append(pos)
if w_pack_prev not in prevword:
prevword.append(w_pack_prev)
if w_pack[0] not in nextword:
nextword.append(w_pack[0])
w_pack_prev = w_pack[0]
pos+=1
if '<END>' not in nextword:
nextword.append('<END>')
words = {k: v for v, k in enumerate(words)}
labels = {k: v for v, k in enumerate(labels)}
prefixes = {k: v for v, k in enumerate(prefixes)}
suffixes = {k: v for v, k in enumerate(suffixes)}
positions = {k: v for v, k in enumerate(positions)}
prevword = {k: v for v, k in enumerate(prevword)}
nextword = {k: v for v, k in enumerate(nextword)}
class_rules = {k: v for v, k in enumerate(class_rules)}
result = {}
result['words'] = words
result['labels'] = labels
result['maxlen'] = max_length
result['prev'] = prevword
result['next'] = nextword
result["pref"] = prefixes
result["suff"] = suffixes
result["position"] = positions
result["class_rules"] = class_rules
return result
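    # The returned dictionary maps each feature name ('words', 'labels', 'pref',
    # 'suff', 'position', 'prev', 'next', 'class_rules') to a token-to-index map,
    # plus 'maxlen' with the padding length used by all encode_* methods below.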
def encode_words(self, dataset, idx):
'''
        Encode the words in a sentence dataset formed by lists of tokens
        into lists of indexes suitable for NN input.
        The dataset is encoded as a list of sentences, each of them a list of
        word indices. If a word is not in the index, the <UNK> code is used. If
        a sentence is shorter than max_len, it is padded with the <PAD> code.
'''
results = []
for sentence in dataset.values():
encoded_sentence = []
for word in sentence:
if word[0] in idx["words"]:
index = idx["words"][word[0]]
else:
index = idx["words"]['<UNK>']
encoded_sentence.append(index)
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["words"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
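    # Each encode_* method below follows the same pattern: map a per-token feature
    # to its integer index (falling back to <UNK>) and pad every sentence to
    # idx["maxlen"] with the <PAD> index, returning an array of shape
    # (n_sentences, maxlen).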
def encode_words_lower(self, dataset, idx):
results = []
for sentence in dataset.values():
encoded_sentence = []
for word in sentence:
if word[0].lower() in idx["words_lower"]:
index = idx["words_lower"][word[0].lower()]
else:
index = idx["words_lower"]['<UNK>']
encoded_sentence.append(index)
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["words_lower"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def encode_positions(self, dataset, idx):
results = []
for sentence in dataset.values():
encoded_sentence = []
pos = 0
for word in sentence:
if pos in idx["position"]:
index = idx["position"][pos]
else:
index = idx["position"]['<UNK>']
encoded_sentence.append(index)
pos+=1
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["position"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def encode_prefixes(self, dataset, idx):
results = []
for sentence in dataset.values():
encoded_sentence = []
for word in sentence:
if word[0][:3] in idx["pref"]:
index = idx["pref"][word[0][:3]]
else:
index = idx["pref"]['<UNK>']
encoded_sentence.append(index)
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["pref"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def encode_suffixes(self, dataset, idx):
results = []
for sentence in dataset.values():
encoded_sentence = []
for word in sentence:
if word[0][-3:] in idx["suff"]:
index = idx["suff"][word[0][-3:]]
else:
index = idx["suff"]['<UNK>']
encoded_sentence.append(index)
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["suff"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def encode_prevwords(self, dataset, idx):
results = []
for sentence in dataset.values():
encoded_sentence = []
prevword = '<START>'
for word in sentence:
if prevword in idx["prev"]:
index = idx["prev"][prevword]
else:
index = idx["prev"]['<UNK>']
encoded_sentence.append(index)
prevword=word[0]
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["prev"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def encode_nextwords(self, dataset, idx):
results = []
for sentence in dataset.values():
encoded_sentence = []
for i in range(len(sentence)-1):
if sentence[i+1][0] in idx["next"]:
index = idx["next"][sentence[i+1][0]]
else:
index = idx["next"]['<UNK>']
encoded_sentence.append(index)
index = idx["next"]['<END>']
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["next"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def check_Prefixes(self, tok, pref):
for p in pref:
if str(tok).lower().startswith(p):
return True
return False
def check_Suffixes(self, tok, pref):
for p in pref:
if str(tok).endswith(p):
return True
return False
def check_contains(self, tok, cont):
for p in cont:
if p in str(tok):
return True
return False
def encode_class_rules(self, dataset, idx):
suffixes = ["azole", "idine", "amine", "mycin", "xacin", "ostol", "adiol"]
suffixes_drug = ["ine", "cin", "ium", "vir","ide", "lam", "il", "ril", "cin", "tin"]
#suffixes_brand = ["gen"]
suffixes_brand = []
suffixes_group = ["ines", "ides", "cins", "oles"]
prefixes_drug_n = ['ibog', 'endo', "bombe", "contor", "dmp", "egf", "ginse", "heo", "ibo", "jac", "phen"]
#prefixes_brand = ["SPR", "Acc", "equ", "EQU"]
prefixes_brand = []
prefixes_group = ["beta-adre", "hmg", "monoamine", "calcium", "drugs", "sali", "quino", "ssri", "cepha", "sulfo", "TCA", "thiaz", "benzo", "barb", "contracept", "cortico", "digitalis", "diu", "central", "nervous", "system", "beta", "psycho", "cepha", "macro", "prot", "ace", "mao", "cardiac"]
prefixes_drug = ['digox', 'warfa', 'meth', 'theophy', 'lith', 'keto', 'cime', 'insu', 'fluox', 'alcoh', 'cyclos', 'eryth', 'carba', 'rifa', 'caffe']
contains_drug_n = ["MHD", "NaC", "MC", "gaine", "PTX", "PCP"]
contains_group = ["ids", "urea" ]
contains_brand = ["PEGA", "aspirin", "Aspirin", "XX", "IVA"]
'''
suffixes = ["azole", "idine", "amine", "mycin", "xacin", "ostol", "adiol"]
suffixes_drug = ["ine", "cin", "ium"]
suffixes_brand = ["gen"]
suffixes_group = ["ines", "ides", "cins", "oles"]
'''
results = []
for sentence in dataset.values():
encoded_sentence = []
for word in sentence:
token = word[0]
if self.check_Suffixes(token, suffixes_drug) or self.check_Suffixes(token, suffixes) or self.check_Prefixes(token, prefixes_drug):
index = idx["class_rules"]['drug']
elif self.check_Suffixes(token, suffixes_group) or "agent" in token or self.check_Prefixes(token, prefixes_group) or self.check_contains(token, contains_group):
index = idx["class_rules"]['group']
elif self.check_Prefixes(token, prefixes_drug_n) or self.check_contains(token, contains_drug_n):
index = idx["class_rules"]['drug_n']
elif token.isupper() or self.check_contains(token, contains_brand):
index = idx["class_rules"]['brand']
else:
index = idx["class_rules"]['none']
encoded_sentence.append(index)
while len(encoded_sentence) < idx["maxlen"]:
encoded_sentence.append(idx["class_rules"]['<PAD>'])
results.append(np.array(encoded_sentence))
return np.array(results)
def encode_labels(self, dataset, idx):
'''
Encode the ground truth labels in a sentence dataset formed by lists of
        tokens into lists of indexes suitable for NN output.
'''
results = []
for sentence in dataset.values():
encoded_sentence = []
for word in sentence:
index = idx["labels"][word[3]]
encoded_sentence.append(index)
while len(encoded_sentence) < idx["maxlen"]:
index = idx["labels"]['<PAD>']
encoded_sentence.append(index)
results.append(np.array(encoded_sentence))
n_tags = len(idx["labels"])
results = [to_categorical(i, num_classes=n_tags) for i in results]
results = np.array(results)
print(results.shape)
return results
def save_model_and_indexs(self, model, idx, filename):
'''
        Save the given model and index dictionaries to disk.
'''
model.save_weights(filename + '.h5')
with open(filename + '.idx', 'wb') as fp:
pickle.dump(idx, fp, protocol=pickle.HIGHEST_PROTOCOL)
def load_model_and_indexs(self, filename):
'''
        Load a previously saved model and its index dictionaries from disk.
'''
with open(filename + '.idx', 'rb') as fp:
data = pickle.load(fp)
n_words = len(data['words'])
n_labels = len(data['labels'])
max_len = data['maxlen']
n_prev = len(data['prev'])
n_next = len(data['next'])
n_pref = len(data["pref"])
n_suff = len(data["suff"])
n_pos = len(data["position"])
n_class = len(data["class_rules"])
numbers=[n_words, n_suff, n_pref,n_pos,n_prev, n_next, n_class]
model = self.defineModel(numbers, n_labels, max_len)
model.load_weights(filename + '.h5')
return model, data
def output_entities(self, dataset, preds, outfile):
'''
Output detected entities in the format expected by the evaluator
'''
        # "wait" controls buffering: while True, the current B/I tokens are being
        # accumulated (or an O/<PAD> token is being skipped); when it becomes False,
        # the buffered entity is written to the output file.
        wait = False  # nothing is written while waiting
name = ''
off_start = '0'
element = {'name': '', 'offset': '', 'type': ''}
f = open(outfile, "w+")
for i, (sid, sentence) in enumerate(dataset.items()):
for ind, token in enumerate(sentence):
curr = preds[i][ind]
if curr == 'O' or curr=='<PAD>': # if it's a O or <PAD> element, we do nothing
wait = True
elif ind == (len(sentence) - 1): # if it's the last element of the sentence
if curr.startswith('B'):
element = {'name': token[0],
'offset': str(token[1]) + '-' + str(token[2]),
'type': curr.split('-')[1] # without B or I
}
elif curr.startswith('I'):
                        name = token[0] if name == '' else name + ' ' + token[0]
element = {'name': name,
'offset': off_start + '-' + str(token[2]),
'type': curr.split('-')[1]
}
else: # only to check
print('There\'s something wrong')
wait = False
else:
next = preds[i][ind+1]
if curr.startswith('B'):
if next.startswith('O') or next.startswith('B') or next.startswith('<'):
element = {'name': token[0],
'offset': str(token[1]) + '-' + str(token[2]),
'type': curr.split('-')[1] # without B or I
}
wait = False
elif next.startswith('I'):
name = token[0]
off_start = str(token[1])
wait = True
elif curr.startswith('I'):
if next.startswith('O') or next.startswith('B') or next.startswith('<'):
element = {'name': name + ' ' + token[0],
'offset': off_start + '-' + str(token[2]),
'type': curr.split('-')[1]
}
if name == '':
element["name"] = token[0]
wait = False
elif next.startswith('I'):
                            name = token[0] if name == '' else name + ' ' + token[0]
wait = True
else: # only to check
print('There\'s something wrong2')
if not wait:
f.write(sid + '|' + element['offset'] + '|' + element['name'] + '|' + element['type'] + '\n')
f.close()
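    # Each emitted line follows the evaluator format sid|start-end|entity text|type,
    # e.g. (illustrative) "DDI-DrugBank.d278.s0|0-6|Aspirin|brand".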
def predict(self, modelname, datadir, outfile):
'''
        Loads a NN model from file 'modelname' and uses it to extract drugs
        in datadir. Saves results to 'outfile' in the appropriate format.
'''
print("[INFO]... Model in inference process")
# load model and associated encoding data
model, idx = self.load_model_and_indexs(modelname)
# load data to annotate
testdata = self.load_data(datadir)
# encode dataset
X = self.encode_words(testdata, idx)
X_pref = self.encode_prefixes(testdata, idx)
X_suff = self.encode_suffixes(testdata, idx)
X_pos = self.encode_positions(testdata, idx)
X_prev = self.encode_prevwords(testdata, idx)
X_next = self.encode_nextwords(testdata, idx)
X_class_rules = self.encode_class_rules(testdata, idx)
# tag sentences in dataset
Y = model.predict([X, X_suff, X_pref, X_pos, X_prev, X_next, X_class_rules])
reverse_labels= {y: x for x, y in idx['labels'].items()}
Y = [[reverse_labels[np.argmax(y)] for y in s] for s in Y]
# extract entities and dump them to output file
self.output_entities(testdata, Y, outfile)
# evaluate using official evaluator
self.evaluation(datadir, outfile)
def checkOutputs(self, modelname, datadir, outfile):
print("[INFO]... Model in checking process")
# load model and associated encoding data
model, idx = self.load_model_and_indexs(modelname)
# load data to annotate
testdata = self.load_data(datadir)
# encode dataset
Y = self.encode_labels(testdata, idx)
print(idx["labels"])
reverse_labels = {y: x for x, y in idx['labels'].items()}
Y = [[reverse_labels[np.argmax(y)] for y in s] for s in Y]
# extract entities and dump them to output file
self.output_entities(testdata, Y, outfile)
# evaluate using official evaluator
self.evaluation(datadir, outfile)
def evaluation(self, datadir, outfile):
evaluator.evaluate("NER", datadir, outfile)
def learn(self, traindir, validationdir, modelname):
'''
        Learns a NN model using traindir as training data and validationdir
        as validation data. Saves the learnt model in a file named modelname.
'''
print("[INFO]... Model architecture in training process")
# load train and validation data in a suitable form
traindata = self.load_data(traindir)
valdata = self.load_data(validationdir)
# create indexes from training data
max_len = 100
idx = self.create_indexs(traindata, max_len)
# encode datasets
Xtrain = self.encode_words(traindata, idx)
Xtrain_pref = self.encode_prefixes(traindata, idx)
Xtrain_suff = self.encode_suffixes(traindata, idx)
Xtrain_pos = self.encode_positions(traindata, idx)
Xtrain_prev = self.encode_prevwords(traindata, idx)
Xtrain_next = self.encode_nextwords(traindata, idx)
Xtrain_class_rules = self.encode_class_rules(traindata, idx)
Ytrain = self.encode_labels(traindata, idx)
Xval = self.encode_words(valdata, idx)
Xval_pref = self.encode_prefixes(valdata, idx)
Xval_suff = self.encode_suffixes(valdata, idx)
Xval_pos = self.encode_positions(valdata, idx)
Xval_prev = self.encode_prevwords(valdata, idx)
Xval_next = self.encode_nextwords(valdata, idx)
Xval_class_rules = self.encode_class_rules(valdata, idx)
Yval = self.encode_labels(valdata, idx)
n_words=len(idx['words'])
# load the whole embedding into memory
embeddings_index = dict()
f = open('../data/glove.6B/glove.6B.100d.txt', encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
        # 100 is the GloVe embedding dimension (glove.6B.100d); max_len also happens
        # to be 100, but the matrix width must match the embedding size.
        embedding_matrix = np.zeros((n_words, 100))
h=0
for word in idx['words']:
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[h] = embedding_vector
h+=1
f = open("./embedding_matrix.txt", 'w')
for row in embedding_matrix:
np.savetxt(f,row)
f.close()
# train model
# build network
model = self.build_network(idx)
# Saving the best model only
filepath = modelname+"-{val_crf_viterbi_accuracy:.3f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_crf_viterbi_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Fit the best model
history = model.fit([Xtrain, Xtrain_suff, Xtrain_pref, Xtrain_pos, Xtrain_prev, Xtrain_next, Xtrain_class_rules], Ytrain, validation_data=([Xval, Xval_suff, Xval_pref, Xval_pos, Xval_prev, Xval_next, Xval_class_rules], Yval), batch_size=256, epochs=20, verbose=1, callbacks=callbacks_list)
'''
model.fit(Xtrain, Ytrain, validation_data=(Xval, Yval), batch_size=256)
'''
# save model and indexs , for later use in prediction
self.save_model_and_indexs(model, idx, modelname)
self.plot(history)
return embedding_matrix
def plot(self, history):
# Plot the graph
plt.style.use('ggplot')
accuracy = history.history['crf_viterbi_accuracy']
val_accuracy = history.history['val_crf_viterbi_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
x = range(1, len(accuracy) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, accuracy, 'b', label='Training acc')
plt.plot(x, val_accuracy, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, 'b', label='Training loss')
plt.plot(x, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.savefig("History_model.jpg")
def defineModel(self, numbers, n_labels, max_len):
        embedding_matrix = np.loadtxt("./embedding_matrix.txt").reshape(numbers[0], 100)
word_in = Input(shape=(max_len,))
word_emb = Embedding(input_dim=numbers[0], output_dim=100, input_length=max_len, trainable=False, weights = [embedding_matrix])(word_in) # 20-dim embedding
suf_in = Input(shape=(max_len,))
suf_emb = Embedding(input_dim=numbers[1], output_dim=100,
input_length=max_len)(suf_in)
pref_in = Input(shape=(max_len,))
pref_emb = Embedding(input_dim=numbers[2], output_dim=100,
input_length=max_len)(pref_in)
pos_in = Input(shape=(max_len,))
pos_emb = Embedding(input_dim=numbers[3], output_dim=100,
input_length=max_len)(pos_in)
prev_in = Input(shape=(max_len,))
prev_emb = Embedding(input_dim=numbers[4], output_dim=100,
input_length=max_len)(prev_in)
next_in = Input(shape=(max_len,))
next_emb = Embedding(input_dim=numbers[5], output_dim=100,
input_length=max_len)(next_in)
class_rules_in = Input(shape=(max_len,))
class_rules_emb = Embedding(input_dim=numbers[6], output_dim=100,
input_length=max_len)(class_rules_in)
concat = concatenate([word_emb, suf_emb, pref_emb, pos_emb, prev_emb, next_emb, class_rules_emb])
model = Dropout(0.55)(concat)
'''
model = LSTM(units=max_len * 2,
return_sequences=True,
dropout=0.5,
recurrent_dropout=0.5,
kernel_initializer=k.initializers.he_normal())(model)
'''
        model = Bidirectional(LSTM(units=32, return_sequences=True, recurrent_dropout=0.3))(model)  # variational biLSTM
#model = Bidirectional(LSTM(units=32,return_sequences=True,recurrent_dropout=0.5,))(model) # variational biLSTM
#model = Bidirectional(LSTM(units=32,return_sequences=True,recurrent_dropout=0.5,))(model) # variational biLSTM
model = TimeDistributed(Dense(n_labels, activation="relu"))(model) # a dense layer as suggested by neuralNer
crf = CRF(units=n_labels, activation='linear') # CRF layer
out = crf(model) # output
# create and compile model
model = Model([word_in, suf_in, pref_in, pos_in, prev_in, next_in, class_rules_in], out)
return model
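    # Architecture summary: seven parallel Embedding inputs (word, suffix, prefix,
    # position, previous word, next word, rule-based class) are concatenated, passed
    # through Dropout, a bidirectional LSTM and a time-distributed Dense layer, and
    # decoded by a CRF layer that emits one BIO label per token.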
def build_network(self,idx):
from keras.optimizers import RMSprop
'''
Create network for the learner
'''
# sizes
n_words = len(idx['words'])
n_prev = len(idx['prev'])
n_next = len(idx['next'])
n_pref = len(idx["pref"])
n_suff = len(idx["suff"])
n_pos = len(idx["position"])
n_labels = len(idx['labels'])
n_class = len(idx["class_rules"])
numbers=[n_words, n_suff, n_pref,n_pos,n_prev, n_next, n_class]
max_len = idx['maxlen']
# create network layers
model = self.defineModel(numbers, n_labels, max_len)
# set appropriate parameters (optimizer, loss, etc)
optimizer = RMSprop(lr=0.001, epsilon=None, decay=0.0)
crf = CRF(n_labels, activation='linear') # CRF layer
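        # Note (assumption about keras_contrib): the loss_function / accuracy
        # properties should resolve to the generic crf_loss / crf_viterbi_accuracy
        # functions imported above, which is why ModelCheckpoint can monitor
        # 'val_crf_viterbi_accuracy' during training.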
model.compile(optimizer=optimizer, loss=crf.loss_function, metrics=[crf.accuracy])
model.summary()
return model
if __name__ == '__main__':
learner = Learner()
learner.learn("../data/train", "../data/devel", "firstmodel")
#learner.checkOutputs("firstmodel", "../data/test", "results.txt", emb_matrix)
print("TRAIN")
learner.predict("firstmodel", "../data/train", "results.txt")
print("\nDEVEL")
learner.predict("firstmodel", "../data/devel", "results.txt")
print("\nTEST")
learner.predict("firstmodel", "../data/test", "results.txt")
|
[
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.random.seed",
"numpy.argmax",
"evaluator.evaluate",
"keras.models.Model",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"keras_contrib.layers.CRF",
"pickle.load",
"nltk.download",
"keras.layers.concatenate",
"sys.path.append",
"warnings.simplefilter",
"numpy.savetxt",
"tensorflow.set_random_seed",
"numpy.loadtxt",
"nltk.tokenize.word_tokenize",
"keras.utils.to_categorical",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"numpy.asarray",
"nltk.corpus.stopwords.words",
"keras.optimizers.RMSprop",
"os.listdir",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"warnings.filterwarnings",
"keras.layers.LSTM",
"numpy.zeros",
"keras.models.Input",
"xml.dom.minidom.parse",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Embedding",
"matplotlib.pyplot.savefig"
] |
[((30, 38), 'numpy.random.seed', 'seed', (['(42)'], {}), '(42)\n', (34, 38), False, 'from numpy.random import seed\n'), ((78, 97), 'tensorflow.set_random_seed', 'set_random_seed', (['(42)'], {}), '(42)\n', (93, 97), False, 'from tensorflow import set_random_seed\n'), ((195, 257), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (216, 257), False, 'import warnings\n'), ((258, 320), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (281, 320), False, 'import warnings\n'), ((812, 834), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (825, 834), False, 'import nltk\n'), ((835, 859), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (848, 859), False, 'import nltk\n'), ((860, 886), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (873, 886), False, 'import nltk\n'), ((1215, 1237), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (1230, 1237), False, 'import string, sys\n'), ((904, 930), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (919, 930), False, 'from nltk.corpus import stopwords\n'), ((1642, 1665), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['sentence'], {}), '(sentence)\n', (1655, 1665), False, 'from nltk.tokenize import word_tokenize\n'), ((2702, 2718), 'os.listdir', 'listdir', (['datadir'], {}), '(datadir)\n', (2709, 2718), False, 'from os import listdir\n'), ((7129, 7146), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (7137, 7146), True, 'import numpy as np\n'), ((7778, 7795), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (7786, 7795), True, 'import numpy as np\n'), ((8424, 8441), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (8432, 8441), True, 'import numpy as np\n'), ((9034, 9051), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9042, 9051), True, 'import numpy as np\n'), ((9638, 9655), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (9646, 9655), True, 'import numpy as np\n'), ((10301, 10318), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (10309, 10318), True, 'import numpy as np\n'), ((10966, 10983), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (10974, 10983), True, 'import numpy as np\n'), ((14106, 14123), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (14114, 14123), True, 'import numpy as np\n'), ((14894, 14911), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (14902, 14911), True, 'import numpy as np\n'), ((21436, 21479), 'evaluator.evaluate', 'evaluator.evaluate', (['"""NER"""', 'datadir', 'outfile'], {}), "('NER', datadir, outfile)\n", (21454, 21479), False, 'import evaluator\n'), ((23420, 23448), 'numpy.zeros', 'np.zeros', (['(n_words, max_len)'], {}), '((n_words, max_len))\n', (23428, 23448), True, 'import numpy as np\n'), ((24018, 24127), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_crf_viterbi_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_crf_viterbi_accuracy', verbose=1,\n save_best_only=True, mode='max')\n", (24033, 24127), False, 'from keras.callbacks import ModelCheckpoint\n'), ((24837, 24860), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), 
"('ggplot')\n", (24850, 24860), True, 'import matplotlib.pyplot as plt\n'), ((25123, 25150), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (25133, 25150), True, 'import matplotlib.pyplot as plt\n'), ((25159, 25179), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (25170, 25179), True, 'import matplotlib.pyplot as plt\n'), ((25188, 25236), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'accuracy', '"""b"""'], {'label': '"""Training acc"""'}), "(x, accuracy, 'b', label='Training acc')\n", (25196, 25236), True, 'import matplotlib.pyplot as plt\n'), ((25245, 25299), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accuracy', '"""r"""'], {'label': '"""Validation acc"""'}), "(x, val_accuracy, 'r', label='Validation acc')\n", (25253, 25299), True, 'import matplotlib.pyplot as plt\n'), ((25308, 25353), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (25317, 25353), True, 'import matplotlib.pyplot as plt\n'), ((25362, 25374), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25372, 25374), True, 'import matplotlib.pyplot as plt\n'), ((25383, 25403), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (25394, 25403), True, 'import matplotlib.pyplot as plt\n'), ((25412, 25457), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'loss', '"""b"""'], {'label': '"""Training loss"""'}), "(x, loss, 'b', label='Training loss')\n", (25420, 25457), True, 'import matplotlib.pyplot as plt\n'), ((25466, 25517), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_loss', '"""r"""'], {'label': '"""Validation loss"""'}), "(x, val_loss, 'r', label='Validation loss')\n", (25474, 25517), True, 'import matplotlib.pyplot as plt\n'), ((25526, 25567), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (25535, 25567), True, 'import matplotlib.pyplot as plt\n'), ((25576, 25588), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25586, 25588), True, 'import matplotlib.pyplot as plt\n'), ((25597, 25629), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""History_model.jpg"""'], {}), "('History_model.jpg')\n", (25608, 25629), True, 'import matplotlib.pyplot as plt\n'), ((25793, 25816), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (25798, 25816), False, 'from keras.models import Model, Input\n'), ((26008, 26031), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (26013, 26031), False, 'from keras.models import Model, Input\n'), ((26171, 26194), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (26176, 26194), False, 'from keras.models import Model, Input\n'), ((26335, 26358), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (26340, 26358), False, 'from keras.models import Model, Input\n'), ((26498, 26521), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (26503, 26521), False, 'from keras.models import Model, Input\n'), ((26663, 26686), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (26668, 26686), False, 'from keras.models import Model, Input\n'), ((26835, 26858), 'keras.models.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (26840, 26858), False, 'from keras.models import Model, Input\n'), 
((27013, 27105), 'keras.layers.concatenate', 'concatenate', (['[word_emb, suf_emb, pref_emb, pos_emb, prev_emb, next_emb, class_rules_emb]'], {}), '([word_emb, suf_emb, pref_emb, pos_emb, prev_emb, next_emb,\n class_rules_emb])\n', (27024, 27105), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((27898, 27938), 'keras_contrib.layers.CRF', 'CRF', ([], {'units': 'n_labels', 'activation': '"""linear"""'}), "(units=n_labels, activation='linear')\n", (27901, 27938), False, 'from keras_contrib.layers import CRF\n'), ((28039, 28124), 'keras.models.Model', 'Model', (['[word_in, suf_in, pref_in, pos_in, prev_in, next_in, class_rules_in]', 'out'], {}), '([word_in, suf_in, pref_in, pos_in, prev_in, next_in, class_rules_in], out\n )\n', (28044, 28124), False, 'from keras.models import Model, Input\n'), ((28867, 28909), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.001)', 'epsilon': 'None', 'decay': '(0.0)'}), '(lr=0.001, epsilon=None, decay=0.0)\n', (28874, 28909), False, 'from keras.optimizers import RMSprop\n'), ((28925, 28959), 'keras_contrib.layers.CRF', 'CRF', (['n_labels'], {'activation': '"""linear"""'}), "(n_labels, activation='linear')\n", (28928, 28959), False, 'from keras_contrib.layers import CRF\n'), ((2791, 2815), 'xml.dom.minidom.parse', 'parse', (["(datadir + '/' + f)"], {}), "(datadir + '/' + f)\n", (2796, 2815), False, 'from xml.dom.minidom import parse\n'), ((14820, 14857), 'keras.utils.to_categorical', 'to_categorical', (['i'], {'num_classes': 'n_tags'}), '(i, num_classes=n_tags)\n', (14834, 14857), False, 'from keras.utils import to_categorical\n'), ((15199, 15253), 'pickle.dump', 'pickle.dump', (['idx', 'fp'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(idx, fp, protocol=pickle.HIGHEST_PROTOCOL)\n', (15210, 15253), False, 'import pickle\n'), ((15439, 15454), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (15450, 15454), False, 'import pickle\n'), ((23291, 23330), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (23301, 23330), True, 'import numpy as np\n'), ((23768, 23786), 'numpy.savetxt', 'np.savetxt', (['f', 'row'], {}), '(f, row)\n', (23778, 23786), True, 'import numpy as np\n'), ((25836, 25954), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[0]', 'output_dim': '(100)', 'input_length': 'max_len', 'trainable': '(False)', 'weights': '[embedding_matrix]'}), '(input_dim=numbers[0], output_dim=100, input_length=max_len,\n trainable=False, weights=[embedding_matrix])\n', (25845, 25954), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((26050, 26119), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[1]', 'output_dim': '(100)', 'input_length': 'max_len'}), '(input_dim=numbers[1], output_dim=100, input_length=max_len)\n', (26059, 26119), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((26214, 26283), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[2]', 'output_dim': '(100)', 'input_length': 'max_len'}), '(input_dim=numbers[2], output_dim=100, input_length=max_len)\n', (26223, 26283), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((26377, 26446), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[3]', 
'output_dim': '(100)', 'input_length': 'max_len'}), '(input_dim=numbers[3], output_dim=100, input_length=max_len)\n', (26386, 26446), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((26541, 26610), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[4]', 'output_dim': '(100)', 'input_length': 'max_len'}), '(input_dim=numbers[4], output_dim=100, input_length=max_len)\n', (26550, 26610), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((26706, 26775), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[5]', 'output_dim': '(100)', 'input_length': 'max_len'}), '(input_dim=numbers[5], output_dim=100, input_length=max_len)\n', (26715, 26775), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((26885, 26954), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'numbers[6]', 'output_dim': '(100)', 'input_length': 'max_len'}), '(input_dim=numbers[6], output_dim=100, input_length=max_len)\n', (26894, 26954), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((27118, 27131), 'keras.layers.Dropout', 'Dropout', (['(0.55)'], {}), '(0.55)\n', (27125, 27131), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((7086, 7112), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (7094, 7112), True, 'import numpy as np\n'), ((7735, 7761), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (7743, 7761), True, 'import numpy as np\n'), ((8381, 8407), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (8389, 8407), True, 'import numpy as np\n'), ((8991, 9017), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (8999, 9017), True, 'import numpy as np\n'), ((9595, 9621), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (9603, 9621), True, 'import numpy as np\n'), ((10258, 10284), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (10266, 10284), True, 'import numpy as np\n'), ((10923, 10949), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (10931, 10949), True, 'import numpy as np\n'), ((14063, 14089), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (14071, 14089), True, 'import numpy as np\n'), ((14737, 14763), 'numpy.array', 'np.array', (['encoded_sentence'], {}), '(encoded_sentence)\n', (14745, 14763), True, 'import numpy as np\n'), ((25712, 25748), 'numpy.loadtxt', 'np.loadtxt', (['"""./embedding_matrix.txt"""'], {}), "('./embedding_matrix.txt')\n", (25722, 25748), True, 'import numpy as np\n'), ((27433, 27493), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(32)', 'return_sequences': '(True)', 'recurrent_dropout': '(0.3)'}), '(units=32, return_sequences=True, recurrent_dropout=0.3)\n', (27437, 27493), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((27797, 27831), 'keras.layers.Dense', 'Dense', (['n_labels'], {'activation': '"""relu"""'}), "(n_labels, activation='relu')\n", (27802, 27831), False, 'from keras.layers import LSTM, Embedding, concatenate, Dense, 
TimeDistributed, Dropout, Bidirectional, Lambda, Layer\n'), ((20429, 20441), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (20438, 20441), True, 'import numpy as np\n'), ((21151, 21163), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (21160, 21163), True, 'import numpy as np\n')]
|
import numpy as np
from icecream import ic
if __name__ == '__main__':
length = 12
size = 6
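    # The array below holds explicit indices 0..size-2 and leaves the last slot as a
    # -1 placeholder for the tail of a length-`length` sequence; `counter` counts how
    # many indices fall past the end of the array, so n = size + counter = length.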
a = np.ones(size) * -1
counter = 0
for i in range(size):
if i < size-1:
a[i] = i
else:
remain = length - (i+1)
counter += remain
a_mask = np.where(a==-1)[0]
idx = a_mask[0]
ic(a)
ic(a_mask)
ic(idx)
ic(counter)
n =len(a) + counter
ic(n)
p = np.empty_like(a)
p[: idx] = 1/n
p[idx]= 1 - np.sum(p[:idx])
ic(p)
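    # The placeholder slot absorbed the leftover probability mass. Note: the exact
    # float comparison below is fragile; np.isclose would be more robust.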
assert np.sum(p) == 1
samp = np.random.choice(a, size=size, replace=True, p=p)
ic(samp)
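
    # Added sketch (not in the original script): over many draws the -1 placeholder
    # should be selected roughly p[idx] of the time.
    many = np.random.choice(a, size=10_000, replace=True, p=p)
    ic((many == -1).mean(), p[idx])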
|
[
"icecream.ic",
"numpy.sum",
"numpy.empty_like",
"numpy.ones",
"numpy.where",
"numpy.random.choice"
] |
[((349, 354), 'icecream.ic', 'ic', (['a'], {}), '(a)\n', (351, 354), False, 'from icecream import ic\n'), ((359, 369), 'icecream.ic', 'ic', (['a_mask'], {}), '(a_mask)\n', (361, 369), False, 'from icecream import ic\n'), ((374, 381), 'icecream.ic', 'ic', (['idx'], {}), '(idx)\n', (376, 381), False, 'from icecream import ic\n'), ((386, 397), 'icecream.ic', 'ic', (['counter'], {}), '(counter)\n', (388, 397), False, 'from icecream import ic\n'), ((426, 431), 'icecream.ic', 'ic', (['n'], {}), '(n)\n', (428, 431), False, 'from icecream import ic\n'), ((440, 456), 'numpy.empty_like', 'np.empty_like', (['a'], {}), '(a)\n', (453, 456), True, 'import numpy as np\n'), ((512, 517), 'icecream.ic', 'ic', (['p'], {}), '(p)\n', (514, 517), False, 'from icecream import ic\n'), ((555, 604), 'numpy.random.choice', 'np.random.choice', (['a'], {'size': 'size', 'replace': '(True)', 'p': 'p'}), '(a, size=size, replace=True, p=p)\n', (571, 604), True, 'import numpy as np\n'), ((609, 617), 'icecream.ic', 'ic', (['samp'], {}), '(samp)\n', (611, 617), False, 'from icecream import ic\n'), ((108, 121), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (115, 121), True, 'import numpy as np\n'), ((306, 323), 'numpy.where', 'np.where', (['(a == -1)'], {}), '(a == -1)\n', (314, 323), True, 'import numpy as np\n'), ((492, 507), 'numpy.sum', 'np.sum', (['p[:idx]'], {}), '(p[:idx])\n', (498, 507), True, 'import numpy as np\n'), ((529, 538), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (535, 538), True, 'import numpy as np\n')]
|
import time
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from scipy.io import savemat
parser = argparse.ArgumentParser()
parser.add_argument('--tol', type=float, default=1e-3)
parser.add_argument('--adjoint', type=eval, default=False)
parser.add_argument('--niters', type=int, default=1000)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--experiment_no', type=int, default=3)
args = parser.parse_args()
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
else:
from torchdiffeq import odeint
class ODEfunc(nn.Module):
def __init__(self, dim, nhidden):
super(ODEfunc, self).__init__()
# self.elu = nn.ELU(inplace=False)
self.elu = nn.Tanh()
self.fc1 = nn.Linear(2*dim, nhidden)
self.fc2 = nn.Linear(nhidden, nhidden)
self.fc3 = nn.Linear(nhidden, dim)
self.nfe = 0
def forward(self, t, z):
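        # The state z stacks position x and velocity v along dim 0: z = [x; v].
        # The returned derivative is [dx/dt; dv/dt] = [v; f(x, v)], i.e. a second-order
        # ODE rewritten as a first-order system (the SONODE formulation).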
cutoff = int(len(z)/2)
x = z[:cutoff]
v = z[cutoff:]
into = torch.cat((x, v), dim=1)
self.nfe += 1
out = self.fc1(into)
out = self.elu(out)
out = self.fc2(out)
out = self.elu(out)
out = self.fc3(out)
return torch.cat((v, out))
class ODEBlock(nn.Module):
def __init__(self, odefunc, integration_times):
super(ODEBlock, self).__init__()
self.odefunc = odefunc
self.integration_times = integration_times
def forward(self, x):
out = odeint(self.odefunc, x, self.integration_times, rtol=args.tol, atol=args.tol)
return out
@property
def nfe(self):
return self.odefunc.nfe
@nfe.setter
def nfe(self, value):
self.odefunc.nfe = value
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
if __name__ == '__main__':
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
filename = 'sonode./'+str(args.experiment_no)+'./'
try:
os.makedirs('./'+filename)
except FileExistsError:
pass
    torch.random.manual_seed(2021)  # set random seed for repeatability
data_dim = 1
dim = data_dim
#dim does not equal data_dim for ANODEs where they are augmented with extra zeros
#download data
z0 = torch.tensor(np.load('data/z0.npy')).float().to(device)
z = torch.tensor(np.load('data/z.npy')).float().to(device)
samp_ts = torch.tensor(np.load('data/samp_ts.npy')).float().to(device)
# model
if args.experiment_no == 1:
nhidden = 15
elif args.experiment_no == 2:
nhidden = 20
elif args.experiment_no == 3:
nhidden = 25
else:
nhidden = 20
feature_layers = [ODEBlock(ODEfunc(dim, nhidden), samp_ts)]
model = nn.Sequential(*feature_layers).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
loss_func = nn.MSELoss()
itr_arr = np.empty(args.niters)
loss_arr = np.empty(args.niters)
nfe_arr = np.empty(args.niters)
time_arr = np.empty(args.niters)
# training
start_time = time.time()
for itr in range(1, args.niters+1):
model[0].nfe = 0
iter_start_time = time.time()
optimizer.zero_grad()
#forward in time and solve ode
pred_z = model(z0).to(device)
# compute loss
loss = loss_func(pred_z, z)
loss.backward()
optimizer.step()
iter_end_time = time.time()
# make arrays
itr_arr[itr-1] = itr
loss_arr[itr-1] = loss
nfe_arr[itr-1] = model[0].nfe
time_arr[itr-1] = iter_end_time-iter_start_time
print('Iter: {}, running MSE: {:.4f}'.format(itr, loss))
end_time = time.time()
print('\n')
print('Training complete after {} iterations.'.format(itr))
    loss = loss.detach().cpu().numpy()  # .cpu() so this also works when training on a GPU
print('Train MSE = ' +str(loss))
print('NFE = ' +str(model[0].nfe))
print('Total time = '+str(end_time-start_time))
print('No. parameters = '+str(count_parameters(model)))
np.save(filename+'itr_arr.npy', itr_arr)
np.save(filename+'nfe_arr.npy', nfe_arr)
np.save(filename+'loss_arr.npy', loss_arr)
np.save(filename+'time_arr.npy', time_arr)
torch.save(model, filename+'model.pth')
names = []
params = []
params_orig = []
for name,param in model.named_parameters():
names.append(name)
        params.append(param.detach().cpu().numpy())
params_orig.append(param)
for name,param in model.named_buffers():
names.append(name)
        params.append(param.detach().cpu().numpy())
nn1 = dict({'Wb':params,'names':names,'mse':loss})
savemat(filename+'model.mat',nn1)
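
    # Added sanity check (not part of the original script): reload the saved model and
    # confirm the stored training MSE is reproduced.
    reloaded = torch.load(filename + 'model.pth')
    with torch.no_grad():
        pred_check = reloaded(z0)
        print('Reloaded-model MSE = ' + str(loss_func(pred_check, z).item()))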
|
[
"torch.nn.MSELoss",
"numpy.save",
"numpy.load",
"argparse.ArgumentParser",
"torch.random.manual_seed",
"os.makedirs",
"torch.nn.Tanh",
"numpy.empty",
"torch.nn.Sequential",
"scipy.io.savemat",
"torch.cat",
"time.time",
"torch.save",
"torchdiffeq.odeint",
"torch.cuda.is_available",
"torch.nn.Linear"
] |
[((160, 185), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (183, 185), False, 'import argparse\n'), ((2234, 2264), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(2021)'], {}), '(2021)\n', (2258, 2264), False, 'import torch\n'), ((3069, 3081), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3079, 3081), True, 'import torch.nn as nn\n'), ((3101, 3122), 'numpy.empty', 'np.empty', (['args.niters'], {}), '(args.niters)\n', (3109, 3122), True, 'import numpy as np\n'), ((3138, 3159), 'numpy.empty', 'np.empty', (['args.niters'], {}), '(args.niters)\n', (3146, 3159), True, 'import numpy as np\n'), ((3174, 3195), 'numpy.empty', 'np.empty', (['args.niters'], {}), '(args.niters)\n', (3182, 3195), True, 'import numpy as np\n'), ((3211, 3232), 'numpy.empty', 'np.empty', (['args.niters'], {}), '(args.niters)\n', (3219, 3232), True, 'import numpy as np\n'), ((3266, 3277), 'time.time', 'time.time', ([], {}), '()\n', (3275, 3277), False, 'import time\n'), ((3912, 3923), 'time.time', 'time.time', ([], {}), '()\n', (3921, 3923), False, 'import time\n'), ((4234, 4276), 'numpy.save', 'np.save', (["(filename + 'itr_arr.npy')", 'itr_arr'], {}), "(filename + 'itr_arr.npy', itr_arr)\n", (4241, 4276), True, 'import numpy as np\n'), ((4279, 4321), 'numpy.save', 'np.save', (["(filename + 'nfe_arr.npy')", 'nfe_arr'], {}), "(filename + 'nfe_arr.npy', nfe_arr)\n", (4286, 4321), True, 'import numpy as np\n'), ((4324, 4368), 'numpy.save', 'np.save', (["(filename + 'loss_arr.npy')", 'loss_arr'], {}), "(filename + 'loss_arr.npy', loss_arr)\n", (4331, 4368), True, 'import numpy as np\n'), ((4371, 4415), 'numpy.save', 'np.save', (["(filename + 'time_arr.npy')", 'time_arr'], {}), "(filename + 'time_arr.npy', time_arr)\n", (4378, 4415), True, 'import numpy as np\n'), ((4418, 4459), 'torch.save', 'torch.save', (['model', "(filename + 'model.pth')"], {}), "(model, filename + 'model.pth')\n", (4428, 4459), False, 'import torch\n'), ((4867, 4903), 'scipy.io.savemat', 'savemat', (["(filename + 'model.mat')", 'nn1'], {}), "(filename + 'model.mat', nn1)\n", (4874, 4903), False, 'from scipy.io import savemat\n'), ((833, 842), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (840, 842), True, 'import torch.nn as nn\n'), ((862, 889), 'torch.nn.Linear', 'nn.Linear', (['(2 * dim)', 'nhidden'], {}), '(2 * dim, nhidden)\n', (871, 889), True, 'import torch.nn as nn\n'), ((907, 934), 'torch.nn.Linear', 'nn.Linear', (['nhidden', 'nhidden'], {}), '(nhidden, nhidden)\n', (916, 934), True, 'import torch.nn as nn\n'), ((954, 977), 'torch.nn.Linear', 'nn.Linear', (['nhidden', 'dim'], {}), '(nhidden, dim)\n', (963, 977), True, 'import torch.nn as nn\n'), ((1121, 1145), 'torch.cat', 'torch.cat', (['(x, v)'], {'dim': '(1)'}), '((x, v), dim=1)\n', (1130, 1145), False, 'import torch\n'), ((1324, 1343), 'torch.cat', 'torch.cat', (['(v, out)'], {}), '((v, out))\n', (1333, 1343), False, 'import torch\n'), ((1598, 1675), 'torchdiffeq.odeint', 'odeint', (['self.odefunc', 'x', 'self.integration_times'], {'rtol': 'args.tol', 'atol': 'args.tol'}), '(self.odefunc, x, self.integration_times, rtol=args.tol, atol=args.tol)\n', (1604, 1675), False, 'from torchdiffeq import odeint\n'), ((2157, 2185), 'os.makedirs', 'os.makedirs', (["('./' + filename)"], {}), "('./' + filename)\n", (2168, 2185), False, 'import os\n'), ((3370, 3381), 'time.time', 'time.time', ([], {}), '()\n', (3379, 3381), False, 'import time\n'), ((3621, 3632), 'time.time', 'time.time', ([], {}), '()\n', (3630, 3632), False, 'import time\n'), ((2047, 2072), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2070, 2072), False, 'import torch\n'), ((2952, 2982), 'torch.nn.Sequential', 'nn.Sequential', (['*feature_layers'], {}), '(*feature_layers)\n', (2965, 2982), True, 'import torch.nn as nn\n'), ((2479, 2501), 'numpy.load', 'np.load', (['"""data/z0.npy"""'], {}), "('data/z0.npy')\n", (2486, 2501), True, 'import numpy as np\n'), ((2543, 2564), 'numpy.load', 'np.load', (['"""data/z.npy"""'], {}), "('data/z.npy')\n", (2550, 2564), True, 'import numpy as np\n'), ((2612, 2639), 'numpy.load', 'np.load', (['"""data/samp_ts.npy"""'], {}), "('data/samp_ts.npy')\n", (2619, 2639), True, 'import numpy as np\n')]
|
"""
Permits calling arbitrary functions and passing some forms of data from C++
to Python (only one direction) as a server-client pair.
The server in this case is the C++ program, and the client is this binary.
For an example of C++ usage, see `call_python_server_test.cc`.
Here's an example of running with the C++ test program:
cd drake
bazel build //common/proto:call_python_client_cli //common/proto:call_python_server_test # noqa
# Create default pipe file.
rm -f /tmp/python_rpc && mkfifo /tmp/python_rpc
# In Terminal 1, run client.
./bazel-bin/common/proto/call_python_client_cli
# In Terminal 2, run server (or your C++ program).
./bazel-bin/common/proto/call_python_server_test
To use in Jupyter (if you have it installed) without a FIFO file (such that
it's non-blocking):
cd drake
bazel build //common/proto:call_python_client_cli //common/proto:call_python_server_test # noqa
rm -f /tmp/python_rpc # Do not make it FIFO
# In Terminal 1, run server, create output.
./bazel-bin/common/proto/call_python_server_test
# In Terminal 2, run client in notebook.
./bazel-bin/common/proto/call_python_client_cli \
-c jupyter notebook ${PWD}/common/proto/call_python_client_notebook.ipynb # noqa
# Execute: Cell > Run All
Note:
Occasionally, the plotting will not come through on the notebook. I (Eric)
am unsure why.
"""
import argparse
import os
from queue import Queue
import signal
import stat
import sys
from threading import Thread
import time
import traceback
import numpy as np
from drake import lcmt_call_python, lcmt_call_python_data
def _ensure_sigint_handler():
# @ref https://stackoverflow.com/a/47801921/2654527
if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
signal.signal(signal.SIGINT, signal.default_int_handler)
def _get_required_helpers(scope_locals):
# Provides helpers to keep C++ interface as simple as possible.
# @returns Dictionary containing the helpers needed.
def getitem(obj, index):
"""Global function for `obj[index]`. """
return obj[index]
def setitem(obj, index, value):
"""Global function for `obj[index] = value`. """
obj[index] = value
return obj[index]
def call(obj, *args, **kwargs):
return obj(*args, **kwargs)
def pass_through(value):
"""Pass-through for direct variable access. """
return value
def make_tuple(*args):
"""Create a tuple from an argument list. """
return tuple(args)
def make_list(*args):
"""Create a list from an argument list. """
return list(args)
def make_kwargs(*args):
"""Create a keyword argument object from an argument list. """
assert len(args) % 2 == 0
keys = args[0::2]
values = args[1::2]
kwargs = dict(zip(keys, values))
return _KwArgs(**kwargs)
def _make_slice(expr):
"""Parse a slice object from a string. """
def to_piece(s):
return s and int(s) or None
pieces = list(map(to_piece, expr.split(':')))
if len(pieces) == 1:
return slice(pieces[0], pieces[0] + 1)
else:
return slice(*pieces)
def make_slice_arg(*args):
"""Create a scalar or tuple for accessing objects via slices. """
out = [None] * len(args)
for i, arg in enumerate(args):
if isinstance(arg, str):
out[i] = _make_slice(arg)
else:
out[i] = arg
# Special case: If single index, collapse.
if len(out) == 1:
return out[0]
else:
return tuple(out)
def setvar(var, value):
"""Sets a variable in the client's locals. """
scope_locals[var] = value
def setvars(*args):
"""Sets multiple variables in the client's locals. """
scope_locals.update(make_kwargs(*args))
execution_check = _ExecutionCheck()
out = locals().copy()
# Scrub extra stuff.
del out["scope_locals"]
return out
class _KwArgs(dict):
# Indicates values meant solely for `**kwargs`.
pass
class _ExecutionCheck:
# Allows checking that we received and executed a complete set of
# instructions.
def __init__(self):
self.count = 0
def start(self):
self.count += 1
def finish(self):
assert self.count > 0
self.count -= 1
def _merge_dicts(*args):
# Merges a list of dict's.
out = {}
for arg in args:
out.update(arg)
return out
def _fix_pyplot(plt):
# This patches matplotlib/matplotlib#9412 by injecting `time` into the
# module (#7597).
cur = plt.__dict__
if 'time' not in cur:
cur['time'] = time
def default_globals():
"""Creates default globals for code that the client side can execute.
This is geared for convenient (not necessarily efficient) plotting
with `matplotlib`.
"""
# @note This imports modules at a function-scope rather than at a
# module-scope, which does not satisfy PEP8. This is intentional, as it
# allows for a cleaner scope separation between the client core code (e.g.
# `CallPythonClient`) and the client user code (e.g. `plot(x, y)`).
# TODO(eric.cousineau): Consider relegating this to a different module,
# possibly when this falls under `pydrake`.
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
import pylab # See `%pylab?` in IPython.
# TODO(eric.cousineau): Where better to put this?
matplotlib.interactive(True)
_fix_pyplot(plt)
def disp(value):
"""Alias for print."""
print(value)
def wait():
"""Waits to allow user interaction with plots."""
plt.show(block=True)
def pause(interval):
"""Pause for `interval` seconds, letting the GUI flush its event queue.
@note This is a *necessary* function to be defined if these globals are
not used!
"""
plt.pause(interval)
def box(bmin, bmax, rstride=1, cstride=1, **kwargs):
"""Plots a box bmin[i] <= x[i] <= bmax[i] for i < 3."""
fig = plt.gcf()
ax = fig.gca(projection='3d')
u = np.linspace(1, 9, 5) * np.pi / 4
U, V = np.meshgrid(u, u)
cx, cy, cz = (bmax + bmin) / 2
dx, dy, dz = bmax - bmin
X = cx + dx * np.cos(U) * np.sin(V)
Y = cy + dy * np.sin(U) * np.sin(V)
Z = cz + dz * np.cos(V) / np.sqrt(2)
ax.plot_surface(X, Y, Z, rstride=rstride, cstride=cstride, **kwargs)
def plot3(x, y, z, **kwargs):
"""Plots a 3d line plot."""
fig = plt.gcf()
ax = fig.gca(projection='3d')
ax.plot(x, y, z, **kwargs)
def sphere(n, rstride=1, cstride=1, **kwargs):
"""Plots a sphere."""
fig = plt.gcf()
ax = fig.gca(projection='3d')
u = np.linspace(0, np.pi, n)
v = np.linspace(0, 2 * np.pi, n)
X = np.outer(np.sin(u), np.sin(v))
Y = np.outer(np.sin(u), np.cos(v))
Z = np.outer(np.cos(u), np.ones_like(v))
ax.plot_surface(X, Y, Z, rstride=rstride, cstride=cstride, **kwargs)
def surf(x, y, Z, rstride=1, cstride=1, **kwargs):
"""Plots a 3d surface."""
fig = plt.gcf()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x, y)
ax.plot_surface(X, Y, Z, rstride=rstride, cstride=cstride, **kwargs)
def show():
"""Shows `matplotlib` images without blocking.
Generally not needed if `matplotlib.is_interactive()` is true.
"""
plt.show(block=False)
def magic(N):
"""Provides simple odd-only case for magic squares.
@ref https://scipython.com/book/chapter-6-numpy/examples/creating-a-magic-square # noqa
"""
assert N % 2 == 1
magic_square = np.zeros((N, N), dtype=int)
n = 1
i, j = 0, N//2
while n <= N**2:
magic_square[i, j] = n
n += 1
newi, newj = (i - 1) % N, (j + 1) % N
if magic_square[newi, newj]:
i += 1
else:
i, j = newi, newj
return magic_square
# Use <module>.__dict__ to simulate `from <module> import *`, since that is
# normally invalid in a function with nested functions.
return _merge_dicts(
globals(),
plt.__dict__,
pylab.__dict__,
locals())
class CallPythonClient:
"""Provides a client to receive Python commands.
Enables printing or plotting from a C++ application for debugging
purposes.
"""
def __init__(self, filename=None, stop_on_error=True,
scope_globals=None, scope_locals=None,
threaded=False, wait=False):
if filename is None:
# TODO(jamiesnape): Implement and use a
# drake.common.GetRpcPipeTempDirectory function.
temp_directory = os.environ.get("TEST_TMPDIR", "/tmp")
self.filename = os.path.join(temp_directory, "python_rpc")
else:
self.filename = filename
# Scope. Give it access to everything here.
        # However, keep its written values scoped.
if scope_locals is None:
self.scope_locals = {}
else:
self.scope_locals = scope_locals
# Define globals as (a) required helpers for C++ interface, and
# (b) convenience plotting functionality.
# N.B. The provided locals OR globals can shadow the helpers. BE
# CAREFUL!
required_helpers = _get_required_helpers(self.scope_locals)
if scope_globals is None:
scope_globals = default_globals()
self.scope_globals = _merge_dicts(required_helpers, scope_globals)
self._stop_on_error = stop_on_error
self._threaded = threaded
self._loop = False
self._wait = False
if wait:
if _is_fifo(self.filename):
self._loop = True
print("Looping for FIFO file (wait=True).")
else:
self._wait = True
print("Waiting after processing non-FIFO file (wait=True).")
# Variables indexed by GUID.
self._client_vars = {}
self._had_error = False
self._done = False
self._file = None
def _to_array(self, arg, dtype):
# Converts a lcmt_call_python argument to the appropriate NumPy array
# (or scalar).
np_raw = np.frombuffer(arg.data, dtype=dtype)
if arg.shape_type == lcmt_call_python_data.SCALAR:
assert arg.cols == 1 and arg.rows == 1
return np_raw[0]
elif arg.shape_type == lcmt_call_python_data.VECTOR:
assert arg.cols == 1
return np_raw.reshape(arg.rows)
elif arg.shape_type is None or \
arg.shape_type == lcmt_call_python_data.MATRIX:
# TODO(eric.cousineau): Figure out how to ensure `np.frombuffer`
# creates a column-major array?
return np_raw.reshape(arg.cols, arg.rows).T
def _execute_message(self, msg):
# Executes a message, handling / recording that an error occurred.
if self._stop_on_error:
# Do not wrap in a `try` / `catch` to simplify debugging.
self._execute_message_impl(msg)
else:
try:
self._execute_message_impl(msg)
except Exception as e:
traceback.print_exc(file=sys.stderr)
sys.stderr.write(" Continuing (no --stop_on_error)\n")
self._had_error = True
def _execute_message_impl(self, msg):
# Executes relevant portions of a message.
# Create input arguments.
inputs = []
kwargs = None
for i, arg in enumerate(msg.rhs):
value = None
if (arg.data_type
== lcmt_call_python_data.REMOTE_VARIABLE_REFERENCE):
id = np.frombuffer(arg.data, dtype=np.uint64).reshape(1)[0]
if id not in self._client_vars:
raise RuntimeError("Unknown local variable. "
"Dropping message.")
value = self._client_vars[id]
elif arg.data_type == lcmt_call_python_data.DOUBLE:
value = self._to_array(arg, np.double)
elif arg.data_type == lcmt_call_python_data.CHAR:
assert arg.rows == 1
value = arg.data.decode('utf8')
elif arg.data_type == lcmt_call_python_data.LOGICAL:
value = self._to_array(arg, np.bool)
elif arg.data_type == lcmt_call_python_data.INT:
value = self._to_array(arg, np.int32)
else:
assert False
if isinstance(value, _KwArgs):
assert kwargs is None
kwargs = value
else:
inputs.append(value)
# Call the function
# N.B. No security measures to sanitize function name.
function_name = msg.function_name
assert isinstance(function_name, str), type(function_name)
self.scope_locals.update(_tmp_args=inputs, _tmp_kwargs=kwargs or {})
# N.B. No try-catch block here. Can change this if needed.
if function_name == "exec":
assert len(inputs) == 1
assert kwargs is None or len(kwargs) == 0
exec(inputs[0], self.scope_globals, self.scope_locals)
out = None
else:
out = eval(function_name + "(*_tmp_args, **_tmp_kwargs)",
self.scope_globals, self.scope_locals)
self.scope_locals.update(_tmp_out=out)
# Update outputs.
self._client_vars[msg.lhs] = out
def run(self):
"""Runs the client code.
@return True if no error encountered.
"""
if self._threaded:
self._handle_messages_threaded()
else:
self.handle_messages(record=False)
# Check any execution in progress.
execution_check = self.scope_globals['execution_check']
if not self._had_error and execution_check.count != 0:
self._had_error = True
sys.stderr.write(
"ERROR: Invalid termination. "
"'execution_check.finish' called insufficient number of "
"times: {}\n".format(execution_check.count))
if self._wait and not self._had_error:
wait_func = self.scope_globals["wait"]
wait_func()
return not self._had_error
def _handle_messages_threaded(self):
# Handles messages in a threaded fashion.
queue = Queue()
def producer_loop():
# Read messages from file, and queue them for execution.
for msg in self._read_next_message():
queue.put(msg)
# Check if an error occurred.
if self._done:
break
# Wait until the queue empties out to signal completion from the
# producer's side.
if not self._done:
queue.join()
self._done = True
producer = Thread(name="Producer", target=producer_loop)
# @note Previously, when trying to do `queue.clear()` in the consumer,
# and `queue.join()` in the producer, there would be intermittent
# deadlocks. By demoting the producer to a daemon, I (eric.c) have not
# yet encountered a deadlock.
producer.daemon = True
producer.start()
# Consume.
# TODO(eric.cousineau): Trying to quit via Ctrl+C is awkward (but kinda
# works). Is there a way to have `plt.pause` handle Ctrl+C differently?
try:
pause = self.scope_globals['pause']
while not self._done:
# Process messages.
while not queue.empty():
msg = queue.get()
queue.task_done()
self._execute_message(msg)
# Spin busy for a bit, let matplotlib (or whatever) flush its
# event queue.
pause(0.01)
except KeyboardInterrupt:
# User pressed Ctrl+C.
self._done = True
print("Quitting")
except Exception as e:
# We encountered an error, and must stop.
self._done = True
self._had_error = True
traceback.print_exc(file=sys.stderr)
sys.stderr.write(" Stopping (--stop_on_error)\n")
# No need to worry about waiting for the producer, as it is a daemon
# thread.
def handle_messages(self, max_count=None, record=True, execute=True):
"""Handle all messages sent (e.g., through IPython).
@param max_count Maximum number of messages to handle.
@param record Record all messages and return them.
@param execute Execute the given message upon receiving it.
        @return (count, msgs) where `count` is how many messages were processed
            (e.g. 0 if no more messages are left), and `msgs` contains the
            messages themselves for playback (if record==True), otherwise an
            empty list.
"""
assert record or execute, "Not doing anything useful?"
count = 0
msgs = []
for msg in self._read_next_message():
if execute:
self._execute_message(msg)
count += 1
if record:
msgs.append(msg)
if max_count is not None and count >= max_count:
break
return (count, msgs)
def execute_messages(self, msgs):
"""Executes a set of recorded messages."""
for msg in msgs:
self._execute_message(msg)
def _read_next_message(self):
"""Returns incoming messages using a generator."""
while not self._done:
fifo = self._get_file()
# Close the file if we reach the end, NOT when exiting the scope
# (which is why `with` is not used here).
# This way the user can read a few messages at a time, with the
# same file handle.
# @note We must close / reopen the file when looping because the
# C++ program will effectively send a EOF signal when it closes
# the pipe.
while not self._done:
message = self._read_fifo_message(fifo)
if message is not None:
yield message
self._close_file()
if not self._loop:
break
def _read_fifo_message(self, fifo):
"""Reads at most one message from the given fifo."""
# Read the datagram size. (The C++ code encodes the datagram_size
# integer as an ASCII string.)
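        # Wire format implied by the reads below:
        #   b"<size as ASCII digits>" + b"\0" + <size bytes of LCM payload> + b"\0"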
datagram_size = None
buffer = bytearray()
while not self._done:
byte = fifo.read(1)
if not byte: # EOF
return None
if byte == b'\0': # EOM
datagram_size = int(buffer.decode())
break
else:
buffer.extend(byte)
# Read the payload.
buffer[:] = ()
while not self._done:
byte = fifo.read(1)
if not byte: # EOF
return None
buffer.extend(byte)
if len(buffer) == datagram_size:
byte = fifo.read(1)
assert byte == b'\0' # EOM
return lcmt_call_python.decode(bytes(buffer))
def _get_file(self):
# Gets file handle, opening if needed.
if self._file is None:
self._file = open(self.filename, 'rb')
return self._file
def _close_file(self):
# Closes file if open.
if self._file is not None:
self._file.close()
self._file = None
def _is_fifo(filepath):
# Determine if a file is a FIFO named pipe or not.
# @ref https://stackoverflow.com/a/8558940/7829525
return stat.S_ISFIFO(os.stat(filepath).st_mode)
def main(argv):
_ensure_sigint_handler()
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--no_wait", action='store_true',
help="Close client after messages are processed. "
"For FIFO, this means the client will close after the C++ "
"binary is executed once.")
parser.add_argument(
"--no_threading", action='store_true',
help="Disable threaded dispatch.")
parser.add_argument(
"--stop_on_error", action='store_true',
help="Stop client if there is an error when executing a call.")
parser.add_argument("-f", "--file", type=str, default=None)
parser.add_argument(
"-c", "--command", type=str, nargs='+', default=None,
help="Execute command (e.g. `jupyter notebook`) instead of running "
"client.")
args = parser.parse_args(argv)
if args.command is not None:
        # Execute command s.t. it has access to the relevant PYTHONPATH.
os.execvp(args.command[0], args.command)
# Control should not return to this program unless there was an error.
return False
else:
client = CallPythonClient(
args.file, stop_on_error=args.stop_on_error,
threaded=not args.no_threading, wait=not args.no_wait)
good = client.run()
return good
if __name__ == "__main__":
good = main(sys.argv[1:])
if not good:
exit(1)
|
[
"argparse.ArgumentParser",
"numpy.sin",
"os.path.join",
"numpy.meshgrid",
"traceback.print_exc",
"numpy.linspace",
"matplotlib.pyplot.pause",
"threading.Thread",
"matplotlib.pyplot.show",
"numpy.ones_like",
"os.stat",
"matplotlib.interactive",
"numpy.frombuffer",
"signal.getsignal",
"numpy.cos",
"signal.signal",
"matplotlib.pyplot.gcf",
"queue.Queue",
"numpy.zeros",
"os.environ.get",
"sys.stderr.write",
"os.execvp",
"numpy.sqrt"
] |
[((5645, 5673), 'matplotlib.interactive', 'matplotlib.interactive', (['(True)'], {}), '(True)\n', (5667, 5673), False, 'import matplotlib\n'), ((20361, 20464), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (20384, 20464), False, 'import argparse\n'), ((1738, 1769), 'signal.getsignal', 'signal.getsignal', (['signal.SIGINT'], {}), '(signal.SIGINT)\n', (1754, 1769), False, 'import signal\n'), ((1797, 1853), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.default_int_handler'], {}), '(signal.SIGINT, signal.default_int_handler)\n', (1810, 1853), False, 'import signal\n'), ((5852, 5872), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (5860, 5872), True, 'import matplotlib.pyplot as plt\n'), ((6098, 6117), 'matplotlib.pyplot.pause', 'plt.pause', (['interval'], {}), '(interval)\n', (6107, 6117), True, 'import matplotlib.pyplot as plt\n'), ((6254, 6263), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6261, 6263), True, 'import matplotlib.pyplot as plt\n'), ((6362, 6379), 'numpy.meshgrid', 'np.meshgrid', (['u', 'u'], {}), '(u, u)\n', (6373, 6379), True, 'import numpy as np\n'), ((6747, 6756), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6754, 6756), True, 'import matplotlib.pyplot as plt\n'), ((6926, 6935), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6933, 6935), True, 'import matplotlib.pyplot as plt\n'), ((6986, 7010), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'n'], {}), '(0, np.pi, n)\n', (6997, 7010), True, 'import numpy as np\n'), ((7023, 7051), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n'], {}), '(0, 2 * np.pi, n)\n', (7034, 7051), True, 'import numpy as np\n'), ((7368, 7377), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7375, 7377), True, 'import matplotlib.pyplot as plt\n'), ((7431, 7448), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (7442, 7448), True, 'import numpy as np\n'), ((7690, 7711), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (7698, 7711), True, 'import matplotlib.pyplot as plt\n'), ((7950, 7977), 'numpy.zeros', 'np.zeros', (['(N, N)'], {'dtype': 'int'}), '((N, N), dtype=int)\n', (7958, 7977), True, 'import numpy as np\n'), ((10593, 10629), 'numpy.frombuffer', 'np.frombuffer', (['arg.data'], {'dtype': 'dtype'}), '(arg.data, dtype=dtype)\n', (10606, 10629), True, 'import numpy as np\n'), ((14824, 14831), 'queue.Queue', 'Queue', ([], {}), '()\n', (14829, 14831), False, 'from queue import Queue\n'), ((15337, 15382), 'threading.Thread', 'Thread', ([], {'name': '"""Producer"""', 'target': 'producer_loop'}), "(name='Producer', target=producer_loop)\n", (15343, 15382), False, 'from threading import Thread\n'), ((21379, 21419), 'os.execvp', 'os.execvp', (['args.command[0]', 'args.command'], {}), '(args.command[0], args.command)\n', (21388, 21419), False, 'import os\n'), ((7073, 7082), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (7079, 7082), True, 'import numpy as np\n'), ((7084, 7093), 'numpy.sin', 'np.sin', (['v'], {}), '(v)\n', (7090, 7093), True, 'import numpy as np\n'), ((7116, 7125), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (7122, 7125), True, 'import numpy as np\n'), ((7127, 7136), 'numpy.cos', 'np.cos', (['v'], {}), '(v)\n', (7133, 7136), True, 'import numpy as np\n'), ((7159, 7168), 'numpy.cos', 'np.cos', (['u'], {}), 
'(u)\n', (7165, 7168), True, 'import numpy as np\n'), ((7170, 7185), 'numpy.ones_like', 'np.ones_like', (['v'], {}), '(v)\n', (7182, 7185), True, 'import numpy as np\n'), ((9040, 9077), 'os.environ.get', 'os.environ.get', (['"""TEST_TMPDIR"""', '"""/tmp"""'], {}), "('TEST_TMPDIR', '/tmp')\n", (9054, 9077), False, 'import os\n'), ((9106, 9148), 'os.path.join', 'os.path.join', (['temp_directory', '"""python_rpc"""'], {}), "(temp_directory, 'python_rpc')\n", (9118, 9148), False, 'import os\n'), ((20274, 20291), 'os.stat', 'os.stat', (['filepath'], {}), '(filepath)\n', (20281, 20291), False, 'import os\n'), ((6314, 6334), 'numpy.linspace', 'np.linspace', (['(1)', '(9)', '(5)'], {}), '(1, 9, 5)\n', (6325, 6334), True, 'import numpy as np\n'), ((6486, 6495), 'numpy.sin', 'np.sin', (['V'], {}), '(V)\n', (6492, 6495), True, 'import numpy as np\n'), ((6530, 6539), 'numpy.sin', 'np.sin', (['V'], {}), '(V)\n', (6536, 6539), True, 'import numpy as np\n'), ((6574, 6584), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6581, 6584), True, 'import numpy as np\n'), ((16612, 16648), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (16631, 16648), False, 'import traceback\n'), ((16661, 16711), 'sys.stderr.write', 'sys.stderr.write', (['""" Stopping (--stop_on_error)\n"""'], {}), "(' Stopping (--stop_on_error)\\n')\n", (16677, 16711), False, 'import sys\n'), ((6474, 6483), 'numpy.cos', 'np.cos', (['U'], {}), '(U)\n', (6480, 6483), True, 'import numpy as np\n'), ((6518, 6527), 'numpy.sin', 'np.sin', (['U'], {}), '(U)\n', (6524, 6527), True, 'import numpy as np\n'), ((6562, 6571), 'numpy.cos', 'np.cos', (['V'], {}), '(V)\n', (6568, 6571), True, 'import numpy as np\n'), ((11578, 11614), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (11597, 11614), False, 'import traceback\n'), ((11631, 11686), 'sys.stderr.write', 'sys.stderr.write', (['""" Continuing (no --stop_on_error)\n"""'], {}), "(' Continuing (no --stop_on_error)\\n')\n", (11647, 11686), False, 'import sys\n'), ((12087, 12127), 'numpy.frombuffer', 'np.frombuffer', (['arg.data'], {'dtype': 'np.uint64'}), '(arg.data, dtype=np.uint64)\n', (12100, 12127), True, 'import numpy as np\n')]
|
""" Plot SV3 Results """
# LRGs
import sys
sys.path.append('/home/mehdi/github/LSSutils')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import healpy as hp
import numpy as np
from time import time
import fitsio as ft
from lssutils.lab import (make_overdensity, AnaFast,
histogram_cell, hpixsum, get_meandensity)
from lssutils.stats.pcc import pcc
from lssutils.dataviz import setup_color
import pandas as pd
root_dir = '/home/mehdi/data/dr9v0.57.0/'
def cutphotmask(aa, bits):
print(f'{len(aa)} before imaging veto')
keep = (aa['NOBS_G']>0) & (aa['NOBS_R']>0) & (aa['NOBS_Z']>0)
for biti in bits:
keep &= ((aa['MASKBITS'] & 2**biti)==0)
print(f'{keep.sum()} after imaging veto')
print(keep)
return keep
class SV3Data:
def __init__(self, target, region, mversion):
columns = ['RA', 'DEC', 'NOBS_R', 'NOBS_G', 'NOBS_Z', 'MASKBITS']
bits = [1, 5, 6, 7, 8, 9, 11, 12, 13]
self.nside = 256
p = f'{root_dir}sv3_v1/'
self.dcat = ft.read(f'{p}sv3target_{target}_{region}.fits',
columns=columns)
self.rcat = ft.read(f'{p}{region}_randoms-1-0x2.fits',
columns=columns)
self.wrf = ft.read(f'{p}sv3target_{target}_{region}.fits_EdWsys/wsys_v0.fits')['wsys']
self.wnn = ft.read(f'{p}sv3target_{target}_{region}.fits_MrWsys/wsys_{mversion}.fits')['wsys']
ix_d = cutphotmask(self.dcat, bits)
self.dcat = self.dcat[ix_d]
self.wrf = self.wrf[ix_d]
self.wnn = self.wnn[ix_d]
ix_r = cutphotmask(self.rcat, bits)
self.rcat = self.rcat[ix_r]
print(f'mean(wrf): {self.wrf.mean():.2f}, {self.wrf.min():.1f} < wrf < {self.wrf.max():.1f}')
print(f'mean(wnn): {self.wnn.mean():.2f}, {self.wnn.min():.1f} < wnn < {self.wnn.max():.1f}')
self.af = AnaFast()
tmpl = pd.read_hdf(f'/home/mehdi/data/templates/dr9/pixweight_dark_dr9m_nside{self.nside}.h5')
#self.cols = ['nstar', 'ebv', 'loghi']\
# +[f'{s}_{b}' for s in ['ccdskymag_mean', 'fwhm_mean', 'fwhm_min', 'fwhm_max', 'depth_total',
# 'mjd_mean', 'mjd_min', 'mjd_max', 'airmass_mean', 'exptime_total']\
# for b in ['g', 'r', 'z']]
self.cols = ['stardens', 'ebv', 'loghi',
'psfdepth_g', 'psfdepth_r', 'psfdepth_z',
'galdepth_g', 'galdepth_r', 'galdepth_z',
'psfsize_g', 'psfsize_r', 'psfsize_z',
'psfdepth_w1', 'psfdepth_w2']
self.tmpl = tmpl[self.cols].values
def make_delta(self):
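        # Pixel mask: keep HEALPix pixels that contain randoms, have finite imaging
        # templates, and have a coverage fraction above 0.2.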
nran = hpixsum(self.nside, self.rcat['RA'], self.rcat['DEC'])*1.0
self.mask = (nran > 0)
print(f'mask: {self.mask.sum()} pixels')
is_good = np.isfinite(self.tmpl).sum(axis=1) == len(self.cols)
self.mask &= is_good
print(f'mask: {self.mask.sum()} pixels (with imaging)')
self.frac = nran / nran[self.mask].mean()
self.mask &= (self.frac > 0.2)
print(f'mask: {self.mask.sum()} pixels (with frac>0.2)')
self.ngal_now = hpixsum(self.nside, self.dcat['RA'], self.dcat['DEC'])*1.0
self.ngal_rf = hpixsum(self.nside, self.dcat['RA'], self.dcat['DEC'], weights=self.wrf)
self.ngal_wnn = hpixsum(self.nside, self.dcat['RA'], self.dcat['DEC'], weights=self.wnn)
self.delta_now = make_overdensity(self.ngal_now, self.frac, self.mask)
self.delta_rf = make_overdensity(self.ngal_rf, self.frac, self.mask)
self.delta_wnn = make_overdensity(self.ngal_wnn, self.frac, self.mask)
def make_cl(self):
self.cl_now = self.af(self.delta_now, self.frac, self.mask)
self.cl_rf = self.af(self.delta_rf, self.frac, self.mask)
self.cl_nn = self.af(self.delta_wnn, self.frac, self.mask)
def make_nbar(self):
self.nbar_now = get_meandensity(self.ngal_now, self.frac, self.mask, self.tmpl)
self.nbar_rf = get_meandensity(self.ngal_rf, self.frac, self.mask, self.tmpl)
self.nbar_nn = get_meandensity(self.ngal_wnn, self.frac, self.mask, self.tmpl)
def make_pcc(self):
self.pcc_now = pcc(self.tmpl[self.mask], self.delta_now[self.mask], return_err=True)
self.pcc_rf = pcc(self.tmpl[self.mask], self.delta_rf[self.mask])
self.pcc_nn = pcc(self.tmpl[self.mask], self.delta_wnn[self.mask])
setup_color()
region = sys.argv[1] # NDECALS
target = sys.argv[2] # QSO
mversion = sys.argv[3]
assert region in ['NDECALS', 'SDECALS', 'NBMZLS', 'DES', 'SDECALS_noDES', 'DES_noLMC']
assert target in ['QSO', 'LRG', 'ELG', 'BGS_ANY']
print(f'target: {target}, region: {region}, mversion: {mversion}')
target_region = f'{target}-{region}-{mversion}'
t0 = time()
sv = SV3Data(target, region, mversion)
t1 = time()
print(f'Finished reading in {t1-t0:.1f} sec')
sv.make_delta()
t2 = time()
print(f'Finished deltas in {t2-t1:.1f} sec')
sv.make_cl()
t3 = time()
print(f'Finished Cell in {t3-t2:.1f} sec')
sv.make_nbar()
t4 = time()
print(f'Finished nbar in {t4-t3:.1f} sec')
sv.make_pcc()
t5 = time()
print(f'Finished pcc in {t5-t4:.1f} sec')
pp = PdfPages(''.join([f'{root_dir}sv3_v1/', target_region, '.pdf']))
# C_ell
methods = ['No weight', 'RF weight', 'NN weight']
cls = [sv.cl_now, sv.cl_rf, sv.cl_nn]
fg, ax = plt.subplots(figsize=(8, 6))
for n_i, cl_i in zip(methods, cls):
lb, clb = histogram_cell(cl_i['cl'], bins=np.logspace(0, np.log10(770), 10))
l_, = ax.plot(cl_i['cl'], lw=1, zorder=-1, alpha=0.2)
ax.plot(lb, clb, marker='.', mfc='w', ls='None', color=l_.get_color(), label=n_i)
ax.legend(title=target_region, frameon=False)
ax.set(xscale='log', yscale='log', ylim=(2.0e-8, 8.0e-3),
xlabel=r'$\ell$', ylabel=r'C$_{\ell}$')
#fg.savefig('cl_lrg_bmzls.png', dpi=300, bbox_inches='tight')
pp.savefig(bbox_inches='tight')
# Nbar
fig, ax = plt.subplots(ncols=3, nrows=5, figsize=(22, 25), sharey=True)
fig.subplots_adjust(hspace=0.35, wspace=0.1)
ax = ax.flatten()
nbars = [sv.nbar_now, sv.nbar_rf, sv.nbar_nn]
for name_i, nbar_i in zip(methods, nbars):
for j, nbar_ij in enumerate(nbar_i):
ax[j].plot(nbar_ij['bin_avg'], nbar_ij['nnbar'], marker='.', mfc='w', label=name_i)
if name_i == 'No weight':
ax[j].fill_between(nbar_ij['bin_avg'], 1-nbar_ij['nnbar_err'], 1+nbar_ij['nnbar_err'],
color='grey', alpha=0.2, zorder=-1)
ax[2].legend(title=target_region, frameon=False)
for j, colj in enumerate(sv.cols):
ax[j].set_xlabel(colj)
if j%3==0:
ax[j].set_ylabel('Mean Density')
pp.savefig(bbox_inches='tight')
# PCC
fg, ax = plt.subplots(figsize=(12, 4))
x_columns = np.arange(len(sv.cols))
ax.set_xticks(x_columns)
ax.set_xticklabels(sv.cols, rotation=90)
pcc_min, pcc_max = np.percentile(sv.pcc_now[1], [2.5, 97.5], axis=0)
ax.bar(x_columns-0.25, sv.pcc_now[0], width=0.25, label='No weight')
ax.bar(x_columns, sv.pcc_rf[0], width=0.25, label='RF')
ax.bar(x_columns+0.25, sv.pcc_nn[0], width=0.25, label='NN')
ax.fill_between(x_columns, pcc_min, pcc_max, color='grey', alpha=0.2, zorder=10)
ax.legend(title=target_region, frameon=False)
ax.grid(ls=':')
ax.set(ylabel='PCC')
pp.savefig(bbox_inches='tight')
pp.close()
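
# Added (not in the original script): the PDF holds three pages: the C_ell comparison,
# mean density vs. imaging templates, and the PCC bar chart.
print(f'Wrote {root_dir}sv3_v1/{target_region}.pdf in {time() - t0:.1f} sec total')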
|
[
"sys.path.append",
"pandas.read_hdf",
"lssutils.lab.make_overdensity",
"lssutils.stats.pcc.pcc",
"lssutils.lab.hpixsum",
"lssutils.lab.AnaFast",
"lssutils.lab.get_meandensity",
"numpy.isfinite",
"time.time",
"numpy.percentile",
"fitsio.read",
"numpy.log10",
"lssutils.dataviz.setup_color",
"matplotlib.pyplot.subplots"
] |
[((44, 90), 'sys.path.append', 'sys.path.append', (['"""/home/mehdi/github/LSSutils"""'], {}), "('/home/mehdi/github/LSSutils')\n", (59, 90), False, 'import sys\n'), ((4698, 4711), 'lssutils.dataviz.setup_color', 'setup_color', ([], {}), '()\n', (4709, 4711), False, 'from lssutils.dataviz import setup_color\n'), ((5059, 5065), 'time.time', 'time', ([], {}), '()\n', (5063, 5065), False, 'from time import time\n'), ((5110, 5116), 'time.time', 'time', ([], {}), '()\n', (5114, 5116), False, 'from time import time\n'), ((5185, 5191), 'time.time', 'time', ([], {}), '()\n', (5189, 5191), False, 'from time import time\n'), ((5257, 5263), 'time.time', 'time', ([], {}), '()\n', (5261, 5263), False, 'from time import time\n'), ((5329, 5335), 'time.time', 'time', ([], {}), '()\n', (5333, 5335), False, 'from time import time\n'), ((5400, 5406), 'time.time', 'time', ([], {}), '()\n', (5404, 5406), False, 'from time import time\n'), ((5631, 5659), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (5643, 5659), True, 'import matplotlib.pyplot as plt\n'), ((6199, 6260), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'nrows': '(5)', 'figsize': '(22, 25)', 'sharey': '(True)'}), '(ncols=3, nrows=5, figsize=(22, 25), sharey=True)\n', (6211, 6260), True, 'import matplotlib.pyplot as plt\n'), ((7007, 7036), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (7019, 7036), True, 'import matplotlib.pyplot as plt\n'), ((7159, 7208), 'numpy.percentile', 'np.percentile', (['sv.pcc_now[1]', '[2.5, 97.5]'], {'axis': '(0)'}), '(sv.pcc_now[1], [2.5, 97.5], axis=0)\n', (7172, 7208), True, 'import numpy as np\n'), ((1101, 1165), 'fitsio.read', 'ft.read', (['f"""{p}sv3target_{target}_{region}.fits"""'], {'columns': 'columns'}), "(f'{p}sv3target_{target}_{region}.fits', columns=columns)\n", (1108, 1165), True, 'import fitsio as ft\n'), ((1215, 1274), 'fitsio.read', 'ft.read', (['f"""{p}{region}_randoms-1-0x2.fits"""'], {'columns': 'columns'}), "(f'{p}{region}_randoms-1-0x2.fits', columns=columns)\n", (1222, 1274), True, 'import fitsio as ft\n'), ((2003, 2012), 'lssutils.lab.AnaFast', 'AnaFast', ([], {}), '()\n', (2010, 2012), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((2037, 2129), 'pandas.read_hdf', 'pd.read_hdf', (['f"""/home/mehdi/data/templates/dr9/pixweight_dark_dr9m_nside{self.nside}.h5"""'], {}), "(\n f'/home/mehdi/data/templates/dr9/pixweight_dark_dr9m_nside{self.nside}.h5')\n", (2048, 2129), True, 'import pandas as pd\n'), ((3460, 3532), 'lssutils.lab.hpixsum', 'hpixsum', (['self.nside', "self.dcat['RA']", "self.dcat['DEC']"], {'weights': 'self.wrf'}), "(self.nside, self.dcat['RA'], self.dcat['DEC'], weights=self.wrf)\n", (3467, 3532), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((3557, 3629), 'lssutils.lab.hpixsum', 'hpixsum', (['self.nside', "self.dcat['RA']", "self.dcat['DEC']"], {'weights': 'self.wnn'}), "(self.nside, self.dcat['RA'], self.dcat['DEC'], weights=self.wnn)\n", (3564, 3629), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((3664, 3717), 'lssutils.lab.make_overdensity', 'make_overdensity', (['self.ngal_now', 'self.frac', 'self.mask'], {}), '(self.ngal_now, self.frac, self.mask)\n', (3680, 3717), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((3743, 
3795), 'lssutils.lab.make_overdensity', 'make_overdensity', (['self.ngal_rf', 'self.frac', 'self.mask'], {}), '(self.ngal_rf, self.frac, self.mask)\n', (3759, 3795), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((3823, 3876), 'lssutils.lab.make_overdensity', 'make_overdensity', (['self.ngal_wnn', 'self.frac', 'self.mask'], {}), '(self.ngal_wnn, self.frac, self.mask)\n', (3839, 3876), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((4179, 4242), 'lssutils.lab.get_meandensity', 'get_meandensity', (['self.ngal_now', 'self.frac', 'self.mask', 'self.tmpl'], {}), '(self.ngal_now, self.frac, self.mask, self.tmpl)\n', (4194, 4242), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((4267, 4329), 'lssutils.lab.get_meandensity', 'get_meandensity', (['self.ngal_rf', 'self.frac', 'self.mask', 'self.tmpl'], {}), '(self.ngal_rf, self.frac, self.mask, self.tmpl)\n', (4282, 4329), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((4355, 4418), 'lssutils.lab.get_meandensity', 'get_meandensity', (['self.ngal_wnn', 'self.frac', 'self.mask', 'self.tmpl'], {}), '(self.ngal_wnn, self.frac, self.mask, self.tmpl)\n', (4370, 4418), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((4475, 4544), 'lssutils.stats.pcc.pcc', 'pcc', (['self.tmpl[self.mask]', 'self.delta_now[self.mask]'], {'return_err': '(True)'}), '(self.tmpl[self.mask], self.delta_now[self.mask], return_err=True)\n', (4478, 4544), False, 'from lssutils.stats.pcc import pcc\n'), ((4568, 4619), 'lssutils.stats.pcc.pcc', 'pcc', (['self.tmpl[self.mask]', 'self.delta_rf[self.mask]'], {}), '(self.tmpl[self.mask], self.delta_rf[self.mask])\n', (4571, 4619), False, 'from lssutils.stats.pcc import pcc\n'), ((4643, 4695), 'lssutils.stats.pcc.pcc', 'pcc', (['self.tmpl[self.mask]', 'self.delta_wnn[self.mask]'], {}), '(self.tmpl[self.mask], self.delta_wnn[self.mask])\n', (4646, 4695), False, 'from lssutils.stats.pcc import pcc\n'), ((1332, 1399), 'fitsio.read', 'ft.read', (['f"""{p}sv3target_{target}_{region}.fits_EdWsys/wsys_v0.fits"""'], {}), "(f'{p}sv3target_{target}_{region}.fits_EdWsys/wsys_v0.fits')\n", (1339, 1399), True, 'import fitsio as ft\n'), ((1427, 1502), 'fitsio.read', 'ft.read', (['f"""{p}sv3target_{target}_{region}.fits_MrWsys/wsys_{mversion}.fits"""'], {}), "(f'{p}sv3target_{target}_{region}.fits_MrWsys/wsys_{mversion}.fits')\n", (1434, 1502), True, 'import fitsio as ft\n'), ((2865, 2919), 'lssutils.lab.hpixsum', 'hpixsum', (['self.nside', "self.rcat['RA']", "self.rcat['DEC']"], {}), "(self.nside, self.rcat['RA'], self.rcat['DEC'])\n", (2872, 2919), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((3377, 3431), 'lssutils.lab.hpixsum', 'hpixsum', (['self.nside', "self.dcat['RA']", "self.dcat['DEC']"], {}), "(self.nside, self.dcat['RA'], self.dcat['DEC'])\n", (3384, 3431), False, 'from lssutils.lab import make_overdensity, AnaFast, histogram_cell, hpixsum, get_meandensity\n'), ((5759, 5772), 'numpy.log10', 'np.log10', (['(770)'], {}), '(770)\n', (5767, 5772), True, 'import numpy as np\n'), ((3031, 3053), 'numpy.isfinite', 'np.isfinite', (['self.tmpl'], {}), '(self.tmpl)\n', (3042, 3053), True, 'import numpy as np\n')]
|
"""This module implements a time series class with related methods."""
from collections import deque
from datetime import datetime, timedelta
from IPython.display import display
from matplotlib.axes import Axes
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple
from waad.utils.asset import Asset
from waad.utils.config import ANOMALIES_SCORES
from waad.utils.postgreSQL_utils import Table
class StatSeries:
"""This class defines a statistical series and implements some computing and plotting methods on it.
Attributes:
name (str): Name of the series.
series(List[float]): Contains the actual data of the series.
"""
def __init__(self, name: str, series: List[float]):
self.name = name
self.series = series
self.anomalies: List = []
def IQR_outlier_detection(self, factor: float = 1.5) -> List[int]:
"""Implement IQR outliers detection.
Args:
factor: IQR outliers detection factor (1.5 for standard method, up to 2 or 3 for only extrem outliers).
"""
series = pd.Series(self.series)
Q1 = series.quantile(0.25)
Q3 = series.quantile(0.75)
IQR = Q3 - Q1
self.anomalies = series[((series < Q1 - factor * IQR) | (series > Q3 + factor * IQR))].index.values.tolist()
return self.anomalies
def std_outlier_detection(self, factor: float = 2) -> List[int]:
"""Implement std outliers detection.
Args:
factor: std outliers detection factor (2 for standard method 95%, up to 3 for only extrem outliers).
Returns:
A ``List`` containing indexes of outlier values detected.
"""
series = pd.Series(self.series)
std = series.std()
mean = series.mean()
self.anomalies = series[((series < mean - factor * std) | (series > mean + factor * std))].index.values.tolist()
return self.anomalies
def custom_outlier_detection(self, indicator_bound: Optional[float] = None, IQR_factor: float = 2, sigma_factor: float = 3):
"""Implement custom IQR detection, enriched by a std criterion to be more robust.
Args:
            indicator_bound: Physical criterion that helps remove false positives. For example, with a series representing the number of authentications over time and containing
                a vast majority of zeros, the IQR would raise a lot of outliers even if they only represent an increase of 2 authentications from the median (apparently 0). This
                is due to the fact that an attacker's work pattern is highly non-Gaussian.
IQR_factor: IQR outliers detection factor (1.5 for standard method, up to 2 or 3 for only extrem outliers).
sigma_factor: std outliers detection factor (2 for standard method 95%, up to 3 for only extrem outliers).
Returns:
A ``List`` containing indexes of outlier values detected.
"""
series = pd.Series(self.series)
std = series.std()
mean = series.mean()
median = series.median()
Q1 = series.quantile(0.25)
Q3 = series.quantile(0.75)
IQR = Q3 - Q1
        # Combination of a custom (stricter) IQR method and the 3-sigma rule. Even if the distributions over time are not Gaussian, this combination is expected to surface outliers
outliers = series[((series < Q1 - IQR_factor * IQR) | (series > Q3 + IQR_factor * IQR)) & ((series < mean - sigma_factor * std) | (series > mean + sigma_factor * std))].index.values.tolist()
# Apply ``indicator_bound``
if indicator_bound is not None:
to_remove = []
for index in outliers:
if (indicator_bound > 0) and (series[index] < median + indicator_bound):
to_remove.append(index)
elif (indicator_bound < 0) and (series[index] > median + indicator_bound):
to_remove.append(index)
for index in to_remove:
outliers.remove(index)
self.anomalies = outliers
return outliers
def contains_isolated_values(self, percentage_null_values: int = 90) -> bool:
"""Detect if a series contains isolated values.
Args:
percentage_null_values: Percentage of zero values used as a threshold to evaluate if the series contains isolated points.
Returns:
A ``bool`` describing whether a time series contains isolated values or not.
"""
nb_non_null_values = np.flatnonzero(self.series).size
if nb_non_null_values < (1 - percentage_null_values / 100) * len(self.series) and len(self.series) >= 1:
return True
return False
def detect_isolated_groups(self) -> List[List[int]]:
"""Detect isolated groups of values in ``time_series``.
Returns:
Groups of consecutive indices, corresponding to the isolated values (separated by zeros).
"""
indices = np.flatnonzero(self.series)
groups: List = []
if indices.size == 0:
return groups
        current_group = [indices[0]]
        for index in indices[1:]:
            if index - current_group[-1] == 1:
                current_group.append(index)
            else:
                groups.append(current_group)
                current_group = [index]
        groups.append(current_group)  # keep the final group, which the loop never flushes
        return groups
def detect_abnormal_outbreak(self, legitimate_model_duration: int = 50):
"""Detect if there is an abnormal outbreak values in ``time_series`` if the first
`legitimate_model_duration` percentage of the series is zero."""
index = next((i for i, x in enumerate(self.series) if x), None)
if index is not None and index > legitimate_model_duration / 100 * len(self.series):
self.anomalies = [index]
@staticmethod
def detect_abnormal_outbreak_static(series: List[float], legitimate_model_duration: int = 50):
"""Detect if there is an abnormal outbreak values in ``time_series`` if the first
`legitimate_model_duration` percentage of the series is zero."""
index = next((i for i, x in enumerate(series) if x), None)
if index is not None and index > legitimate_model_duration / 100 * len(series):
return [index]
else:
return []
def compute_anomalies(self, anomalies_detector: Optional[Callable] = None, config: Optional[Dict[str, Dict]] = None):
if anomalies_detector is not None:
self.anomalies = anomalies_detector(self.series)
else:
if config is not None:
self.custom_outlier_detection(indicator_bound=config[self.name]["indicator_bound"])
else:
self.custom_outlier_detection()
def plot_series(self, ax: Axes):
"""Plot a series.
Examples:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from waad.utils.indicators import plot_series
>>>
>>> data = [355, 368, 0, 0, 0, 447, 466, 250, 367, 0, 0, 0, 320,
307, 395, 601, 258, 0, 0, 0, 382, 400, 326, 319, 0, 0,
304, 360, 327, 368, 0, 0, 0, 383, 327, 422, 290, 253, 0,
0, 446, 414, 381, 393, 0, 0, 0, 0, 373, 387, 312, 327,
0, 0, 370, 275, 436, 348]
>>>
>>> demo = StatSeries('demo', data)
>>> fig, ax = plt.subplots(figsize=(30, 5))
>>> demo.plot_series(ax)
.. testcleanup::
fig.savefig(f'{DOCTEST_FIGURES_PATH}/test.png')
.. figure:: ../../_static/doctest_figures/time_series_plot_example.png
:align: center
:alt: time series plot example
Args:
ax: ``Axes`` to plot series on.
"""
ax.plot([i for i in range(1, len(self.series) + 1)], self.series)
ax.set_title(self.name)
def get_figure(self, figsize: Tuple[int, int] = (20, 4)) -> Figure:
fig, ax = plt.subplots(figsize=figsize)
self.plot_series(ax)
return fig
def display(self):
fig = self.get_figure()
fig.axes[0].vlines(np.array(self.anomalies) + 1, *fig.axes[0].get_ylim(), colors="r")
display(fig)
class TimeSeries(StatSeries):
"""This class is a child of ``StatSeries`` taking into account a notion of time.
Attributes:
time_step (float): Time step in seconds between each index.
start_time (Optional[str]): Start time of the series in ISO format.
intermediary_content (Optional[Any]): Helper that keeps in memory intermediary content used during previous computations.
"""
def __init__(self, name: str, series: List[float], time_step: float, start_time: Optional[str] = None, intermediary_content: Optional[Any] = None):
super().__init__(name, series)
self.time_step = time_step
self.start_time = start_time
self.intermediary_content = intermediary_content
def get_anomalies_date(self):
res = []
for anomaly in self.anomalies:
try:
start = datetime.fromisoformat(self.start_time) + timedelta(seconds=self.time_step * anomaly)
end = start + timedelta(seconds=self.time_step)
res.append(f'{start.isoformat()} - {end.isoformat()}')
except Exception as e:
print(e)
pass
return res
def detailed_display(self):
self.display()
anomalies_date = self.get_anomalies_date()
for i, anomaly in enumerate(self.anomalies):
print(f"Anomaly found at time step {anomaly} / {anomalies_date[i]}")
print(f"Pic value of {self.series[anomaly]} on indicator")
if self.intermediary_content is not None:
print(f"Intermediary content : {self.intermediary_content[anomaly]}")
print()
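

# --- Usage sketch (added for illustration; the values below are made up) ---
# A minimal example of the outlier-detection workflow implemented above: build a
# StatSeries, run the combined IQR / 3-sigma detector, and inspect the flagged indexes.
if __name__ == "__main__":
    demo = StatSeries("demo", [3, 4, 3, 5, 4] * 4 + [50])
    demo.custom_outlier_detection(IQR_factor=2, sigma_factor=3)
    print(demo.anomalies)  # expected to flag index 20 (the isolated spike of 50)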
|
[
"datetime.datetime.fromisoformat",
"numpy.flatnonzero",
"IPython.display.display",
"numpy.array",
"pandas.Series",
"datetime.timedelta",
"matplotlib.pyplot.subplots"
] |
[((1209, 1231), 'pandas.Series', 'pd.Series', (['self.series'], {}), '(self.series)\n', (1218, 1231), True, 'import pandas as pd\n'), ((1834, 1856), 'pandas.Series', 'pd.Series', (['self.series'], {}), '(self.series)\n', (1843, 1856), True, 'import pandas as pd\n'), ((3106, 3128), 'pandas.Series', 'pd.Series', (['self.series'], {}), '(self.series)\n', (3115, 3128), True, 'import pandas as pd\n'), ((5118, 5145), 'numpy.flatnonzero', 'np.flatnonzero', (['self.series'], {}), '(self.series)\n', (5132, 5145), True, 'import numpy as np\n'), ((8233, 8262), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (8245, 8262), True, 'import matplotlib.pyplot as plt\n'), ((8469, 8481), 'IPython.display.display', 'display', (['fig'], {}), '(fig)\n', (8476, 8481), False, 'from IPython.display import display\n'), ((4655, 4682), 'numpy.flatnonzero', 'np.flatnonzero', (['self.series'], {}), '(self.series)\n', (4669, 4682), True, 'import numpy as np\n'), ((8394, 8418), 'numpy.array', 'np.array', (['self.anomalies'], {}), '(self.anomalies)\n', (8402, 8418), True, 'import numpy as np\n'), ((9351, 9390), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['self.start_time'], {}), '(self.start_time)\n', (9373, 9390), False, 'from datetime import datetime, timedelta\n'), ((9393, 9436), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(self.time_step * anomaly)'}), '(seconds=self.time_step * anomaly)\n', (9402, 9436), False, 'from datetime import datetime, timedelta\n'), ((9467, 9500), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.time_step'}), '(seconds=self.time_step)\n', (9476, 9500), False, 'from datetime import datetime, timedelta\n')]
|
import argparse
import collections
import numpy as np
parser = argparse.ArgumentParser(
description='Convert T5 predictions into a TREC-formatted run.')
parser.add_argument('--predictions', type=str, required=True, help='T5 predictions file.')
parser.add_argument('--query_run_ids', type=str, required=True,
help='File containing query doc id pairs paired with the T5\'s predictions file.')
parser.add_argument('--output', type=str, required=True, help='run file in the TREC format.')
args = parser.parse_args()
examples = collections.defaultdict(dict)
with open(args.query_run_ids) as f_query_run_ids, open(args.predictions) as f_pred:
for line_query_doc_id, line_pred in zip(f_query_run_ids, f_pred):
query_id, doc_id_a, doc_id_b = line_query_doc_id.strip().split()
doc_id_a = doc_id_a.split("#")[0]
doc_id_b = doc_id_b.split("#")[0]
_, score = line_pred.strip().split()
score = float(score)
if doc_id_a not in examples[query_id]:
examples[query_id][doc_id_a] = 0
if doc_id_b not in examples[query_id]:
examples[query_id][doc_id_b] = 0
        # The prediction's score is the model's log-probability that doc_id_a is more
        # relevant than doc_id_b, so exp(score) is credited to doc_id_a and the
        # complement to doc_id_b.
        examples[query_id][doc_id_a] += np.exp(score)
        examples[query_id][doc_id_b] += 1 - np.exp(score)
with open(args.output, 'w') as fout:
for query_id, doc_ids_scores in examples.items():
doc_ids_scores = [
(doc_id, scores)
for doc_id, scores in doc_ids_scores.items()]
doc_ids_scores.sort(key=lambda x: x[1], reverse=True)
        for rank, (doc_id, score) in enumerate(doc_ids_scores):
            # The normalizer 2 * (num_docs - 1) corresponds to the number of ordered
            # pairs each document appears in, turning the sum into an average score.
            fout.write(
                f'{query_id} Q0 {doc_id} {rank + 1} {score/(2*(len(doc_ids_scores) - 1))} duot5\n')
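
# Example invocation (a sketch; the script and file names here are hypothetical):
#   python convert_duot5_predictions_to_trec.py \
#       --predictions preds.txt --query_run_ids query_doc_pairs.txt --output run.trec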
|
[
"collections.defaultdict",
"numpy.exp",
"argparse.ArgumentParser"
] |
[((65, 158), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert T5 predictions into a TREC-formatted run."""'}), "(description=\n 'Convert T5 predictions into a TREC-formatted run.')\n", (88, 158), False, 'import argparse\n'), ((551, 580), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (574, 580), False, 'import collections\n'), ((1190, 1203), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (1196, 1203), True, 'import numpy as np\n'), ((1248, 1261), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (1254, 1261), True, 'import numpy as np\n')]
|
import numpy as np
import gym
from gym import Wrapper
from gym.spaces import Discrete, Box
from gym_pomdp.envs.rock import RockEnv, Obs
class RockSampleHistoryEnv(Wrapper):
"""
    Takes observations from a RockSample environment and stacks them into a history of length ``hist_len``.
"""
def __init__(self, env_id, hist_len=4, history_type='standard', kwargs={}):
"""
Parameters
----------
env_id - id of registered gym environment (currently only implemented for Rock-v0)
history_type - * one_hot: encodes the actions as one hot vector in the history
* one_hot_pos: one hot agent position and history of 'one_hot' observations
* standard: encodes the actions as action_index+1 (reason for this is that the initial history is
all zeros and we don't want to collide with action 0, which is move north)
* standard_pos: one hot agent position and history of 'standard' observations
* field_vision: encodes the actions as action_index+1 (reason: see 'standard')
and noisy observation for each rock
* field_vision_pos: one hot agent position and history of noisy observations for each rock
* fully_observable: one hot agent position and history of true observations for each rock
* mixed_full_pomdp: flag to indicate if full information is avail + true observations for each rock +
one hot agent position and history of 'one_hot' observations
* history_full: complete history of: flag to indicate if full information is avail (=1) + true observations for each rock +
one hot agent position + 'one_hot' action + noisy rock observation
* history_pomdp: complete history of: flag to indicate if full information is avail (=0) + zeros(num rocks) +
one hot agent position + 'one_hot' action + noisy rock observation
* history_rockpos_full: complete history of: flag to indicate if full information is avail (=1) + true observations for each rock +
one hot agent position + 'one_hot' action + noisy rock observation + one hot position for all rocks
hist_len - length of the history (hist_len==0 is without history, just current observation)
kwargs - optional arguments for initializing the wrapped environment
"""
if not env_id == "Rock-v0":
raise NotImplementedError("history only implemented for Rock-v0")
env = gym.make(env_id)
env.__init__(**kwargs)
super(RockSampleHistoryEnv, self).__init__(env)
self._wrapped_env = env
self.hist_len = hist_len
self.hist_type = history_type
self.history = None
self.full_obs_dim = 1
self.num_rocks = self._wrapped_env.num_rocks
self.size_x, self.size_y = self._wrapped_env.grid.get_size
# specify observation space and arrangement according to selected history type
if self.hist_type == "standard":
self.historyIgnoreIdx = 0
self.total_obs_dim = (1+1) # standard obs
self.observation_space = Box(low=0, high=(4+1)+self.num_rocks, shape=(self.total_obs_dim*(self.hist_len+1),)) # history of: ac + ob pairs
self.genObservation = self.generateObservationStandard
elif self.hist_type == "standard_pos":
self.historyIgnoreIdx = self.size_x + self.size_y
self.total_obs_dim = self.historyIgnoreIdx+(1+1) # agent pos + standard obs
self.observation_space = Box(low=0, high=(4+1)+self.num_rocks, shape=(self.historyIgnoreIdx + (1+1)*(self.hist_len+1),)) # agent pos + history of: ac + ob pairs
self.genObservation = self.generateObservationStandardPos
elif self.hist_type == "one_hot":
self.historyIgnoreIdx = 0
self.nact = self._wrapped_env.action_space.n
            self.total_obs_dim = (self.nact+1) # one hot encoded action + single ob
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.total_obs_dim*(self.hist_len+1),)) # history of: one_hot_ac + ob pairs
self.genObservation = self.generateObservationOneHot
elif self.hist_type == "one_hot_pos":
self.historyIgnoreIdx = self.size_x + self.size_y
self.nact = self._wrapped_env.action_space.n
            self.total_obs_dim = self.historyIgnoreIdx+(self.nact+1) # agent pos + one hot encoded action + single ob
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + (self.nact+1)*(self.hist_len+1),)) # agent pos + history of: one_hot_ac + ob pairs
self.genObservation = self.generateObservationOneHotPos
elif self.hist_type == "field_vision":
self.historyIgnoreIdx = 0
            self.total_obs_dim = (1+self.num_rocks) # action + ob (for each rock)
self.observation_space = Box(low=0, high=(4+1)+self.num_rocks, shape=(self.total_obs_dim*(self.hist_len+1),)) # history of: ac + ob (for each rock) pairs
self.genObservation = self.generateObservationFieldVision
elif self.hist_type == "field_vision_pos":
self.historyIgnoreIdx = self.size_x + self.size_y
self.total_obs_dim = (self.historyIgnoreIdx+self.num_rocks) # oneHot agent position + ob (for each rock)
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + self.num_rocks*(self.hist_len+1),)) # agent pos + history of: ac + ob (for each rock) pairs
self.genObservation = self.generateObservationFieldVisionPos
elif self.hist_type == "fully_observable":
self.historyIgnoreIdx = self.size_x + self.size_y
self.total_obs_dim = (self.historyIgnoreIdx+self.num_rocks) # oneHot agent position + ob (for each rock)
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + self.num_rocks*(self.hist_len+1),)) # agent pos + history of: ac + ob (for each rock) pairs
self.genObservation = self.generateObservationFullState
elif self.hist_type == "mixed_full_pomdp":
self.historyIgnoreIdx = 1 + self.num_rocks + self.size_x + self.size_y
self.nact = self._wrapped_env.action_space.n
self.total_obs_dim = self.historyIgnoreIdx+(self.nact+1) # ignore index + agent pos + one hot encoded action + single ob
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + (self.nact+1)*(self.hist_len+1),)) # flag + full obs + agent pos + history of: one_hot_ac + ob pairs
self.genObservation = self.generateObservationMixed
elif self.hist_type == "history_full":
self.historyIgnoreIdx = 0
self.nact = self._wrapped_env.action_space.n
self.total_obs_dim = 1 + self.size_x + self.size_y + self.num_rocks + self.nact + 1 # flag + one hot agent pos + rock obs + one hot action + single ob
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + self.total_obs_dim*(self.hist_len+1),))
self.genObservation = self.generateObservationHistoryFull
elif self.hist_type == "history_pomdp":
self.historyIgnoreIdx = 0
self.nact = self._wrapped_env.action_space.n
self.total_obs_dim = 1 + self.size_x + self.size_y + self.num_rocks + self.nact + 1 # flag + one hot agent pos + rock obs (zeros) + one hot action + single ob
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + self.total_obs_dim*(self.hist_len+1),))
self.genObservation = self.generateObservationHistoryPomdp
elif self.hist_type == "history_rockpos_full":
self.historyIgnoreIdx = (self.size_x + self.size_y) * self.num_rocks # num of one_hot encoded rock positions
self.nact = self._wrapped_env.action_space.n
self.total_history_ob_dim = 1 + self.size_x + self.size_y + self.num_rocks + self.nact + 1
self.total_obs_dim = self.historyIgnoreIdx + self.total_history_ob_dim # ignoreIndex + flag + one hot agent pos + rock obs + one hot action + single ob
self.observation_space = Box(low=0, high=len(Obs)-1, shape=(self.historyIgnoreIdx + self.total_history_ob_dim*(self.hist_len+1),))
self.genObservation = self.generateObservationHistoryRockPosFull
else:
raise NameError("error: wrong history type")
self.observation_dim_hist_part = self.total_obs_dim - self.historyIgnoreIdx
print('-------- History Info: --------')
print('total obs dim:', self.total_obs_dim)
print('original obs dim:', self.full_obs_dim)
print('history obs dim:', self.observation_dim_hist_part)
print('-------------------------------')
def reset_history(self, new_):
self.history = np.zeros((self.observation_space.shape[0]-self.historyIgnoreIdx, ))
self.history[0:self.observation_dim_hist_part] = new_[self.historyIgnoreIdx:]
def add_to_history(self, new_):
self.history[self.observation_dim_hist_part:] = self.history[:-self.observation_dim_hist_part]
self.history[0:self.observation_dim_hist_part] = new_[self.historyIgnoreIdx:]
def reset(self):
obs = self._wrapped_env.reset()
xpos, ypos = self.generatePosOneHot(False)
if self.hist_type == "standard":
new_ob = np.array([np.zeros(1), obs])
elif self.hist_type == "standard_pos":
std_ob = np.array([np.zeros(1), obs])
new_ob = np.concatenate([xpos, ypos, std_ob])
elif self.hist_type == "one_hot":
new_ob = np.concatenate([np.zeros(self.nact), [obs]])
elif self.hist_type == "one_hot_pos":
new_ob = np.concatenate([xpos, ypos,np.zeros(self.nact), [obs]])
elif self.hist_type == "field_vision":
observation_rocks = self.generateFieldVisionRockObservation(False)
new_ob = np.concatenate([np.zeros(1), observation_rocks])
elif self.hist_type == "field_vision_pos":
observation_rocks = self.generateFieldVisionRockObservation(False)
new_ob = np.concatenate([xpos, ypos, observation_rocks])
elif self.hist_type == "fully_observable":
observation_rocks = self.generateTrueRockOvservation(False)
new_ob = np.concatenate([xpos, ypos, observation_rocks])
elif self.hist_type == "mixed_full_pomdp" or self.hist_type == "history_full":
observation_rocks = self.generateTrueRockOvservation(False)
flag = 1
new_ob = np.concatenate([[flag],observation_rocks,xpos,ypos,np.zeros(self.nact),[obs]])
elif self.hist_type == "history_pomdp":
observation_rocks = np.zeros(self.num_rocks)
flag = 0
new_ob = np.concatenate([[flag],observation_rocks,xpos,ypos,np.zeros(self.nact),[obs]])
elif self.hist_type == "history_rockpos_full":
observation_rocks = self.generateTrueRockOvservation(False)
flag = 1
rock_pos = self.generateRockPosOneHot(False)
new_ob = np.concatenate([[flag],observation_rocks,xpos,ypos,np.zeros(self.nact),[obs],rock_pos])
else:
raise NameError("error: wrong history type")
self.reset_history(new_ob)
# we return copy so that we can modify the history without changing already returned histories
return np.concatenate([new_ob[0:self.historyIgnoreIdx],self.history])
def step(self, action):
next_obs, reward, done, info = self._wrapped_env.step(action)
ob = self.genObservation(next_obs, action, done)
self.add_to_history(ob)
# we return copy so that we can modify the history without changing already returned histories
return np.concatenate([ob[0:self.historyIgnoreIdx],self.history]), reward, done, info
def generateObservationStandard(self, ob, a, done):
return np.array([a+1, ob])
def generateObservationStandardPos(self, ob, a, done):
xpos, ypos = self.generatePosOneHot(done)
std_ob = np.array([a+1, ob])
return np.concatenate([xpos,ypos,std_ob])
def generateObservationOneHot(self, ob, a, done):
one_hot_a = np.zeros(self.nact, dtype=np.int)
one_hot_a[int(a)] = 1
return np.concatenate([one_hot_a, [ob]])
def generateObservationOneHotPos(self, ob, a, done):
xpos, ypos = self.generatePosOneHot(done)
one_hot_a = np.zeros(self.nact, dtype=np.int)
one_hot_a[int(a)] = 1
return np.concatenate([xpos,ypos,one_hot_a,[ob]])
def generateObservationFieldVision(self, ob, a, done):
# action + noisy value of all rocks
observation_rocks = self.generateFieldVisionRockObservation(done)
return np.concatenate([[a+1], observation_rocks])
def generateObservationFieldVisionPos(self, ob, a, done):
# agent pos + noisy value of all rocks
observation_rocks = self.generateFieldVisionRockObservation(done)
xpos, ypos = self.generatePosOneHot(done)
return np.concatenate([xpos,ypos,observation_rocks])
def generateObservationFullState(self, ob, a, done):
# agent pos + true value of all rocks
observation_rocks = self.generateTrueRockOvservation(done)
xpos, ypos = self.generatePosOneHot(done)
return np.concatenate([xpos,ypos,observation_rocks])
def generateObservationMixed(self, ob, a, done):
# flag + true value of all rocks + agent pos + history of: one_hot_ac + noisy ob pairs
flag = 1
observation_rocks = self.generateTrueRockOvservation(done)
xpos, ypos = self.generatePosOneHot(done)
one_hot_a = np.zeros(self.nact, dtype=np.int)
one_hot_a[int(a)] = 1
return np.concatenate([[flag],observation_rocks,xpos,ypos,one_hot_a,[ob]])
def generateObservationHistoryFull(self, ob, a, done):
# flag + one hot agent pos + rock obs + one hot action + single ob
return self.generateObservationMixed(ob, a, done)
def generateObservationHistoryPomdp(self, ob, a, done):
# flag + one hot agent pos + rock obs (zeros) + one hot action + single ob
flag = 0
observation_rocks = np.zeros(self.num_rocks)
xpos, ypos = self.generatePosOneHot(done)
one_hot_a = np.zeros(self.nact, dtype=np.int)
one_hot_a[int(a)] = 1
return np.concatenate([[flag],observation_rocks,xpos,ypos,one_hot_a,[ob]])
def generateObservationHistoryRockPosFull(self, ob, a, done):
# num of one_hot encoded rock positions
# flag + one hot agent pos + rock obs + one hot action + single ob + one hot rock positions
rock_pos = self.generateRockPosOneHot(done)
full_ob = self.generateObservationMixed(ob, a, done)
return np.concatenate([full_ob, rock_pos])
def generateFieldVisionRockObservation(self, done):
# noisy value of all rocks
observation_rocks = np.zeros((self.num_rocks,))
if not done:
for rock in range(0, self.num_rocks):
if self._wrapped_env.state.rocks[rock].status == 0: # collected
ob = Obs.NULL.value
else:
ob = self._wrapped_env._sample_ob(self._wrapped_env.state.agent_pos, self._wrapped_env.state.rocks[rock])
observation_rocks[rock] = ob
return observation_rocks
def generateTrueRockOvservation(self, done):
# true value of all rocks
observation_rocks = np.zeros((self.num_rocks,))
if not done:
for rock in range(0, self.num_rocks):
rock_status = self._wrapped_env.state.rocks[rock].status
if rock_status == 1: #good
observation_rocks[rock] = Obs.GOOD.value
elif rock_status == -1: #bad
observation_rocks[rock] = Obs.BAD.value
else: # collected
observation_rocks[rock] = Obs.NULL.value
return observation_rocks
def generatePosOneHot(self, done):
xpos=np.zeros(self.size_x)
ypos=np.zeros(self.size_y)
if not done:
# one hot encoded x and y position of the agent
xpos = np.zeros(self.size_x, dtype=np.int)
xpos[int(self._wrapped_env.state.agent_pos.x)] = 1
ypos = np.zeros(self.size_y, dtype=np.int)
ypos[int(self._wrapped_env.state.agent_pos.y)] = 1
return xpos, ypos
def generateRockPosOneHot(self, done):
rocks = []
if not done:
for rock in self._wrapped_env._rock_pos:
# one hot encoded x and y position of the rocks
xpos = np.zeros(self.size_x, dtype=np.int)
xpos[int(rock.x)] = 1
ypos = np.zeros(self.size_y, dtype=np.int)
ypos[int(rock.y)] = 1
rocks.append(xpos)
rocks.append(ypos)
if len(rocks) > 0:
return np.hstack(rocks)
else:
return np.zeros((self.size_x+self.size_y)*self.num_rocks)
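

# --- Usage sketch (added for illustration; assumes gym_pomdp's "Rock-v0" is registered) ---
# env = RockSampleHistoryEnv("Rock-v0", hist_len=4, history_type="one_hot_pos")
# ob = env.reset()                      # flattened history, length = observation_space.shape[0]
# ob, reward, done, info = env.step(env.action_space.sample())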
|
[
"gym.make",
"numpy.zeros",
"numpy.hstack",
"numpy.array",
"gym.spaces.Box",
"numpy.concatenate"
] |
[((2699, 2715), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (2707, 2715), False, 'import gym\n'), ((9135, 9203), 'numpy.zeros', 'np.zeros', (['(self.observation_space.shape[0] - self.historyIgnoreIdx,)'], {}), '((self.observation_space.shape[0] - self.historyIgnoreIdx,))\n', (9143, 9203), True, 'import numpy as np\n'), ((11736, 11799), 'numpy.concatenate', 'np.concatenate', (['[new_ob[0:self.historyIgnoreIdx], self.history]'], {}), '([new_ob[0:self.historyIgnoreIdx], self.history])\n', (11750, 11799), True, 'import numpy as np\n'), ((12256, 12277), 'numpy.array', 'np.array', (['[a + 1, ob]'], {}), '([a + 1, ob])\n', (12264, 12277), True, 'import numpy as np\n'), ((12403, 12424), 'numpy.array', 'np.array', (['[a + 1, ob]'], {}), '([a + 1, ob])\n', (12411, 12424), True, 'import numpy as np\n'), ((12438, 12474), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, std_ob]'], {}), '([xpos, ypos, std_ob])\n', (12452, 12474), True, 'import numpy as np\n'), ((12548, 12581), 'numpy.zeros', 'np.zeros', (['self.nact'], {'dtype': 'np.int'}), '(self.nact, dtype=np.int)\n', (12556, 12581), True, 'import numpy as np\n'), ((12627, 12660), 'numpy.concatenate', 'np.concatenate', (['[one_hot_a, [ob]]'], {}), '([one_hot_a, [ob]])\n', (12641, 12660), True, 'import numpy as np\n'), ((12789, 12822), 'numpy.zeros', 'np.zeros', (['self.nact'], {'dtype': 'np.int'}), '(self.nact, dtype=np.int)\n', (12797, 12822), True, 'import numpy as np\n'), ((12868, 12913), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, one_hot_a, [ob]]'], {}), '([xpos, ypos, one_hot_a, [ob]])\n', (12882, 12913), True, 'import numpy as np\n'), ((13104, 13148), 'numpy.concatenate', 'np.concatenate', (['[[a + 1], observation_rocks]'], {}), '([[a + 1], observation_rocks])\n', (13118, 13148), True, 'import numpy as np\n'), ((13396, 13443), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, observation_rocks]'], {}), '([xpos, ypos, observation_rocks])\n', (13410, 13443), True, 'import numpy as np\n'), ((13678, 13725), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, observation_rocks]'], {}), '([xpos, ypos, observation_rocks])\n', (13692, 13725), True, 'import numpy as np\n'), ((14027, 14060), 'numpy.zeros', 'np.zeros', (['self.nact'], {'dtype': 'np.int'}), '(self.nact, dtype=np.int)\n', (14035, 14060), True, 'import numpy as np\n'), ((14106, 14178), 'numpy.concatenate', 'np.concatenate', (['[[flag], observation_rocks, xpos, ypos, one_hot_a, [ob]]'], {}), '([[flag], observation_rocks, xpos, ypos, one_hot_a, [ob]])\n', (14120, 14178), True, 'import numpy as np\n'), ((14556, 14580), 'numpy.zeros', 'np.zeros', (['self.num_rocks'], {}), '(self.num_rocks)\n', (14564, 14580), True, 'import numpy as np\n'), ((14651, 14684), 'numpy.zeros', 'np.zeros', (['self.nact'], {'dtype': 'np.int'}), '(self.nact, dtype=np.int)\n', (14659, 14684), True, 'import numpy as np\n'), ((14730, 14802), 'numpy.concatenate', 'np.concatenate', (['[[flag], observation_rocks, xpos, ypos, one_hot_a, [ob]]'], {}), '([[flag], observation_rocks, xpos, ypos, one_hot_a, [ob]])\n', (14744, 14802), True, 'import numpy as np\n'), ((15142, 15177), 'numpy.concatenate', 'np.concatenate', (['[full_ob, rock_pos]'], {}), '([full_ob, rock_pos])\n', (15156, 15177), True, 'import numpy as np\n'), ((15298, 15325), 'numpy.zeros', 'np.zeros', (['(self.num_rocks,)'], {}), '((self.num_rocks,))\n', (15306, 15325), True, 'import numpy as np\n'), ((15856, 15883), 'numpy.zeros', 'np.zeros', (['(self.num_rocks,)'], {}), '((self.num_rocks,))\n', (15864, 15883), True, 'import numpy 
as np\n'), ((16423, 16444), 'numpy.zeros', 'np.zeros', (['self.size_x'], {}), '(self.size_x)\n', (16431, 16444), True, 'import numpy as np\n'), ((16458, 16479), 'numpy.zeros', 'np.zeros', (['self.size_y'], {}), '(self.size_y)\n', (16466, 16479), True, 'import numpy as np\n'), ((3342, 3437), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(4 + 1 + self.num_rocks)', 'shape': '(self.total_obs_dim * (self.hist_len + 1),)'}), '(low=0, high=4 + 1 + self.num_rocks, shape=(self.total_obs_dim * (self.\n hist_len + 1),))\n', (3345, 3437), False, 'from gym.spaces import Discrete, Box\n'), ((12105, 12164), 'numpy.concatenate', 'np.concatenate', (['[ob[0:self.historyIgnoreIdx], self.history]'], {}), '([ob[0:self.historyIgnoreIdx], self.history])\n', (12119, 12164), True, 'import numpy as np\n'), ((16580, 16615), 'numpy.zeros', 'np.zeros', (['self.size_x'], {'dtype': 'np.int'}), '(self.size_x, dtype=np.int)\n', (16588, 16615), True, 'import numpy as np\n'), ((16698, 16733), 'numpy.zeros', 'np.zeros', (['self.size_y'], {'dtype': 'np.int'}), '(self.size_y, dtype=np.int)\n', (16706, 16733), True, 'import numpy as np\n'), ((17334, 17350), 'numpy.hstack', 'np.hstack', (['rocks'], {}), '(rocks)\n', (17343, 17350), True, 'import numpy as np\n'), ((17384, 17438), 'numpy.zeros', 'np.zeros', (['((self.size_x + self.size_y) * self.num_rocks)'], {}), '((self.size_x + self.size_y) * self.num_rocks)\n', (17392, 17438), True, 'import numpy as np\n'), ((3756, 3863), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(4 + 1 + self.num_rocks)', 'shape': '(self.historyIgnoreIdx + (1 + 1) * (self.hist_len + 1),)'}), '(low=0, high=4 + 1 + self.num_rocks, shape=(self.historyIgnoreIdx + (1 +\n 1) * (self.hist_len + 1),))\n', (3759, 3863), False, 'from gym.spaces import Discrete, Box\n'), ((9837, 9873), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, std_ob]'], {}), '([xpos, ypos, std_ob])\n', (9851, 9873), True, 'import numpy as np\n'), ((17047, 17082), 'numpy.zeros', 'np.zeros', (['self.size_x'], {'dtype': 'np.int'}), '(self.size_x, dtype=np.int)\n', (17055, 17082), True, 'import numpy as np\n'), ((17144, 17179), 'numpy.zeros', 'np.zeros', (['self.size_y'], {'dtype': 'np.int'}), '(self.size_y, dtype=np.int)\n', (17152, 17179), True, 'import numpy as np\n'), ((9700, 9711), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (9708, 9711), True, 'import numpy as np\n'), ((9797, 9808), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (9805, 9808), True, 'import numpy as np\n'), ((5133, 5228), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(4 + 1 + self.num_rocks)', 'shape': '(self.total_obs_dim * (self.hist_len + 1),)'}), '(low=0, high=4 + 1 + self.num_rocks, shape=(self.total_obs_dim * (self.\n hist_len + 1),))\n', (5136, 5228), False, 'from gym.spaces import Discrete, Box\n'), ((9953, 9972), 'numpy.zeros', 'np.zeros', (['self.nact'], {}), '(self.nact)\n', (9961, 9972), True, 'import numpy as np\n'), ((10076, 10095), 'numpy.zeros', 'np.zeros', (['self.nact'], {}), '(self.nact)\n', (10084, 10095), True, 'import numpy as np\n'), ((10452, 10499), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, observation_rocks]'], {}), '([xpos, ypos, observation_rocks])\n', (10466, 10499), True, 'import numpy as np\n'), ((10268, 10279), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (10276, 10279), True, 'import numpy as np\n'), ((10644, 10691), 'numpy.concatenate', 'np.concatenate', (['[xpos, ypos, observation_rocks]'], {}), '([xpos, ypos, observation_rocks])\n', (10658, 10691), True, 'import numpy as np\n'), 
((11052, 11076), 'numpy.zeros', 'np.zeros', (['self.num_rocks'], {}), '(self.num_rocks)\n', (11060, 11076), True, 'import numpy as np\n'), ((10944, 10963), 'numpy.zeros', 'np.zeros', (['self.nact'], {}), '(self.nact)\n', (10952, 10963), True, 'import numpy as np\n'), ((11170, 11189), 'numpy.zeros', 'np.zeros', (['self.nact'], {}), '(self.nact)\n', (11178, 11189), True, 'import numpy as np\n'), ((11475, 11494), 'numpy.zeros', 'np.zeros', (['self.nact'], {}), '(self.nact)\n', (11483, 11494), True, 'import numpy as np\n')]
|
import numpy as np
# The last dimensions of box_1 and box_2 are both 4. (x, y, w, h)
class IOU(object):
def __init__(self, box_1, box_2):
self.box_1_min, self.box_1_max = self.__get_box_min_and_max(box_1)
self.box_2_min, self.box_2_max = self.__get_box_min_and_max(box_2)
self.box_1_area = self.__get_box_area(box_1)
self.box_2_area = self.__get_box_area(box_2)
@staticmethod
def __get_box_min_and_max(box):
box_xy = box[..., 0:2]
box_wh = box[..., 2:4]
box_min = box_xy - box_wh / 2
box_max = box_xy + box_wh / 2
return box_min, box_max
@staticmethod
def __get_box_area(box):
return box[..., 2] * box[..., 3]
def calculate_iou(self):
intersect_min = np.maximum(self.box_1_min, self.box_2_min)
intersect_max = np.minimum(self.box_1_max, self.box_2_max)
intersect_wh = np.maximum(intersect_max - intersect_min, 0.0)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
union_area = self.box_1_area + self.box_2_area - intersect_area
iou = intersect_area / union_area
return iou
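

# --- Usage sketch (added for illustration; the box values are made up) ---
if __name__ == "__main__":
    box_a = np.array([5.0, 5.0, 4.0, 4.0])  # (x, y, w, h) -> corners (3, 3)-(7, 7)
    box_b = np.array([6.0, 6.0, 4.0, 4.0])  # corners (4, 4)-(8, 8)
    # intersection is 3 x 3 = 9, union is 16 + 16 - 9 = 23, so IoU ~= 0.391
    print(IOU(box_a, box_b).calculate_iou())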
|
[
"numpy.minimum",
"numpy.maximum"
] |
[((768, 810), 'numpy.maximum', 'np.maximum', (['self.box_1_min', 'self.box_2_min'], {}), '(self.box_1_min, self.box_2_min)\n', (778, 810), True, 'import numpy as np\n'), ((835, 877), 'numpy.minimum', 'np.minimum', (['self.box_1_max', 'self.box_2_max'], {}), '(self.box_1_max, self.box_2_max)\n', (845, 877), True, 'import numpy as np\n'), ((901, 947), 'numpy.maximum', 'np.maximum', (['(intersect_max - intersect_min)', '(0.0)'], {}), '(intersect_max - intersect_min, 0.0)\n', (911, 947), True, 'import numpy as np\n')]
|
'''
Function:
Algorithm implementation.
Author:
Charles
WeChat Official Account:
Charles的皮卡丘
'''
import cv2
import math
import numpy as np
from PIL import Image
from scipy import signal
from utils.utils import *
from scipy.ndimage import interpolation
from scipy.sparse.linalg import spsolve
from scipy.sparse import csr_matrix, spdiags
import warnings
warnings.filterwarnings("ignore")
'''pencil drawing'''
class PencilDrawing():
def __init__(self, **kwargs):
self.kernel_size_scale = kwargs.get('kernel_size_scale')
self.stroke_width = kwargs.get('stroke_width')
self.weights_color = kwargs.get('weights_color')
self.weights_gray = kwargs.get('weights_gray')
self.texture_path = kwargs.get('texture_path')
self.color_depth = kwargs.get('color_depth')
'''in order to call'''
def draw(self, image_path, mode='gray', savename='output.jpg'):
img = cv2.imread(image_path)
if mode == 'color':
'''
img_ycbcr = cv2.cvtColor(img, cv2.COLOR_BGR2YCR_CB)
Y = img_ycbcr[:, :, 0]
img_ycbcr_new = img_ycbcr.copy()
img_ycbcr_new.flags.writeable = True
img_ycbcr_new[:, :, 0] = self.__strokeGeneration(Y) * self.__toneGeneration(Y) * 255
img_out = cv2.cvtColor(img_ycbcr_new, cv2.COLOR_YCR_CB2BGR)
img = cv2.imwrite(savename, img_out)
'''
img = Image.open(image_path)
img_ycbcr = img.convert('YCbCr')
img = np.ndarray((img.size[1], img.size[0], 3), 'u1', img_ycbcr.tobytes())
img_out = img.copy()
img_out.flags.writeable = True
img_out[:, :, 0] = self.__strokeGeneration(img[:, :, 0]) * self.__toneGeneration(img[:, :, 0]) * 255
img_out = cv2.cvtColor(img_out, cv2.COLOR_YCR_CB2BGR)
img_out = Image.fromarray(img_out)
img_out.save(savename)
elif mode == 'gray':
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_s = self.__strokeGeneration(img)
img_t = self.__toneGeneration(img)
img_out = img_s * img_t * 255
img = cv2.imwrite(savename, img_out)
else:
raise ValueError('PencilDrawing.draw unsupport mode <%s>...' % mode)
'''pencil stroke generation'''
def __strokeGeneration(self, img):
h, w = img.shape
kernel_size = int(min(w, h) * self.kernel_size_scale)
kernel_size += kernel_size % 2
# compute gradients, yielding magnitude
img_double = im2double(img)
dx = np.concatenate((np.abs(img_double[:, 0:-1]-img_double[:, 1:]), np.zeros((h, 1))), 1)
dy = np.concatenate((np.abs(img_double[0:-1, :]-img_double[1:, :]), np.zeros((1, w))), 0)
img_gradient = np.sqrt(np.power(dx, 2) + np.power(dy, 2))
# choose eight reference directions
line_segments = np.zeros((kernel_size, kernel_size, 8))
for i in [0, 1, 2, 7]:
for x in range(kernel_size):
y = round((x + 1 - kernel_size / 2) * math.tan(math.pi / 8 * i))
y = kernel_size / 2 - y
if y > 0 and y <= kernel_size:
line_segments[int(y-1), x, i] = 1
if i == 7:
line_segments[:, :, 3] = np.rot90(line_segments[:, :, 7], -1)
else:
line_segments[:, :, i+4] = np.rot90(line_segments[:, :, i], 1)
# get response maps for the reference directions
response_maps = np.zeros((h, w, 8))
for i in range(8):
response_maps[:, :, i] = signal.convolve2d(img_gradient, line_segments[:, :, i], 'same')
response_maps_maxvalueidx = response_maps.argmax(axis=-1)
# the classification is performed by selecting the maximum value among the responses in all directions
magnitude_maps = np.zeros_like(response_maps)
for i in range(8):
magnitude_maps[:, :, i] = img_gradient * (response_maps_maxvalueidx == i).astype('float')
# line shaping
stroke_maps = np.zeros_like(response_maps)
for i in range(8):
stroke_maps[:, :, i] = signal.convolve2d(magnitude_maps[:, :, i], line_segments[:, :, i], 'same')
stroke_maps = stroke_maps.sum(axis=-1)
stroke_maps = (stroke_maps - stroke_maps.min()) / (stroke_maps.max() - stroke_maps.min())
stroke_maps = (1 - stroke_maps) * self.stroke_width
return stroke_maps
'''pencil tone drawing'''
def __toneGeneration(self, img, mode=None):
height, width = img.shape
# histogram matching
img_hist_match = self.__histogramMatching(img, mode) ** self.color_depth
# get texture
texture = cv2.imread(self.texture_path)
texture = cv2.cvtColor(texture, cv2.COLOR_BGR2GRAY)[99: texture.shape[0]-100, 99: texture.shape[1]-100]
ratio = 0.2 * min(img.shape[0], img.shape[1]) / float(1024)
texture = interpolation.zoom(texture, (ratio, ratio))
texture = im2double(texture)
texture = horizontalStitch(texture, img.shape[1])
texture = verticalStitch(texture, img.shape[0])
size = img.size
nzmax = 2 * (size-1)
i = np.zeros((nzmax, 1))
j = np.zeros((nzmax, 1))
s = np.zeros((nzmax, 1))
for m in range(1, nzmax+1):
i[m-1] = int(math.ceil((m + 0.1) / 2)) - 1
j[m-1] = int(math.ceil((m - 0.1) / 2)) - 1
s[m-1] = -2 * (m % 2) + 1
dx = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))
nzmax = 2 * (size - img.shape[1])
i = np.zeros((nzmax, 1))
j = np.zeros((nzmax, 1))
s = np.zeros((nzmax, 1))
for m in range(1, nzmax+1):
i[m-1, :] = int(math.ceil((m - 1 + 0.1) / 2) + img.shape[1] * (m % 2)) - 1
j[m-1, :] = math.ceil((m - 0.1) / 2) - 1
s[m-1, :] = -2 * (m % 2) + 1
dy = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))
texture_sparse = spdiags(np.log(np.reshape(texture.T, (1, texture.size), order="f") + 0.01), 0, size, size)
img_hist_match1d = np.log(np.reshape(img_hist_match.T, (1, img_hist_match.size), order="f").T + 0.01)
nat = texture_sparse.T.dot(img_hist_match1d)
a = np.dot(texture_sparse.T, texture_sparse)
b = dx.T.dot(dx)
c = dy.T.dot(dy)
mat = a + 0.2 * (b + c)
beta1d = spsolve(mat, nat)
beta = np.reshape(beta1d, (img.shape[0], img.shape[1]), order="c")
tone = texture ** beta
tone = (tone - tone.min()) / (tone.max() - tone.min())
return tone
'''histogram matching'''
def __histogramMatching(self, img, mode=None):
weights = self.weights_color if mode == 'color' else self.weights_gray
# img
histogram_img = cv2.calcHist([img], [0], None, [256], [0, 256])
histogram_img.resize(histogram_img.size)
histogram_img /= histogram_img.sum()
histogram_img_cdf = np.cumsum(histogram_img)
# natural
histogram_natural = np.zeros_like(histogram_img)
for x in range(256):
histogram_natural[x] = weights[0] * Laplace(x) + weights[1] * Uniform(x) + weights[2] * Gaussian(x)
histogram_natural /= histogram_natural.sum()
histogram_natural_cdf = np.cumsum(histogram_natural)
# do the histogram matching
img_hist_match = np.zeros_like(img)
for x in range(img.shape[0]):
for y in range(img.shape[1]):
value = histogram_img_cdf[img[x, y]]
img_hist_match[x, y] = (np.abs(histogram_natural_cdf-value)).argmin()
img_hist_match = np.true_divide(img_hist_match, 255)
return img_hist_match
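

# --- Usage sketch (added for illustration; every parameter value and path below is illustrative only) ---
# pencil = PencilDrawing(kernel_size_scale=1 / 40, stroke_width=1, color_depth=1,
#                        weights_color=[62, 30, 5], weights_gray=[76, 22, 2],
#                        texture_path='textures/pencil_texture.jpg')
# pencil.draw('input.jpg', mode='gray', savename='output.jpg')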
|
[
"numpy.abs",
"numpy.rot90",
"numpy.zeros_like",
"numpy.true_divide",
"scipy.signal.convolve2d",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.power",
"scipy.ndimage.interpolation.zoom",
"numpy.cumsum",
"numpy.reshape",
"scipy.sparse.linalg.spsolve",
"math.ceil",
"cv2.calcHist",
"scipy.sparse.csr_matrix",
"numpy.dot",
"warnings.filterwarnings",
"math.tan",
"numpy.zeros",
"PIL.Image.open",
"cv2.imread",
"PIL.Image.fromarray"
] |
[((338, 371), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (361, 371), False, 'import warnings\n'), ((850, 872), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (860, 872), False, 'import cv2\n'), ((2535, 2574), 'numpy.zeros', 'np.zeros', (['(kernel_size, kernel_size, 8)'], {}), '((kernel_size, kernel_size, 8))\n', (2543, 2574), True, 'import numpy as np\n'), ((3032, 3051), 'numpy.zeros', 'np.zeros', (['(h, w, 8)'], {}), '((h, w, 8))\n', (3040, 3051), True, 'import numpy as np\n'), ((3349, 3377), 'numpy.zeros_like', 'np.zeros_like', (['response_maps'], {}), '(response_maps)\n', (3362, 3377), True, 'import numpy as np\n'), ((3525, 3553), 'numpy.zeros_like', 'np.zeros_like', (['response_maps'], {}), '(response_maps)\n', (3538, 3553), True, 'import numpy as np\n'), ((4110, 4139), 'cv2.imread', 'cv2.imread', (['self.texture_path'], {}), '(self.texture_path)\n', (4120, 4139), False, 'import cv2\n'), ((4320, 4363), 'scipy.ndimage.interpolation.zoom', 'interpolation.zoom', (['texture', '(ratio, ratio)'], {}), '(texture, (ratio, ratio))\n', (4338, 4363), False, 'from scipy.ndimage import interpolation\n'), ((4544, 4564), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (4552, 4564), True, 'import numpy as np\n'), ((4571, 4591), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (4579, 4591), True, 'import numpy as np\n'), ((4598, 4618), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (4606, 4618), True, 'import numpy as np\n'), ((4777, 4835), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(s.T[0], (i.T[0], j.T[0]))'], {'shape': '(size, size)'}), '((s.T[0], (i.T[0], j.T[0])), shape=(size, size))\n', (4787, 4835), False, 'from scipy.sparse import csr_matrix, spdiags\n'), ((4878, 4898), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (4886, 4898), True, 'import numpy as np\n'), ((4905, 4925), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (4913, 4925), True, 'import numpy as np\n'), ((4932, 4952), 'numpy.zeros', 'np.zeros', (['(nzmax, 1)'], {}), '((nzmax, 1))\n', (4940, 4952), True, 'import numpy as np\n'), ((5144, 5202), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(s.T[0], (i.T[0], j.T[0]))'], {'shape': '(size, size)'}), '((s.T[0], (i.T[0], j.T[0])), shape=(size, size))\n', (5154, 5202), False, 'from scipy.sparse import csr_matrix, spdiags\n'), ((5470, 5510), 'numpy.dot', 'np.dot', (['texture_sparse.T', 'texture_sparse'], {}), '(texture_sparse.T, texture_sparse)\n', (5476, 5510), True, 'import numpy as np\n'), ((5586, 5603), 'scipy.sparse.linalg.spsolve', 'spsolve', (['mat', 'nat'], {}), '(mat, nat)\n', (5593, 5603), False, 'from scipy.sparse.linalg import spsolve\n'), ((5613, 5672), 'numpy.reshape', 'np.reshape', (['beta1d', '(img.shape[0], img.shape[1])'], {'order': '"""c"""'}), "(beta1d, (img.shape[0], img.shape[1]), order='c')\n", (5623, 5672), True, 'import numpy as np\n'), ((5942, 5989), 'cv2.calcHist', 'cv2.calcHist', (['[img]', '[0]', 'None', '[256]', '[0, 256]'], {}), '([img], [0], None, [256], [0, 256])\n', (5954, 5989), False, 'import cv2\n'), ((6094, 6118), 'numpy.cumsum', 'np.cumsum', (['histogram_img'], {}), '(histogram_img)\n', (6103, 6118), True, 'import numpy as np\n'), ((6153, 6181), 'numpy.zeros_like', 'np.zeros_like', (['histogram_img'], {}), '(histogram_img)\n', (6166, 6181), True, 'import numpy as np\n'), ((6381, 6409), 'numpy.cumsum', 'np.cumsum', (['histogram_natural'], {}), '(histogram_natural)\n', (6390, 6409), True, 
'import numpy as np\n'), ((6459, 6477), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (6472, 6477), True, 'import numpy as np\n'), ((6677, 6712), 'numpy.true_divide', 'np.true_divide', (['img_hist_match', '(255)'], {}), '(img_hist_match, 255)\n', (6691, 6712), True, 'import numpy as np\n'), ((1266, 1288), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1276, 1288), False, 'from PIL import Image\n'), ((1578, 1621), 'cv2.cvtColor', 'cv2.cvtColor', (['img_out', 'cv2.COLOR_YCR_CB2BGR'], {}), '(img_out, cv2.COLOR_YCR_CB2BGR)\n', (1590, 1621), False, 'import cv2\n'), ((1635, 1659), 'PIL.Image.fromarray', 'Image.fromarray', (['img_out'], {}), '(img_out)\n', (1650, 1659), False, 'from PIL import Image\n'), ((3101, 3164), 'scipy.signal.convolve2d', 'signal.convolve2d', (['img_gradient', 'line_segments[:, :, i]', '"""same"""'], {}), "(img_gradient, line_segments[:, :, i], 'same')\n", (3118, 3164), False, 'from scipy import signal\n'), ((3601, 3675), 'scipy.signal.convolve2d', 'signal.convolve2d', (['magnitude_maps[:, :, i]', 'line_segments[:, :, i]', '"""same"""'], {}), "(magnitude_maps[:, :, i], line_segments[:, :, i], 'same')\n", (3618, 3675), False, 'from scipy import signal\n'), ((4152, 4193), 'cv2.cvtColor', 'cv2.cvtColor', (['texture', 'cv2.COLOR_BGR2GRAY'], {}), '(texture, cv2.COLOR_BGR2GRAY)\n', (4164, 4193), False, 'import cv2\n'), ((1718, 1755), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1730, 1755), False, 'import cv2\n'), ((1876, 1906), 'cv2.imwrite', 'cv2.imwrite', (['savename', 'img_out'], {}), '(savename, img_out)\n', (1887, 1906), False, 'import cv2\n'), ((2258, 2305), 'numpy.abs', 'np.abs', (['(img_double[:, 0:-1] - img_double[:, 1:])'], {}), '(img_double[:, 0:-1] - img_double[:, 1:])\n', (2264, 2305), True, 'import numpy as np\n'), ((2305, 2321), 'numpy.zeros', 'np.zeros', (['(h, 1)'], {}), '((h, 1))\n', (2313, 2321), True, 'import numpy as np\n'), ((2350, 2397), 'numpy.abs', 'np.abs', (['(img_double[0:-1, :] - img_double[1:, :])'], {}), '(img_double[0:-1, :] - img_double[1:, :])\n', (2356, 2397), True, 'import numpy as np\n'), ((2397, 2413), 'numpy.zeros', 'np.zeros', (['(1, w)'], {}), '((1, w))\n', (2405, 2413), True, 'import numpy as np\n'), ((2444, 2459), 'numpy.power', 'np.power', (['dx', '(2)'], {}), '(dx, 2)\n', (2452, 2459), True, 'import numpy as np\n'), ((2462, 2477), 'numpy.power', 'np.power', (['dy', '(2)'], {}), '(dy, 2)\n', (2470, 2477), True, 'import numpy as np\n'), ((5076, 5100), 'math.ceil', 'math.ceil', (['((m - 0.1) / 2)'], {}), '((m - 0.1) / 2)\n', (5085, 5100), False, 'import math\n'), ((2848, 2884), 'numpy.rot90', 'np.rot90', (['line_segments[:, :, 7]', '(-1)'], {}), '(line_segments[:, :, 7], -1)\n', (2856, 2884), True, 'import numpy as np\n'), ((2927, 2962), 'numpy.rot90', 'np.rot90', (['line_segments[:, :, i]', '(1)'], {}), '(line_segments[:, :, i], 1)\n', (2935, 2962), True, 'import numpy as np\n'), ((4665, 4689), 'math.ceil', 'math.ceil', (['((m + 0.1) / 2)'], {}), '((m + 0.1) / 2)\n', (4674, 4689), False, 'import math\n'), ((4711, 4735), 'math.ceil', 'math.ceil', (['((m - 0.1) / 2)'], {}), '((m - 0.1) / 2)\n', (4720, 4735), False, 'import math\n'), ((5237, 5288), 'numpy.reshape', 'np.reshape', (['texture.T', '(1, texture.size)'], {'order': '"""f"""'}), "(texture.T, (1, texture.size), order='f')\n", (5247, 5288), True, 'import numpy as np\n'), ((5341, 5406), 'numpy.reshape', 'np.reshape', (['img_hist_match.T', '(1, img_hist_match.size)'], {'order': 
'"""f"""'}), "(img_hist_match.T, (1, img_hist_match.size), order='f')\n", (5351, 5406), True, 'import numpy as np\n'), ((2674, 2699), 'math.tan', 'math.tan', (['(math.pi / 8 * i)'], {}), '(math.pi / 8 * i)\n', (2682, 2699), False, 'import math\n'), ((5002, 5030), 'math.ceil', 'math.ceil', (['((m - 1 + 0.1) / 2)'], {}), '((m - 1 + 0.1) / 2)\n', (5011, 5030), False, 'import math\n'), ((6612, 6649), 'numpy.abs', 'np.abs', (['(histogram_natural_cdf - value)'], {}), '(histogram_natural_cdf - value)\n', (6618, 6649), True, 'import numpy as np\n')]
|
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta
from unittest import TestCase
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoDeltasWarning,
NonNumpyField,
NonPipelineField,
)
from zipline.utils.numpy_utils import repeat_last_axis
from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')
asset_infos = (
(make_simple_asset_info(
tuple(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
(make_simple_asset_info(
tuple(map(ord, 'ABCD')),
pd.Timestamp(0),
pd.Timestamp('2015'),
),),
)
with_extra_sid = parameterized.expand(asset_infos)
class BlazeToPipelineTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
dates = cls.dates.repeat(3)
cls.sids = sids = ord('A'), ord('B'), ord('C')
cls.df = df = pd.DataFrame({
'sid': sids * 3,
'value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
'asof_date': dates,
'timestamp': dates,
})
cls.dshape = dshape("""
var * {
sid: ?int64,
value: ?float64,
asof_date: datetime,
timestamp: datetime
}
""")
cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
dshape_ = OrderedDict(cls.dshape.measure.fields)
del dshape_['sid']
cls.macro_dshape = var * Record(dshape_)
cls.garbage_loader = BlazeLoader()
def test_tabular(self):
name = 'expr'
expr = bz.Data(self.df, name=name, dshape=self.dshape)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertEqual(ds.__name__, name)
self.assertTrue(issubclass(ds, DataSet))
self.assertEqual(
{c.name: c.dtype for c in ds._columns},
{'sid': np.int64, 'value': np.float64},
)
for field in ('timestamp', 'asof_date'):
with self.assertRaises(AttributeError) as e:
getattr(ds, field)
self.assertIn("'%s'" % field, str(e.exception))
self.assertIn("'datetime'", str(e.exception))
# test memoization
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
ds,
)
def test_column(self):
exprname = 'expr'
expr = bz.Data(self.df, name=exprname, dshape=self.dshape)
value = from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertEqual(value.name, 'value')
self.assertIsInstance(value, BoundColumn)
self.assertEqual(value.dtype, np.float64)
# test memoization
self.assertIs(
from_blaze(
expr.value,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
value,
)
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
).value,
value,
)
# test the walk back up the tree
self.assertIs(
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
),
value.dataset,
)
self.assertEqual(value.dataset.__name__, exprname)
def test_missing_asof(self):
expr = bz.Data(
self.df.loc[:, ['sid', 'value', 'timestamp']],
name='expr',
dshape="""
var * {
sid: ?int64,
value: float64,
timestamp: datetime,
}""",
)
with self.assertRaises(TypeError) as e:
from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
self.assertIn("'asof_date'", str(e.exception))
self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))
def test_auto_deltas(self):
expr = bz.Data(
{'ds': self.df,
'ds_deltas': pd.DataFrame(columns=self.df.columns)},
dshape=var * Record((
('ds', self.dshape.measure),
('ds_deltas', self.dshape.measure),
)),
)
loader = BlazeLoader()
ds = from_blaze(expr.ds, loader=loader)
self.assertEqual(len(loader), 1)
exprdata = loader[ds]
self.assertTrue(exprdata.expr.isidentical(expr.ds))
self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))
def test_auto_deltas_fail_warn(self):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter('always')
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
from_blaze(
expr,
loader=loader,
no_deltas_rule='warn',
)
self.assertEqual(len(ws), 1)
w = ws[0].message
self.assertIsInstance(w, NoDeltasWarning)
self.assertIn(str(expr), str(w))
def test_auto_deltas_fail_raise(self):
loader = BlazeLoader()
expr = bz.Data(self.df, dshape=self.dshape)
with self.assertRaises(ValueError) as e:
from_blaze(
expr,
loader=loader,
no_deltas_rule='raise',
)
self.assertIn(str(expr), str(e.exception))
def test_non_numpy_field(self):
expr = bz.Data(
[],
dshape="""
var * {
a: datetime,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(object.__getattribute__(ds, 'a'), NonNumpyField)
def test_non_pipeline_field(self):
# NOTE: This test will fail if we ever allow string types in
# the Pipeline API. If this happens, change the dtype of the `a` field
# of expr to another type we don't allow.
expr = bz.Data(
[],
dshape="""
var * {
a: string,
asof_date: datetime,
timestamp: datetime,
}""",
)
ds = from_blaze(
expr,
loader=self.garbage_loader,
no_deltas_rule='ignore',
)
with self.assertRaises(AttributeError):
ds.a
self.assertIsInstance(
object.__getattribute__(ds, 'a'),
NonPipelineField,
)
def test_complex_expr(self):
expr = bz.Data(self.df, dshape=self.dshape)
# put an Add in the table
expr_with_add = bz.transform(expr, value=expr.value + 1)
# Test that we can have complex expressions with no deltas
from_blaze(
expr_with_add,
deltas=None,
loader=self.garbage_loader,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1, # put an Add in the column
deltas=None,
loader=self.garbage_loader,
)
deltas = bz.Data(
pd.DataFrame(columns=self.df.columns),
dshape=self.dshape,
)
with self.assertRaises(TypeError):
from_blaze(
expr_with_add,
deltas=deltas,
loader=self.garbage_loader,
)
with self.assertRaises(TypeError):
from_blaze(
expr.value + 1,
deltas=deltas,
loader=self.garbage_loader,
)
def test_id(self):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
)
p = Pipeline()
p.add(ds.value.latest, 'value')
dates = self.dates
with tmp_asset_finder() as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
expected = self.df.drop('asof_date', axis=1).set_index(
['timestamp', 'sid'],
)
expected.index = pd.MultiIndex.from_product((
expected.index.levels[0],
finder.retrieve_all(expected.index.levels[1]),
))
assert_frame_equal(result, expected, check_dtype=False)
def test_id_macro_dataset(self):
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
loader = BlazeLoader()
ds = from_blaze(
expr,
loader=loader,
no_deltas_rule='ignore',
)
p = Pipeline()
p.add(ds.value.latest, 'value')
dates = self.dates
asset_info = asset_infos[0][0]
with tmp_asset_finder(asset_info) as finder:
result = SimplePipelineEngine(
loader,
dates,
finder,
).run_pipeline(p, dates[0], dates[-1])
nassets = len(asset_info)
expected = pd.DataFrame(
list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
index=pd.MultiIndex.from_product((
self.macro_df.timestamp,
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
assert_frame_equal(result, expected, check_dtype=False)
def _run_pipeline(self,
expr,
deltas,
expected_views,
expected_output,
finder,
calendar,
start,
end,
window_length,
compute_fn):
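        # Shared helper for the deltas tests: wraps `expr`/`deltas` in a windowed
        # CustomFactor, checks each day's adjusted input view against
        # `expected_views`, and compares the final frame to `expected_output`.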
loader = BlazeLoader()
ds = from_blaze(
expr,
deltas,
loader=loader,
no_deltas_rule='raise',
)
p = Pipeline()
# prevent unbound locals issue in the inner class
window_length_ = window_length
class TestFactor(CustomFactor):
inputs = ds.value,
window_length = window_length_
def compute(self, today, assets, out, data):
assert_array_almost_equal(data, expected_views[today])
out[:] = compute_fn(data)
p.add(TestFactor(), 'value')
result = SimplePipelineEngine(
loader,
calendar,
finder,
).run_pipeline(p, start, end)
assert_frame_equal(
result,
expected_output,
check_dtype=False,
)
@with_extra_sid
def test_deltas(self, asset_info):
expr = bz.Data(self.df, name='expr', dshape=self.dshape)
deltas = bz.Data(self.df, name='deltas', dshape=self.dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': np.array([[10.0, 11.0, 12.0],
[1.0, 2.0, 3.0]]),
'2014-01-03': np.array([[11.0, 12.0, 13.0],
[2.0, 3.0, 4.0]]),
'2014-01-04': np.array([[12.0, 13.0, 14.0],
[12.0, 13.0, 14.0]]),
})
nassets = len(asset_info)
if nassets == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan]],
expected_views,
)
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
def test_deltas_macro(self):
asset_info = asset_infos[0][0]
expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
deltas = bz.Data(
self.macro_df.iloc[:-1],
name='deltas',
dshape=self.macro_dshape,
)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-02': repeat_last_axis(np.array([10.0, 1.0]), nassets),
'2014-01-03': repeat_last_axis(np.array([11.0, 2.0]), nassets),
})
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
dates = self.dates
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=dates,
start=dates[1],
end=dates[-1],
window_length=2,
compute_fn=np.nanmax,
)
@with_extra_sid
def test_novel_deltas(self, asset_info):
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
repeated_dates = base_dates.repeat(3)
baseline = pd.DataFrame({
'sid': self.sids * 2,
'value': (0, 1, 2, 1, 2, 3),
'asof_date': repeated_dates,
'timestamp': repeated_dates,
})
expr = bz.Data(baseline, name='expr', dshape=self.dshape)
deltas = bz.Data(baseline, name='deltas', dshape=self.dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0]]),
'2014-01-06': np.array([[10.0, 11.0, 12.0],
[10.0, 11.0, 12.0],
[11.0, 12.0, 13.0]]),
})
if len(asset_info) == 4:
expected_views = valmap(
lambda view: np.c_[view, [np.nan, np.nan, np.nan]],
expected_views,
)
expected_output_buffer = [10, 11, 12, np.nan, 11, 12, 13, np.nan]
else:
expected_output_buffer = [10, 11, 12, 11, 12, 13]
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
expected_output_buffer,
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
)
def test_novel_deltas_macro(self):
asset_info = asset_infos[0][0]
base_dates = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-04')
])
baseline = pd.DataFrame({
'value': (0, 1),
'asof_date': base_dates,
'timestamp': base_dates,
})
expr = bz.Data(baseline, name='expr', dshape=self.macro_dshape)
deltas = bz.Data(baseline, name='deltas', dshape=self.macro_dshape)
deltas = bz.transform(
deltas,
value=deltas.value + 10,
timestamp=deltas.timestamp + timedelta(days=1),
)
nassets = len(asset_info)
expected_views = keymap(pd.Timestamp, {
'2014-01-03': repeat_last_axis(
np.array([10.0, 10.0, 10.0]),
nassets,
),
'2014-01-06': repeat_last_axis(
np.array([10.0, 10.0, 11.0]),
nassets,
),
})
cal = pd.DatetimeIndex([
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
pd.Timestamp('2014-01-03'),
# omitting the 4th and 5th to simulate a weekend
pd.Timestamp('2014-01-06'),
])
with tmp_asset_finder(asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv([10] * nassets, [11] * nassets)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
expr,
deltas,
expected_views,
expected_output,
finder,
calendar=cal,
start=cal[2],
end=cal[-1],
window_length=3,
compute_fn=op.itemgetter(-1),
)
|
[
"zipline.pipeline.engine.SimplePipelineEngine",
"toolz.curried.operator.itemgetter",
"blaze.transform",
"pandas.DataFrame",
"toolz.curried.operator.attrgetter",
"nose_parameterized.parameterized.expand",
"datashape.dshape",
"warnings.simplefilter",
"zipline.pipeline.loaders.blaze.BlazeLoader",
"warnings.catch_warnings",
"numpy.testing.utils.assert_array_almost_equal",
"datetime.timedelta",
"zipline.pipeline.Pipeline",
"pandas.date_range",
"pandas.util.testing.assert_frame_equal",
"toolz.valmap",
"toolz.concatv",
"datashape.Record",
"zipline.pipeline.loaders.blaze.from_blaze",
"pandas.Timestamp",
"blaze.Data",
"zipline.utils.test_utils.tmp_asset_finder",
"numpy.array",
"collections.OrderedDict"
] |
[((994, 1015), 'toolz.curried.operator.attrgetter', 'op.attrgetter', (['"""name"""'], {}), "('name')\n", (1007, 1015), True, 'from toolz.curried import operator as op\n'), ((1026, 1048), 'toolz.curried.operator.attrgetter', 'op.attrgetter', (['"""dtype"""'], {}), "('dtype')\n", (1039, 1048), True, 'from toolz.curried import operator as op\n'), ((1335, 1368), 'nose_parameterized.parameterized.expand', 'parameterized.expand', (['asset_infos'], {}), '(asset_infos)\n', (1355, 1368), False, 'from nose_parameterized import parameterized\n'), ((1482, 1523), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-01-03"""'], {}), "('2014-01-01', '2014-01-03')\n", (1495, 1523), True, 'import pandas as pd\n'), ((1637, 1750), 'pandas.DataFrame', 'pd.DataFrame', (["{'sid': sids * 3, 'value': (0, 1, 2, 1, 2, 3, 2, 3, 4), 'asof_date': dates,\n 'timestamp': dates}"], {}), "({'sid': sids * 3, 'value': (0, 1, 2, 1, 2, 3, 2, 3, 4),\n 'asof_date': dates, 'timestamp': dates})\n", (1649, 1750), True, 'import pandas as pd\n'), ((1827, 2005), 'datashape.dshape', 'dshape', (['"""\n var * {\n sid: ?int64,\n value: ?float64,\n asof_date: datetime,\n timestamp: datetime\n }\n """'], {}), '(\n """\n var * {\n sid: ?int64,\n value: ?float64,\n asof_date: datetime,\n timestamp: datetime\n }\n """\n )\n', (1833, 2005), False, 'from datashape import dshape, var, Record\n'), ((2074, 2112), 'collections.OrderedDict', 'OrderedDict', (['cls.dshape.measure.fields'], {}), '(cls.dshape.measure.fields)\n', (2085, 2112), False, 'from collections import OrderedDict\n'), ((2219, 2232), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (2230, 2232), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((2299, 2346), 'blaze.Data', 'bz.Data', (['self.df'], {'name': 'name', 'dshape': 'self.dshape'}), '(self.df, name=name, dshape=self.dshape)\n', (2306, 2346), True, 'import blaze as bz\n'), ((2360, 2429), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (2370, 2429), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((3262, 3313), 'blaze.Data', 'bz.Data', (['self.df'], {'name': 'exprname', 'dshape': 'self.dshape'}), '(self.df, name=exprname, dshape=self.dshape)\n', (3269, 3313), True, 'import blaze as bz\n'), ((3330, 3405), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr.value'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr.value, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (3340, 3405), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((4391, 4615), 'blaze.Data', 'bz.Data', (["self.df.loc[:, ['sid', 'value', 'timestamp']]"], {'name': '"""expr"""', 'dshape': '"""\n var * {\n sid: ?int64,\n value: float64,\n timestamp: datetime,\n }"""'}), '(self.df.loc[:, [\'sid\', \'value\', \'timestamp\']], name=\'expr\', dshape=\n """\n var * {\n sid: ?int64,\n value: float64,\n timestamp: datetime,\n }"""\n )\n', (4398, 4615), True, 'import blaze as bz\n'), ((5299, 5312), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (5310, 5312), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, 
NonNumpyField, NonPipelineField\n'), ((5326, 5360), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr.ds'], {'loader': 'loader'}), '(expr.ds, loader=loader)\n', (5336, 5360), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((6141, 6154), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (6152, 6154), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((6170, 6206), 'blaze.Data', 'bz.Data', (['self.df'], {'dshape': 'self.dshape'}), '(self.df, dshape=self.dshape)\n', (6177, 6206), True, 'import blaze as bz\n'), ((6490, 6666), 'blaze.Data', 'bz.Data', (['[]'], {'dshape': '"""\n var * {\n a: datetime,\n asof_date: datetime,\n timestamp: datetime,\n }"""'}), '([], dshape=\n """\n var * {\n a: datetime,\n asof_date: datetime,\n timestamp: datetime,\n }"""\n )\n', (6497, 6666), True, 'import blaze as bz\n'), ((6705, 6774), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (6715, 6774), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((7219, 7393), 'blaze.Data', 'bz.Data', (['[]'], {'dshape': '"""\n var * {\n a: string,\n asof_date: datetime,\n timestamp: datetime,\n }"""'}), '([], dshape=\n """\n var * {\n a: string,\n asof_date: datetime,\n timestamp: datetime,\n }"""\n )\n', (7226, 7393), True, 'import blaze as bz\n'), ((7432, 7501), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (7442, 7501), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((7780, 7816), 'blaze.Data', 'bz.Data', (['self.df'], {'dshape': 'self.dshape'}), '(self.df, dshape=self.dshape)\n', (7787, 7816), True, 'import blaze as bz\n'), ((7875, 7915), 'blaze.transform', 'bz.transform', (['expr'], {'value': '(expr.value + 1)'}), '(expr, value=expr.value + 1)\n', (7887, 7915), True, 'import blaze as bz\n'), ((7992, 8058), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr_with_add'], {'deltas': 'None', 'loader': 'self.garbage_loader'}), '(expr_with_add, deltas=None, loader=self.garbage_loader)\n', (8002, 8058), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((8856, 8905), 'blaze.Data', 'bz.Data', (['self.df'], {'name': '"""expr"""', 'dshape': 'self.dshape'}), "(self.df, name='expr', dshape=self.dshape)\n", (8863, 8905), True, 'import blaze as bz\n'), ((8923, 8936), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (8934, 8936), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((8950, 9006), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=loader, no_deltas_rule='ignore')\n", (8960, 9006), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((9066, 9076), 'zipline.pipeline.Pipeline', 
'Pipeline', ([], {}), '()\n', (9074, 9076), False, 'from zipline.pipeline import Pipeline, CustomFactor\n'), ((9632, 9687), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {'check_dtype': '(False)'}), '(result, expected, check_dtype=False)\n', (9650, 9687), False, 'from pandas.util.testing import assert_frame_equal\n'), ((9741, 9802), 'blaze.Data', 'bz.Data', (['self.macro_df'], {'name': '"""expr"""', 'dshape': 'self.macro_dshape'}), "(self.macro_df, name='expr', dshape=self.macro_dshape)\n", (9748, 9802), True, 'import blaze as bz\n'), ((9820, 9833), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (9831, 9833), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((9847, 9903), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=loader, no_deltas_rule='ignore')\n", (9857, 9903), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((9963, 9973), 'zipline.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (9971, 9973), False, 'from zipline.pipeline import Pipeline, CustomFactor\n'), ((10648, 10703), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected'], {'check_dtype': '(False)'}), '(result, expected, check_dtype=False)\n', (10666, 10703), False, 'from pandas.util.testing import assert_frame_equal\n'), ((11075, 11088), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (11086, 11088), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((11102, 11165), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr', 'deltas'], {'loader': 'loader', 'no_deltas_rule': '"""raise"""'}), "(expr, deltas, loader=loader, no_deltas_rule='raise')\n", (11112, 11165), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((11237, 11247), 'zipline.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (11245, 11247), False, 'from zipline.pipeline import Pipeline, CustomFactor\n'), ((11819, 11881), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'expected_output'], {'check_dtype': '(False)'}), '(result, expected_output, check_dtype=False)\n', (11837, 11881), False, 'from pandas.util.testing import assert_frame_equal\n'), ((12004, 12053), 'blaze.Data', 'bz.Data', (['self.df'], {'name': '"""expr"""', 'dshape': 'self.dshape'}), "(self.df, name='expr', dshape=self.dshape)\n", (12011, 12053), True, 'import blaze as bz\n'), ((12071, 12122), 'blaze.Data', 'bz.Data', (['self.df'], {'name': '"""deltas"""', 'dshape': 'self.dshape'}), "(self.df, name='deltas', dshape=self.dshape)\n", (12078, 12122), True, 'import blaze as bz\n'), ((13830, 13891), 'blaze.Data', 'bz.Data', (['self.macro_df'], {'name': '"""expr"""', 'dshape': 'self.macro_dshape'}), "(self.macro_df, name='expr', dshape=self.macro_dshape)\n", (13837, 13891), True, 'import blaze as bz\n'), ((13909, 13982), 'blaze.Data', 'bz.Data', (['self.macro_df.iloc[:-1]'], {'name': '"""deltas"""', 'dshape': 'self.macro_dshape'}), "(self.macro_df.iloc[:-1], name='deltas', dshape=self.macro_dshape)\n", (13916, 13982), True, 'import blaze as bz\n'), ((15465, 15592), 'pandas.DataFrame', 'pd.DataFrame', (["{'sid': self.sids 
* 2, 'value': (0, 1, 2, 1, 2, 3), 'asof_date':\n repeated_dates, 'timestamp': repeated_dates}"], {}), "({'sid': self.sids * 2, 'value': (0, 1, 2, 1, 2, 3),\n 'asof_date': repeated_dates, 'timestamp': repeated_dates})\n", (15477, 15592), True, 'import pandas as pd\n'), ((15663, 15713), 'blaze.Data', 'bz.Data', (['baseline'], {'name': '"""expr"""', 'dshape': 'self.dshape'}), "(baseline, name='expr', dshape=self.dshape)\n", (15670, 15713), True, 'import blaze as bz\n'), ((15731, 15783), 'blaze.Data', 'bz.Data', (['baseline'], {'name': '"""deltas"""', 'dshape': 'self.dshape'}), "(baseline, name='deltas', dshape=self.dshape)\n", (15738, 15783), True, 'import blaze as bz\n'), ((17891, 17976), 'pandas.DataFrame', 'pd.DataFrame', (["{'value': (0, 1), 'asof_date': base_dates, 'timestamp': base_dates}"], {}), "({'value': (0, 1), 'asof_date': base_dates, 'timestamp':\n base_dates})\n", (17903, 17976), True, 'import pandas as pd\n'), ((18035, 18091), 'blaze.Data', 'bz.Data', (['baseline'], {'name': '"""expr"""', 'dshape': 'self.macro_dshape'}), "(baseline, name='expr', dshape=self.macro_dshape)\n", (18042, 18091), True, 'import blaze as bz\n'), ((18109, 18167), 'blaze.Data', 'bz.Data', (['baseline'], {'name': '"""deltas"""', 'dshape': 'self.macro_dshape'}), "(baseline, name='deltas', dshape=self.macro_dshape)\n", (18116, 18167), True, 'import blaze as bz\n'), ((1134, 1149), 'pandas.Timestamp', 'pd.Timestamp', (['(0)'], {}), '(0)\n', (1146, 1149), True, 'import pandas as pd\n'), ((1159, 1179), 'pandas.Timestamp', 'pd.Timestamp', (['"""2015"""'], {}), "('2015')\n", (1171, 1179), True, 'import pandas as pd\n'), ((1260, 1275), 'pandas.Timestamp', 'pd.Timestamp', (['(0)'], {}), '(0)\n', (1272, 1275), True, 'import pandas as pd\n'), ((1285, 1305), 'pandas.Timestamp', 'pd.Timestamp', (['"""2015"""'], {}), "('2015')\n", (1297, 1305), True, 'import pandas as pd\n'), ((2173, 2188), 'datashape.Record', 'Record', (['dshape_'], {}), '(dshape_)\n', (2179, 2188), False, 'from datashape import dshape, var, Record\n'), ((3033, 3102), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (3043, 3102), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((3662, 3737), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr.value'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr.value, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (3672, 3737), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((4112, 4181), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (4122, 4181), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((4714, 4783), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (4724, 4783), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((5617, 5653), 'warnings.catch_warnings', 
'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (5640, 5653), False, 'import warnings\n'), ((5673, 5704), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (5694, 5704), False, 'import warnings\n'), ((5726, 5739), 'zipline.pipeline.loaders.blaze.BlazeLoader', 'BlazeLoader', ([], {}), '()\n', (5737, 5739), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((5759, 5795), 'blaze.Data', 'bz.Data', (['self.df'], {'dshape': 'self.dshape'}), '(self.df, dshape=self.dshape)\n', (5766, 5795), True, 'import blaze as bz\n'), ((5808, 5862), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'loader', 'no_deltas_rule': '"""warn"""'}), "(expr, loader=loader, no_deltas_rule='warn')\n", (5818, 5862), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((6268, 6323), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'loader', 'no_deltas_rule': '"""raise"""'}), "(expr, loader=loader, no_deltas_rule='raise')\n", (6278, 6323), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((8162, 8229), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['(expr.value + 1)'], {'deltas': 'None', 'loader': 'self.garbage_loader'}), '(expr.value + 1, deltas=None, loader=self.garbage_loader)\n', (8172, 8229), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((8360, 8397), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.df.columns'}), '(columns=self.df.columns)\n', (8372, 8397), True, 'import pandas as pd\n'), ((8496, 8564), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr_with_add'], {'deltas': 'deltas', 'loader': 'self.garbage_loader'}), '(expr_with_add, deltas=deltas, loader=self.garbage_loader)\n', (8506, 8564), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((8684, 8753), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['(expr.value + 1)'], {'deltas': 'deltas', 'loader': 'self.garbage_loader'}), '(expr.value + 1, deltas=deltas, loader=self.garbage_loader)\n', (8694, 8753), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((9158, 9176), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', ([], {}), '()\n', (9174, 9176), False, 'from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info\n'), ((10094, 10122), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', (['asset_info'], {}), '(asset_info)\n', (10110, 10122), False, 'from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info\n'), ((12766, 12832), 'toolz.valmap', 'valmap', (['(lambda view: np.c_[view, [np.nan, np.nan]])', 'expected_views'], {}), '(lambda view: np.c_[view, [np.nan, np.nan]], expected_views)\n', (12772, 12832), False, 'from toolz import keymap, valmap, concatv\n'), ((12894, 12922), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', (['asset_info'], {}), '(asset_info)\n', (12910, 12922), False, 'from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info\n'), ((14448, 14476), 
'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', (['asset_info'], {}), '(asset_info)\n', (14464, 14476), False, 'from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info\n'), ((16403, 16477), 'toolz.valmap', 'valmap', (['(lambda view: np.c_[view, [np.nan, np.nan, np.nan]])', 'expected_views'], {}), '(lambda view: np.c_[view, [np.nan, np.nan, np.nan]], expected_views)\n', (16409, 16477), False, 'from toolz import keymap, valmap, concatv\n'), ((16959, 16987), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', (['asset_info'], {}), '(asset_info)\n', (16975, 16987), False, 'from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info\n'), ((18959, 18987), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', (['asset_info'], {}), '(asset_info)\n', (18975, 18987), False, 'from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info\n'), ((3866, 3935), 'zipline.pipeline.loaders.blaze.from_blaze', 'from_blaze', (['expr'], {'loader': 'self.garbage_loader', 'no_deltas_rule': '"""ignore"""'}), "(expr, loader=self.garbage_loader, no_deltas_rule='ignore')\n", (3876, 3935), False, 'from zipline.pipeline.loaders.blaze import from_blaze, BlazeLoader, NoDeltasWarning, NonNumpyField, NonPipelineField\n'), ((5085, 5122), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.df.columns'}), '(columns=self.df.columns)\n', (5097, 5122), True, 'import pandas as pd\n'), ((10384, 10436), 'toolz.concatv', 'concatv', (['([0] * nassets)', '([1] * nassets)', '([2] * nassets)'], {}), '([0] * nassets, [1] * nassets, [2] * nassets)\n', (10391, 10436), False, 'from toolz import keymap, valmap, concatv\n'), ((11535, 11589), 'numpy.testing.utils.assert_array_almost_equal', 'assert_array_almost_equal', (['data', 'expected_views[today]'], {}), '(data, expected_views[today])\n', (11560, 11589), False, 'from numpy.testing.utils import assert_array_almost_equal\n'), ((11688, 11734), 'zipline.pipeline.engine.SimplePipelineEngine', 'SimplePipelineEngine', (['loader', 'calendar', 'finder'], {}), '(loader, calendar, finder)\n', (11708, 11734), False, 'from zipline.pipeline.engine import SimplePipelineEngine\n'), ((12356, 12403), 'numpy.array', 'np.array', (['[[10.0, 11.0, 12.0], [1.0, 2.0, 3.0]]'], {}), '([[10.0, 11.0, 12.0], [1.0, 2.0, 3.0]])\n', (12364, 12403), True, 'import numpy as np\n'), ((12467, 12514), 'numpy.array', 'np.array', (['[[11.0, 12.0, 13.0], [2.0, 3.0, 4.0]]'], {}), '([[11.0, 12.0, 13.0], [2.0, 3.0, 4.0]])\n', (12475, 12514), True, 'import numpy as np\n'), ((12578, 12628), 'numpy.array', 'np.array', (['[[12.0, 13.0, 14.0], [12.0, 13.0, 14.0]]'], {}), '([[12.0, 13.0, 14.0], [12.0, 13.0, 14.0]])\n', (12586, 12628), True, 'import numpy as np\n'), ((15322, 15348), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-01"""'], {}), "('2014-01-01')\n", (15334, 15348), True, 'import pandas as pd\n'), ((15362, 15388), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-04"""'], {}), "('2014-01-04')\n", (15374, 15388), True, 'import pandas as pd\n'), ((16016, 16086), 'numpy.array', 'np.array', (['[[10.0, 11.0, 12.0], [10.0, 11.0, 12.0], [10.0, 11.0, 12.0]]'], {}), '([[10.0, 11.0, 12.0], [10.0, 11.0, 12.0], [10.0, 11.0, 12.0]])\n', (16024, 16086), True, 'import numpy as np\n'), ((16186, 16256), 'numpy.array', 'np.array', (['[[10.0, 11.0, 12.0], [10.0, 11.0, 12.0], [11.0, 12.0, 13.0]]'], {}), '([[10.0, 11.0, 12.0], [10.0, 11.0, 12.0], [11.0, 12.0, 13.0]])\n', (16194, 16256), True, 'import numpy as np\n'), ((16725, 16751), 
'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-01"""'], {}), "('2014-01-01')\n", (16737, 16751), True, 'import pandas as pd\n'), ((16765, 16791), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-02"""'], {}), "('2014-01-02')\n", (16777, 16791), True, 'import pandas as pd\n'), ((16805, 16831), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-03"""'], {}), "('2014-01-03')\n", (16817, 16831), True, 'import pandas as pd\n'), ((16906, 16932), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-06"""'], {}), "('2014-01-06')\n", (16918, 16932), True, 'import pandas as pd\n'), ((17794, 17820), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-01"""'], {}), "('2014-01-01')\n", (17806, 17820), True, 'import pandas as pd\n'), ((17834, 17860), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-04"""'], {}), "('2014-01-04')\n", (17846, 17860), True, 'import pandas as pd\n'), ((18726, 18752), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-01"""'], {}), "('2014-01-01')\n", (18738, 18752), True, 'import pandas as pd\n'), ((18766, 18792), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-02"""'], {}), "('2014-01-02')\n", (18778, 18792), True, 'import pandas as pd\n'), ((18806, 18832), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-03"""'], {}), "('2014-01-03')\n", (18818, 18832), True, 'import pandas as pd\n'), ((18907, 18933), 'pandas.Timestamp', 'pd.Timestamp', (['"""2014-01-06"""'], {}), "('2014-01-06')\n", (18919, 18933), True, 'import pandas as pd\n'), ((5150, 5223), 'datashape.Record', 'Record', (["(('ds', self.dshape.measure), ('ds_deltas', self.dshape.measure))"], {}), "((('ds', self.dshape.measure), ('ds_deltas', self.dshape.measure)))\n", (5156, 5223), False, 'from datashape import dshape, var, Record\n'), ((9209, 9252), 'zipline.pipeline.engine.SimplePipelineEngine', 'SimplePipelineEngine', (['loader', 'dates', 'finder'], {}), '(loader, dates, finder)\n', (9229, 9252), False, 'from zipline.pipeline.engine import SimplePipelineEngine\n'), ((10155, 10198), 'zipline.pipeline.engine.SimplePipelineEngine', 'SimplePipelineEngine', (['loader', 'dates', 'finder'], {}), '(loader, dates, finder)\n', (10175, 10198), False, 'from zipline.pipeline.engine import SimplePipelineEngine\n'), ((12252, 12269), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (12261, 12269), False, 'from datetime import timedelta\n'), ((12999, 13054), 'toolz.concatv', 'concatv', (['([12] * nassets)', '([13] * nassets)', '([14] * nassets)'], {}), '([12] * nassets, [13] * nassets, [14] * nassets)\n', (13006, 13054), False, 'from toolz import keymap, valmap, concatv\n'), ((13376, 13393), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (13385, 13393), False, 'from datetime import timedelta\n'), ((14159, 14176), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (14168, 14176), False, 'from datetime import timedelta\n'), ((14314, 14335), 'numpy.array', 'np.array', (['[10.0, 1.0]'], {}), '([10.0, 1.0])\n', (14322, 14335), True, 'import numpy as np\n'), ((14390, 14411), 'numpy.array', 'np.array', (['[11.0, 2.0]'], {}), '([11.0, 2.0])\n', (14398, 14411), True, 'import numpy as np\n'), ((14553, 14592), 'toolz.concatv', 'concatv', (['([10] * nassets)', '([11] * nassets)'], {}), '([10] * nassets, [11] * nassets)\n', (14560, 14592), False, 'from toolz import keymap, valmap, concatv\n'), ((15913, 15930), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (15922, 15930), False, 'from datetime import timedelta\n'), ((17630, 17647), 
'toolz.curried.operator.itemgetter', 'op.itemgetter', (['(-1)'], {}), '(-1)\n', (17643, 17647), True, 'from toolz.curried import operator as op\n'), ((18297, 18314), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (18306, 18314), False, 'from datetime import timedelta\n'), ((18469, 18497), 'numpy.array', 'np.array', (['[10.0, 10.0, 10.0]'], {}), '([10.0, 10.0, 10.0])\n', (18477, 18497), True, 'import numpy as np\n'), ((18599, 18627), 'numpy.array', 'np.array', (['[10.0, 10.0, 11.0]'], {}), '([10.0, 10.0, 11.0])\n', (18607, 18627), True, 'import numpy as np\n'), ((19064, 19103), 'toolz.concatv', 'concatv', (['([10] * nassets)', '([11] * nassets)'], {}), '([10] * nassets, [11] * nassets)\n', (19071, 19103), False, 'from toolz import keymap, valmap, concatv\n'), ((19653, 19670), 'toolz.curried.operator.itemgetter', 'op.itemgetter', (['(-1)'], {}), '(-1)\n', (19666, 19670), True, 'from toolz.curried import operator as op\n')]
|
import cv2
import numpy as np
from pyzbar.pyzbar import decode
def decoder(image):
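    # Note: conversion code 0 is cv2.COLOR_BGR2BGRA, so despite the variable name
    # this is not a grayscale conversion; cv2.COLOR_BGR2GRAY was presumably intended.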
gray_img = cv2.cvtColor(image, 0)
barcode = decode(gray_img)
for obj in barcode:
points = obj.polygon
(x, y, w, h) = obj.rect
pts = np.array(points, np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(image, [pts], True, (0, 255, 0), 3)
barcodeData = obj.data.decode("utf-8")
barcodeType = obj.type
string = "Data " + str(barcodeData) + " | Type " + str(barcodeType)
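        # Drawing on the module-level `frame` (the same array passed in as `image`);
        # this relies on decoder() always being called with that global.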
cv2.putText(frame, string, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
print("Barcode: " + barcodeData + " | Type: " + barcodeType)
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
decoder(frame)
cv2.imshow('Image', frame)
code = cv2.waitKey(10)
if code == ord('q'):
break
|
[
"cv2.putText",
"cv2.polylines",
"cv2.cvtColor",
"pyzbar.pyzbar.decode",
"cv2.waitKey",
"cv2.VideoCapture",
"numpy.array",
"cv2.imshow"
] |
[((700, 719), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (716, 719), False, 'import cv2\n'), ((100, 122), 'cv2.cvtColor', 'cv2.cvtColor', (['image', '(0)'], {}), '(image, 0)\n', (112, 122), False, 'import cv2\n'), ((137, 153), 'pyzbar.pyzbar.decode', 'decode', (['gray_img'], {}), '(gray_img)\n', (143, 153), False, 'from pyzbar.pyzbar import decode\n'), ((783, 809), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'frame'], {}), "('Image', frame)\n", (793, 809), False, 'import cv2\n'), ((821, 836), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (832, 836), False, 'import cv2\n'), ((254, 280), 'numpy.array', 'np.array', (['points', 'np.int32'], {}), '(points, np.int32)\n', (262, 280), True, 'import numpy as np\n'), ((327, 376), 'cv2.polylines', 'cv2.polylines', (['image', '[pts]', '(True)', '(0, 255, 0)', '(3)'], {}), '(image, [pts], True, (0, 255, 0), 3)\n', (340, 376), False, 'import cv2\n'), ((541, 627), 'cv2.putText', 'cv2.putText', (['frame', 'string', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.8)', '(255, 0, 0)', '(2)'], {}), '(frame, string, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, \n 0), 2)\n', (552, 627), False, 'import cv2\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParallelInterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class ParallelInterleaveDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self.input_values = np.array([4, 5, 6], dtype=np.int64)
self.num_repeats = 2
self.num_outputs = np.sum(self.input_values) * 2
def _build_ds(self, cycle_length, block_length, sloppy=False):
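    # Each input value x is expanded into range(10*x, 11*x) (x elements), so the
    # total number of outputs is sum(input_values) * num_repeats.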
return (dataset_ops.Dataset.from_tensor_slices(
self.input_values).repeat(self.num_repeats).apply(
interleave_ops.parallel_interleave(
lambda x: dataset_ops.Dataset.range(10 * x, 11 * x),
cycle_length, block_length, sloppy)))
def testSerializationCore(self):
# cycle_length > 1, block_length > 1
cycle_length = 2
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
# cycle_length = 1
cycle_length = 1
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
# block_length = 1
cycle_length = 2
block_length = 1
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
self.num_outputs)
def testSerializationWithSloppy(self):
break_points = self.gen_break_points(self.num_outputs, 10)
expected_outputs = np.repeat(
np.concatenate([np.arange(10 * x, 11 * x) for x in self.input_values]),
self.num_repeats).tolist()
def run_test(cycle_length, block_length):
actual = self.gen_outputs(
lambda: self._build_ds(cycle_length, block_length, True),
break_points, self.num_outputs)
self.assertSequenceEqual(sorted(actual), expected_outputs)
# cycle_length > 1, block_length > 1
run_test(2, 3)
# cycle_length = 1
run_test(1, 3)
# block_length = 1
run_test(2, 1)
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, 1))
self.run_core_tests(_build_dataset, 20)
if __name__ == '__main__':
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.sparse_ops.sparse_to_dense",
"numpy.sum",
"tensorflow.python.data.experimental.ops.interleave_ops.parallel_interleave",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"numpy.array",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"numpy.arange"
] |
[((3690, 3701), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (3699, 3701), False, 'from tensorflow.python.platform import test\n'), ((1411, 1446), 'numpy.array', 'np.array', (['[4, 5, 6]'], {'dtype': 'np.int64'}), '([4, 5, 6], dtype=np.int64)\n', (1419, 1446), True, 'import numpy as np\n'), ((1495, 1520), 'numpy.sum', 'np.sum', (['self.input_values'], {}), '(self.input_values)\n', (1501, 1520), True, 'import numpy as np\n'), ((3193, 3295), 'tensorflow.python.framework.sparse_tensor.SparseTensorValue', 'sparse_tensor.SparseTensorValue', ([], {'indices': '[[0, 0], [1, 1]]', 'values': '(i * [1, -1])', 'dense_shape': '[2, 2]'}), '(indices=[[0, 0], [1, 1]], values=i * [1, -1\n ], dense_shape=[2, 2])\n', (3224, 3295), False, 'from tensorflow.python.framework import sparse_tensor\n'), ((3395, 3457), 'tensorflow.python.ops.sparse_ops.sparse_to_dense', 'sparse_ops.sparse_to_dense', (['x.indices', 'x.dense_shape', 'x.values'], {}), '(x.indices, x.dense_shape, x.values)\n', (3421, 3457), False, 'from tensorflow.python.ops import sparse_ops\n'), ((3559, 3612), 'tensorflow.python.data.experimental.ops.interleave_ops.parallel_interleave', 'interleave_ops.parallel_interleave', (['_interleave_fn', '(1)'], {}), '(_interleave_fn, 1)\n', (3593, 3612), False, 'from tensorflow.python.data.experimental.ops import interleave_ops\n'), ((1776, 1817), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(10 * x)', '(11 * x)'], {}), '(10 * x, 11 * x)\n', (1801, 1817), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((1603, 1660), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['self.input_values'], {}), '(self.input_values)\n', (1641, 1660), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((2639, 2664), 'numpy.arange', 'np.arange', (['(10 * x)', '(11 * x)'], {}), '(10 * x, 11 * x)\n', (2648, 2664), True, 'import numpy as np\n'), ((3499, 3528), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(10)'], {}), '(10)\n', (3524, 3528), False, 'from tensorflow.python.data.ops import dataset_ops\n')]
|
import tensorflow_quantum as tfq
import tensorflow as tf
import cirq
import sympy
import matplotlib.pyplot as plt
import numpy as np
def make_data(qubits):
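    # Encode the four XOR input pairs as two-qubit circuits (|0> -> I, |1> -> X)
    # with labels in {-1, +1}; the same four points serve as train and test sets.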
train, train_label = [], []
# 0 XOR 0
cir = cirq.Circuit()
cir.append([cirq.I(qubits[0])])
cir.append([cirq.I(qubits[1])])
train.append(cir)
train_label.append(-1)
# 1 XOR 0
cir = cirq.Circuit()
cir.append([cirq.X(qubits[0])])
cir.append([cirq.I(qubits[1])])
train.append(cir)
train_label.append(1)
# 0 XOR 1
cir = cirq.Circuit()
cir.append([cirq.I(qubits[0])])
cir.append([cirq.X(qubits[1])])
train.append(cir)
train_label.append(1)
# 1 XOR 1
cir = cirq.Circuit()
cir.append([cirq.X(qubits[0])])
cir.append([cirq.X(qubits[1])])
train.append(cir)
train_label.append(-1)
return tfq.convert_to_tensor(train), np.array(train_label), tfq.convert_to_tensor(train), np.array(train_label)
def one_qubit_unitary(bit, symbols):
return cirq.Circuit(
cirq.rx(symbols[0]).on(bit),
cirq.ry(symbols[1]).on(bit),
cirq.rz(symbols[2]).on(bit))
def two_qubit_pool(source_qubit, sink_qubit, symbols):
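    # Pooling block in the QCNN style: rotate both qubits into learned bases,
    # entangle with a CNOT, then undo the sink rotation so information from the
    # source qubit is pooled onto the sink qubit.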
pool_circuit = cirq.Circuit()
sink_basis_selector = one_qubit_unitary(sink_qubit, symbols[0:3])
source_basis_selector = one_qubit_unitary(source_qubit, symbols[3:6])
pool_circuit.append(sink_basis_selector)
pool_circuit.append(source_basis_selector)
pool_circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))
pool_circuit.append(sink_basis_selector**-1)
return pool_circuit
def make_circuit(qubits):
x1 = sympy.symbols('X1_rot')
y1 = sympy.symbols('Y1_rot')
z1 = sympy.symbols('Z1_rot')
x2 = sympy.symbols('X2_rot')
y2 = sympy.symbols('Y2_rot')
z2 = sympy.symbols('Z2_rot')
pool = sympy.symbols('pooling0:6')
c = cirq.Circuit()
c.append(cirq.CNOT(qubits[0], qubits[1]))
c.append(cirq.rx(x1).on(qubits[0]))
c.append(cirq.ry(y1).on(qubits[0]))
c.append(cirq.rz(z1).on(qubits[0]))
c.append(cirq.rx(x2).on(qubits[1]))
c.append(cirq.ry(y2).on(qubits[1]))
c.append(cirq.rz(z2).on(qubits[1]))
c += two_qubit_pool(qubits[0], qubits[1], pool)
return c
def hinge_accuracy(y_true, y_pred):
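    # Labels and predictions are treated as {-1, +1} values, so comparing their
    # signs gives the classification accuracy.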
y_true = tf.squeeze(y_true) > 0.0
y_pred = tf.squeeze(y_pred) > 0.0
result = tf.cast(y_true == y_pred, tf.float32)
return tf.reduce_mean(result)
qubits = [cirq.GridQubit(0,i) for i in range(2)]
train, train_label, test, test_label = make_data(qubits)
readout_operators = [cirq.Z(qubits[1])]
inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string)
trial_circuit = make_circuit(qubits)
print(trial_circuit)
layer1 = tfq.layers.PQC(make_circuit(qubits), readout_operators, repetitions=1000, \
differentiator=tfq.differentiators.ParameterShift())(inputs)
model = tf.keras.models.Model(inputs=inputs, outputs=layer1)
def np_hinge(true, pred):
t = true > 0
p = pred > 0
result = t == p
return np.mean(result)
tf_loss = []
tf_acc = []
N = 100
params = np.random.uniform(0, 2 * np.pi, 12)
#params = np.zeros((12,))
model.set_weights(np.array([params]))
opt = tf.keras.optimizers.Adam(lr=0.01)
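# Baseline training loop: gradients of the measured expectation value flow
# through the PQC layer via TFQ's ParameterShift differentiator.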
for i in range(N):
with tf.GradientTape() as tape:
guess = model(train)
error = tf.keras.losses.MAE(train_label, tf.squeeze(guess))
grad = tape.gradient(error, model.trainable_variables)
opt.apply_gradients(zip(grad, model.trainable_variables))
acc = np_hinge(train_label, guess.numpy().flatten())
tf_loss.append(error)
tf_acc.append(acc)
if i % 10 == 0:
print("Epoch {}/{}, Loss {}, Acc {}".format(i, N, error, acc))
import optimizers
from quantum_diffs import ParameterShift
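# `optimizers` and `quantum_diffs` appear to be local helper modules supplying a
# hand-rolled Adam optimizer and a parameter-shift gradient, used below to
# cross-check the TFQ training curve.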
def f(x):
model.set_weights(np.array([x]))
ret = model(train)
return tf.keras.losses.MAE(train_label, tf.squeeze(ret)).numpy()
def f1(x):
model.set_weights(np.array([x]))
ret = model(train)
return ret.numpy()
opt = optimizers.Adam(lr=0.01)
cutsom = []
accs = []
i = 0
while i < N:
guess = f(params)
cutsom.append(guess)
gradients = ParameterShift(f, params)
params = opt.apply_grad(gradients, params)
acc = np_hinge(train_label, f1(params).flatten())
accs.append(acc)
if i % 10 == 0:
print("Epoch {}/{}, Loss {}, Acc {}".format(i, N, guess, acc))
i += 1
plt.plot(tf_loss, label='TFQ')
plt.plot(cutsom, label='Custom')
plt.legend()
plt.title("Training Loss")
plt.xlabel("Epochs")
plt.ylabel("MAE Loss")
plt.show()
plt.plot(tf_acc, label='TFQ')
plt.plot(accs, label='Custom')
plt.legend()
plt.title("Training Acc")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.show()
|
[
"matplotlib.pyplot.title",
"cirq.rx",
"cirq.ry",
"numpy.mean",
"cirq.CNOT",
"cirq.rz",
"cirq.I",
"tensorflow.keras.Input",
"tensorflow.cast",
"tensorflow.keras.optimizers.Adam",
"tensorflow.squeeze",
"cirq.Z",
"matplotlib.pyplot.show",
"tensorflow_quantum.differentiators.ParameterShift",
"matplotlib.pyplot.legend",
"tensorflow.reduce_mean",
"cirq.GridQubit",
"tensorflow.keras.models.Model",
"cirq.X",
"matplotlib.pyplot.ylabel",
"numpy.random.uniform",
"sympy.symbols",
"matplotlib.pyplot.plot",
"optimizers.Adam",
"numpy.array",
"quantum_diffs.ParameterShift",
"cirq.Circuit",
"tensorflow_quantum.convert_to_tensor",
"matplotlib.pyplot.xlabel",
"tensorflow.GradientTape"
] |
[((2587, 2635), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '()', 'dtype': 'tf.dtypes.string'}), '(shape=(), dtype=tf.dtypes.string)\n', (2601, 2635), True, 'import tensorflow as tf\n'), ((2852, 2904), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'inputs', 'outputs': 'layer1'}), '(inputs=inputs, outputs=layer1)\n', (2873, 2904), True, 'import tensorflow as tf\n'), ((3056, 3091), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(12)'], {}), '(0, 2 * np.pi, 12)\n', (3073, 3091), True, 'import numpy as np\n'), ((3164, 3197), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (3188, 3197), True, 'import tensorflow as tf\n'), ((3972, 3996), 'optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.01)'}), '(lr=0.01)\n', (3987, 3996), False, 'import optimizers\n'), ((4352, 4382), 'matplotlib.pyplot.plot', 'plt.plot', (['tf_loss'], {'label': '"""TFQ"""'}), "(tf_loss, label='TFQ')\n", (4360, 4382), True, 'import matplotlib.pyplot as plt\n'), ((4383, 4415), 'matplotlib.pyplot.plot', 'plt.plot', (['cutsom'], {'label': '"""Custom"""'}), "(cutsom, label='Custom')\n", (4391, 4415), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4428), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4455), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss"""'], {}), "('Training Loss')\n", (4438, 4455), True, 'import matplotlib.pyplot as plt\n'), ((4456, 4476), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (4466, 4476), True, 'import matplotlib.pyplot as plt\n'), ((4477, 4499), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MAE Loss"""'], {}), "('MAE Loss')\n", (4487, 4499), True, 'import matplotlib.pyplot as plt\n'), ((4500, 4510), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4508, 4510), True, 'import matplotlib.pyplot as plt\n'), ((4513, 4542), 'matplotlib.pyplot.plot', 'plt.plot', (['tf_acc'], {'label': '"""TFQ"""'}), "(tf_acc, label='TFQ')\n", (4521, 4542), True, 'import matplotlib.pyplot as plt\n'), ((4543, 4573), 'matplotlib.pyplot.plot', 'plt.plot', (['accs'], {'label': '"""Custom"""'}), "(accs, label='Custom')\n", (4551, 4573), True, 'import matplotlib.pyplot as plt\n'), ((4574, 4586), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4584, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4587, 4612), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Acc"""'], {}), "('Training Acc')\n", (4596, 4612), True, 'import matplotlib.pyplot as plt\n'), ((4613, 4633), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (4623, 4633), True, 'import matplotlib.pyplot as plt\n'), ((4634, 4656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4644, 4656), True, 'import matplotlib.pyplot as plt\n'), ((4657, 4667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4665, 4667), True, 'import matplotlib.pyplot as plt\n'), ((213, 227), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (225, 227), False, 'import cirq\n'), ((373, 387), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (385, 387), False, 'import cirq\n'), ((532, 546), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (544, 546), False, 'import cirq\n'), ((691, 705), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (703, 705), False, 'import cirq\n'), ((1192, 1206), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (1204, 
1206), False, 'import cirq\n'), ((1628, 1651), 'sympy.symbols', 'sympy.symbols', (['"""X1_rot"""'], {}), "('X1_rot')\n", (1641, 1651), False, 'import sympy\n'), ((1661, 1684), 'sympy.symbols', 'sympy.symbols', (['"""Y1_rot"""'], {}), "('Y1_rot')\n", (1674, 1684), False, 'import sympy\n'), ((1694, 1717), 'sympy.symbols', 'sympy.symbols', (['"""Z1_rot"""'], {}), "('Z1_rot')\n", (1707, 1717), False, 'import sympy\n'), ((1727, 1750), 'sympy.symbols', 'sympy.symbols', (['"""X2_rot"""'], {}), "('X2_rot')\n", (1740, 1750), False, 'import sympy\n'), ((1760, 1783), 'sympy.symbols', 'sympy.symbols', (['"""Y2_rot"""'], {}), "('Y2_rot')\n", (1773, 1783), False, 'import sympy\n'), ((1793, 1816), 'sympy.symbols', 'sympy.symbols', (['"""Z2_rot"""'], {}), "('Z2_rot')\n", (1806, 1816), False, 'import sympy\n'), ((1828, 1855), 'sympy.symbols', 'sympy.symbols', (['"""pooling0:6"""'], {}), "('pooling0:6')\n", (1841, 1855), False, 'import sympy\n'), ((1864, 1878), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (1876, 1878), False, 'import cirq\n'), ((2356, 2393), 'tensorflow.cast', 'tf.cast', (['(y_true == y_pred)', 'tf.float32'], {}), '(y_true == y_pred, tf.float32)\n', (2363, 2393), True, 'import tensorflow as tf\n'), ((2406, 2428), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['result'], {}), '(result)\n', (2420, 2428), True, 'import tensorflow as tf\n'), ((2440, 2460), 'cirq.GridQubit', 'cirq.GridQubit', (['(0)', 'i'], {}), '(0, i)\n', (2454, 2460), False, 'import cirq\n'), ((2559, 2576), 'cirq.Z', 'cirq.Z', (['qubits[1]'], {}), '(qubits[1])\n', (2565, 2576), False, 'import cirq\n'), ((2997, 3012), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (3004, 3012), True, 'import numpy as np\n'), ((3137, 3155), 'numpy.array', 'np.array', (['[params]'], {}), '([params])\n', (3145, 3155), True, 'import numpy as np\n'), ((4101, 4126), 'quantum_diffs.ParameterShift', 'ParameterShift', (['f', 'params'], {}), '(f, params)\n', (4115, 4126), False, 'from quantum_diffs import ParameterShift\n'), ((838, 866), 'tensorflow_quantum.convert_to_tensor', 'tfq.convert_to_tensor', (['train'], {}), '(train)\n', (859, 866), True, 'import tensorflow_quantum as tfq\n'), ((868, 889), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (876, 889), True, 'import numpy as np\n'), ((891, 919), 'tensorflow_quantum.convert_to_tensor', 'tfq.convert_to_tensor', (['train'], {}), '(train)\n', (912, 919), True, 'import tensorflow_quantum as tfq\n'), ((921, 942), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (929, 942), True, 'import numpy as np\n'), ((1467, 1517), 'cirq.CNOT', 'cirq.CNOT', ([], {'control': 'source_qubit', 'target': 'sink_qubit'}), '(control=source_qubit, target=sink_qubit)\n', (1476, 1517), False, 'import cirq\n'), ((1892, 1923), 'cirq.CNOT', 'cirq.CNOT', (['qubits[0]', 'qubits[1]'], {}), '(qubits[0], qubits[1])\n', (1901, 1923), False, 'import cirq\n'), ((2280, 2298), 'tensorflow.squeeze', 'tf.squeeze', (['y_true'], {}), '(y_true)\n', (2290, 2298), True, 'import tensorflow as tf\n'), ((2318, 2336), 'tensorflow.squeeze', 'tf.squeeze', (['y_pred'], {}), '(y_pred)\n', (2328, 2336), True, 'import tensorflow as tf\n'), ((3227, 3244), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3242, 3244), True, 'import tensorflow as tf\n'), ((3763, 3776), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (3771, 3776), True, 'import numpy as np\n'), ((3904, 3917), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (3912, 3917), True, 'import numpy as np\n'), ((244, 261), 
'cirq.I', 'cirq.I', (['qubits[0]'], {}), '(qubits[0])\n', (250, 261), False, 'import cirq\n'), ((280, 297), 'cirq.I', 'cirq.I', (['qubits[1]'], {}), '(qubits[1])\n', (286, 297), False, 'import cirq\n'), ((404, 421), 'cirq.X', 'cirq.X', (['qubits[0]'], {}), '(qubits[0])\n', (410, 421), False, 'import cirq\n'), ((440, 457), 'cirq.I', 'cirq.I', (['qubits[1]'], {}), '(qubits[1])\n', (446, 457), False, 'import cirq\n'), ((563, 580), 'cirq.I', 'cirq.I', (['qubits[0]'], {}), '(qubits[0])\n', (569, 580), False, 'import cirq\n'), ((599, 616), 'cirq.X', 'cirq.X', (['qubits[1]'], {}), '(qubits[1])\n', (605, 616), False, 'import cirq\n'), ((722, 739), 'cirq.X', 'cirq.X', (['qubits[0]'], {}), '(qubits[0])\n', (728, 739), False, 'import cirq\n'), ((758, 775), 'cirq.X', 'cirq.X', (['qubits[1]'], {}), '(qubits[1])\n', (764, 775), False, 'import cirq\n'), ((2798, 2834), 'tensorflow_quantum.differentiators.ParameterShift', 'tfq.differentiators.ParameterShift', ([], {}), '()\n', (2832, 2834), True, 'import tensorflow_quantum as tfq\n'), ((3332, 3349), 'tensorflow.squeeze', 'tf.squeeze', (['guess'], {}), '(guess)\n', (3342, 3349), True, 'import tensorflow as tf\n'), ((1014, 1033), 'cirq.rx', 'cirq.rx', (['symbols[0]'], {}), '(symbols[0])\n', (1021, 1033), False, 'import cirq\n'), ((1051, 1070), 'cirq.ry', 'cirq.ry', (['symbols[1]'], {}), '(symbols[1])\n', (1058, 1070), False, 'import cirq\n'), ((1088, 1107), 'cirq.rz', 'cirq.rz', (['symbols[2]'], {}), '(symbols[2])\n', (1095, 1107), False, 'import cirq\n'), ((1938, 1949), 'cirq.rx', 'cirq.rx', (['x1'], {}), '(x1)\n', (1945, 1949), False, 'import cirq\n'), ((1978, 1989), 'cirq.ry', 'cirq.ry', (['y1'], {}), '(y1)\n', (1985, 1989), False, 'import cirq\n'), ((2018, 2029), 'cirq.rz', 'cirq.rz', (['z1'], {}), '(z1)\n', (2025, 2029), False, 'import cirq\n'), ((2058, 2069), 'cirq.rx', 'cirq.rx', (['x2'], {}), '(x2)\n', (2065, 2069), False, 'import cirq\n'), ((2098, 2109), 'cirq.ry', 'cirq.ry', (['y2'], {}), '(y2)\n', (2105, 2109), False, 'import cirq\n'), ((2138, 2149), 'cirq.rz', 'cirq.rz', (['z2'], {}), '(z2)\n', (2145, 2149), False, 'import cirq\n'), ((3845, 3860), 'tensorflow.squeeze', 'tf.squeeze', (['ret'], {}), '(ret)\n', (3855, 3860), True, 'import tensorflow as tf\n')]
|
from unittest import TestCase
import numpy as np
from hamcrest import assert_that, is_
from core.batch_generator import BatchGenerator
class DummyBatchGenerator(BatchGenerator):
def __init__(self, batch_items, batch_size):
super().__init__(batch_items, batch_size, 'en')
def shuffle_entries(self):
pass
def extract_features(self, first, last):
        return np.random.rand(last, 26)[first:last]  # (last - first) random feature frames, 26 dims each
def extract_labels(self, first, last):
return [f'some label' for i in range(first, last)]
class TestBatchGenerator(TestCase):
def test_batch_generator_attributes(self):
batch_items = list(range(33))
batch_size = 16
generator = DummyBatchGenerator(batch_items, batch_size)
assert_that(len(generator), is_(3), f'len() should reflect the number of batches')
assert_that(len(generator[0][0]['the_input']), is_(batch_size), f'first batch should be full')
assert_that(len(generator[1][0]['the_input']), is_(batch_size), f'second batch should be full')
assert_that(len(generator[2][0]['the_input']), is_(1), f'last batch should be residual')
def test_batch_generator_finite(self):
batch_items = [1, 2, 3, 4, 5, 6, 7]
batch_size = 3
generator = DummyBatchGenerator(batch_items, batch_size)
assert_that(len(generator), is_(3))
for i, (batch_inputs, batch_outputs) in enumerate(generator):
assert_that(batch_inputs['the_input'].ndim, is_(3))
if i % len(generator) == len(generator) - 1:
assert_that(batch_inputs['the_input'].shape[0], is_(1), f'last batch should be residual')
else:
assert_that(batch_inputs['the_input'].shape[0], is_(batch_size), 'batch should be full')
assert_that(batch_inputs['the_input'].shape[2], is_(26))
if i >= len(generator):
break # we need to break out because generator is infinite
assert_that(generator.cur_index, is_(1), f'finite generator should be exhausted')
def test_bath_generator_infinite(self):
batch_items = [1, 2, 3, 4, 5, 6, 7]
batch_size = 3
generator = DummyBatchGenerator(batch_items, batch_size)
assert_that(len(generator), is_(3), 'length should still reflect the number of batches')
first_batch = generator[0]
second_batch = generator[1]
third_batch = generator[2]
for i, (batch_inputs, batch_outputs) in enumerate(generator):
if i % batch_size == 0:
assert_that(batch_inputs['the_input'].shape, is_(first_batch[0]['the_input'].shape))
elif i % batch_size == 1:
assert_that(batch_inputs['the_input'].shape, is_(second_batch[0]['the_input'].shape))
else:
assert_that(batch_inputs['the_input'].shape, is_(third_batch[0]['the_input'].shape))
if i > 10:
break # we need to break out because generator is infinite
assert_that(i, is_(11))
assert_that(generator.cur_index, is_(3), )
|
[
"numpy.random.rand",
"hamcrest.is_"
] |
[((394, 415), 'numpy.random.rand', 'np.random.rand', (['i', '(26)'], {}), '(i, 26)\n', (408, 415), True, 'import numpy as np\n'), ((780, 786), 'hamcrest.is_', 'is_', (['(3)'], {}), '(3)\n', (783, 786), False, 'from hamcrest import assert_that, is_\n'), ((890, 905), 'hamcrest.is_', 'is_', (['batch_size'], {}), '(batch_size)\n', (893, 905), False, 'from hamcrest import assert_that, is_\n'), ((993, 1008), 'hamcrest.is_', 'is_', (['batch_size'], {}), '(batch_size)\n', (996, 1008), False, 'from hamcrest import assert_that, is_\n'), ((1097, 1103), 'hamcrest.is_', 'is_', (['(1)'], {}), '(1)\n', (1100, 1103), False, 'from hamcrest import assert_that, is_\n'), ((1351, 1357), 'hamcrest.is_', 'is_', (['(3)'], {}), '(3)\n', (1354, 1357), False, 'from hamcrest import assert_that, is_\n'), ((2002, 2008), 'hamcrest.is_', 'is_', (['(1)'], {}), '(1)\n', (2005, 2008), False, 'from hamcrest import assert_that, is_\n'), ((2264, 2270), 'hamcrest.is_', 'is_', (['(3)'], {}), '(3)\n', (2267, 2270), False, 'from hamcrest import assert_that, is_\n'), ((3020, 3027), 'hamcrest.is_', 'is_', (['(11)'], {}), '(11)\n', (3023, 3027), False, 'from hamcrest import assert_that, is_\n'), ((3070, 3076), 'hamcrest.is_', 'is_', (['(3)'], {}), '(3)\n', (3073, 3076), False, 'from hamcrest import assert_that, is_\n'), ((1485, 1491), 'hamcrest.is_', 'is_', (['(3)'], {}), '(3)\n', (1488, 1491), False, 'from hamcrest import assert_that, is_\n'), ((1839, 1846), 'hamcrest.is_', 'is_', (['(26)'], {}), '(26)\n', (1842, 1846), False, 'from hamcrest import assert_that, is_\n'), ((1614, 1620), 'hamcrest.is_', 'is_', (['(1)'], {}), '(1)\n', (1617, 1620), False, 'from hamcrest import assert_that, is_\n'), ((1738, 1753), 'hamcrest.is_', 'is_', (['batch_size'], {}), '(batch_size)\n', (1741, 1753), False, 'from hamcrest import assert_that, is_\n'), ((2598, 2636), 'hamcrest.is_', 'is_', (["first_batch[0]['the_input'].shape"], {}), "(first_batch[0]['the_input'].shape)\n", (2601, 2636), False, 'from hamcrest import assert_that, is_\n'), ((2737, 2776), 'hamcrest.is_', 'is_', (["second_batch[0]['the_input'].shape"], {}), "(second_batch[0]['the_input'].shape)\n", (2740, 2776), False, 'from hamcrest import assert_that, is_\n'), ((2857, 2895), 'hamcrest.is_', 'is_', (["third_batch[0]['the_input'].shape"], {}), "(third_batch[0]['the_input'].shape)\n", (2860, 2895), False, 'from hamcrest import assert_that, is_\n')]
|
# -*- coding: utf-8 -*-
"""
Seismic wavelets.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
from collections import namedtuple
import numpy as np
from scipy.signal import hilbert
from scipy.signal import chirp
def sinc(duration, dt, f, return_t=False, taper='blackman'):
"""
sinc function centered on t=0, with a dominant frequency of f Hz.
If you pass a 1D array of frequencies, you get a wavelet bank in return.
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (ndarray): Dominant frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis, where time is the range from -duration/2 to
duration/2 in steps of dt.
taper (str or function): The window or tapering function to apply.
To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
default), 'hamming', or 'hanning'; to apply no tapering, pass
'none'. To apply your own function, pass a function taking only
the length of the window and returning the window function.
Returns:
ndarray. sinc wavelet(s) with centre frequency f sampled on t.
"""
f = np.asanyarray(f).reshape(-1, 1)
t = np.arange(-duration/2., duration/2., dt)
t[t == 0] = 1e-12 # Avoid division by zero.
f[f == 0] = 1e-12 # Avoid division by zero.
w = np.squeeze(np.sin(2*np.pi*f*t) / (2*np.pi*f*t))
if taper:
funcs = {
'bartlett': np.bartlett,
'blackman': np.blackman,
'hamming': np.hamming,
'hanning': np.hanning,
'none': lambda x: x,
}
func = funcs.get(taper, taper)
w *= func(t.size)
if return_t:
RickerWavelet = namedtuple('RickerWavelet', ['amplitude', 'time'])
return RickerWavelet(w, t)
else:
return w
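# Usage sketch (illustrative values): a single Blackman-tapered 25 Hz sinc
# wavelet, and an untapered bank with one row per centre frequency.
#     w = sinc(0.128, 0.004, 25)
#     bank = sinc(0.128, 0.004, [8, 25, 90], taper='none')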
def ricker(duration, dt, f, return_t=False):
"""
Also known as the mexican hat wavelet, models the function:
A = (1-2 \pi^2 f^2 t^2) e^{-\pi^2 f^2 t^2}
If you pass a 1D array of frequencies, you get a wavelet bank in return.
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (often one of 0.001, 0.002,
or 0.004).
f (ndarray): Centre frequency of the wavelet in Hz. If a sequence is
passed, you will get a 2D array in return, one row per frequency.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis, where time is the range from -duration/2 to
duration/2 in steps of dt.
Returns:
ndarray. Ricker wavelet(s) with centre frequency f sampled on t.
"""
f = np.asanyarray(f).reshape(-1, 1)
t = np.arange(-duration/2, duration/2, dt)
pft2 = (np.pi * f * t)**2
w = np.squeeze((1 - (2 * pft2)) * np.exp(-pft2))
if return_t:
RickerWavelet = namedtuple('RickerWavelet', ['amplitude', 'time'])
return RickerWavelet(w, t)
else:
return w
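# Usage sketch (illustrative values): a 25 Hz Ricker wavelet at 2 ms sampling,
# a bank with one row per centre frequency, and the time basis via return_t.
#     w = ricker(0.128, 0.002, 25)
#     bank = ricker(0.128, 0.002, [10, 25, 40])
#     w, t = ricker(0.128, 0.002, 25, return_t=True)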
def sweep(duration, dt, f,
autocorrelate=True,
return_t=False,
taper='blackman',
**kwargs):
"""
Generates a linear frequency modulated wavelet (sweep). Wraps
scipy.signal.chirp, adding dimensions as necessary.
Args:
duration (float): The length in seconds of the wavelet.
dt (float): is the sample interval in seconds (usually 0.001, 0.002,
or 0.004)
f (ndarray): Any sequence like (f1, f2). A list of lists will create a
wavelet bank.
autocorrelate (bool): Whether to autocorrelate the sweep(s) to create
a wavelet. Default is `True`.
return_t (bool): If True, then the function returns a tuple of
wavelet, time-basis, where time is the range from -duration/2 to
duration/2 in steps of dt.
taper (str or function): The window or tapering function to apply.
To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
default), 'hamming', or 'hanning'; to apply no tapering, pass
'none'. To apply your own function, pass a function taking only
the length of the window and returning the window function.
**kwargs: Further arguments are passed to scipy.signal.chirp. They are
`method` ('linear','quadratic','logarithmic'), `phi` (phase offset
in degrees), and `vertex_zero`.
Returns:
ndarray: The waveform.
"""
t0, t1 = -duration/2, duration/2
t = np.arange(t0, t1, dt)
f = np.asanyarray(f).reshape(-1, 1)
f1, f2 = f
c = [chirp(t, f1_+(f2_-f1_)/2., t1, f2_, **kwargs)
for f1_, f2_
in zip(f1, f2)]
    if autocorrelate:
        w = [np.correlate(c_, c_, mode='same') for c_ in c]
    else:
        w = c  # keep the raw sweep(s) when autocorrelation is disabled
    w = np.squeeze(w) / np.amax(w)
if taper:
funcs = {
'bartlett': np.bartlett,
'blackman': np.blackman,
'hamming': np.hamming,
'hanning': np.hanning,
'none': lambda x: x,
}
func = funcs.get(taper, taper)
w *= func(t.size)
if return_t:
Sweep = namedtuple('Sweep', ['amplitude', 'time'])
return Sweep(w, t)
else:
return w
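# Usage sketch (illustrative values): a 5-80 Hz sweep autocorrelated into a
# zero-phase wavelet; extra keyword arguments pass through to scipy.signal.chirp.
#     w = sweep(0.5, 0.002, [5, 80])
#     w, t = sweep(0.5, 0.002, [5, 80], return_t=True, method='linear')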
def ormsby(duration, dt, f, return_t=False):
"""
The Ormsby wavelet requires four frequencies which together define a
trapezoid shape in the spectrum. The Ormsby wavelet has several sidelobes,
unlike Ricker wavelets.
Args:
duration (float): The length in seconds of the wavelet.
dt (float): The sample interval in seconds (usually 0.001, 0.002,
or 0.004).
f (ndarray): Sequence of form (f1, f2, f3, f4), or list of lists of
frequencies, which will return a 2D wavelet bank.
Returns:
ndarray: A vector containing the Ormsby wavelet, or a bank of them.
"""
f = np.asanyarray(f).reshape(-1, 1)
try:
f1, f2, f3, f4 = f
except ValueError:
raise ValueError("The last dimension must be 4")
def numerator(f, t):
return (np.sinc(f * t)**2) * ((np.pi * f) ** 2)
pf43 = (np.pi * f4) - (np.pi * f3)
pf21 = (np.pi * f2) - (np.pi * f1)
t = np.arange(-duration/2, duration/2, dt)
w = ((numerator(f4, t)/pf43) - (numerator(f3, t)/pf43) -
(numerator(f2, t)/pf21) + (numerator(f1, t)/pf21))
w = np.squeeze(w) / np.amax(w)
if return_t:
OrmsbyWavelet = namedtuple('OrmsbyWavelet', ['amplitude', 'time'])
return OrmsbyWavelet(w, t)
else:
return w
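# Usage sketch (illustrative values): the four corner frequencies define the
# trapezoidal amplitude spectrum, here 5-10-40-45 Hz.
#     w = ormsby(0.128, 0.002, [5, 10, 40, 45])
#     w, t = ormsby(0.128, 0.002, [5, 10, 40, 45], return_t=True)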
def rotate_phase(w, phi, degrees=False):
"""
Performs a phase rotation of wavelet or wavelet bank using:
The analytic signal can be written in the form S(t) = A(t)exp(j*theta(t))
where A(t) = magnitude(hilbert(w(t))) and theta(t) = angle(hilbert(w(t))
then a constant phase rotation phi would produce the analytic signal
S(t) = A(t)exp(j*(theta(t) + phi)). To get the non analytic signal
we take real(S(t)) == A(t)cos(theta(t) + phi)
    == A(t)(cos(theta(t))cos(phi) - sin(theta(t))sin(phi)) <= trig identity
    == w(t)cos(phi) - h(t)sin(phi)
    A = w(t)cos(phi) - h(t)sin(phi)
Where w(t) is the wavelet and h(t) is its Hilbert transform.
Args:
w (ndarray): The wavelet vector, can be a 2D wavelet bank.
phi (float): The phase rotation angle (in radians) to apply.
degrees (bool): If phi is in degrees not radians.
Returns:
The phase rotated signal (or bank of signals).
"""
if degrees:
phi = phi * np.pi / 180.0
a = hilbert(w, axis=0)
w = (np.real(a) * np.cos(phi) - np.imag(a) * np.sin(phi))
return w
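# Usage sketch (illustrative values): a 90 degree rotation turns a symmetric,
# zero-phase Ricker wavelet into an antisymmetric one.
#     w = ricker(0.128, 0.002, 25)
#     w90 = rotate_phase(w, 90, degrees=True)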
|
[
"numpy.asanyarray",
"numpy.sinc",
"numpy.amax",
"numpy.imag",
"scipy.signal.chirp",
"numpy.arange",
"collections.namedtuple",
"scipy.signal.hilbert",
"numpy.sin",
"numpy.squeeze",
"numpy.exp",
"numpy.correlate",
"numpy.real",
"numpy.cos"
] |
[((1481, 1527), 'numpy.arange', 'np.arange', (['(-duration / 2.0)', '(duration / 2.0)', 'dt'], {}), '(-duration / 2.0, duration / 2.0, dt)\n', (1490, 1527), True, 'import numpy as np\n'), ((3024, 3066), 'numpy.arange', 'np.arange', (['(-duration / 2)', '(duration / 2)', 'dt'], {}), '(-duration / 2, duration / 2, dt)\n', (3033, 3066), True, 'import numpy as np\n'), ((4826, 4847), 'numpy.arange', 'np.arange', (['t0', 't1', 'dt'], {}), '(t0, t1, dt)\n', (4835, 4847), True, 'import numpy as np\n'), ((6512, 6554), 'numpy.arange', 'np.arange', (['(-duration / 2)', '(duration / 2)', 'dt'], {}), '(-duration / 2, duration / 2, dt)\n', (6521, 6554), True, 'import numpy as np\n'), ((7882, 7900), 'scipy.signal.hilbert', 'hilbert', (['w'], {'axis': '(0)'}), '(w, axis=0)\n', (7889, 7900), False, 'from scipy.signal import hilbert\n'), ((2003, 2053), 'collections.namedtuple', 'namedtuple', (['"""RickerWavelet"""', "['amplitude', 'time']"], {}), "('RickerWavelet', ['amplitude', 'time'])\n", (2013, 2053), False, 'from collections import namedtuple\n'), ((3188, 3238), 'collections.namedtuple', 'namedtuple', (['"""RickerWavelet"""', "['amplitude', 'time']"], {}), "('RickerWavelet', ['amplitude', 'time'])\n", (3198, 3238), False, 'from collections import namedtuple\n'), ((4914, 4966), 'scipy.signal.chirp', 'chirp', (['t', '(f1_ + (f2_ - f1_) / 2.0)', 't1', 'f2_'], {}), '(t, f1_ + (f2_ - f1_) / 2.0, t1, f2_, **kwargs)\n', (4919, 4966), False, 'from scipy.signal import chirp\n'), ((5099, 5112), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (5109, 5112), True, 'import numpy as np\n'), ((5115, 5125), 'numpy.amax', 'np.amax', (['w'], {}), '(w)\n', (5122, 5125), True, 'import numpy as np\n'), ((5445, 5487), 'collections.namedtuple', 'namedtuple', (['"""Sweep"""', "['amplitude', 'time']"], {}), "('Sweep', ['amplitude', 'time'])\n", (5455, 5487), False, 'from collections import namedtuple\n'), ((6682, 6695), 'numpy.squeeze', 'np.squeeze', (['w'], {}), '(w)\n', (6692, 6695), True, 'import numpy as np\n'), ((6698, 6708), 'numpy.amax', 'np.amax', (['w'], {}), '(w)\n', (6705, 6708), True, 'import numpy as np\n'), ((6751, 6801), 'collections.namedtuple', 'namedtuple', (['"""OrmsbyWavelet"""', "['amplitude', 'time']"], {}), "('OrmsbyWavelet', ['amplitude', 'time'])\n", (6761, 6801), False, 'from collections import namedtuple\n'), ((1441, 1457), 'numpy.asanyarray', 'np.asanyarray', (['f'], {}), '(f)\n', (1454, 1457), True, 'import numpy as np\n'), ((1639, 1664), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * t)'], {}), '(2 * np.pi * f * t)\n', (1645, 1664), True, 'import numpy as np\n'), ((2984, 3000), 'numpy.asanyarray', 'np.asanyarray', (['f'], {}), '(f)\n', (2997, 3000), True, 'import numpy as np\n'), ((3131, 3144), 'numpy.exp', 'np.exp', (['(-pft2)'], {}), '(-pft2)\n', (3137, 3144), True, 'import numpy as np\n'), ((4857, 4873), 'numpy.asanyarray', 'np.asanyarray', (['f'], {}), '(f)\n', (4870, 4873), True, 'import numpy as np\n'), ((5043, 5076), 'numpy.correlate', 'np.correlate', (['c_', 'c_'], {'mode': '"""same"""'}), "(c_, c_, mode='same')\n", (5055, 5076), True, 'import numpy as np\n'), ((6193, 6209), 'numpy.asanyarray', 'np.asanyarray', (['f'], {}), '(f)\n', (6206, 6209), True, 'import numpy as np\n'), ((7910, 7920), 'numpy.real', 'np.real', (['a'], {}), '(a)\n', (7917, 7920), True, 'import numpy as np\n'), ((7923, 7934), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7929, 7934), True, 'import numpy as np\n'), ((7937, 7947), 'numpy.imag', 'np.imag', (['a'], {}), '(a)\n', (7944, 7947), True, 'import numpy as 
np\n'), ((7950, 7961), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (7956, 7961), True, 'import numpy as np\n'), ((6384, 6398), 'numpy.sinc', 'np.sinc', (['(f * t)'], {}), '(f * t)\n', (6391, 6398), True, 'import numpy as np\n')]
|
import ee
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import box
import rabpro
from rabpro.basin_stats import Dataset
# coords_file = gpd.read_file(r"tests/data/Big Blue River.geojson")
# total_bounds = coords_file.total_bounds
total_bounds = np.array([-85.91331249, 39.42609864, -85.88453019, 39.46429816])
gdf = gpd.GeoDataFrame({"idx": [1], "geometry": [box(*total_bounds)]}, crs="EPSG:4326")
def clean_res(feature):
res = pd.DataFrame(feature["properties"], index=[0])
res["id"] = feature["id"]
return res
def test_customreducer():
def asdf(feat):
return feat.getNumber("max")
data, task = rabpro.basin_stats.compute(
[Dataset("JRC/GSW1_3/YearlyHistory", "waterClass", stats=["max"])],
basins_gdf=gdf,
reducer_funcs=[asdf],
test=True,
)
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])
assert all(res["asdf"] == res["max"])
def test_categorical_imgcol():
urls, task = rabpro.basin_stats.compute(
[Dataset("MODIS/006/MCD12Q1", "LC_Type1", stats=["freqhist"])], basins_gdf=gdf
)
res = rabpro.basin_stats.fetch_gee(urls, ["lulc"])
assert res.shape[1] > 1
def test_timeindexed_imgcol():
urls, tasks = rabpro.basin_stats.compute(
[Dataset("JRC/GSW1_3/YearlyHistory", "waterClass",)], basins_gdf=gdf
)
res = rabpro.basin_stats.fetch_gee(urls, ["waterclass"])
assert res["waterclass_mean"].iloc[0] > 0
assert res.shape[0] > 0
def test_timeindexedspecific_imgcol():
data, task = rabpro.basin_stats.compute(
[
Dataset(
"JRC/GSW1_3/YearlyHistory",
"waterClass",
start="2017-01-01",
end="2019-01-01",
)
],
basins_gdf=gdf,
test=True,
)
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])
assert res.shape[0] == 2
def test_nontimeindexed_imgcol():
data, task = rabpro.basin_stats.compute(
[Dataset("JRC/GSW1_3/MonthlyRecurrence", "monthly_recurrence",)],
basins_gdf=gdf,
test=True,
)
res = pd.concat([clean_res(feature) for feature in data[0]["features"]])
assert res.shape[0] > 0
def test_img():
data, task = rabpro.basin_stats.compute(
[
Dataset(
"JRC/GSW1_3/GlobalSurfaceWater",
"occurrence",
stats=["min", "max", "range", "std", "sum", "pct50", "pct3"],
)
],
basins_gdf=gdf,
test=True,
)
res = pd.DataFrame(data[0]["features"][0]["properties"], index=[0])
assert float(res["mean"]) > 0
assert res.shape[1] == 9
|
[
"pandas.DataFrame",
"rabpro.basin_stats.Dataset",
"rabpro.basin_stats.fetch_gee",
"numpy.array",
"shapely.geometry.box"
] |
[((287, 351), 'numpy.array', 'np.array', (['[-85.91331249, 39.42609864, -85.88453019, 39.46429816]'], {}), '([-85.91331249, 39.42609864, -85.88453019, 39.46429816])\n', (295, 351), True, 'import numpy as np\n'), ((476, 522), 'pandas.DataFrame', 'pd.DataFrame', (["feature['properties']"], {'index': '[0]'}), "(feature['properties'], index=[0])\n", (488, 522), True, 'import pandas as pd\n'), ((1157, 1201), 'rabpro.basin_stats.fetch_gee', 'rabpro.basin_stats.fetch_gee', (['urls', "['lulc']"], {}), "(urls, ['lulc'])\n", (1185, 1201), False, 'import rabpro\n'), ((1405, 1455), 'rabpro.basin_stats.fetch_gee', 'rabpro.basin_stats.fetch_gee', (['urls', "['waterclass']"], {}), "(urls, ['waterclass'])\n", (1433, 1455), False, 'import rabpro\n'), ((2624, 2685), 'pandas.DataFrame', 'pd.DataFrame', (["data[0]['features'][0]['properties']"], {'index': '[0]'}), "(data[0]['features'][0]['properties'], index=[0])\n", (2636, 2685), True, 'import pandas as pd\n'), ((401, 419), 'shapely.geometry.box', 'box', (['*total_bounds'], {}), '(*total_bounds)\n', (404, 419), False, 'from shapely.geometry import box\n'), ((708, 772), 'rabpro.basin_stats.Dataset', 'Dataset', (['"""JRC/GSW1_3/YearlyHistory"""', '"""waterClass"""'], {'stats': "['max']"}), "('JRC/GSW1_3/YearlyHistory', 'waterClass', stats=['max'])\n", (715, 772), False, 'from rabpro.basin_stats import Dataset\n'), ((1063, 1123), 'rabpro.basin_stats.Dataset', 'Dataset', (['"""MODIS/006/MCD12Q1"""', '"""LC_Type1"""'], {'stats': "['freqhist']"}), "('MODIS/006/MCD12Q1', 'LC_Type1', stats=['freqhist'])\n", (1070, 1123), False, 'from rabpro.basin_stats import Dataset\n'), ((1320, 1369), 'rabpro.basin_stats.Dataset', 'Dataset', (['"""JRC/GSW1_3/YearlyHistory"""', '"""waterClass"""'], {}), "('JRC/GSW1_3/YearlyHistory', 'waterClass')\n", (1327, 1369), False, 'from rabpro.basin_stats import Dataset\n'), ((1640, 1732), 'rabpro.basin_stats.Dataset', 'Dataset', (['"""JRC/GSW1_3/YearlyHistory"""', '"""waterClass"""'], {'start': '"""2017-01-01"""', 'end': '"""2019-01-01"""'}), "('JRC/GSW1_3/YearlyHistory', 'waterClass', start='2017-01-01', end=\n '2019-01-01')\n", (1647, 1732), False, 'from rabpro.basin_stats import Dataset\n'), ((2066, 2127), 'rabpro.basin_stats.Dataset', 'Dataset', (['"""JRC/GSW1_3/MonthlyRecurrence"""', '"""monthly_recurrence"""'], {}), "('JRC/GSW1_3/MonthlyRecurrence', 'monthly_recurrence')\n", (2073, 2127), False, 'from rabpro.basin_stats import Dataset\n'), ((2373, 2493), 'rabpro.basin_stats.Dataset', 'Dataset', (['"""JRC/GSW1_3/GlobalSurfaceWater"""', '"""occurrence"""'], {'stats': "['min', 'max', 'range', 'std', 'sum', 'pct50', 'pct3']"}), "('JRC/GSW1_3/GlobalSurfaceWater', 'occurrence', stats=['min', 'max',\n 'range', 'std', 'sum', 'pct50', 'pct3'])\n", (2380, 2493), False, 'from rabpro.basin_stats import Dataset\n')]
|
import logging
import numpy as np
import pytest
import xskillscore as xs
from climpred.exceptions import CoordinateError
from climpred.prediction import compute_hindcast
def test_same_inits_initializations(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that inits are identical at all leads for `same_inits` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_inits",
)
for i, record in enumerate(caplog.record_tuples):
if i >= 2:
print(record)
assert "inits: 1954-01-01 00:00:00-2007-01-01 00:00:00" in record[2]
def test_same_inits_verification_dates(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate verifs are being used at each lead for `same_inits`
alignment."""
with caplog.at_level(logging.INFO):
FIRST_INIT, LAST_INIT = 1954, 2007
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_inits",
)
nleads = hind_ds_initialized_1d_cftime["lead"].size
for i, record in zip(
np.arange(nleads + 2),
caplog.record_tuples,
):
if i >= 2:
print(record)
assert (
f"verifs: {FIRST_INIT+i}-01-01 00:00:00-{LAST_INIT+i}-01-01"
in record[2]
)
@pytest.mark.parametrize("alignment", ["same_inits", "same_verifs"])
def test_disjoint_verif_time(small_initialized_da, small_verif_da, alignment):
"""Tests that alignment works with disjoint time in the verification
data, i.e., non-continuous time sampling to verify against."""
hind = small_initialized_da
verif = small_verif_da.drop_sel(time=1992)
actual = compute_hindcast(hind, verif, alignment=alignment, metric="mse")
assert actual.notnull().all()
# hindcast inits: [1990, 1991, 1992, 1993]
# verif times: [1990, 1991, 1993, 1994]
a = hind.sel(init=[1990, 1992, 1993]).rename({"init": "time"})
b = verif.sel(time=[1991, 1993, 1994])
a["time"] = b["time"]
expected = xs.mse(a, b, "time")
assert actual == expected
@pytest.mark.parametrize("alignment", ["same_inits", "same_verifs"])
def test_disjoint_inits(small_initialized_da, small_verif_da, alignment):
"""Tests that alignment works with disjoint inits in the verification
data, i.e., non-continuous initializing to verify with."""
hind = small_initialized_da.drop_sel(init=1991)
verif = small_verif_da
actual = compute_hindcast(hind, verif, alignment=alignment, metric="mse")
assert actual.notnull().all()
# hindcast inits: [1990, 1992, 1993]
# verif times: [1990, 1991, 1992, 1993, 1994]
a = hind.rename({"init": "time"})
b = verif.sel(time=[1991, 1993, 1994])
a["time"] = b["time"]
expected = xs.mse(a, b, "time")
assert actual == expected
def test_same_verifs_verification_dates(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that verifs are identical at all leads for `same_verifs` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_verifs",
)
for i, record in enumerate(caplog.record_tuples):
if i >= 2:
print(record)
assert "verifs: 1964-01-01 00:00:00-2017-01-01 00:00:00" in record[2]
def test_same_verifs_initializations(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate verifs are being used at each lead for `same_inits`
alignment."""
with caplog.at_level(logging.INFO):
FIRST_INIT, LAST_INIT = 1964, 2017
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="same_verifs",
)
nleads = hind_ds_initialized_1d_cftime["lead"].size
for i, record in zip(
np.arange(nleads + 2),
caplog.record_tuples,
):
if i >= 2:
print(record)
assert (
f"inits: {FIRST_INIT-i}-01-01 00:00:00-{LAST_INIT-i}-01-01 00:00:00"
in record[2]
)
def test_same_verifs_raises_error_when_not_possible(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime
):
"""Tests that appropriate error is raised when a common set of verification dates
cannot be found with the supplied initializations."""
hind = hind_ds_initialized_1d_cftime.isel(lead=slice(0, 3), init=[1, 3, 5, 7, 9])
with pytest.raises(CoordinateError):
compute_hindcast(hind, reconstruction_ds_1d_cftime, alignment="same_verifs")
def test_maximize_alignment_inits(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate inits are selected for `maximize` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="maximize",
)
# Add dummy values for the first two lines since they are just metadata.
for i, record in zip(
np.concatenate(([0, 0], hind_ds_initialized_1d_cftime.lead.values)),
caplog.record_tuples,
):
if i >= 1:
print(record)
assert (
f"inits: 1954-01-01 00:00:00-{2016-i}-01-01 00:00:00" in record[2]
)
def test_maximize_alignment_verifs(
hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime, caplog
):
"""Tests that appropriate verifs are selected for `maximize` alignment."""
with caplog.at_level(logging.INFO):
compute_hindcast(
hind_ds_initialized_1d_cftime,
reconstruction_ds_1d_cftime,
alignment="maximize",
)
# Add dummy values for the first two lines since they are just metadata.
for i, record in zip(
np.concatenate(([0, 0], hind_ds_initialized_1d_cftime.lead.values)),
caplog.record_tuples,
):
if i >= 1:
print(record)
assert (
f"verifs: {1955+i}-01-01 00:00:00-2017-01-01 00:00:00" in record[2]
)
|
[
"climpred.prediction.compute_hindcast",
"pytest.raises",
"numpy.arange",
"xskillscore.mse",
"pytest.mark.parametrize",
"numpy.concatenate"
] |
[((1597, 1664), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alignment"""', "['same_inits', 'same_verifs']"], {}), "('alignment', ['same_inits', 'same_verifs'])\n", (1620, 1664), False, 'import pytest\n'), ((2371, 2438), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""alignment"""', "['same_inits', 'same_verifs']"], {}), "('alignment', ['same_inits', 'same_verifs'])\n", (2394, 2438), False, 'import pytest\n'), ((1976, 2040), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind', 'verif'], {'alignment': 'alignment', 'metric': '"""mse"""'}), "(hind, verif, alignment=alignment, metric='mse')\n", (1992, 2040), False, 'from climpred.prediction import compute_hindcast\n'), ((2317, 2337), 'xskillscore.mse', 'xs.mse', (['a', 'b', '"""time"""'], {}), "(a, b, 'time')\n", (2323, 2337), True, 'import xskillscore as xs\n'), ((2742, 2806), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind', 'verif'], {'alignment': 'alignment', 'metric': '"""mse"""'}), "(hind, verif, alignment=alignment, metric='mse')\n", (2758, 2806), False, 'from climpred.prediction import compute_hindcast\n'), ((3054, 3074), 'xskillscore.mse', 'xs.mse', (['a', 'b', '"""time"""'], {}), "(a, b, 'time')\n", (3060, 3074), True, 'import xskillscore as xs\n'), ((415, 519), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind_ds_initialized_1d_cftime', 'reconstruction_ds_1d_cftime'], {'alignment': '"""same_inits"""'}), "(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime,\n alignment='same_inits')\n", (431, 519), False, 'from climpred.prediction import compute_hindcast\n'), ((1066, 1170), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind_ds_initialized_1d_cftime', 'reconstruction_ds_1d_cftime'], {'alignment': '"""same_inits"""'}), "(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime,\n alignment='same_inits')\n", (1082, 1170), False, 'from climpred.prediction import compute_hindcast\n'), ((3354, 3459), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind_ds_initialized_1d_cftime', 'reconstruction_ds_1d_cftime'], {'alignment': '"""same_verifs"""'}), "(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime,\n alignment='same_verifs')\n", (3370, 3459), False, 'from climpred.prediction import compute_hindcast\n'), ((4005, 4110), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind_ds_initialized_1d_cftime', 'reconstruction_ds_1d_cftime'], {'alignment': '"""same_verifs"""'}), "(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime,\n alignment='same_verifs')\n", (4021, 4110), False, 'from climpred.prediction import compute_hindcast\n'), ((4902, 4932), 'pytest.raises', 'pytest.raises', (['CoordinateError'], {}), '(CoordinateError)\n', (4915, 4932), False, 'import pytest\n'), ((4942, 5018), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind', 'reconstruction_ds_1d_cftime'], {'alignment': '"""same_verifs"""'}), "(hind, reconstruction_ds_1d_cftime, alignment='same_verifs')\n", (4958, 5018), False, 'from climpred.prediction import compute_hindcast\n'), ((5256, 5358), 'climpred.prediction.compute_hindcast', 'compute_hindcast', (['hind_ds_initialized_1d_cftime', 'reconstruction_ds_1d_cftime'], {'alignment': '"""maximize"""'}), "(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime,\n alignment='maximize')\n", (5272, 5358), False, 'from climpred.prediction import compute_hindcast\n'), ((6061, 6163), 'climpred.prediction.compute_hindcast', 'compute_hindcast', 
(['hind_ds_initialized_1d_cftime', 'reconstruction_ds_1d_cftime'], {'alignment': '"""maximize"""'}), "(hind_ds_initialized_1d_cftime, reconstruction_ds_1d_cftime,\n alignment='maximize')\n", (6077, 6163), False, 'from climpred.prediction import compute_hindcast\n'), ((1316, 1337), 'numpy.arange', 'np.arange', (['(nleads + 2)'], {}), '(nleads + 2)\n', (1325, 1337), True, 'import numpy as np\n'), ((4256, 4277), 'numpy.arange', 'np.arange', (['(nleads + 2)'], {}), '(nleads + 2)\n', (4265, 4277), True, 'import numpy as np\n'), ((5525, 5592), 'numpy.concatenate', 'np.concatenate', (['([0, 0], hind_ds_initialized_1d_cftime.lead.values)'], {}), '(([0, 0], hind_ds_initialized_1d_cftime.lead.values))\n', (5539, 5592), True, 'import numpy as np\n'), ((6330, 6397), 'numpy.concatenate', 'np.concatenate', (['([0, 0], hind_ds_initialized_1d_cftime.lead.values)'], {}), '(([0, 0], hind_ds_initialized_1d_cftime.lead.values))\n', (6344, 6397), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Plot signal heatmaps from TFBS across different bigwigs
@author: <NAME>
@contact: mette.bentsen (at) mpi-bn.mpg.de
@license: MIT
"""
import os
import sys
import argparse
import logging
import numpy as np
import matplotlib as mpl
mpl.use("Agg") #non-interactive backend
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.gridspec as gridspec
from datetime import datetime
from sklearn import preprocessing
import pyBigWig
import pysam
import pybedtools as pb
from tobias.parsers import add_heatmap_arguments
from tobias.utils.regions import *
from tobias.utils.utilities import *
#----------------------------------------------------------------------------------------#
def run_heatmap(args):
#Start logger
logger = TobiasLogger("PlotHeatmap", args.verbosity)
logger.begin()
parser = add_heatmap_arguments(argparse.ArgumentParser())
logger.arguments_overview(parser, args)
logger.output_files([args.output])
check_required(args, ["TFBS", "signals"])
	#Setup TFBS labels if not given
if args.TFBS_labels == None:
args.TFBS_labels = [[os.path.basename(fil) for fil in args.TFBS[i]] for i in range(len(args.TFBS))]
if args.signal_labels == None:
args.signal_labels = [os.path.basename(fil) for fil in args.signals]
########################################################
#Check valid input parameters (number of input TFBS vs. bigwig etc.)
no_signals = len(args.signals)
no_columns = len(args.show_columns)
no_TFBS_col = len(args.TFBS)
if no_TFBS_col > 1 and len(args.show_columns) > 0:
sys.exit("Error: option --show_columns is not available for multiple --TFBS inputs.")
if no_TFBS_col > 1 and no_signals != no_TFBS_col:
sys.exit("Error: Number of --TFBS does not match number of signals")
elif no_TFBS_col == 1 and no_signals > 1:
#copy bed_f to other columns
logger.info("Using bedfiles: {0} across all bigwigs".format(args.TFBS))
for i in range(no_signals-1):
args.TFBS.append(args.TFBS[0])
args.TFBS_labels.append(args.TFBS_labels[0])
else:
for i, signal in enumerate(args.signals):
logger.info("Using {0} with signal from {1}".format(args.TFBS[i], signal))
#todo: logger overview of bedfiles per column?
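	# Pairing of --TFBS groups with --signals (illustrative): a single --TFBS group
	# is reused for every bigwig column; with several groups, group i is plotted
	# against signals[i], so both lists must then have the same length.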
######################################################################################
##################################### INPUT DATA #####################################
######################################################################################
#Setup info dict
heatmap_info = {col:{row:{"bigwig_f": args.signals[col], "bed_f":args.TFBS[col][row]} for row in range(len(args.TFBS[col]))} for col in range(len(args.signals))}
#Add extra columns
for i, bed_column in enumerate(args.show_columns):
heatmap_info[no_signals+i] = {row:{"column": bed_column, "bed_f":args.TFBS[0][row]} for row in range(len(args.TFBS[0]))}
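	# Illustrative layout of heatmap_info for two signals and two TFBS files per column:
	# {0: {0: {"bigwig_f": "A.bw", "bed_f": "x.bed"}, 1: {"bigwig_f": "A.bw", "bed_f": "y.bed"}},
	#  1: {0: {"bigwig_f": "B.bw", "bed_f": "x.bed"}, 1: {"bigwig_f": "B.bw", "bed_f": "y.bed"}}}
	# Columns requested via --show_columns are appended after the signal columns and
	# store the bed column name ("column") instead of a bigwig path.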
#------------------------------------------------------------------------------------#
#------------------------ Read input files to RegionLists ---------------------------#
#------------------------------------------------------------------------------------#
seen_bed = []
#Read regions per heatmap in grid
logger.comment("")
logger.info("Reading bedfiles")
for col in range(len(heatmap_info)):
for row in range(len(heatmap_info[col])):
heatmap_info[col][row]["regions"] = RegionList().from_bed(heatmap_info[col][row]["bed_f"])
#Estimate region width
distri = heatmap_info[col][row]["regions"].get_width_distri()
if len(distri) > 1:
logger.warning("Input regions have differing lengths: {0}".format(distri))
heatmap_info[col][row]["width"] = list(distri.keys())[0]
#Extend to flank
heatmap_info[col][row]["regions"] = heatmap_info[col][row]["regions"].apply_method(OneRegion.set_width, 2*args.flank)
#Sort if chosen
if args.sort_by != None:
try:
heatmap_info[col][row]["regions"].sort(key=lambda region: float(region[args.sort_by]), reverse=True)
except:
heatmap_info[col][row]["regions"].sort(key=lambda region: region[args.sort_by], reverse=True)
#Get scores from file
invalid = []
for i, bed_column in enumerate(args.show_columns):
heatmap_info[no_signals+i][row]["column_{0}".format(bed_column)] = [region[bed_column] for region in heatmap_info[col][row]["regions"]]
try:
heatmap_info[no_signals+i][row]["column_{0}".format(bed_column)] = [float(element) for element in heatmap_info[no_signals+i][row]["column_{0}".format(bed_column)]]
except:
logger.info("Column {0} cannot be converted to float - excluding".format(bed_column))
del heatmap_info[no_signals+i][row]["column_{0}".format(bed_column)]
invalid.append(bed_column)
for bed_column in invalid:
args.show_columns.remove(bed_column)
#Logger info about bedfile
if heatmap_info[col][row]["bed_f"] not in seen_bed:
logger.info("- Read {1} sites from {0} of width {2}".format(heatmap_info[col][row]["bed_f"], len(heatmap_info[col][row]["regions"]), heatmap_info[col][row]["width"]))
seen_bed.append(heatmap_info[col][row]["bed_f"])
#------------------------------------------------------------------------------------#
#------------------------------ Signals from all sites ------------------------------#
#------------------------------------------------------------------------------------#
logger.comment("")
logger.info("Reading signals from bigwigs")
for col in range(len(args.TFBS)):
bigwig_f = heatmap_info[col][0]["bigwig_f"] #bigwig is the same for all rows, therefore row == 0
pybw = pyBigWig.open(bigwig_f, "rb")
for row in heatmap_info[col]:
logger.info("- Reading {0} from {1}".format(heatmap_info[col][row]["bed_f"], bigwig_f))
if len(heatmap_info[col][row]["regions"]) > 0:
heatmap_info[col][row]["signal_mat"] = np.array([region.get_signal(pybw) for region in heatmap_info[col][row]["regions"]])
heatmap_info[col][row]["aggregate"] = np.mean(heatmap_info[col][row]["signal_mat"], axis=0)
else:
heatmap_info[col][row]["signal_mat"] = None
heatmap_info[col][row]["aggregate"] = None
pybw.close()
logger.comment("")
#------------------------------------------------------------------------------------#
#---------------------------------- Colorbar min/max --------------------------------#
#------------------------------------------------------------------------------------#
#Estimate min/max from all matrices
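	# The 1st/99th percentiles of the pooled signal matrices are used as vmin/vmax,
	# so a handful of extreme values does not dominate the shared color scale.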
if args.share_colorbar == True:
mats = []
for col, bigwig in enumerate(args.signals):
for row in heatmap_info[col]:
if heatmap_info[col][row]["signal_mat"] is not None:
mats.append(heatmap_info[col][row]["signal_mat"])
vmin, vmax = (0,0)
if len(mats) > 0:
joined = np.vstack(mats)
vmin, vmax = np.percentile(joined, [1, 99])
#Set vmin/vmax for all plots
for col, bigwig in enumerate(args.signals):
for row in heatmap_info[col]:
heatmap_info[col][row].update({"vmin":vmin, "vmax":vmax})
# Estimate min/max for each bigwig
else:
for col, bigwig in enumerate(args.signals):
mats = [heatmap_info[col][row]["signal_mat"] for row in heatmap_info[col] if heatmap_info[col][row]["signal_mat"] is not None]
vmin, vmax = (0,0)
if len(mats) > 0:
joined = np.vstack(mats)
vmin, vmax = np.percentile(joined, [1, 99])
for row in heatmap_info[col]:
heatmap_info[col][row].update({"vmin":vmin, "vmax":vmax})
del mats
del joined
# Estimate min/max for extra columns
for i, name in enumerate(args.show_columns):
col = no_signals + i
glob_values = []
for row in range(len(args.TFBS[0])):
glob_values.extend(heatmap_info[col][row]["column_{0}".format(name)])
vmin, vmax = np.percentile(glob_values, [1, 99])
for row in range(len(args.TFBS[0])):
heatmap_info[col][row]["vmin"] = vmin
heatmap_info[col][row]["vmax"] = vmax
del glob_values
######################################################################################
##################################### PLOTTING #######################################
######################################################################################
#------------------------------------------------------------------------------------#
#------------------------------------ Set up plots ----------------------------------#
#------------------------------------------------------------------------------------#
logger.info("Setting up plotting grid")
total_columns = no_signals + no_columns
xvals = np.arange(-args.flank, args.flank)
fig = plt.figure(figsize = (no_signals*5, 5*5))
h_ratios = [2,10,0.1]
w_ratios = [1]*no_signals + [0.1]*no_columns
gs = gridspec.GridSpec(3, total_columns, height_ratios=h_ratios, width_ratios=w_ratios, hspace=0.1, wspace=0.3) #aggregate + heatmaps (with sub heatmaps) + colorbar
#Setup axarr fitting to grid
axdict = {col:{row:"ax" for row in ["aggregate"] + list(heatmap_info[col]) + ["colorbar"]} for col in range(no_signals)}
axdict.update({col:{row:"ax" for row in ["aggregate"] + list(heatmap_info[col]) + ["colorbar"]} for col in range(no_signals, no_signals+no_columns)})
#Per signal column
xvals = np.arange(-args.flank, args.flank)
for col in range(no_signals):
#Aggregates
axdict[col]["aggregate"] = fig.add_subplot(gs[0,col])
axdict[col]["aggregate"].set_xlim(left=-args.flank, right=args.flank)
axdict[col]["aggregate"].set_xlabel('bp from center')
axdict[col]["aggregate"].set_ylabel('Mean aggregate signal')
axdict[col]["aggregate"].set_title("{0}".format(args.signal_labels[col]))
#Heatmaps
no_beds = len(args.TFBS[col])
h_ratios = [len(heatmap_info[col][row]["regions"]) for row in heatmap_info[col]]
h_ratios = [max(num,1) for num in h_ratios] #deal with empty beds
gs_sub = gridspec.GridSpecFromSubplotSpec(no_beds, 1, subplot_spec=gs[1,col], height_ratios=h_ratios, hspace=0.05)
for row in range(no_beds):
axdict[col][row] = plt.Subplot(fig, gs_sub[row,0])
fig.add_subplot(axdict[col][row])
#Appearance
plt.setp(axdict[col][row].get_yticklabels(), visible=False) #Hide y-axis ticks
plt.setp(axdict[col][row].get_xticklabels(), visible=False) #Hide x-axis ticks
axdict[col][row].tick_params(direction="in")
axdict[col][row].set_ylabel("{0} ({1})".format(args.TFBS_labels[col][row], len(heatmap_info[col][row]["regions"])))
#Last row
if row == no_beds-1:
axdict[col][row].set_xlabel('bp from center')
#Colorbar
axdict[col]["colorbar"] = fig.add_subplot(gs[2,col]) #row number 3
for col in range(no_signals, no_signals + no_columns):
gs_sub = gridspec.GridSpecFromSubplotSpec(no_beds, 1, subplot_spec=gs[1,col], height_ratios=h_ratios, hspace=0.05)
for row in range(no_beds):
axdict[col][row] = plt.Subplot(fig, gs_sub[row,0])
plt.setp(axdict[col][row].get_yticklabels(), visible=False) #Hide y-axis ticks
plt.setp(axdict[col][row].get_xticklabels(), visible=False) #Hide x-axis ticks
axdict[col][row].tick_params(direction="in")
fig.add_subplot(axdict[col][row])
#------------------------------------------------------------------------------------#
#--------------------------------- Fill in plots ------------------------------------#
#------------------------------------------------------------------------------------#
logger.info("Filling in grid")
#Colormaps
for col, bigwig in enumerate(args.signals):
colors = mpl.cm.jet(np.linspace(0, 1, len(heatmap_info[col]))) #colors for aggregate plots
for row in heatmap_info[col]:
if heatmap_info[col][row]["signal_mat"] is not None:
#Aggregate
axdict[col]["aggregate"].plot(xvals, heatmap_info[col][row]["aggregate"], color=colors[row], linewidth=2, label=args.TFBS_labels[col][row])
#Heatmap
lim = np.max([np.abs(heatmap_info[col][row]["vmin"]),np.abs(heatmap_info[col][row]["vmax"])])
heatmap_info[col][row]["vmin"] = -lim
heatmap_info[col][row]["vmax"] = lim
heatmap = axdict[col][row].imshow(heatmap_info[col][row]["signal_mat"], aspect="auto", cmap="seismic", norm=mpl.colors.Normalize(vmin=heatmap_info[col][row]["vmin"], vmax=heatmap_info[col][row]["vmax"]))
#Insert colorbar (inserted multiple times for each bigwig, but since it is shared for the same bigwig, it doesn't matter)
fig.colorbar(heatmap, cax=axdict[col]["colorbar"], orientation="horizontal")
#Extra columns w/ scores from bed
for i, col in enumerate(range(no_signals, no_signals + no_columns)):
bed_column = args.show_columns[i]
for row in heatmap_info[col]:
values = np.array(heatmap_info[col][row]["column_{0}".format(bed_column)])
values = values.reshape(-1,1)
vmin, vmax = np.percentile(values, [1, 99])
lim = np.max([abs(vmin), abs(vmax)])
axdict[col][row].imshow(values, aspect="auto", cmap="seismic", norm=mpl.colors.Normalize(vmin=-lim, vmax=lim))
#------------------------------------------------------------------------------------#
#-------------------------------- Plot decorations ----------------------------------#
#------------------------------------------------------------------------------------#
if args.plot_boundaries:
for col in heatmap_info:
motif_len = heatmap_info[col][0]["width"]
mstart = int(-np.floor(motif_len/2.0))
mend = int(np.ceil(motif_len/2.0))
axdict[col]["aggregate"].axvline(mstart, color="black", linestyle="dashed", linewidth=1)
axdict[col]["aggregate"].axvline(mend, color="black", linestyle="dashed", linewidth=1)
for row in heatmap_info[col]:
motif_len = heatmap_info[col][row]["width"]
mstart = int(-np.floor(motif_len/2.0))
mend = int(np.ceil(motif_len/2.0))
axdict[col][row].axvline(mstart+args.flank, color="black", linestyle="dashed", linewidth=1)
axdict[col][row].axvline(mend+args.flank, color="black", linestyle="dashed", linewidth=1)
#Add legend to aggregate plots
for col in range(len(args.signals)):
axdict[col]["aggregate"].legend(loc=1, prop={"size":6})
if args.share_colorbar == True:
ymin = min([axdict[col]["aggregate"].get_ylim()[0] for col in range(no_signals)])
ymax = max([axdict[col]["aggregate"].get_ylim()[1] for col in range(no_signals)])
for col in range(no_signals):
axdict[col]["aggregate"].set_ylim([ymin, ymax])
#------------------------------------------------------------------------------------#
#----------------------------- Finish off and output --------------------------------#
#------------------------------------------------------------------------------------#
"""
#For each heatmap
for row in [1,2]:
plt.setp(axarr[row].get_yticklabels(), visible=False) #Hide y-axis ticks
plt.setp(axarr[row].get_xticklabels(), visible=False) #Hide x-axis ticks
axarr[row].tick_params(direction="in")
"""
plt.subplots_adjust(top=0.95)
plt.suptitle(args.title, fontsize=25)
logger.info("Writing output file")
plt.savefig(args.output, bbox_inches='tight')
plt.close()
logger.end()
#--------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser = add_heatmap_arguments(parser)
args = parser.parse_args()
if len(sys.argv[1:]) == 0:
parser.print_help()
sys.exit()
run_heatmap(args)
|
[
"numpy.abs",
"argparse.ArgumentParser",
"matplotlib.pyplot.suptitle",
"numpy.floor",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"tobias.parsers.add_heatmap_arguments",
"matplotlib.pyplot.Subplot",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.close",
"numpy.ceil",
"os.path.basename",
"numpy.percentile",
"matplotlib.use",
"matplotlib.pyplot.subplots_adjust",
"numpy.vstack",
"sys.exit",
"pyBigWig.open",
"matplotlib.gridspec.GridSpec",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"matplotlib.pyplot.savefig"
] |
[((259, 273), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (266, 273), True, 'import matplotlib as mpl\n'), ((8567, 8601), 'numpy.arange', 'np.arange', (['(-args.flank)', 'args.flank'], {}), '(-args.flank, args.flank)\n', (8576, 8601), True, 'import numpy as np\n'), ((8610, 8653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(no_signals * 5, 5 * 5)'}), '(figsize=(no_signals * 5, 5 * 5))\n', (8620, 8653), True, 'import matplotlib.pyplot as plt\n'), ((8727, 8838), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', 'total_columns'], {'height_ratios': 'h_ratios', 'width_ratios': 'w_ratios', 'hspace': '(0.1)', 'wspace': '(0.3)'}), '(3, total_columns, height_ratios=h_ratios, width_ratios=\n w_ratios, hspace=0.1, wspace=0.3)\n', (8744, 8838), True, 'import matplotlib.gridspec as gridspec\n'), ((9221, 9255), 'numpy.arange', 'np.arange', (['(-args.flank)', 'args.flank'], {}), '(-args.flank, args.flank)\n', (9230, 9255), True, 'import numpy as np\n'), ((14807, 14836), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)'}), '(top=0.95)\n', (14826, 14836), True, 'import matplotlib.pyplot as plt\n'), ((14838, 14875), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['args.title'], {'fontsize': '(25)'}), '(args.title, fontsize=25)\n', (14850, 14875), True, 'import matplotlib.pyplot as plt\n'), ((14914, 14959), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {'bbox_inches': '"""tight"""'}), "(args.output, bbox_inches='tight')\n", (14925, 14959), True, 'import matplotlib.pyplot as plt\n'), ((14961, 14972), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14970, 14972), True, 'import matplotlib.pyplot as plt\n'), ((15134, 15159), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15157, 15159), False, 'import argparse\n'), ((15170, 15199), 'tobias.parsers.add_heatmap_arguments', 'add_heatmap_arguments', (['parser'], {}), '(parser)\n', (15191, 15199), False, 'from tobias.parsers import add_heatmap_arguments\n'), ((905, 930), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (928, 930), False, 'import argparse\n'), ((1607, 1702), 'sys.exit', 'sys.exit', (['"""Error: option --show_columns is not available for multiple --TFBS inputs."""'], {}), "(\n 'Error: option --show_columns is not available for multiple --TFBS inputs.'\n )\n", (1615, 1702), False, 'import sys\n'), ((1747, 1815), 'sys.exit', 'sys.exit', (['"""Error: Number of --TFBS does not match number of signals"""'], {}), "('Error: Number of --TFBS does not match number of signals')\n", (1755, 1815), False, 'import sys\n'), ((5630, 5659), 'pyBigWig.open', 'pyBigWig.open', (['bigwig_f', '"""rb"""'], {}), "(bigwig_f, 'rb')\n", (5643, 5659), False, 'import pyBigWig\n'), ((7767, 7802), 'numpy.percentile', 'np.percentile', (['glob_values', '[1, 99]'], {}), '(glob_values, [1, 99])\n', (7780, 7802), True, 'import numpy as np\n'), ((9834, 9944), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['no_beds', '(1)'], {'subplot_spec': 'gs[1, col]', 'height_ratios': 'h_ratios', 'hspace': '(0.05)'}), '(no_beds, 1, subplot_spec=gs[1, col],\n height_ratios=h_ratios, hspace=0.05)\n', (9866, 9944), True, 'import matplotlib.gridspec as gridspec\n'), ((10647, 10757), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['no_beds', '(1)'], {'subplot_spec': 'gs[1, col]', 'height_ratios': 'h_ratios', 'hspace': '(0.05)'}), '(no_beds, 1, subplot_spec=gs[1, col],\n 
height_ratios=h_ratios, hspace=0.05)\n', (10679, 10757), True, 'import matplotlib.gridspec as gridspec\n'), ((15281, 15291), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15289, 15291), False, 'import sys\n'), ((1275, 1296), 'os.path.basename', 'os.path.basename', (['fil'], {}), '(fil)\n', (1291, 1296), False, 'import os\n'), ((6808, 6823), 'numpy.vstack', 'np.vstack', (['mats'], {}), '(mats)\n', (6817, 6823), True, 'import numpy as np\n'), ((6840, 6870), 'numpy.percentile', 'np.percentile', (['joined', '[1, 99]'], {}), '(joined, [1, 99])\n', (6853, 6870), True, 'import numpy as np\n'), ((9992, 10024), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'gs_sub[row, 0]'], {}), '(fig, gs_sub[row, 0])\n', (10003, 10024), True, 'import matplotlib.pyplot as plt\n'), ((10804, 10836), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'gs_sub[row, 0]'], {}), '(fig, gs_sub[row, 0])\n', (10815, 10836), True, 'import matplotlib.pyplot as plt\n'), ((12706, 12736), 'numpy.percentile', 'np.percentile', (['values', '[1, 99]'], {}), '(values, [1, 99])\n', (12719, 12736), True, 'import numpy as np\n'), ((1138, 1159), 'os.path.basename', 'os.path.basename', (['fil'], {}), '(fil)\n', (1154, 1159), False, 'import os\n'), ((6014, 6067), 'numpy.mean', 'np.mean', (["heatmap_info[col][row]['signal_mat']"], {'axis': '(0)'}), "(heatmap_info[col][row]['signal_mat'], axis=0)\n", (6021, 6067), True, 'import numpy as np\n'), ((7322, 7337), 'numpy.vstack', 'np.vstack', (['mats'], {}), '(mats)\n', (7331, 7337), True, 'import numpy as np\n'), ((7356, 7386), 'numpy.percentile', 'np.percentile', (['joined', '[1, 99]'], {}), '(joined, [1, 99])\n', (7369, 7386), True, 'import numpy as np\n'), ((13315, 13339), 'numpy.ceil', 'np.ceil', (['(motif_len / 2.0)'], {}), '(motif_len / 2.0)\n', (13322, 13339), True, 'import numpy as np\n'), ((12849, 12890), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-lim)', 'vmax': 'lim'}), '(vmin=-lim, vmax=lim)\n', (12869, 12890), True, 'import matplotlib as mpl\n'), ((13276, 13301), 'numpy.floor', 'np.floor', (['(motif_len / 2.0)'], {}), '(motif_len / 2.0)\n', (13284, 13301), True, 'import numpy as np\n'), ((13664, 13688), 'numpy.ceil', 'np.ceil', (['(motif_len / 2.0)'], {}), '(motif_len / 2.0)\n', (13671, 13688), True, 'import numpy as np\n'), ((11824, 11862), 'numpy.abs', 'np.abs', (["heatmap_info[col][row]['vmin']"], {}), "(heatmap_info[col][row]['vmin'])\n", (11830, 11862), True, 'import numpy as np\n'), ((11863, 11901), 'numpy.abs', 'np.abs', (["heatmap_info[col][row]['vmax']"], {}), "(heatmap_info[col][row]['vmax'])\n", (11869, 11901), True, 'import numpy as np\n'), ((12099, 12198), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': "heatmap_info[col][row]['vmin']", 'vmax': "heatmap_info[col][row]['vmax']"}), "(vmin=heatmap_info[col][row]['vmin'], vmax=heatmap_info\n [col][row]['vmax'])\n", (12119, 12198), True, 'import matplotlib as mpl\n'), ((13624, 13649), 'numpy.floor', 'np.floor', (['(motif_len / 2.0)'], {}), '(motif_len / 2.0)\n', (13632, 13649), True, 'import numpy as np\n')]
|
import torch
import numpy as np
class Network(torch.nn.Module):
def __init__(self, structure):
super(Network, self).__init__()
self.structure = structure
self.layers_pool_inited = self.init_layers(self.structure)
def init_layers(self, structure):
# pool of layers, which should be initialised and connected
layers_pool = [0]
# pool of initialised layers
layers_pool_inited = {}
        # pool of broken (invalid) layers, such as inconsistent number of dimensions
layers_pool_removed = []
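        # structure.matrix is the adjacency matrix of the computational graph:
        # matrix[i, j] == 1 means layer i feeds layer j. For example (illustrative),
        # a chain input -> hidden -> output corresponds to
        # [[0, 1, 0], [0, 0, 1], [0, 0, 0]].
        # Starting from index 0, layers whose inputs are not yet initialised are
        # deferred, so initialisation proceeds in topological order.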
while layers_pool:
# take first layer in a pool
layer_index = layers_pool[0]
# find all connections before this layer
enter_layers = set(np.where(self.structure.matrix[:, layer_index] == 1)[0])
# check if some of previous layers were not initialized
# that means - we should initialise them first
not_inited_layers = [i for i in enter_layers if i not in (layers_pool_inited.keys())]
not_inited_layers_selected = [layer for layer in not_inited_layers if layer not in layers_pool_removed]
if not_inited_layers_selected:
# remove layers, which are in pool already
# this is possible due to complex connections with different orders
not_inited_layers_selected = [layer for layer in not_inited_layers_selected if layer not in layers_pool]
# add not initialised layers to the pool
layers_pool.extend(not_inited_layers_selected)
                # current layer should be shifted to the end of the queue
acc = layers_pool.pop(0)
layers_pool.append(acc)
continue
# take Layer instance of the previous layers
input_layers = [self.structure.layers_index_reverse[layer] for layer in enter_layers]
# layer without rank is broken and we ignore that
input_layers = [layer for layer in input_layers if layer.config.get('rank', False)]
enter_layers = [i for i in enter_layers if i not in layers_pool_removed]
# if curent layer is the Input - initialise without any input connections
if not input_layers and self.structure.layers_index_reverse[layer_index].layer_type == 'input':
inited_layer = (None, None, self.structure.layers_index_reverse[layer_index].init_layer(None))
            # detect a hanging node - some mutations could remove all connections to the layer
elif not input_layers:
layers_pool_removed.append(layers_pool.pop(0))
continue
# if there are multiple input connections
elif len(input_layers) > 1:
# this case does not require additional processing - all logic is inside Layer instance,
# which handles multiple connections
inited_layer = self.structure.layers_index_reverse[layer_index]([None for _ in range(len(input_layers))], input_layers)
else:
input_layers_inited = [layers_pool_inited[layer] for layer in enter_layers][0]
inited_layer = self.structure.layers_index_reverse[layer_index](None, input_layers[0])
# add new initialised layer
layers_pool_inited[layer_index] = inited_layer
setattr(self, 'layer_{}'.format(layer_index), inited_layer[2])
# find outgoing connections and add them to the pool
output_layers = [layer for layer in np.where(self.structure.matrix[layer_index] == 1)[0]
if layer not in layers_pool and layer not in layers_pool_inited.keys()]
layers_pool.extend(output_layers)
# remove current layer from the pool
layers_pool.pop(layers_pool.index(layer_index))
self.layers_pool_removed = layers_pool_removed
return layers_pool_inited
def forward(self, x):
# pool of layers, which should be initialised and connected
layers_pool = [0]
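        # buffer_x caches each layer's output, keyed by layer index; key -1 holds the raw network input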
buffer_x = {-1: x}
last_value = None
while layers_pool:
# take first layer in a pool
layer_index = layers_pool[0]
# find all connections before this layer
enter_layers = set(np.where(self.structure.matrix[:, layer_index] == 1)[0])
enter_layers = [i for i in enter_layers if i not in self.layers_pool_removed]
# check if some of previous layers were not initialized
# that means - we should initialise them first
not_inited_layers = [i for i in enter_layers if i not in (buffer_x.keys())]
not_inited_layers_selected = [layer for layer in not_inited_layers if layer not in self.layers_pool_removed]
if not_inited_layers_selected:
# remove layers, which are in pool already
# this is possible due to complex connections with different orders
not_inited_layers_selected = [layer for layer in not_inited_layers_selected if layer not in layers_pool]
# add not initialised layers to the pool
layers_pool.extend(not_inited_layers_selected)
# current layer should be shift to the end of the queue
layers_pool.append(layers_pool.pop(0))
continue
# take Layer instance of the previous layers
temp_x = [buffer_x[layer] for layer in enter_layers]
            # if the current layer is the Input - initialise it without any input connections
if not enter_layers and self.structure.layers_index_reverse[layer_index].layer_type == 'input':
if self.layers_pool_inited[layer_index][0] is not None:
raise "Input layer is not the first one. Incorrect graph structure"
if self.layers_pool_inited[layer_index][1] is not None:
reshaper = self.layers_pool_inited[layer_index][1] # .init_layer(None)
temp_x = reshaper(buffer_x[-1])
else:
temp_x = buffer_x[-1]
result_x = self.process_layer_output(self.layers_pool_inited[layer_index][2](temp_x), self.structure.layers_index_reverse[layer_index].layer_type)
buffer_x[layer_index] = result_x
            # detect a hanging node - some mutations could remove all connections to the layer
            elif not enter_layers:
                # drop the hanging node from the pool, otherwise this loop never terminates
                layers_pool.pop(0)
                continue
# if there are multiple input connections
elif len(enter_layers) > 1:
if self.layers_pool_inited[layer_index][0] is not None:
reshapers = self.layers_pool_inited[layer_index][0][0]
axis = self.layers_pool_inited[layer_index][0][1]
if reshapers is not None:
reshapers = [i.init_layer(None) for i in reshapers]
temp_x = [r(temp_x[i]) for i, r in enumerate(reshapers)]
temp_x = torch.cat(temp_x, axis)
if self.layers_pool_inited[layer_index][1] is not None:
temp_x = self.layers_pool_inited[layer_index][1](temp_x)
result_x = self.process_layer_output(self.layers_pool_inited[layer_index][2](temp_x), self.structure.layers_index_reverse[layer_index].layer_type)
buffer_x[layer_index] = result_x
else:
temp_x = temp_x[0]
if self.layers_pool_inited[layer_index][1] is not None:
reshaper = self.layers_pool_inited[layer_index][1] # .init_layer(None)
temp_x = reshaper(temp_x)
result_x = self.process_layer_output(self.layers_pool_inited[layer_index][2](temp_x), self.structure.layers_index_reverse[layer_index].layer_type)
buffer_x[layer_index] = result_x
# find outgoing connections and add them to the pool
output_layers = [layer for layer in np.where(self.structure.matrix[layer_index] == 1)[0]
if layer not in layers_pool and layer not in buffer_x.keys()]
last_value = result_x
layers_pool.extend(output_layers)
# remove current layer from the pool
layers_pool.pop(layers_pool.index(layer_index))
return last_value
def process_layer_output(self, x, layer_type):
"""
        Some layers return intermediate results (e.g. an LSTM's hidden state) that we usually don't need
"""
if layer_type == 'lstm':
return x[0]
else:
return x
def recalculate_shapes(structure):
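    # Same traversal as Network.init_layers, but each layer is invoked with init=False,
    # so only shape/rank information is recalculated.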
# pool of layers, which should be initialised and connected
layers_pool = [0]
# pool of initialised layers
layers_pool_inited = {}
    # pool of broken (invalid) layers, such as those with an inconsistent number of dimensions
layers_pool_removed = []
while layers_pool:
# take first layer in a pool
layer_index = layers_pool[0]
# find all connections before this layer
enter_layers = set(np.where(structure.matrix[:, layer_index] == 1)[0])
# check if some of previous layers were not initialized
# that means - we should initialise them first
not_inited_layers = [i for i in enter_layers if i not in (layers_pool_inited.keys())]
not_inited_layers_selected = [layer for layer in not_inited_layers if layer not in layers_pool_removed]
if not_inited_layers_selected:
# remove layers, which are in pool already
# this is possible due to complex connections with different orders
not_inited_layers_selected = [layer for layer in not_inited_layers_selected if layer not in layers_pool]
# add not initialised layers to the pool
layers_pool.extend(not_inited_layers_selected)
# current layer should be shift to the end of the queue
acc = layers_pool.pop(0)
layers_pool.append(acc)
continue
# take Layer instance of the previous layers
input_layers = [structure.layers_index_reverse[layer] for layer in enter_layers]
# layer without rank is broken and we ignore that
input_layers = [layer for layer in input_layers if layer.config.get('rank', False)]
enter_layers = [i for i in enter_layers if i not in layers_pool_removed]
        # if the current layer is the Input - initialise it without any input connections
if not input_layers and structure.layers_index_reverse[layer_index].layer_type == 'input':
inited_layer = (None, None, None)
        # detect a hanging node - some mutations could remove all connections to the layer
elif not input_layers:
layers_pool_removed.append(layers_pool.pop(0))
continue
# if there are multiple input connections
elif len(input_layers) > 1:
# this case does not require additional processing - all logic is inside Layer instance,
# which handles multiple connections
inited_layer = structure.layers_index_reverse[layer_index]([None for _ in range(len(input_layers))], input_layers, init=False)
else:
input_layers_inited = [layers_pool_inited[layer] for layer in enter_layers][0]
inited_layer = structure.layers_index_reverse[layer_index](None, input_layers[0], init=False)
# add new initialised layer
layers_pool_inited[layer_index] = inited_layer
# find outgoing connections and add them to the pool
output_layers = [layer for layer in np.where(structure.matrix[layer_index] == 1)[0]
if layer not in layers_pool and layer not in layers_pool_inited.keys()]
layers_pool.extend(output_layers)
# remove current layer from the pool
layers_pool.pop(layers_pool.index(layer_index))
|
[
"numpy.where",
"torch.cat"
] |
[((9206, 9253), 'numpy.where', 'np.where', (['(structure.matrix[:, layer_index] == 1)'], {}), '(structure.matrix[:, layer_index] == 1)\n', (9214, 9253), True, 'import numpy as np\n'), ((768, 820), 'numpy.where', 'np.where', (['(self.structure.matrix[:, layer_index] == 1)'], {}), '(self.structure.matrix[:, layer_index] == 1)\n', (776, 820), True, 'import numpy as np\n'), ((4383, 4435), 'numpy.where', 'np.where', (['(self.structure.matrix[:, layer_index] == 1)'], {}), '(self.structure.matrix[:, layer_index] == 1)\n', (4391, 4435), True, 'import numpy as np\n'), ((11740, 11784), 'numpy.where', 'np.where', (['(structure.matrix[layer_index] == 1)'], {}), '(structure.matrix[layer_index] == 1)\n', (11748, 11784), True, 'import numpy as np\n'), ((3591, 3640), 'numpy.where', 'np.where', (['(self.structure.matrix[layer_index] == 1)'], {}), '(self.structure.matrix[layer_index] == 1)\n', (3599, 3640), True, 'import numpy as np\n'), ((8095, 8144), 'numpy.where', 'np.where', (['(self.structure.matrix[layer_index] == 1)'], {}), '(self.structure.matrix[layer_index] == 1)\n', (8103, 8144), True, 'import numpy as np\n'), ((7117, 7140), 'torch.cat', 'torch.cat', (['temp_x', 'axis'], {}), '(temp_x, axis)\n', (7126, 7140), False, 'import torch\n')]
|
# coding: utf-8
import numpy as np
import torch
def convert_to_np(weights):
for k, v in weights.items():
if isinstance(v, torch.Tensor):
weights[k] = v.cpu().numpy()
elif isinstance(v, np.ndarray):
pass
elif isinstance(v, list):
weights[k] = np.array(v)
else:
raise SystemError("NOT SUPPORT THE DATATYPE", type(v))
return weights
def convert_to_tensor(weights):
for k, v in weights.items():
if isinstance(v, torch.Tensor):
pass
elif isinstance(v, np.ndarray):
weights[k] = torch.from_numpy(v)
elif isinstance(v, list):
weights[k] = torch.from_numpy(np.array(v))
else:
raise SystemError("NOT SUPPORT THE DATATYPE", type(v))
return weights
def cdw_feature_distance(old_model, new_model, device, train_loader):
"""cosine distance weight (cdw): calculate feature distance of
the features of a batch of data by cosine distance.
old_classifier,
"""
old_model = old_model.to(device)
# old_classifier = old_classifier.to(device)
for data in train_loader:
inputs, _ = data
inputs = inputs.to(device)
with torch.no_grad():
# old_out = old_classifier(old_model(inputs))
old_out = old_model(inputs)
new_out = new_model(inputs)
distance = 1 - torch.cosine_similarity(old_out, new_out)
return torch.mean(distance).cpu().numpy()
|
[
"torch.mean",
"numpy.array",
"torch.no_grad",
"torch.cosine_similarity",
"torch.from_numpy"
] |
[((1234, 1249), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1247, 1249), False, 'import torch\n'), ((1413, 1454), 'torch.cosine_similarity', 'torch.cosine_similarity', (['old_out', 'new_out'], {}), '(old_out, new_out)\n', (1436, 1454), False, 'import torch\n'), ((610, 629), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (626, 629), False, 'import torch\n'), ((308, 319), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (316, 319), True, 'import numpy as np\n'), ((706, 717), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (714, 717), True, 'import numpy as np\n'), ((1470, 1490), 'torch.mean', 'torch.mean', (['distance'], {}), '(distance)\n', (1480, 1490), False, 'import torch\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Dataset
from xbbo.utils.constants import MAXINT
from xbbo.core import TestFunction
class LossIsNaN(Exception):
pass
class Model(TestFunction):
def __init__(self, cfg, seed, **kwargs):
# np.random.seed(cfg.GENERAL.random_seed)
self.cfg = cfg
# self.dim = 30
# assert self.dim % 2 == 0
super().__init__(seed=seed)
self.api_config = self._load_api_config()
        # torch.seed() takes no arguments, so seed torch's RNG via manual_seed instead
        torch.manual_seed(self.rng.randint(MAXINT))
self.device = torch.device(kwargs.get('device', 'cpu'))
self.theta = Parameter(torch.FloatTensor([0.9, 0.9]).to(self.device))
# self.opt_wrap = lambda params: optim.SGD(self.net.parameters(), lr=lr, momentum=momentum)
self.opt = optim.SGD([self.theta], lr=0.01)
self.step_num = 0
self.history_hp = [] # for record strategy
self.trajectory_hp = []
        self.trajectory_loss = []  # record this individual's score over the course of training
        self.history_loss = []  # record the score after applying the hp-strategy (taking weight transfer into account)
self.hp = torch.empty(2, device=self.device)
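        # toy 2-D objective: the validation loss depends only on theta, while the
        # training loss scales each theta component by the corresponding hyperparameter h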
self.obj_val_func = lambda theta: 1.2 - (theta ** 2).sum()
self.obj_train_func = lambda theta, h: 1.2 - ((h * theta) ** 2).sum()
self.trajectory_theta = []
def __len__(self): # one epoch has how many batchs
return 1
def update_hp(self, params: dict):
        self.history_hp.append((self.step_num, params))  # hyperparameters changed at this step; the score at this step reflects all hyperparameters applied *before* it
self.trajectory_hp.append((self.step_num, params))
self.trajectory_theta.append(self.theta.detach().cpu().numpy())
self.hp[0] = params['h1']
self.hp[1] = params['h2']
    def step(self, num):  # training step - needs the optimizer
for it in range(num):
self.trajectory_theta.append(self.theta.detach().cpu().numpy())
loss = self.obj_train_func(self.theta, self.hp)
if np.isnan(loss.item()):
print("Loss is NaN.")
self.step_num += 1
return
# raise LossIsNaN
self.opt.zero_grad()
loss.backward()
self.opt.step()
self.step_num += 1
    def evaluate(self):  # validation - no optimizer step needed
with torch.no_grad():
loss = self.obj_val_func(self.theta).item()
self.loss = np.inf if np.isnan(loss) else loss
self.trajectory_loss.append((self.step_num, self.loss))
self.history_loss.append((self.step_num, self.loss))
return self.loss
def load_checkpoint(self, checkpoint):
with torch.no_grad():
self.theta.set_(checkpoint['model_state_dict'])
# self.opt.load_state_dict(checkpoint['optim_state_dict'])
def save_checkpoint(self):
checkpoint = dict(model_state_dict=self.theta.data.clone())
return checkpoint
def _load_api_config(self):
return {
'h1': {
'type': 'float', 'warp': 'linear', 'range': [0, 1]},
'h2': {
'type': 'float', 'warp': 'linear', 'range': [0, 1]
}
}
|
[
"torch.FloatTensor",
"torch.empty",
"numpy.isnan",
"torch.no_grad",
"torch.optim.SGD"
] |
[((1051, 1083), 'torch.optim.SGD', 'optim.SGD', (['[self.theta]'], {'lr': '(0.01)'}), '([self.theta], lr=0.01)\n', (1060, 1083), False, 'from torch import optim\n'), ((1326, 1360), 'torch.empty', 'torch.empty', (['(2)'], {'device': 'self.device'}), '(2, device=self.device)\n', (1337, 1360), False, 'import torch\n'), ((2542, 2557), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2555, 2557), False, 'import torch\n'), ((2645, 2659), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (2653, 2659), True, 'import numpy as np\n'), ((2877, 2892), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2890, 2892), False, 'import torch\n'), ((885, 914), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.9, 0.9]'], {}), '([0.9, 0.9])\n', (902, 914), False, 'import torch\n')]
|
import torch as ch
import utils
import numpy as np
from tqdm import tqdm
if __name__ == "__main__":
import sys
model_arch = sys.argv[1]
model_type = sys.argv[2]
prefix = sys.argv[3]
dataset = sys.argv[4]
if dataset == 'cifar10':
dx = utils.CIFAR10()
elif dataset == 'imagenet':
dx = utils.ImageNet1000()
else:
raise ValueError("Dataset not supported")
ds = dx.get_dataset()
model = dx.get_model(model_type, model_arch)
batch_size = 128
all_reps = []
train_loader = None
if dataset == 'cifar10':
train_loader, val_loader = ds.make_loaders(batch_size=batch_size, workers=8)
else:
_, val_loader = ds.make_loaders(batch_size=batch_size, workers=8, only_val=True)
def get_reps(data_loader):
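        # forward each batch with with_latent=True and keep the latent representation on the CPU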
for (im, label) in tqdm(data_loader):
with ch.no_grad():
(_, rep), _ = model(im, with_latent=True)
all_reps.append(rep.cpu())
if train_loader:
get_reps(train_loader)
get_reps(val_loader)
all_reps = ch.cat(all_reps)
ch_mean = ch.mean(all_reps, dim=0)
ch_std = ch.std(all_reps, dim=0)
# Dump mean, std vectors for later use:
np_mean = ch_mean.cpu().numpy()
np_std = ch_std.cpu().numpy()
np.save(prefix + "feature_mean", np_mean)
np.save(prefix + "feature_std", np_std)
|
[
"torch.mean",
"tqdm.tqdm",
"numpy.save",
"utils.ImageNet1000",
"utils.CIFAR10",
"torch.cat",
"torch.std",
"torch.no_grad"
] |
[((952, 968), 'torch.cat', 'ch.cat', (['all_reps'], {}), '(all_reps)\n', (958, 968), True, 'import torch as ch\n'), ((981, 1005), 'torch.mean', 'ch.mean', (['all_reps'], {'dim': '(0)'}), '(all_reps, dim=0)\n', (988, 1005), True, 'import torch as ch\n'), ((1018, 1041), 'torch.std', 'ch.std', (['all_reps'], {'dim': '(0)'}), '(all_reps, dim=0)\n', (1024, 1041), True, 'import torch as ch\n'), ((1150, 1191), 'numpy.save', 'np.save', (["(prefix + 'feature_mean')", 'np_mean'], {}), "(prefix + 'feature_mean', np_mean)\n", (1157, 1191), True, 'import numpy as np\n'), ((1193, 1232), 'numpy.save', 'np.save', (["(prefix + 'feature_std')", 'np_std'], {}), "(prefix + 'feature_std', np_std)\n", (1200, 1232), True, 'import numpy as np\n'), ((261, 276), 'utils.CIFAR10', 'utils.CIFAR10', ([], {}), '()\n', (274, 276), False, 'import utils\n'), ((755, 772), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (759, 772), False, 'from tqdm import tqdm\n'), ((313, 333), 'utils.ImageNet1000', 'utils.ImageNet1000', ([], {}), '()\n', (331, 333), False, 'import utils\n'), ((782, 794), 'torch.no_grad', 'ch.no_grad', ([], {}), '()\n', (792, 794), True, 'import torch as ch\n')]
|
# This is my main script
import json
import multiprocessing as mp
import os
import time
import matplotlib.cm
import matplotlib.pyplot
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import OPTICS
import FeatureProcessing as fp
import malware_stats
import my_sorter as my_sort
import process_cuckoo_reports as pcr
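# All three distance functions below compute the Levenshtein distance between two
# sequences, normalised by the longer sequence length (the Levenshtein ratio);
# dist_metric receives 1-element index vectors that point into the global `data` list.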
def dist_metric(x, y):
print("X = " + str(int(x[0])) + " Y = " + str(int(y[0])))
data_x = data[int(x[0])]
data_y = data[int(y[0])]
max_len = max(len(data_x), len(data_y))
    # divide by the max length to get the Levenshtein ratio
return fp.levenshtein_distance_dp(data_x, data_y) / max_len
def dist_metric_alt(x, y, dm):
print("X = " + str(x) + " Y = " + str(y))
data_x = dm[x]
data_y = dm[y]
max_len = max(len(data_x), len(data_y))
    # divide by the max length to get the Levenshtein ratio
return fp.levenshtein_distance_dp(data_x, data_y) / max_len
def alt_dist_metric(i):
if i[0] == i[1]:
return 0.0
data_x = i[2]
data_y = i[3]
max_len = max(len(data_x), len(data_y))
    # divide by the max length to get the Levenshtein ratio
dist = fp.levenshtein_distance_dp(data_x, data_y) / max_len
return [i[0], i[1], dist]
def mp_calc_dist_matrix(idxs, dm):
calcs = []
# Define all pairwise:
for i in range(0, len(idxs)):
for j in range(i + 1, len(idxs)):
if i < len(idxs) and j < len(idxs):
calcs.append([i, j, dm[i], dm[j]])
    # Submit the pairwise calculations to the worker pool:
pool = mp.Pool()
res = pool.map(alt_dist_metric, calcs)
pool.close()
m_out = np.zeros((int(len(idxs)), int(len(idxs))))
for r in res:
m_out[r[0]][r[1]] = r[2]
m_out[r[1]][r[0]] = r[2]
return m_out
def swap(dist_mat, i, j):
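    # swap columns i and j, then rows i and j, of the distance matrix in place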
for y in range(0, len(dist_mat)):
t = dist_mat[y][i]
dist_mat[y][i] = dist_mat[y][j]
dist_mat[y][j] = t
tmp = dist_mat[i].copy()
tmp2 = dist_mat[j].copy()
dist_mat[i] = tmp2
dist_mat[j] = tmp
def order_dist_matrix(dist_matrix, labels):
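    # bubble-sort the labels and reorder the distance matrix rows/columns in step,
    # so that samples of the same cluster end up adjacent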
for iter_num in range(len(labels) - 1, 0, -1):
for idx in range(iter_num):
if labels[idx] > labels[idx + 1]:
temp = labels[idx]
labels[idx] = labels[idx + 1]
labels[idx + 1] = temp
swap(dist_matrix, idx, idx + 1)
def store_ordered_dist_matrix_as_png(dist_matrix, labels, title):
ticks = []
for i in range(1, len(labels)):
if labels[i] != labels[i - 1]:
ticks.append(i - 0.5)
plt.clf()
fig, ax = plt.subplots(figsize=(20, 20), sharey=True)
# fig, ax = plt.subplots(sharey=True)
cmap = matplotlib.cm.get_cmap('CMRmap')
cax = ax.matshow(dist_matrix, interpolation='nearest', cmap=cmap)
# cax = ax.matshow(dist_matrix, interpolation='nearest')
ax.grid(False)
plt.suptitle('Clustered Distance Matrix')
plt.grid(True)
plt.title(title)
plt.xticks(ticks, color="w")
plt.yticks(ticks, color="w")
fig.colorbar(cax, ticks=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
plt.savefig("images/Dist_matrix_" + str(title) + ".png", bbox_inches='tight')
plt.close()
def do_hierarchical_cluster_analysis_routine(api_call_description, dist_matrix):
print('DOING HIERARCHICAL AGGLOMERATIVE CLUSTERING:')
n = len(dist_matrix)
best_mean_silhouette = -1.0
best_nc = -1
best_labels = []
ncs = []
mss = []
top = min(301, n)
for n_c in range(3, top - 1):
print('Trying with ' + str(n_c) + ' clusters:')
agg = AgglomerativeClustering(n_clusters=n_c, affinity='precomputed', linkage='average')
labels = agg.fit_predict(dist_matrix)
mean_silhouette = metrics.silhouette_score(dist_matrix, labels=labels, metric="precomputed")
print("For " + str(n_c) + " clusters the mean silhouette score is: " + str(mean_silhouette))
ncs.append(n_c)
mss.append(mean_silhouette)
best_nc = n_c if mean_silhouette > best_mean_silhouette else best_nc
best_labels = labels if mean_silhouette > best_mean_silhouette else best_labels
best_mean_silhouette = mean_silhouette if mean_silhouette > best_mean_silhouette else best_mean_silhouette
# Display info about cluster to silhouette:
plt.clf()
plt.plot(ncs, mss)
plt.title('HIERARCHICAL - Cluster count to Silhouette score')
plt.xlabel('Number of clusters')
plt.ylabel('Silhouette score')
plt.yticks(np.arange(0, 1, 0.1))
# plt.ylim([0.0, 1.0])
plt.grid(True)
plt.savefig("images/EVAL_HIERARCHICAL_" + api_call_description + ".png", bbox_inches='tight')
plt.close()
# Show info about best found nCluster and store dm to image:
print('Best # of clusters: ' + str(best_nc))
print("The mean Silhouette score is: " + str(best_mean_silhouette))
# sorted_dm = dist_matrix.copy()
# order_dist_matrix(sorted_dm, best_labels)
sorted_dm, sorted_labels = my_sort.optimal_sort(dist_matrix, best_labels)
store_ordered_dist_matrix_as_png(sorted_dm, sorted_labels, "HIERARCHICAL_analysis_n=" + str(n) + "_nCluster=" + str(
best_nc) + "_API_format" + api_call_description)
print("Sorted dist matrix saved as image.")
def plot_optics_reachability(clust, X, title):
space = np.arange(len(X))
reachability = clust.reachability_[clust.ordering_]
labels = clust.labels_[clust.ordering_]
plt.clf()
plt.figure(figsize=(20, 10))
ax1 = plt.subplot()
# Plotting the Reachability-Distance Plot
colors = ['c.', 'b.', 'r.', 'y.', 'g.']
for Class, colour in zip(range(0, len(labels)), colors):
Xk = space[labels == Class]
Rk = reachability[labels == Class]
ax1.plot(Xk, Rk, colour, alpha=0.3)
ax1.plot(space[labels == -1], reachability[labels == -1], 'k.', alpha=0.3)
ax1.plot(space, np.full_like(space, 2., dtype=float), 'k-', alpha=0.5)
ax1.plot(space, np.full_like(space, 0.5, dtype=float), 'k-.', alpha=0.5)
ax1.set_ylabel('Reachability Distance')
plt.title('Reachability plot')
plt.xlabel('Samples')
plt.ylabel('Reachability (epsilon distance)')
plt.title('Reachability Plot')
plt.savefig("images/Reachability_plot_" + str(title) + ".png", bbox_inches='tight')
plt.close()
def do_optics_cluster_analysis_routine(api_call_description, dist_matrix):
print('DOING OPTICS ANALYSIS:')
n = len(dist_matrix)
best_ms = -1
best_mean_silhouette = -1
best_labels = []
list_min_samples = []
list_mean_silhouettes = []
list_mean_silhouettes_no_noise = []
list_clusters = []
list_noise_count = []
for ms in range(2, 21):
try:
cluster_analyzer = OPTICS(metric="precomputed", min_samples=ms)
labels = cluster_analyzer.fit_predict(dist_matrix)
# plot_optics_reachability(cluster_analyzer, dist_matrix, api_call_description + '_min_samples=' + str(ms))
lbl_count = len(set(labels)) - (1 if -1 in labels else 0)
mean_s_coefficient = metrics.silhouette_score(dist_matrix, labels=labels, metric="precomputed")
print('For min_samples=' + str(ms) + ' found ' + str(lbl_count) + ' clusters, and mean_silhouette=' + str(
mean_s_coefficient))
list_noise_count.append(np.count_nonzero(labels == -1))
list_min_samples.append(ms)
list_mean_silhouettes.append(mean_s_coefficient)
best_ms = ms if mean_s_coefficient > best_mean_silhouette else best_ms
best_labels = labels.copy() if mean_s_coefficient > best_mean_silhouette else best_labels
list_clusters.append(len(set(labels)) - (1 if -1 in best_labels else 0))
best_mean_silhouette = mean_s_coefficient if mean_s_coefficient > best_mean_silhouette else best_mean_silhouette
no_noise_dm, no_noise_labels = get_noise_free_dm_n_labels_copy(dist_matrix, labels)
no_noise_mean_s_coefficient = metrics.silhouette_score(no_noise_dm, labels=no_noise_labels,
metric="precomputed")
print('For min_samples=' + str(ms) + ' found no_noise mean_silhouette=' + str(no_noise_mean_s_coefficient))
list_mean_silhouettes_no_noise.append(no_noise_mean_s_coefficient)
except Exception as e:
print("Could not do optics for min_samples=" + str(ms))
print(e)
# Display info about cluster to min_samples:
plt.clf()
fig, axes = plt.subplots()
axes.spines['left'].set_color('green')
axes.set_ylim([0.0, 1.0])
axes.xaxis.set_ticks(np.arange(0, 21, 1))
axes.yaxis.set_ticks(np.arange(0, 1, 0.1))
axes.grid(True)
fig.subplots_adjust(right=0.75)
twin_axes = axes.twinx()
twin_axes.spines['right'].set_color('red')
#twin_axes.set_ylim([100, 600])
second_twin = axes.twinx()
second_twin.spines['right'].set_position(('axes', 1.2))
second_twin.spines['right'].set_color('blue')
#second_twin.set_ylim([0, 250])
p1, = axes.plot(list_min_samples, list_mean_silhouettes, color='green', label='Silhouette score')
p2, = axes.plot(list_min_samples, list_mean_silhouettes_no_noise, color='green', dashes=[6, 2], label="Silhouette without noise")
axes.set_xlabel("Min samples")
axes.set_ylabel("Silhouette score")
p3, = twin_axes.plot(list_min_samples, list_noise_count, color='red', label='Noise')
twin_axes.set_ylabel("Noise samples")
p4, = second_twin.plot(list_min_samples, list_clusters, color='blue', label='Clusters')
second_twin.set_ylabel('Cluster count')
axes.legend(handles=[p1, p2, p3, p4], bbox_to_anchor=(0.5, 1.1), loc='lower center')
plt.title('OPTICS - Min_samples size to Silhouette score')
plt.savefig("images/EVAL_OPTICS_" + api_call_description + ".png", bbox_inches='tight')
plt.close()
print('')
print('***** OPTICS Analysis done *****')
print('Best min_sample=' + str(best_ms))
print('Finds ' + str(len(set(best_labels)) - (1 if -1 in best_labels else 0)) + ' clusters')
# print('Samples counted as noise: ' + str(best_labels.count(-1)))
print('Samples counted as noise: ' + str(np.count_nonzero(best_labels == -1)))
print('Mean silhouette: ' + str(best_mean_silhouette))
# sorted_dm = dist_matrix.copy()
# my_sort.tim_sort(best_labels, sorted_dm)
# order_dist_matrix(sorted_dm, best_labels)
sorted_dm, sorted_lbls = my_sort.optimal_sort(dist_matrix, best_labels)
noise_count = np.count_nonzero(best_labels == -1)
print("Samples considered noise: " + str(noise_count))
store_ordered_dist_matrix_as_png(sorted_dm, sorted_lbls, "OPTICS_analysis_n=" + str(n) + "_min_samples=" + str(
best_ms) + "_API_format=" + api_call_description)
print("Sorted dist matrix saved as image.")
def get_noise_free_dm_n_labels_copy(dist_matrix, labels):
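    # drop the rows, columns and labels of all samples marked as noise (label == -1)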
no_noise_dm = np.delete(dist_matrix, np.where(labels == -1), axis=0)
no_noise_dm = np.delete(no_noise_dm, np.where(labels == -1), axis=1)
no_noise_labels = np.delete(labels, np.where(labels == -1))
return no_noise_dm, no_noise_labels
def do_final_optics(dm, min_sampels):
print('')
print('***** DOING FINAL OPTICS *****')
cluster_analyzer = OPTICS(metric="precomputed", min_samples=min_sampels)
labels = cluster_analyzer.fit_predict(dm)
lbl_count = len(set(labels)) - (1 if -1 in labels else 0)
mean_s_coefficient = metrics.silhouette_score(dm, labels=labels, metric="precomputed")
print('For min_samples=' + str(min_sampels) + ' found ' + str(lbl_count) + ' clusters, and mean_silhouette=' + str(
mean_s_coefficient))
# orderd_dm = dm.copy()
# order_dist_matrix(orderd_dm, labels)
sorted_dm, sorted_labels = my_sort.optimal_sort(dm, labels)
title = 'FINAL_OPTICS_min_samples=' + str(min_sampels)
title += '_n_clusters=' + str(lbl_count)
title += '_n_samples=' + str(len(labels))
store_ordered_dist_matrix_as_png(sorted_dm, sorted_labels, title)
print('........... DONE!')
def do_final_hierarchical(dm, n_clusters):
print('')
print('***** DOING FINAL Hierarchical *****')
agg = AgglomerativeClustering(n_clusters=n_clusters, affinity='precomputed', linkage='complete')
labels = agg.fit_predict(dm)
mean_silhouette = metrics.silhouette_score(dm, labels=labels, metric="precomputed")
print("For " + str(n_clusters) + " clusters the mean silhouette score is: " + str(mean_silhouette))
sorted_dm, sorted_labels = my_sort.optimal_sort(dm, labels)
title = 'FINAL_HIERARCHICAL'
title += '_n_clusters=' + str(n_clusters)
title += '_n_samples=' + str(len(labels))
store_ordered_dist_matrix_as_png(sorted_dm, sorted_labels, title)
print('........... DONE!')
def find_optimal_values():
global j
labels = ['FILTERED=FALSE_COLLAPSED=FALSE', 'FILTERED=TRUE_COLLAPSED=FALSE', 'FILTERED=TRUE_COLLAPSED=TRUE']
for dm_id in range(0, 3):
dist_list = []
for i in range(0, len(m[dm_id])):
for j in range(i, len(m[dm_id])):
if not i == j:
dist_list.append(m[dm_id][i][j])
print("Calculating frequency of distances in distance matrix:")
print("Dist counts: " + str(len(dist_list)))
plt.clf()
plt.hist(dist_list, bins=50)
plt.gca().set(title='Frequency of Distances', ylabel='Frequency', xlabel='Levenshtein ratio distance')
plt.title('Frequency of distances with: ' + labels[dm_id])
#plt.ylim([0, 350000])
plt.grid(True)
plt.savefig("images/Dist_frequencies_" + labels[dm_id] + ".png")
plt.close()
print(".......... DONE")
print('')
do_optics_cluster_analysis_routine(labels[dm_id] + '_API_SEQ', m[dm_id])
do_hierarchical_cluster_analysis_routine(labels[dm_id] + '_API_SEQ', m[dm_id])
if __name__ == '__main__':
start = time.time()
mp.freeze_support()
stored_dist_matrix = "data/dist_matrix.json"
global glob_data
data = []
global toShare
print("###GO GO GO###")
m = []
if os.path.isfile(stored_dist_matrix):
print("Loading stored distance matrix")
f = open(stored_dist_matrix, "r")
j = f.read()
f.close()
data = json.loads(j)
m = np.array(data)
print(".......... DONE")
print('')
else:
workdir = "C:\\Users\\stegg\\OneDrive\\Documents\\Master Projekt\\Data\\Nov2020_First_250\\"
d = pcr.mp_get_all_files_api_sequences(workdir)
# print("Samples: " + str(len(data)))
print("Creating a new distance matrices")
for dms in d:
X = np.arange(len(dms)).reshape(-1, 1)
currX = -1
start = time.time()
m = mp_calc_dist_matrix(X, dms)
end = time.time()
print("Time take: " + str(end - start))
print(".......... DONE")
print('')
data.append(m)
print("Saving distance matrix to file:")
m_list = []
for meh in data:
m_list.append(meh.tolist())
m_as_json = json.dumps(m_list)
f = open("data/dist_matrix.json", "w")
f.write(m_as_json)
f.close()
print(".......... DONE")
print('')
m = np.array(m_list)
# m has the distance matrices of the three tracks:
find_optimal_values()
do_final_hierarchical(m[2], 45)
do_final_hierarchical(m[2], 75)
do_final_optics(m[2], 2)
do_final_optics(m[2], 4)
do_final_optics(m[2], 5)
do_final_optics(m[2], 6)
do_final_optics(m[2], 7)
do_final_optics(m[2], 8)
# malware_stats.do_api_analysis("C:\\Users\\stegg\\OneDrive\\Documents\\Master Projekt\\Data\\Nov2020_First_250\\")
end = time.time()
print('Time taken: ' + str(end - start) + " sec.")
print('')
print("********* ALL DONE **********")
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.suptitle",
"json.dumps",
"matplotlib.pyplot.figure",
"os.path.isfile",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.full_like",
"json.loads",
"matplotlib.pyplot.close",
"process_cuckoo_reports.mp_get_all_files_api_sequences",
"matplotlib.pyplot.yticks",
"FeatureProcessing.levenshtein_distance_dp",
"sklearn.cluster.AgglomerativeClustering",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"my_sorter.optimal_sort",
"sklearn.metrics.silhouette_score",
"multiprocessing.Pool",
"sklearn.cluster.OPTICS",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.subplot",
"numpy.count_nonzero",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"time.time",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.xlabel",
"multiprocessing.freeze_support",
"matplotlib.pyplot.savefig"
] |
[((1607, 1616), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (1614, 1616), True, 'import multiprocessing as mp\n'), ((2641, 2650), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2648, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2708), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 20)', 'sharey': '(True)'}), '(figsize=(20, 20), sharey=True)\n', (2677, 2708), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2990), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Clustered Distance Matrix"""'], {}), "('Clustered Distance Matrix')\n", (2961, 2990), True, 'import matplotlib.pyplot as plt\n'), ((2995, 3009), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3003, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3014, 3030), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3023, 3030), True, 'import matplotlib.pyplot as plt\n'), ((3035, 3063), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {'color': '"""w"""'}), "(ticks, color='w')\n", (3045, 3063), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3096), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks'], {'color': '"""w"""'}), "(ticks, color='w')\n", (3078, 3096), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3279), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3277, 3279), True, 'import matplotlib.pyplot as plt\n'), ((4392, 4401), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4399, 4401), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4424), 'matplotlib.pyplot.plot', 'plt.plot', (['ncs', 'mss'], {}), '(ncs, mss)\n', (4414, 4424), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4490), 'matplotlib.pyplot.title', 'plt.title', (['"""HIERARCHICAL - Cluster count to Silhouette score"""'], {}), "('HIERARCHICAL - Cluster count to Silhouette score')\n", (4438, 4490), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4527), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of clusters"""'], {}), "('Number of clusters')\n", (4505, 4527), True, 'import matplotlib.pyplot as plt\n'), ((4532, 4562), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Silhouette score"""'], {}), "('Silhouette score')\n", (4542, 4562), True, 'import matplotlib.pyplot as plt\n'), ((4631, 4645), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4639, 4645), True, 'import matplotlib.pyplot as plt\n'), ((4650, 4747), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/EVAL_HIERARCHICAL_' + api_call_description + '.png')"], {'bbox_inches': '"""tight"""'}), "('images/EVAL_HIERARCHICAL_' + api_call_description + '.png',\n bbox_inches='tight')\n", (4661, 4747), True, 'import matplotlib.pyplot as plt\n'), ((4748, 4759), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4757, 4759), True, 'import matplotlib.pyplot as plt\n'), ((5062, 5108), 'my_sorter.optimal_sort', 'my_sort.optimal_sort', (['dist_matrix', 'best_labels'], {}), '(dist_matrix, best_labels)\n', (5082, 5108), True, 'import my_sorter as my_sort\n'), ((5518, 5527), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5525, 5527), True, 'import matplotlib.pyplot as plt\n'), ((5532, 5560), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (5542, 5560), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5585), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (5583, 5585), True, 'import matplotlib.pyplot as plt\n'), ((6138, 6168), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Reachability plot"""'], {}), "('Reachability plot')\n", (6147, 6168), True, 'import matplotlib.pyplot as plt\n'), ((6173, 6194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Samples"""'], {}), "('Samples')\n", (6183, 6194), True, 'import matplotlib.pyplot as plt\n'), ((6199, 6244), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Reachability (epsilon distance)"""'], {}), "('Reachability (epsilon distance)')\n", (6209, 6244), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6279), 'matplotlib.pyplot.title', 'plt.title', (['"""Reachability Plot"""'], {}), "('Reachability Plot')\n", (6258, 6279), True, 'import matplotlib.pyplot as plt\n'), ((6372, 6383), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6381, 6383), True, 'import matplotlib.pyplot as plt\n'), ((8598, 8607), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8605, 8607), True, 'import matplotlib.pyplot as plt\n'), ((8624, 8638), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8636, 8638), True, 'import matplotlib.pyplot as plt\n'), ((9828, 9886), 'matplotlib.pyplot.title', 'plt.title', (['"""OPTICS - Min_samples size to Silhouette score"""'], {}), "('OPTICS - Min_samples size to Silhouette score')\n", (9837, 9886), True, 'import matplotlib.pyplot as plt\n'), ((9891, 9982), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/EVAL_OPTICS_' + api_call_description + '.png')"], {'bbox_inches': '"""tight"""'}), "('images/EVAL_OPTICS_' + api_call_description + '.png',\n bbox_inches='tight')\n", (9902, 9982), True, 'import matplotlib.pyplot as plt\n'), ((9983, 9994), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9992, 9994), True, 'import matplotlib.pyplot as plt\n'), ((10571, 10617), 'my_sorter.optimal_sort', 'my_sort.optimal_sort', (['dist_matrix', 'best_labels'], {}), '(dist_matrix, best_labels)\n', (10591, 10617), True, 'import my_sorter as my_sort\n'), ((10637, 10672), 'numpy.count_nonzero', 'np.count_nonzero', (['(best_labels == -1)'], {}), '(best_labels == -1)\n', (10653, 10672), True, 'import numpy as np\n'), ((11385, 11438), 'sklearn.cluster.OPTICS', 'OPTICS', ([], {'metric': '"""precomputed"""', 'min_samples': 'min_sampels'}), "(metric='precomputed', min_samples=min_sampels)\n", (11391, 11438), False, 'from sklearn.cluster import OPTICS\n'), ((11572, 11637), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['dm'], {'labels': 'labels', 'metric': '"""precomputed"""'}), "(dm, labels=labels, metric='precomputed')\n", (11596, 11637), False, 'from sklearn import metrics\n'), ((11891, 11923), 'my_sorter.optimal_sort', 'my_sort.optimal_sort', (['dm', 'labels'], {}), '(dm, labels)\n', (11911, 11923), True, 'import my_sorter as my_sort\n'), ((12295, 12389), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'n_clusters', 'affinity': '"""precomputed"""', 'linkage': '"""complete"""'}), "(n_clusters=n_clusters, affinity='precomputed',\n linkage='complete')\n", (12318, 12389), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((12441, 12506), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['dm'], {'labels': 'labels', 'metric': '"""precomputed"""'}), "(dm, labels=labels, metric='precomputed')\n", (12465, 12506), False, 'from sklearn import metrics\n'), ((12643, 12675), 'my_sorter.optimal_sort', 'my_sort.optimal_sort', (['dm', 'labels'], {}), '(dm, labels)\n', (12663, 12675), True, 'import my_sorter as my_sort\n'), ((14049, 14060), 'time.time', 'time.time', ([], {}), '()\n', (14058, 14060), 
False, 'import time\n'), ((14066, 14085), 'multiprocessing.freeze_support', 'mp.freeze_support', ([], {}), '()\n', (14083, 14085), True, 'import multiprocessing as mp\n'), ((14236, 14270), 'os.path.isfile', 'os.path.isfile', (['stored_dist_matrix'], {}), '(stored_dist_matrix)\n', (14250, 14270), False, 'import os\n'), ((15918, 15929), 'time.time', 'time.time', ([], {}), '()\n', (15927, 15929), False, 'import time\n'), ((672, 714), 'FeatureProcessing.levenshtein_distance_dp', 'fp.levenshtein_distance_dp', (['data_x', 'data_y'], {}), '(data_x, data_y)\n', (698, 714), True, 'import FeatureProcessing as fp\n'), ((950, 992), 'FeatureProcessing.levenshtein_distance_dp', 'fp.levenshtein_distance_dp', (['data_x', 'data_y'], {}), '(data_x, data_y)\n', (976, 992), True, 'import FeatureProcessing as fp\n'), ((1213, 1255), 'FeatureProcessing.levenshtein_distance_dp', 'fp.levenshtein_distance_dp', (['data_x', 'data_y'], {}), '(data_x, data_y)\n', (1239, 1255), True, 'import FeatureProcessing as fp\n'), ((3668, 3755), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'n_c', 'affinity': '"""precomputed"""', 'linkage': '"""average"""'}), "(n_clusters=n_c, affinity='precomputed', linkage=\n 'average')\n", (3691, 3755), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((3823, 3897), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['dist_matrix'], {'labels': 'labels', 'metric': '"""precomputed"""'}), "(dist_matrix, labels=labels, metric='precomputed')\n", (3847, 3897), False, 'from sklearn import metrics\n'), ((4578, 4598), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (4587, 4598), True, 'import numpy as np\n'), ((5957, 5994), 'numpy.full_like', 'np.full_like', (['space', '(2.0)'], {'dtype': 'float'}), '(space, 2.0, dtype=float)\n', (5969, 5994), True, 'import numpy as np\n'), ((6032, 6069), 'numpy.full_like', 'np.full_like', (['space', '(0.5)'], {'dtype': 'float'}), '(space, 0.5, dtype=float)\n', (6044, 6069), True, 'import numpy as np\n'), ((8738, 8757), 'numpy.arange', 'np.arange', (['(0)', '(21)', '(1)'], {}), '(0, 21, 1)\n', (8747, 8757), True, 'import numpy as np\n'), ((8784, 8804), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (8793, 8804), True, 'import numpy as np\n'), ((11055, 11077), 'numpy.where', 'np.where', (['(labels == -1)'], {}), '(labels == -1)\n', (11063, 11077), True, 'import numpy as np\n'), ((11128, 11150), 'numpy.where', 'np.where', (['(labels == -1)'], {}), '(labels == -1)\n', (11136, 11150), True, 'import numpy as np\n'), ((11200, 11222), 'numpy.where', 'np.where', (['(labels == -1)'], {}), '(labels == -1)\n', (11208, 11222), True, 'import numpy as np\n'), ((13416, 13425), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (13423, 13425), True, 'import matplotlib.pyplot as plt\n'), ((13434, 13462), 'matplotlib.pyplot.hist', 'plt.hist', (['dist_list'], {'bins': '(50)'}), '(dist_list, bins=50)\n', (13442, 13462), True, 'import matplotlib.pyplot as plt\n'), ((13582, 13640), 'matplotlib.pyplot.title', 'plt.title', (["('Frequency of distances with: ' + labels[dm_id])"], {}), "('Frequency of distances with: ' + labels[dm_id])\n", (13591, 13640), True, 'import matplotlib.pyplot as plt\n'), ((13680, 13694), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (13688, 13694), True, 'import matplotlib.pyplot as plt\n'), ((13703, 13767), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/Dist_frequencies_' + labels[dm_id] + '.png')"], 
{}), "('images/Dist_frequencies_' + labels[dm_id] + '.png')\n", (13714, 13767), True, 'import matplotlib.pyplot as plt\n'), ((13776, 13787), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13785, 13787), True, 'import matplotlib.pyplot as plt\n'), ((14416, 14429), 'json.loads', 'json.loads', (['j'], {}), '(j)\n', (14426, 14429), False, 'import json\n'), ((14442, 14456), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (14450, 14456), True, 'import numpy as np\n'), ((14631, 14674), 'process_cuckoo_reports.mp_get_all_files_api_sequences', 'pcr.mp_get_all_files_api_sequences', (['workdir'], {}), '(workdir)\n', (14665, 14674), True, 'import process_cuckoo_reports as pcr\n'), ((15266, 15284), 'json.dumps', 'json.dumps', (['m_list'], {}), '(m_list)\n', (15276, 15284), False, 'import json\n'), ((15440, 15456), 'numpy.array', 'np.array', (['m_list'], {}), '(m_list)\n', (15448, 15456), True, 'import numpy as np\n'), ((6808, 6852), 'sklearn.cluster.OPTICS', 'OPTICS', ([], {'metric': '"""precomputed"""', 'min_samples': 'ms'}), "(metric='precomputed', min_samples=ms)\n", (6814, 6852), False, 'from sklearn.cluster import OPTICS\n'), ((7139, 7213), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['dist_matrix'], {'labels': 'labels', 'metric': '"""precomputed"""'}), "(dist_matrix, labels=labels, metric='precomputed')\n", (7163, 7213), False, 'from sklearn import metrics\n'), ((8074, 8162), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['no_noise_dm'], {'labels': 'no_noise_labels', 'metric': '"""precomputed"""'}), "(no_noise_dm, labels=no_noise_labels, metric=\n 'precomputed')\n", (8098, 8162), False, 'from sklearn import metrics\n'), ((14887, 14898), 'time.time', 'time.time', ([], {}), '()\n', (14896, 14898), False, 'import time\n'), ((14961, 14972), 'time.time', 'time.time', ([], {}), '()\n', (14970, 14972), False, 'import time\n'), ((7407, 7437), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels == -1)'], {}), '(labels == -1)\n', (7423, 7437), True, 'import numpy as np\n'), ((10313, 10348), 'numpy.count_nonzero', 'np.count_nonzero', (['(best_labels == -1)'], {}), '(best_labels == -1)\n', (10329, 10348), True, 'import numpy as np\n'), ((13471, 13480), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13478, 13480), True, 'import matplotlib.pyplot as plt\n')]
|
"""
SAMS umbrella sampling for DDR1 kinase DFG loop flip.
"""
__author__ = '<NAME>'
################################################################################
# IMPORTS
################################################################################
import os, os.path
import sys, math
import numpy as np
import time
from simtk import openmm, unit
from simtk.openmm import app
import mdtraj as md
import netCDF4
from sams import ThermodynamicState
################################################################################
# MAJOR SETTINGS AND PARAMETERS
################################################################################
# Define paths for explicitly-solvated complex
system_xml_filename = 'setup/system.xml'
state_xml_filename = 'setup/state_DFG_IN.xml'
state_pdb_filename = 'setup/state_DFG_IN.pdb'
pdb_filename = 'setup/systems/Abl-STI/complex.pdb'
# Specify umbrellas for distance restraint
umbrella_sigma = 5*unit.degrees # umbrella stddev width in absence of external PMF (no Jacobian)
umbrella_atoms = [2817, 2815, 2825, 2830] # atoms involved in umbrella restraint
#ATOM 2818 CB ALA A 180 1.927 52.416 41.379 1.00 0.00 C
#ATOM 2816 CA ALA A 180 3.319 52.098 40.823 1.00 0.00 C
#ATOM 2826 CA ASP A 181 5.071 50.442 43.834 1.00 0.00 C
#ATOM 2831 CG ASP A 181 2.928 49.040 44.337 1.00 0.00 C
min_dihedral = -180*unit.degrees
max_dihedral = +180*unit.degrees
dihedral_unit = unit.degrees
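# place umbrella centres roughly one sigma apart across the dihedral range (the +2 covers the endpoints)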
numbrellas = int((max_dihedral - min_dihedral) / umbrella_sigma + 2)
umbrella_values = np.linspace(min_dihedral/dihedral_unit, max_dihedral/dihedral_unit, numbrellas) * dihedral_unit
# Output SAMS filename
netcdf_filename = 'output.nc'
pdb_trajectory_filename = 'trajectory.pdb' # first frame of trajectory to be written at end
dcd_trajectory_filename = 'trajectory.dcd' # DCD format for trajectory to be written at end
# Simulation conditions
temperature = 298.0 * unit.kelvin
pressure = 1.0 * unit.atmospheres
collision_rate = 1.0 / unit.picoseconds
timestep = 2.0 * unit.femtoseconds
#minimize = True # if True, will minimize the structure before simulation (highly recommended)
minimize = False
################################################################################
# SUBROUTINES
################################################################################
def read_file(filename):
infile = open(filename, 'r')
contents = infile.read()
return contents
################################################################################
# MAIN
################################################################################
from sams import kB
kT = kB * temperature
beta = 1.0 / kT
# Load system
print('Loading system...')
system = openmm.XmlSerializer.deserialize(read_file(system_xml_filename))
pdbfile = app.PDBFile(state_pdb_filename)
topology = pdbfile.topology
state = openmm.XmlSerializer.deserialize(read_file(state_xml_filename))
positions = state.getPositions(asNumpy=True)
box_vectors = state.getPeriodicBoxVectors()
print('System has %d atoms.' % system.getNumParticles())
forces = { force.__class__.__name__ : force for force in system.getForces() }
if (pressure is not None) and ('MonteCarloBarostat' not in forces):
# Add a barostat
print("Adding barostat...")
barostat = openmm.MonteCarloBarostat(pressure, temperature)
    system.addForce(barostat)
else:
# TODO: Update barostat
pass
# Add umbrella restraint with global variable to control umbrella position
print('umbrella schedule for dihedral defined by atoms %s : %s' % (str(umbrella_atoms), str(umbrella_values)))
from numpy import pi
energy_function = '- (umbrella_K/2) * cos(min(dtheta, 2*pi-dtheta)); dtheta = abs(theta-umbrella_r0);'
energy_function += 'pi = %f;' % pi
umbrella_force = openmm.CustomTorsionForce(energy_function)
umbrella_force.addGlobalParameter('umbrella_K', 0.0)
umbrella_force.addGlobalParameter('umbrella_r0', 0.0)
umbrella_force.addTorsion(*umbrella_atoms, [])
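# harmonic force constant chosen so the umbrella width equals umbrella_sigma: K = kT / sigma^2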
umbrella_K = kT/umbrella_sigma**2
system.addForce(umbrella_force)
# Create thermodynamic states
thermodynamic_states = list()
# Umbrella off state
parameters = {
'umbrella_K' : 0.0, 'umbrella_r0' : 0.0, # umbrella parameters
}
thermodynamic_states.append( ThermodynamicState(system=system, temperature=temperature, pressure=pressure, parameters=parameters) )
# Umbrella on state
alchemical_lambda = 0.0
for umbrella_value in umbrella_values:
parameters = {
'umbrella_K' : umbrella_K.value_in_unit_system(unit.md_unit_system), 'umbrella_r0' : umbrella_value.value_in_unit_system(unit.md_unit_system), # umbrella parameters
}
thermodynamic_states.append( ThermodynamicState(system=system, temperature=temperature, pressure=pressure, parameters=parameters) )
# Analyze
from sams import analysis
# States
from collections import namedtuple
MockTestsystem = namedtuple('MockTestsystem', ['description', 'thermodynamic_states'])
testsystem = MockTestsystem(description='DDR1 umbrella states', thermodynamic_states=thermodynamic_states)
analysis.analyze(netcdf_filename, testsystem, 'output.pdf')
# Write trajectory
reference_pdb_filename = 'trajectory.pdb'
trajectory_filename = 'trajectory.xtc'
analysis.write_trajectory(netcdf_filename, topology, reference_pdb_filename, trajectory_filename)
|
[
"sams.analysis.analyze",
"simtk.openmm.CustomTorsionForce",
"simtk.openmm.MonteCarloBarostat",
"sams.ThermodynamicState",
"collections.namedtuple",
"numpy.linspace",
"simtk.openmm.app.PDBFile",
"sams.analysis.write_trajectory"
] |
[((2874, 2905), 'simtk.openmm.app.PDBFile', 'app.PDBFile', (['state_pdb_filename'], {}), '(state_pdb_filename)\n', (2885, 2905), False, 'from simtk.openmm import app\n'), ((3863, 3905), 'simtk.openmm.CustomTorsionForce', 'openmm.CustomTorsionForce', (['energy_function'], {}), '(energy_function)\n', (3888, 3905), False, 'from simtk import openmm, unit\n'), ((4941, 5010), 'collections.namedtuple', 'namedtuple', (['"""MockTestsystem"""', "['description', 'thermodynamic_states']"], {}), "('MockTestsystem', ['description', 'thermodynamic_states'])\n", (4951, 5010), False, 'from collections import namedtuple\n'), ((5118, 5177), 'sams.analysis.analyze', 'analysis.analyze', (['netcdf_filename', 'testsystem', '"""output.pdf"""'], {}), "(netcdf_filename, testsystem, 'output.pdf')\n", (5134, 5177), False, 'from sams import analysis\n'), ((5278, 5379), 'sams.analysis.write_trajectory', 'analysis.write_trajectory', (['netcdf_filename', 'topology', 'reference_pdb_filename', 'trajectory_filename'], {}), '(netcdf_filename, topology, reference_pdb_filename,\n trajectory_filename)\n', (5303, 5379), False, 'from sams import analysis\n'), ((1621, 1708), 'numpy.linspace', 'np.linspace', (['(min_dihedral / dihedral_unit)', '(max_dihedral / dihedral_unit)', 'numbrellas'], {}), '(min_dihedral / dihedral_unit, max_dihedral / dihedral_unit,\n numbrellas)\n', (1632, 1708), True, 'import numpy as np\n'), ((3367, 3415), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (3392, 3415), False, 'from simtk import openmm, unit\n'), ((4322, 4427), 'sams.ThermodynamicState', 'ThermodynamicState', ([], {'system': 'system', 'temperature': 'temperature', 'pressure': 'pressure', 'parameters': 'parameters'}), '(system=system, temperature=temperature, pressure=\n pressure, parameters=parameters)\n', (4340, 4427), False, 'from sams import ThermodynamicState\n'), ((4740, 4845), 'sams.ThermodynamicState', 'ThermodynamicState', ([], {'system': 'system', 'temperature': 'temperature', 'pressure': 'pressure', 'parameters': 'parameters'}), '(system=system, temperature=temperature, pressure=\n pressure, parameters=parameters)\n', (4758, 4845), False, 'from sams import ThermodynamicState\n')]
|
import numpy as np
from numpy.random import beta
import sys
#sys.path.append('../h5hep')
#from write import *
import hepfile
################################################################################
def calc_energy(mass,px,py,pz):
energy = np.sqrt(mass*mass + px*px + py*py + pz*pz)
return energy
################################################################################
data = hepfile.initialize()
hepfile.create_group(data,'jet',counter='njet')
hepfile.create_dataset(data,['e','px','py','pz','btag'],group='jet',dtype=float)
hepfile.create_group(data,'muon',counter='nmuon')
hepfile.create_dataset(data,['e','px','py','pz','q'],group='muon',dtype=float)
hepfile.create_group(data,'electron',counter='nelectron')
hepfile.create_dataset(data,['e','px','py','pz','q'],group='electron',dtype=float)
hepfile.create_group(data,'photon',counter='nphoton')
hepfile.create_dataset(data,['e','px','py','pz'],group='photon',dtype=float)
hepfile.create_group(data,'MET',counter='nMET')
hepfile.create_dataset(data,['pt','phi'],group='MET',dtype=float)
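# a single "bucket" holds one event's worth of values; it is filled below and packed into `data` once per event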
event = hepfile.create_single_bucket(data)
nentries = 10000
#print(data)
#print(event)
#'''
for i in range(0,nentries):
if i%1000==0:
print(i)
njet = np.random.randint(10)
event['jet/njet'] = njet
for n in range(njet):
px = 300*beta(2,9)
py = 300*beta(2,9)
pz = 300*beta(2,9)
mass = 5*beta(2,9)
energy = calc_energy(mass,px,py,pz)
event['jet/px'].append(px)
event['jet/py'].append(py)
event['jet/pz'].append(pz)
event['jet/e'].append(energy)
event['jet/btag'].append(np.random.random())
nmuon = np.random.randint(10)
event['muon/nmuon'] = nmuon
for n in range(nmuon):
px = 300*beta(2,9)
py = 300*beta(2,9)
pz = 300*beta(2,9)
mass = 0.105
energy = calc_energy(mass,px,py,pz)
event['muon/px'].append(px)
event['muon/py'].append(py)
event['muon/pz'].append(pz)
event['muon/e'].append(energy)
event['muon/q'].append(2*np.random.randint(2) - 1)
nelectron = np.random.randint(10)
event['electron/nelectron'] = nelectron
for n in range(nelectron):
px = 300*beta(2,9)
py = 300*beta(2,9)
pz = 300*beta(2,9)
mass = 0.000511
energy = calc_energy(mass,px,py,pz)
event['electron/px'].append(px)
event['electron/py'].append(py)
event['electron/pz'].append(pz)
event['electron/e'].append(energy)
event['electron/q'].append(2*np.random.randint(2) - 1)
nphoton = np.random.randint(10)
event['photon/nphoton'] = nphoton
for n in range(nphoton):
px = 300*beta(2,9)
py = 300*beta(2,9)
pz = 300*beta(2,9)
mass = 0.0
energy = calc_energy(mass,px,py,pz)
event['photon/px'].append(px)
event['photon/py'].append(py)
event['photon/pz'].append(pz)
event['photon/e'].append(energy)
hepfile.pack(data,event)
print("Writing the file...")
#hdfile = write_to_file('output.hdf5',data)
hdfile = hepfile.write_to_file('HEP_random_file_LARGE.hdf5',data,comp_type='gzip',comp_opts=9)
#'''
|
[
"hepfile.initialize",
"hepfile.pack",
"numpy.random.beta",
"hepfile.create_single_bucket",
"hepfile.write_to_file",
"numpy.random.randint",
"numpy.random.random",
"hepfile.create_group",
"numpy.sqrt",
"hepfile.create_dataset"
] |
[((405, 425), 'hepfile.initialize', 'hepfile.initialize', ([], {}), '()\n', (423, 425), False, 'import hepfile\n'), ((427, 476), 'hepfile.create_group', 'hepfile.create_group', (['data', '"""jet"""'], {'counter': '"""njet"""'}), "(data, 'jet', counter='njet')\n", (447, 476), False, 'import hepfile\n'), ((475, 566), 'hepfile.create_dataset', 'hepfile.create_dataset', (['data', "['e', 'px', 'py', 'pz', 'btag']"], {'group': '"""jet"""', 'dtype': 'float'}), "(data, ['e', 'px', 'py', 'pz', 'btag'], group='jet',\n dtype=float)\n", (497, 566), False, 'import hepfile\n'), ((557, 608), 'hepfile.create_group', 'hepfile.create_group', (['data', '"""muon"""'], {'counter': '"""nmuon"""'}), "(data, 'muon', counter='nmuon')\n", (577, 608), False, 'import hepfile\n'), ((607, 696), 'hepfile.create_dataset', 'hepfile.create_dataset', (['data', "['e', 'px', 'py', 'pz', 'q']"], {'group': '"""muon"""', 'dtype': 'float'}), "(data, ['e', 'px', 'py', 'pz', 'q'], group='muon',\n dtype=float)\n", (629, 696), False, 'import hepfile\n'), ((687, 746), 'hepfile.create_group', 'hepfile.create_group', (['data', '"""electron"""'], {'counter': '"""nelectron"""'}), "(data, 'electron', counter='nelectron')\n", (707, 746), False, 'import hepfile\n'), ((745, 838), 'hepfile.create_dataset', 'hepfile.create_dataset', (['data', "['e', 'px', 'py', 'pz', 'q']"], {'group': '"""electron"""', 'dtype': 'float'}), "(data, ['e', 'px', 'py', 'pz', 'q'], group='electron',\n dtype=float)\n", (767, 838), False, 'import hepfile\n'), ((829, 884), 'hepfile.create_group', 'hepfile.create_group', (['data', '"""photon"""'], {'counter': '"""nphoton"""'}), "(data, 'photon', counter='nphoton')\n", (849, 884), False, 'import hepfile\n'), ((883, 970), 'hepfile.create_dataset', 'hepfile.create_dataset', (['data', "['e', 'px', 'py', 'pz']"], {'group': '"""photon"""', 'dtype': 'float'}), "(data, ['e', 'px', 'py', 'pz'], group='photon', dtype\n =float)\n", (905, 970), False, 'import hepfile\n'), ((961, 1010), 'hepfile.create_group', 'hepfile.create_group', (['data', '"""MET"""'], {'counter': '"""nMET"""'}), "(data, 'MET', counter='nMET')\n", (981, 1010), False, 'import hepfile\n'), ((1009, 1078), 'hepfile.create_dataset', 'hepfile.create_dataset', (['data', "['pt', 'phi']"], {'group': '"""MET"""', 'dtype': 'float'}), "(data, ['pt', 'phi'], group='MET', dtype=float)\n", (1031, 1078), False, 'import hepfile\n'), ((1084, 1118), 'hepfile.create_single_bucket', 'hepfile.create_single_bucket', (['data'], {}), '(data)\n', (1112, 1118), False, 'import hepfile\n'), ((3131, 3223), 'hepfile.write_to_file', 'hepfile.write_to_file', (['"""HEP_random_file_LARGE.hdf5"""', 'data'], {'comp_type': '"""gzip"""', 'comp_opts': '(9)'}), "('HEP_random_file_LARGE.hdf5', data, comp_type='gzip',\n comp_opts=9)\n", (3152, 3223), False, 'import hepfile\n'), ((253, 303), 'numpy.sqrt', 'np.sqrt', (['(mass * mass + px * px + py * py + pz * pz)'], {}), '(mass * mass + px * px + py * py + pz * pz)\n', (260, 303), True, 'import numpy as np\n'), ((1247, 1268), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (1264, 1268), True, 'import numpy as np\n'), ((1685, 1706), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (1702, 1706), True, 'import numpy as np\n'), ((2135, 2156), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (2152, 2156), True, 'import numpy as np\n'), ((2622, 2643), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (2639, 2643), True, 'import numpy as np\n'), ((3023, 3048), 'hepfile.pack', 
'hepfile.pack', (['data', 'event'], {}), '(data, event)\n', (3035, 3048), False, 'import hepfile\n'), ((1341, 1351), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1345, 1351), False, 'from numpy.random import beta\n'), ((1368, 1378), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1372, 1378), False, 'from numpy.random import beta\n'), ((1395, 1405), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1399, 1405), False, 'from numpy.random import beta\n'), ((1422, 1432), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1426, 1432), False, 'from numpy.random import beta\n'), ((1652, 1670), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1668, 1670), True, 'import numpy as np\n'), ((1783, 1793), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1787, 1793), False, 'from numpy.random import beta\n'), ((1810, 1820), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1814, 1820), False, 'from numpy.random import beta\n'), ((1837, 1847), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (1841, 1847), False, 'from numpy.random import beta\n'), ((2249, 2259), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (2253, 2259), False, 'from numpy.random import beta\n'), ((2276, 2286), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (2280, 2286), False, 'from numpy.random import beta\n'), ((2303, 2313), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (2307, 2313), False, 'from numpy.random import beta\n'), ((2728, 2738), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (2732, 2738), False, 'from numpy.random import beta\n'), ((2755, 2765), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (2759, 2765), False, 'from numpy.random import beta\n'), ((2782, 2792), 'numpy.random.beta', 'beta', (['(2)', '(9)'], {}), '(2, 9)\n', (2786, 2792), False, 'from numpy.random import beta\n'), ((2092, 2112), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (2109, 2112), True, 'import numpy as np\n'), ((2581, 2601), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (2598, 2601), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import Person
import time
# Entry and exit counters
cnt_up = 0
cnt_down = 0
# Video source
#cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('peopleCounter.avi')
# Video properties
##cap.set(3, 160) #Width
##cap.set(4, 120) #Height
# Print the capture properties to the console
for i in range(19):
print(i, cap.get(i))
w = cap.get(3)
h = cap.get(4)
frameArea = h*w
#areaTH = frameArea/250 # Area threshold above which a contour counts as a person
areaTH = 1500
print('Area Threshold', areaTH)
# Entry/exit lines
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))
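# (Comment added for clarity; not in the original script.) Layout of the four
# horizontal reference lines, with y growing downward from the top of the frame:
#   y = 1*(h/5) -> up_limit    : upper bound of the band where tracks are matched
#   y = 2*(h/5) -> line_up     : upper counting line (drawn via pts_L2)
#   y = 3*(h/5) -> line_down   : lower counting line (drawn via pts_L1)
#   y = 4*(h/5) -> down_limit  : lower bound of the band where tracks are matched
# A tracked person moving upward past line_up increments cnt_up, and one moving
# downward past line_down increments cnt_down (the crossing test lives in Person).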
print("Red line y:", str(line_down))
print("Blue line y:", str(line_up))
line_down_color = (255, 0, 0)
line_up_color = (0, 0, 255)
pt1 = [0, line_down];
pt2 = [w, line_down];
pts_L1 = np.array([pt1, pt2], np.int32) # Lower decision line
pts_L1 = pts_L1.reshape((-1, 1, 2))
pt3 = [0, line_up];
pt4 = [w, line_up];
pts_L2 = np.array([pt3, pt4], np.int32) # Upper decision line
pts_L2 = pts_L2.reshape((-1, 1, 2))
pt5 = [0, up_limit];
pt6 = [w, up_limit];
pts_L3 = np.array([pt5, pt6], np.int32)
pts_L3 = pts_L3.reshape((-1, 1, 2))
pt7 = [0, down_limit];
pt8 = [w, down_limit];
pts_L4 = np.array([pt7,pt8], np.int32)
pts_L4 = pts_L4.reshape((-1, 1, 2))
# Background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = True)
# Structuring elements for morphological filters
kernelOp = np.ones((3, 3), np.uint8)
kernelOp2 = np.ones((5, 5), np.uint8)
kernelCl = np.ones((11, 11), np.uint8)
# Variables
font = cv2.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
while(cap.isOpened()):
##for image in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # Read a frame from the video source
ret, frame = cap.read()
## frame = image.array
for i in persons:
i.age_one() #age every person one frame
#########################
    #    PRE-PROCESSING     #
    #########################
    # Apply background subtraction
fgmask = fgbg.apply(frame)
fgmask2 = fgbg.apply(frame)
try:
        # Binarization to remove shadows (gray pixels)
ret, imBin= cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
ret, imBin2 = cv2.threshold(fgmask2, 200, 255, cv2.THRESH_BINARY)
        # Opening (erode->dilate) to remove noise
mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
mask2 = cv2.morphologyEx(imBin2, cv2.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to merge white regions
mask = cv2.morphologyEx(mask , cv2.MORPH_CLOSE, kernelCl)
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernelCl)
except:
print('EOF')
print('UP:', cnt_up)
print('DOWN:', cnt_down)
break
#################
    #   CONTOURS    #
#################
# RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
_, contours0, hierarchy = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours0:
area = cv2.contourArea(cnt)
if area > areaTH:
#################
# TRACKING #
#################
            # TODO: add handling for multiple people and for objects entering/leaving the frame.
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
x, y, w, h = cv2.boundingRect(cnt)
new = True
if cy in range(up_limit, down_limit):
for i in persons:
if abs(cx-i.getX()) <= w and abs(cy-i.getY()) <= h:
                        # The object is close to one already detected: treat it as the same person
new = False
                        i.updateCoords(cx, cy)   # Update the object's coordinates and reset its age
if i.going_UP(line_down, line_up) == True:
cnt_up += 1;
print("ID:", i.getId(), 'crossed going up at', time.strftime("%c"))
elif i.going_DOWN(line_down, line_up) == True:
cnt_down += 1;
print("ID:", i.getId(), 'crossed going down at', time.strftime("%c"))
break
if i.getState() == '1':
if i.getDir() == 'down' and i.getY() > down_limit:
i.setDone()
elif i.getDir() == 'up' and i.getY() < up_limit:
i.setDone()
if i.timedOut():
                        # Remove i from the persons list
index = persons.index(i)
persons.pop(index)
                        del i  # free the memory used by i
if new == True:
p = Person.MyPerson(pid, cx, cy, max_p_age)
persons.append(p)
pid += 1
#################
            #   DRAWING   #
#################
cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
img = cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#END for cnt in contours0
#########################
    #   DRAW TRAJECTORIES   #
#########################
for i in persons:
## if len(i.getTracks()) >= 2:
## pts = np.array(i.getTracks(), np.int32)
## pts = pts.reshape((-1,1,2))
## frame = cv2.polylines(frame,[pts],False,i.getRGB())
## if i.getId() == 9:
## print str(i.getX()), ',', str(i.getY())
cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.3, i.getRGB(), 1, cv2.LINE_AA)
#################
    #      IMAGES      #
#################
str_up = 'UP: ' + str(cnt_up)
str_down = 'DOWN: ' + str(cnt_down)
frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=2)
frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=2)
frame = cv2.polylines(frame, [pts_L3], False, (255,255,255), thickness=1)
frame = cv2.polylines(frame, [pts_L4], False, (255,255,255), thickness=1)
cv2.putText(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
cv2.imshow('Frame', frame)
#cv2.imshow('Mask',mask)
    # press ESC to exit
k = cv2.waitKey(30) & 0xff
if k == 27:
break
#END while(cap.isOpened())
#################
#    CLEANUP    #
#################
cap.release()
cv2.destroyAllWindows()
|
[
"numpy.ones",
"time.strftime",
"cv2.rectangle",
"cv2.imshow",
"cv2.contourArea",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"cv2.circle",
"cv2.waitKey",
"cv2.morphologyEx",
"Person.MyPerson",
"cv2.createBackgroundSubtractorMOG2",
"cv2.putText",
"cv2.polylines",
"cv2.threshold",
"cv2.moments",
"cv2.VideoCapture",
"numpy.array",
"cv2.findContours"
] |
[((164, 201), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""peopleCounter.avi"""'], {}), "('peopleCounter.avi')\n", (180, 201), False, 'import cv2\n'), ((852, 882), 'numpy.array', 'np.array', (['[pt1, pt2]', 'np.int32'], {}), '([pt1, pt2], np.int32)\n', (860, 882), True, 'import numpy as np\n'), ((998, 1028), 'numpy.array', 'np.array', (['[pt3, pt4]', 'np.int32'], {}), '([pt3, pt4], np.int32)\n', (1006, 1028), True, 'import numpy as np\n'), ((1147, 1177), 'numpy.array', 'np.array', (['[pt5, pt6]', 'np.int32'], {}), '([pt5, pt6], np.int32)\n', (1155, 1177), True, 'import numpy as np\n'), ((1269, 1299), 'numpy.array', 'np.array', (['[pt7, pt8]', 'np.int32'], {}), '([pt7, pt8], np.int32)\n', (1277, 1299), True, 'import numpy as np\n'), ((1365, 1419), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {'detectShadows': '(True)'}), '(detectShadows=True)\n', (1399, 1419), False, 'import cv2\n'), ((1486, 1511), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (1493, 1511), True, 'import numpy as np\n'), ((1524, 1549), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1531, 1549), True, 'import numpy as np\n'), ((1561, 1588), 'numpy.ones', 'np.ones', (['(11, 11)', 'np.uint8'], {}), '((11, 11), np.uint8)\n', (1568, 1588), True, 'import numpy as np\n'), ((7001, 7024), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7022, 7024), False, 'import cv2\n'), ((3037, 3104), 'cv2.findContours', 'cv2.findContours', (['mask2', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3053, 3104), False, 'import cv2\n'), ((6102, 6169), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L1]', '(False)', 'line_down_color'], {'thickness': '(2)'}), '(frame, [pts_L1], False, line_down_color, thickness=2)\n', (6115, 6169), False, 'import cv2\n'), ((6182, 6247), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L2]', '(False)', 'line_up_color'], {'thickness': '(2)'}), '(frame, [pts_L2], False, line_up_color, thickness=2)\n', (6195, 6247), False, 'import cv2\n'), ((6260, 6327), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L3]', '(False)', '(255, 255, 255)'], {'thickness': '(1)'}), '(frame, [pts_L3], False, (255, 255, 255), thickness=1)\n', (6273, 6327), False, 'import cv2\n'), ((6338, 6405), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L4]', '(False)', '(255, 255, 255)'], {'thickness': '(1)'}), '(frame, [pts_L4], False, (255, 255, 255), thickness=1)\n', (6351, 6405), False, 'import cv2\n'), ((6408, 6493), 'cv2.putText', 'cv2.putText', (['frame', 'str_up', '(10, 40)', 'font', '(0.5)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA\n )\n', (6419, 6493), False, 'import cv2\n'), ((6493, 6569), 'cv2.putText', 'cv2.putText', (['frame', 'str_up', '(10, 40)', 'font', '(0.5)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), '(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)\n', (6504, 6569), False, 'import cv2\n'), ((6574, 6661), 'cv2.putText', 'cv2.putText', (['frame', 'str_down', '(10, 90)', 'font', '(0.5)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.\n LINE_AA)\n', (6585, 6661), False, 'import cv2\n'), ((6661, 6739), 'cv2.putText', 'cv2.putText', (['frame', 'str_down', '(10, 90)', 'font', '(0.5)', '(255, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), '(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, 
cv2.LINE_AA)\n', (6672, 6739), False, 'import cv2\n'), ((6745, 6771), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (6755, 6771), False, 'import cv2\n'), ((2229, 2279), 'cv2.threshold', 'cv2.threshold', (['fgmask', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(fgmask, 200, 255, cv2.THRESH_BINARY)\n', (2242, 2279), False, 'import cv2\n'), ((2302, 2353), 'cv2.threshold', 'cv2.threshold', (['fgmask2', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(fgmask2, 200, 255, cv2.THRESH_BINARY)\n', (2315, 2353), False, 'import cv2\n'), ((2421, 2470), 'cv2.morphologyEx', 'cv2.morphologyEx', (['imBin', 'cv2.MORPH_OPEN', 'kernelOp'], {}), '(imBin, cv2.MORPH_OPEN, kernelOp)\n', (2437, 2470), False, 'import cv2\n'), ((2487, 2537), 'cv2.morphologyEx', 'cv2.morphologyEx', (['imBin2', 'cv2.MORPH_OPEN', 'kernelOp'], {}), '(imBin2, cv2.MORPH_OPEN, kernelOp)\n', (2503, 2537), False, 'import cv2\n'), ((2619, 2668), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernelCl'], {}), '(mask, cv2.MORPH_CLOSE, kernelCl)\n', (2635, 2668), False, 'import cv2\n'), ((2686, 2736), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask2', 'cv2.MORPH_CLOSE', 'kernelCl'], {}), '(mask2, cv2.MORPH_CLOSE, kernelCl)\n', (2702, 2736), False, 'import cv2\n'), ((3146, 3166), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3161, 3166), False, 'import cv2\n'), ((6848, 6863), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (6859, 6863), False, 'import cv2\n'), ((3416, 3432), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (3427, 3432), False, 'import cv2\n'), ((3538, 3559), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (3554, 3559), False, 'import cv2\n'), ((5191, 5238), 'cv2.circle', 'cv2.circle', (['frame', '(cx, cy)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (cx, cy), 5, (0, 0, 255), -1)\n', (5201, 5238), False, 'import cv2\n'), ((5257, 5317), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (5270, 5317), False, 'import cv2\n'), ((4982, 5021), 'Person.MyPerson', 'Person.MyPerson', (['pid', 'cx', 'cy', 'max_p_age'], {}), '(pid, cx, cy, max_p_age)\n', (4997, 5021), False, 'import Person\n'), ((4149, 4168), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (4162, 4168), False, 'import time\n'), ((4361, 4380), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (4374, 4380), False, 'import time\n')]
|
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
# Contains general matrix utilities. Some, but not all, of these tools are specific to
# matrices over the ints modulo 2.
import numpy as _np
def dotmod2(m1, m2):
"""
    Returns the product over the integers modulo 2 of
two matrices.
"""
return _np.dot(m1, m2) % 2
def multidotmod2(mlist):
"""
    Returns the product over the integers modulo 2 of
a list of matrices.
"""
return _np.linalg.multi_dot(mlist) % 2
def detmod2(m):
"""
    Returns the determinant of a matrix over the integers
modulo 2 (GL(n,2)).
"""
return _np.round(_np.linalg.det(m)) % 2
# A utility function used by the random symplectic matrix sampler.
def matrix_directsum(m1, m2):
"""
Returns the direct sum of two square matrices of integers.
"""
n1 = len(m1[0, :])
n2 = len(m2[0, :])
output = _np.zeros((n1 + n2, n1 + n2), dtype='int8')
output[0:n1, 0:n1] = m1
output[n1:n1 + n2, n1:n1 + n2] = m2
return output
def inv_mod2(m):
"""
Finds the inverse of a matrix over GL(n,2)
"""
t = len(m)
c = _np.append(m, _np.eye(t), 1)
return _np.array(gaussian_elimination_mod2(c)[:, t:])
def Axb_mod2(A, b):
"""
Solves Ax = b over GF(2)
"""
b = _np.array([b]).T
C = _np.append(A, b, 1)
return _np.array([gaussian_elimination_mod2(C)[:, -1]]).T
def gaussian_elimination_mod2(A):
"""
Gaussian elimination mod2 of A.
"""
A = _np.array(A, dtype='int')
m, n = A.shape
i, j = 0, 0
while (i < m) and (j < n):
k = A[i:m, j].argmax() + i
A[_np.array([i, k]), :] = A[_np.array([k, i]), :]
aijn = _np.array([A[i, j:]])
col = _np.array([A[:, j]]).T
col[i] = 0
flip = _np.dot(col, aijn)
A[:, j:] = _np.bitwise_xor(A[:, j:], flip)
i += 1
j += 1
return A
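# --- Illustrative usage sketch (added for clarity; not part of the original pyGSTi code) ---
# Over GF(2) a matrix times its inverse reduces to the identity, so inv_mod2
# (which augments with the identity and runs gaussian_elimination_mod2) can be
# sanity-checked as below. This helper is an example only and is never called.
def _example_inv_mod2_roundtrip():
    m = _np.array([[1, 1, 0],
                   [0, 1, 1],
                   [0, 0, 1]])  # invertible over GF(2): det(m) mod 2 == 1
    assert _np.array_equal(dotmod2(m, inv_mod2(m)), _np.identity(3, dtype=int))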
def diagonal_as_vec(m):
"""
Returns a 1D array containing the diagonal of the input square 2D array m.
"""
l = _np.shape(m)[0]
vec = _np.zeros(l, int)
for i in range(0, l):
vec[i] = m[i, i]
return vec
def strictly_upper_triangle(m):
"""
Returns a matrix containing the strictly upper triangle of m and zeros elsewhere.
"""
l = _np.shape(m)[0]
out = m.copy()
for i in range(0, l):
for j in range(0, i + 1):
out[i, j] = 0
return out
def diagonal_as_matrix(m):
"""
Returns a diagonal matrix containing the diagonal of m.
"""
l = _np.shape(m)[0]
out = _np.zeros((l, l), int)
for i in range(0, l):
out[i, i] = m[i, i]
return out
# Code for factorizing a symmetric matrix invertable matrix A over GL(n,2) into
# the form A = F F.T. The algorithm mostly follows the proof in *Orthogonal Matrices
# Over Finite Fields* by <NAME> in The American Mathematical Monthly,
# Vol. 76, No. 2 (Feb., 1969), pp. 152-164
def albert_factor(D, failcount=0):
"""
Returns a matrix M such that D = M M.T for symmetric D, where D and M are
matrices over [0,1] mod 2. The algorithm mostly follows the proof in "Orthogonal Matrices
Over Finite Fields" by <NAME> in The American Mathematical Monthly, Vol. 76, No. 2
(Feb., 1969), pp. 152-164
    There is generally not a unique Albert factorization, and this algorithm is randomized. It will
    generally return a different factorization on each call.
"""
D = _np.array(D, dtype='int')
proper = False
while not proper:
N = onesify(D)
aa = multidotmod2([N, D, N.T])
P = proper_permutation(aa)
A = multidotmod2([P, aa, P.T])
proper = check_proper_permutation(A)
t = len(A)
# Start in lower right
L = _np.array([[1]])
for ind in range(t - 2, -1, -1):
block = A[ind:, ind:].copy()
z = block[0, 1:]
B = block[1:, 1:]
n = Axb_mod2(B, z).T
x = _np.array(_np.dot(n, L), dtype='int')
zer = _np.zeros([t - ind - 1, 1])
L = _np.array(_np.bmat([[_np.eye(1), x], [zer, L]]), dtype='int')
Qinv = inv_mod2(dotmod2(P, N))
L = dotmod2(_np.array(Qinv), L)
return L
def random_bitstring(n, p, failcount=0):
"""
Constructs a random bitstring of length n with parity p
"""
bitstring = _np.random.randint(0, 2, size=n)
if _np.mod(sum(bitstring), 2) == p:
return bitstring
elif failcount < 100:
return _np.array(random_bitstring(n, p, failcount + 1), dtype='int')
def random_invertable_matrix(n, failcount=0):
"""
    Finds a random invertible matrix M over GL(n,2)
"""
M = _np.array([random_bitstring(n, _np.random.randint(0, 2)) for x in range(n)])
if detmod2(M) == 0:
if failcount < 100:
return random_invertable_matrix(n, failcount + 1)
else:
return M
def random_symmetric_invertable_matrix(n):
"""
Creates a random, symmetric, invertible matrix from GL(n,2)
"""
M = random_invertable_matrix(n)
return dotmod2(M, M.T)
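# --- Illustrative usage sketch (added for clarity; not part of the original pyGSTi code) ---
# The Albert factorization contract is D == L L.T over GF(2) for a symmetric,
# invertible D. This helper only demonstrates the round trip and is never called.
def _example_albert_factor_roundtrip(n=4):
    D = random_symmetric_invertable_matrix(n)
    L = albert_factor(D)
    assert _np.array_equal(dotmod2(L, L.T), D)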
def onesify(A, failcount=0, maxfailcount=100):
"""
Returns M such that M A M.T has ones along the main diagonal
"""
assert(failcount < maxfailcount), "The function has failed too many times! Perhaps the input is invalid."
# This is probably the slowest function since it just tries things
t = len(A)
count = 0
test_string = _np.diag(A)
M = []
while (len(M) < t) and (count < 40):
bitstr = random_bitstring(t, _np.random.randint(0, 2))
if dotmod2(bitstr, test_string) == 1:
if not _np.any([_np.array_equal(bitstr, m) for m in M]):
M += [bitstr]
else:
count += 1
if len(M) < t:
return onesify(A, failcount + 1)
M = _np.array(M, dtype='int')
if _np.array_equal(dotmod2(M, inv_mod2(M)), _np.identity(t, int)):
return _np.array(M)
else:
return onesify(A, failcount + 1, maxfailcount=maxfailcount)
def permute_top(A, i):
"""
Permutes the first row & col with the i'th row & col
"""
t = len(A)
P = _np.eye(t)
P[0, 0] = 0
P[i, i] = 0
P[0, i] = 1
P[i, 0] = 1
return multidotmod2([P, A, P]), P
def fix_top(A):
"""
Takes a symmetric binary matrix with ones along the diagonal
and returns the permutation matrix P such that the [1:t,1:t]
submatrix of P A P is invertible
"""
if A.shape == (1, 1):
return _np.eye(1, dtype='int')
t = len(A)
found_B = False
for ind in range(t):
aa, P = permute_top(A, ind)
B = _np.round_(aa[1:, 1:])
if detmod2(B) == 0:
continue
else:
found_B = True
break
# Todo : put a more meaningful fail message here #
assert(found_B), "Algorithm failed!"
return P
def proper_permutation(A):
"""
Takes a symmetric binary matrix with ones along the diagonal
and returns the permutation matrix P such that all [n:t,n:t]
submatrices of P A P are invertible.
"""
t = len(A)
Ps = [] # permutation matrices
for ind in range(t):
perm = fix_top(A[ind:, ind:])
zer = _np.zeros([ind, t - ind])
full_perm = _np.array(_np.bmat([[_np.eye(ind), zer], [zer.T, perm]]))
A = multidotmod2([full_perm, A, full_perm.T])
Ps += [full_perm]
# return Ps
return multidotmod2(list(reversed(Ps)))
#return _np.linalg.multi_dot(list(reversed(Ps))) # Should this not be multidot_mod2 ?
def check_proper_permutation(A):
"""
    Check to see if the matrix has been properly permuted.
    This should be redundant with what is already built into
'fix_top'.
"""
t = len(A)
for ind in range(0, t):
b = A[ind:, ind:]
if detmod2(b) == 0:
return False
return True
|
[
"numpy.array_equal",
"numpy.eye",
"numpy.bitwise_xor",
"numpy.zeros",
"numpy.identity",
"numpy.shape",
"numpy.append",
"numpy.random.randint",
"numpy.array",
"numpy.round_",
"numpy.linalg.det",
"numpy.dot",
"numpy.diag",
"numpy.linalg.multi_dot"
] |
[((1528, 1571), 'numpy.zeros', '_np.zeros', (['(n1 + n2, n1 + n2)'], {'dtype': '"""int8"""'}), "((n1 + n2, n1 + n2), dtype='int8')\n", (1537, 1571), True, 'import numpy as _np\n'), ((1952, 1971), 'numpy.append', '_np.append', (['A', 'b', '(1)'], {}), '(A, b, 1)\n', (1962, 1971), True, 'import numpy as _np\n'), ((2132, 2157), 'numpy.array', '_np.array', (['A'], {'dtype': '"""int"""'}), "(A, dtype='int')\n", (2141, 2157), True, 'import numpy as _np\n'), ((2695, 2712), 'numpy.zeros', '_np.zeros', (['l', 'int'], {}), '(l, int)\n', (2704, 2712), True, 'import numpy as _np\n'), ((3202, 3224), 'numpy.zeros', '_np.zeros', (['(l, l)', 'int'], {}), '((l, l), int)\n', (3211, 3224), True, 'import numpy as _np\n'), ((4092, 4117), 'numpy.array', '_np.array', (['D'], {'dtype': '"""int"""'}), "(D, dtype='int')\n", (4101, 4117), True, 'import numpy as _np\n'), ((4393, 4409), 'numpy.array', '_np.array', (['[[1]]'], {}), '([[1]])\n', (4402, 4409), True, 'import numpy as _np\n'), ((4952, 4984), 'numpy.random.randint', '_np.random.randint', (['(0)', '(2)'], {'size': 'n'}), '(0, 2, size=n)\n', (4970, 4984), True, 'import numpy as _np\n'), ((6042, 6053), 'numpy.diag', '_np.diag', (['A'], {}), '(A)\n', (6050, 6053), True, 'import numpy as _np\n'), ((6430, 6455), 'numpy.array', '_np.array', (['M'], {'dtype': '"""int"""'}), "(M, dtype='int')\n", (6439, 6455), True, 'import numpy as _np\n'), ((6756, 6766), 'numpy.eye', '_np.eye', (['t'], {}), '(t)\n', (6763, 6766), True, 'import numpy as _np\n'), ((948, 963), 'numpy.dot', '_np.dot', (['m1', 'm2'], {}), '(m1, m2)\n', (955, 963), True, 'import numpy as _np\n'), ((1099, 1126), 'numpy.linalg.multi_dot', '_np.linalg.multi_dot', (['mlist'], {}), '(mlist)\n', (1119, 1126), True, 'import numpy as _np\n'), ((1778, 1788), 'numpy.eye', '_np.eye', (['t'], {}), '(t)\n', (1785, 1788), True, 'import numpy as _np\n'), ((1927, 1941), 'numpy.array', '_np.array', (['[b]'], {}), '([b])\n', (1936, 1941), True, 'import numpy as _np\n'), ((2333, 2354), 'numpy.array', '_np.array', (['[A[i, j:]]'], {}), '([A[i, j:]])\n', (2342, 2354), True, 'import numpy as _np\n'), ((2426, 2444), 'numpy.dot', '_np.dot', (['col', 'aijn'], {}), '(col, aijn)\n', (2433, 2444), True, 'import numpy as _np\n'), ((2464, 2495), 'numpy.bitwise_xor', '_np.bitwise_xor', (['A[:, j:]', 'flip'], {}), '(A[:, j:], flip)\n', (2479, 2495), True, 'import numpy as _np\n'), ((2669, 2681), 'numpy.shape', '_np.shape', (['m'], {}), '(m)\n', (2678, 2681), True, 'import numpy as _np\n'), ((2924, 2936), 'numpy.shape', '_np.shape', (['m'], {}), '(m)\n', (2933, 2936), True, 'import numpy as _np\n'), ((3176, 3188), 'numpy.shape', '_np.shape', (['m'], {}), '(m)\n', (3185, 3188), True, 'import numpy as _np\n'), ((4629, 4656), 'numpy.zeros', '_np.zeros', (['[t - ind - 1, 1]'], {}), '([t - ind - 1, 1])\n', (4638, 4656), True, 'import numpy as _np\n'), ((4783, 4798), 'numpy.array', '_np.array', (['Qinv'], {}), '(Qinv)\n', (4792, 4798), True, 'import numpy as _np\n'), ((6505, 6525), 'numpy.identity', '_np.identity', (['t', 'int'], {}), '(t, int)\n', (6517, 6525), True, 'import numpy as _np\n'), ((6543, 6555), 'numpy.array', '_np.array', (['M'], {}), '(M)\n', (6552, 6555), True, 'import numpy as _np\n'), ((7112, 7135), 'numpy.eye', '_np.eye', (['(1)'], {'dtype': '"""int"""'}), "(1, dtype='int')\n", (7119, 7135), True, 'import numpy as _np\n'), ((7246, 7268), 'numpy.round_', '_np.round_', (['aa[1:, 1:]'], {}), '(aa[1:, 1:])\n', (7256, 7268), True, 'import numpy as _np\n'), ((7834, 7859), 'numpy.zeros', '_np.zeros', (['[ind, t - ind]'], {}), 
'([ind, t - ind])\n', (7843, 7859), True, 'import numpy as _np\n'), ((1267, 1284), 'numpy.linalg.det', '_np.linalg.det', (['m'], {}), '(m)\n', (1281, 1284), True, 'import numpy as _np\n'), ((2369, 2389), 'numpy.array', '_np.array', (['[A[:, j]]'], {}), '([A[:, j]])\n', (2378, 2389), True, 'import numpy as _np\n'), ((4587, 4600), 'numpy.dot', '_np.dot', (['n', 'L'], {}), '(n, L)\n', (4594, 4600), True, 'import numpy as _np\n'), ((6144, 6168), 'numpy.random.randint', '_np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (6162, 6168), True, 'import numpy as _np\n'), ((2270, 2287), 'numpy.array', '_np.array', (['[i, k]'], {}), '([i, k])\n', (2279, 2287), True, 'import numpy as _np\n'), ((2296, 2313), 'numpy.array', '_np.array', (['[k, i]'], {}), '([k, i])\n', (2305, 2313), True, 'import numpy as _np\n'), ((5308, 5332), 'numpy.random.randint', '_np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (5326, 5332), True, 'import numpy as _np\n'), ((4690, 4700), 'numpy.eye', '_np.eye', (['(1)'], {}), '(1)\n', (4697, 4700), True, 'import numpy as _np\n'), ((6244, 6270), 'numpy.array_equal', '_np.array_equal', (['bitstr', 'm'], {}), '(bitstr, m)\n', (6259, 6270), True, 'import numpy as _np\n'), ((7901, 7913), 'numpy.eye', '_np.eye', (['ind'], {}), '(ind)\n', (7908, 7913), True, 'import numpy as _np\n')]
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Implements weight equalization as per https://arxiv.org/abs/1906.04721
import logging
import math
from copy import copy
import numpy as np
from graph.types import (ActivationParameters, FilterParameters,
ConvFusionParameters)
from stats.ranges import Ranges
from stats.scales import Scales
LOG = logging.getLogger('nntool.'+__name__)
def process_node(node, last_neuron, group, groups, neurons):
if not node.can_equalize:
group = add_group(group, groups, neurons)
return True, None, group
if isinstance(node, FilterParameters):
last_neuron = add_neuron(node.name, node, last_neuron, neurons, group)
return True, last_neuron, group
if isinstance(node, ActivationParameters) and\
last_neuron is not None and node.activation == 'relu':
assert 'activation' not in last_neuron, "weird 2 activations after conv"
last_neuron['activation'] = node
return True, last_neuron, group
return False, last_neuron, group
def discover_groups(G):
groups = []
group = []
neurons = []
last_neuron = None
for step in G.graph_state.steps:
node = step['node']
# nodes cannot have multiple outputs
if len(G.successors(node.name)) != 1 or len(G.successors(node.name)[0]) != 1:
last_neuron = None
group = add_group(group, groups, neurons)
continue
should_continue, last_neuron, group = process_node(node, last_neuron, group,
groups, neurons)
if should_continue:
continue
if isinstance(node, ConvFusionParameters):
for fnode in node.contained_nodes():
_, last_neuron, group = process_node(fnode, last_neuron, group,
groups, neurons)
if group:
add_group(group, groups, neurons)
return groups, neurons
def add_group(group, groups, neurons):
if group:
LOG.info("Adding group with %d neuron pairs", len(group))
groups.append(group)
neurons.append(group[-1][1])
group = []
return group
def add_neuron(node_name, node, last_neuron, neurons, group):
new_neuron = {'name': node_name, 'node': node,
'weights': None, 'biases': None}
if last_neuron is not None:
neurons.append(last_neuron)
LOG.info("Discovered neuron pair %s -> %s", last_neuron['name'], new_neuron['name'])
group.append((last_neuron, new_neuron))
last_neuron = new_neuron
return last_neuron
def calculate_s(range_1, range_2):
assert len(range_1) == len(range_2)
# note: the paper is wrong. It should be 1/range2 not 1/range1
return [(1/range_2[i]) * math.sqrt(range_1[i] * range_2[i]) for i in range(len(range_1))]
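# --- Worked example (added for illustration; not part of the original module) ---
# For one channel pair with output range r1 = 8.0 in the first layer and input
# range r2 = 0.5 in the following layer:
#     s = (1 / r2) * sqrt(r1 * r2) = 2.0 * 2.0 = 4.0
# Rescaling so that the first layer's output channel shrinks by s and the next
# layer's input channel grows by s leaves the composed mapping unchanged for
# ReLU-like activations, and both ranges become sqrt(r1 * r2) = 2.0
# (r1 / s == r2 * s == 2.0), which is what equalization aims for.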
class QuantizationError(Exception):
pass
def calculate_precisions(step):
nn_0 = step[0]
nn_1 = step[1]
ranges_0, max_0 = Ranges.range_output(nn_0['node'], weights=nn_0['weights'])
ranges_1, max_1 = Ranges.range_input(nn_1['node'], weights=nn_1['weights'])
prec_0 = ranges_0/max_0
prec_1 = ranges_1/max_1
return prec_0, prec_1
def process_group(group, threshold):
total_precision = 0
cycles = 0
# Keep going until we converge
while True:
precisions = []
cycles += 1
if cycles > 50:
raise QuantizationError("Weight scaling has failed to converge")
for step in group:
prec_0, prec_1 = calculate_precisions(step)
precisions.append(np.sum(prec_0 * prec_1))
new_total_precision = sum(precisions)
# end when the precision change drops below threshold
if abs(new_total_precision - total_precision) < threshold:
LOG.info("group has converged under %f after %d cycles", threshold, cycles)
break
total_precision = new_total_precision
# note: traversing in reverse order. Not sure that it makes any difference.
for step in reversed(group):
nn_0 = step[0]
nn_1 = step[1]
            # get the ranges of the output channels of the first layer and the input channels of the following layer
ranges_0, _ = Ranges.range_output(nn_0['node'], weights=nn_0['weights'])
ranges_1, _ = Ranges.range_input(nn_1['node'], weights=nn_1['weights'])
scale = calculate_s(ranges_0, ranges_1)
# now apply the scale to the output and input channels
nn_0['weights'], nn_0['biases'] =\
Scales.scale_output(nn_0['node'], scale, nn_0['weights'], nn_0['biases'])
nn_1['weights'] = Scales.scale_input(nn_1['node'], scale, nn_1['weights'])
def process_groups(groups, threshold=0.01):
for group in groups:
LOG.info("processing group")
process_group(group, float(threshold))
def update_parameters(neurons):
for neuron in neurons:
params = neuron['node']
params.weights = neuron['weights']
if neuron['biases'] is not None:
params.biases = neuron['biases']
def weight_equalization(G, threshold=0.01):
LOG.info("discovering groups")
groups, neurons = discover_groups(G)
if groups and neurons:
LOG.info("found %d groups and %d neurons", len(groups), len(neurons))
process_groups(groups, threshold)
update_parameters(neurons)
G.graph_identity.set_equalized(threshold)
else:
LOG.warning("no groups to equalize found")
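# --- Hypothetical usage sketch (added for illustration; not from the original codebase) ---
# weight_equalization mutates the graph's filter parameters in place, e.g.:
#
#     weight_equalization(G, threshold=0.01)   # G: an nntool graph with equalizable groups
#
# afterwards G.graph_identity records that equalization was applied with that threshold.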
def adjust_biases(G, stats):
for nid, stat in stats.items():
node = nid.get_node(G)
if isinstance(node, FilterParameters):
chan_err = np.array(stat['chan_err'], dtype=np.float32)
if node.has_bias:
node.biases = node.biases - chan_err
else:
node.has_bias = True
node.biases = chan_err * -1
# TODO - set quantization of biases
|
[
"stats.ranges.Ranges.range_output",
"numpy.sum",
"math.sqrt",
"stats.scales.Scales.scale_output",
"numpy.array",
"stats.scales.Scales.scale_input",
"stats.ranges.Ranges.range_input",
"logging.getLogger"
] |
[((1034, 1073), 'logging.getLogger', 'logging.getLogger', (["('nntool.' + __name__)"], {}), "('nntool.' + __name__)\n", (1051, 1073), False, 'import logging\n'), ((3704, 3762), 'stats.ranges.Ranges.range_output', 'Ranges.range_output', (["nn_0['node']"], {'weights': "nn_0['weights']"}), "(nn_0['node'], weights=nn_0['weights'])\n", (3723, 3762), False, 'from stats.ranges import Ranges\n'), ((3785, 3842), 'stats.ranges.Ranges.range_input', 'Ranges.range_input', (["nn_1['node']"], {'weights': "nn_1['weights']"}), "(nn_1['node'], weights=nn_1['weights'])\n", (3803, 3842), False, 'from stats.ranges import Ranges\n'), ((3498, 3532), 'math.sqrt', 'math.sqrt', (['(range_1[i] * range_2[i])'], {}), '(range_1[i] * range_2[i])\n', (3507, 3532), False, 'import math\n'), ((4960, 5018), 'stats.ranges.Ranges.range_output', 'Ranges.range_output', (["nn_0['node']"], {'weights': "nn_0['weights']"}), "(nn_0['node'], weights=nn_0['weights'])\n", (4979, 5018), False, 'from stats.ranges import Ranges\n'), ((5045, 5102), 'stats.ranges.Ranges.range_input', 'Ranges.range_input', (["nn_1['node']"], {'weights': "nn_1['weights']"}), "(nn_1['node'], weights=nn_1['weights'])\n", (5063, 5102), False, 'from stats.ranges import Ranges\n'), ((5285, 5358), 'stats.scales.Scales.scale_output', 'Scales.scale_output', (["nn_0['node']", 'scale', "nn_0['weights']", "nn_0['biases']"], {}), "(nn_0['node'], scale, nn_0['weights'], nn_0['biases'])\n", (5304, 5358), False, 'from stats.scales import Scales\n'), ((5389, 5445), 'stats.scales.Scales.scale_input', 'Scales.scale_input', (["nn_1['node']", 'scale', "nn_1['weights']"], {}), "(nn_1['node'], scale, nn_1['weights'])\n", (5407, 5445), False, 'from stats.scales import Scales\n'), ((6405, 6449), 'numpy.array', 'np.array', (["stat['chan_err']"], {'dtype': 'np.float32'}), "(stat['chan_err'], dtype=np.float32)\n", (6413, 6449), True, 'import numpy as np\n'), ((4312, 4335), 'numpy.sum', 'np.sum', (['(prec_0 * prec_1)'], {}), '(prec_0 * prec_1)\n', (4318, 4335), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
An extension of the pystruct OneSlackSSVM module to have a fit_with_valid
method on it
Copyright Xerox(C) 2016 <NAME>
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from time import time
import numpy as np
import cvxopt.solvers
from pystruct.learners import OneSlackSSVM as Pystruct_OneSlackSSVM
from pystruct.learners.one_slack_ssvm import NoConstraint
class OneSlackSSVM(Pystruct_OneSlackSSVM):
"""
Same as its parent with an additional method: fit_with_valid
"""
def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False,
verbose=0, negativity_constraint=None, n_jobs=1,
break_on_bad=False, show_loss_every=0, tol=1e-3,
inference_cache=0, inactive_threshold=1e-5,
inactive_window=50, logger=None, cache_tol='auto',
switch_to=None):
Pystruct_OneSlackSSVM.__init__(self, model, max_iter=max_iter, C=C, check_constraints=check_constraints,
verbose=verbose, negativity_constraint=negativity_constraint, n_jobs=n_jobs,
break_on_bad=break_on_bad, show_loss_every=show_loss_every, tol=tol,
inference_cache=inference_cache, inactive_threshold=inactive_threshold,
inactive_window=inactive_window, logger=logger, cache_tol=cache_tol,
switch_to=switch_to)
def fit_with_valid(self, X, Y, lX_vld, lY_vld, constraints=None
, warm_start=False, initialize=True
, valid_every=50):
"""Learn parameters using cutting plane method.
Parameters
----------
X : iterable
            Training instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
            Training labels. Contains the structured labels for inputs in X.
Needs to have the same length as X.
        lX_vld, lY_vld : iterables
            Validation inputs and labels, with the same structure as X and Y.
        constraints : ignored
warm_start : bool, default=False
Whether we are warmstarting from a previous fit.
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this true except if you really know what you are doing.
        valid_every : integer. Check against the validation set every valid_every iterations and keep the best model
"""
best_iteration = -1
try:
self._fit_valid_best_score
print("score of best model: %.6f"%self._fit_valid_best_score)
except:
self._fit_valid_best_score = -99999
if self.verbose:
print("Training 1-slack dual structural SVM")
cvxopt.solvers.options['show_progress'] = self.verbose > 3
if initialize:
self.model.initialize(X, Y)
# parse cache_tol parameter
if self.cache_tol is None or self.cache_tol == 'auto':
self.cache_tol_ = self.tol
else:
self.cache_tol_ = self.cache_tol
if not warm_start:
self.w = np.zeros(self.model.size_joint_feature)
constraints = []
self.objective_curve_, self.primal_objective_curve_ = [], []
self.cached_constraint_ = []
self.alphas = [] # dual solutions
# append constraint given by ground truth to make our life easier
constraints.append((np.zeros(self.model.size_joint_feature), 0))
self.alphas.append([self.C])
self.inference_cache_ = None
self.timestamps_ = [time()]
elif warm_start == "soft":
self.w = np.zeros(self.model.size_joint_feature)
constraints = []
self.alphas = [] # dual solutions
# append constraint given by ground truth to make our life easier
constraints.append((np.zeros(self.model.size_joint_feature), 0))
self.alphas.append([self.C])
else:
constraints = self.constraints_
self.last_slack_ = -1
# get the joint_feature of the ground truth
if getattr(self.model, 'rescale_C', False):
joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
else:
joint_feature_gt = self.model.batch_joint_feature(X, Y)
try:
# catch ctrl+c to stop training
for iteration in range(self.max_iter):
# main loop
cached_constraint = False
if self.verbose > 0:
print("----- %d -----"%iteration)
if self.verbose > 2:
print(self)
try:
Y_hat, djoint_feature, loss_mean = self._constraint_from_cache(
X, Y, joint_feature_gt, constraints)
cached_constraint = True
except NoConstraint:
try:
Y_hat, djoint_feature, loss_mean = self._find_new_constraint(
X, Y, joint_feature_gt, constraints)
self._update_cache(X, Y, Y_hat)
except NoConstraint:
if self.verbose:
print("no additional constraints")
if (self.switch_to is not None
and self.model.inference_method !=
self.switch_to):
if self.verbose:
print(("Switching to %s inference" %
str(self.switch_to)))
self.model.inference_method_ = \
self.model.inference_method
self.model.inference_method = self.switch_to
continue
else:
break
self.timestamps_.append(time() - self.timestamps_[0])
self._compute_training_loss(X, Y, iteration)
constraints.append((djoint_feature, loss_mean))
# compute primal objective
last_slack = -np.dot(self.w, djoint_feature) + loss_mean
primal_objective = (self.C * len(X)
* max(last_slack, 0)
+ np.sum(self.w ** 2) / 2)
self.primal_objective_curve_.append(primal_objective)
self.cached_constraint_.append(cached_constraint)
objective = self._solve_1_slack_qp(constraints,
n_samples=len(X))
# update cache tolerance if cache_tol is auto:
if self.cache_tol == "auto" and not cached_constraint:
self.cache_tol_ = (primal_objective - objective) / 4
self.last_slack_ = np.max([(-np.dot(self.w, djoint_feature) + loss_mean)
for djoint_feature, loss_mean in constraints])
self.last_slack_ = max(self.last_slack_, 0)
if self.verbose > 0:
# the cutting plane objective can also be computed as
# self.C * len(X) * self.last_slack_ + np.sum(self.w**2)/2
print(("cutting plane objective: %f, primal objective %f"
% (objective, primal_objective)))
# we only do this here because we didn't add the gt to the
# constraints, which makes the dual behave a bit oddly
self.objective_curve_.append(objective)
self.constraints_ = constraints
if self.logger is not None:
if iteration % valid_every == 0:
cur_score = self.score(lX_vld, lY_vld)
#print(self._fit_valid_best_score, cur_score)
if cur_score > self._fit_valid_best_score:
best_iteration = iteration
self._fit_valid_best_score = cur_score
self.logger(self, 'final')
if self.verbose > 0: print("Current model is best with validation score=%.6f" % self._fit_valid_best_score)
else:
# we save the last model, even if it is not the best, in case of warm start
self.logger.save(self, self.logger.file_name + "._last_")
print("Current validation score=%.6f (best=%.6f at iteration %d)" % (cur_score, self._fit_valid_best_score, best_iteration))
if self.verbose > 5:
print((self.w))
except KeyboardInterrupt:
pass
if self.verbose and self.n_jobs == 1:
print(("calls to inference: %d" % self.model.inference_calls))
# compute final objective:
self.timestamps_.append(time() - self.timestamps_[0])
primal_objective = self._objective(X, Y)
self.primal_objective_curve_.append(primal_objective)
self.objective_curve_.append(objective)
self.cached_constraint_.append(False)
if self.logger is not None:
cur_score = self.score(lX_vld, lY_vld)
# print("finished ", self._fit_valid_best_score, cur_score)
if cur_score > self._fit_valid_best_score:
self._fit_valid_best_score = cur_score
best_iteration = iteration
self.logger(self, 'final')
if self.verbose > 0: print("Best model saved at iteration %d: validation score=%.6f" % (best_iteration, self._fit_valid_best_score))
if self.verbose > 0:
print(("final primal objective: %f gap: %f (validation score: %.6f)"
% (primal_objective, primal_objective - objective, cur_score)))
return self
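# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# fit_with_valid behaves like pystruct's OneSlackSSVM.fit but periodically scores a
# held-out split and asks the logger to persist the best model, e.g.:
#
#     from pystruct.models import ChainCRF
#     from pystruct.utils import SaveLogger
#
#     ssvm = OneSlackSSVM(ChainCRF(), C=0.1, tol=0.01,
#                         logger=SaveLogger("best_model.pkl"))
#     ssvm.fit_with_valid(X_train, Y_train, X_valid, Y_valid, valid_every=10)
#
# X_*/Y_* are structured pystruct datasets; the names here are placeholders.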
|
[
"numpy.sum",
"numpy.zeros",
"time.time",
"pystruct.learners.OneSlackSSVM.__init__",
"numpy.dot"
] |
[((1079, 1515), 'pystruct.learners.OneSlackSSVM.__init__', 'Pystruct_OneSlackSSVM.__init__', (['self', 'model'], {'max_iter': 'max_iter', 'C': 'C', 'check_constraints': 'check_constraints', 'verbose': 'verbose', 'negativity_constraint': 'negativity_constraint', 'n_jobs': 'n_jobs', 'break_on_bad': 'break_on_bad', 'show_loss_every': 'show_loss_every', 'tol': 'tol', 'inference_cache': 'inference_cache', 'inactive_threshold': 'inactive_threshold', 'inactive_window': 'inactive_window', 'logger': 'logger', 'cache_tol': 'cache_tol', 'switch_to': 'switch_to'}), '(self, model, max_iter=max_iter, C=C,\n check_constraints=check_constraints, verbose=verbose,\n negativity_constraint=negativity_constraint, n_jobs=n_jobs,\n break_on_bad=break_on_bad, show_loss_every=show_loss_every, tol=tol,\n inference_cache=inference_cache, inactive_threshold=inactive_threshold,\n inactive_window=inactive_window, logger=logger, cache_tol=cache_tol,\n switch_to=switch_to)\n', (1109, 1515), True, 'from pystruct.learners import OneSlackSSVM as Pystruct_OneSlackSSVM\n'), ((3373, 3412), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (3381, 3412), True, 'import numpy as np\n'), ((3872, 3878), 'time.time', 'time', ([], {}), '()\n', (3876, 3878), False, 'from time import time\n'), ((3936, 3975), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (3944, 3975), True, 'import numpy as np\n'), ((9334, 9340), 'time.time', 'time', ([], {}), '()\n', (9338, 9340), False, 'from time import time\n'), ((3713, 3752), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (3721, 3752), True, 'import numpy as np\n'), ((4162, 4201), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (4170, 4201), True, 'import numpy as np\n'), ((6241, 6247), 'time.time', 'time', ([], {}), '()\n', (6245, 6247), False, 'from time import time\n'), ((6470, 6500), 'numpy.dot', 'np.dot', (['self.w', 'djoint_feature'], {}), '(self.w, djoint_feature)\n', (6476, 6500), True, 'import numpy as np\n'), ((6660, 6679), 'numpy.sum', 'np.sum', (['(self.w ** 2)'], {}), '(self.w ** 2)\n', (6666, 6679), True, 'import numpy as np\n'), ((7209, 7239), 'numpy.dot', 'np.dot', (['self.w', 'djoint_feature'], {}), '(self.w, djoint_feature)\n', (7215, 7239), True, 'import numpy as np\n')]
|
__copyright__ = "Copyright (C) 2020 <NAME>"
__license__ = """
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import sys
import mlir.run as mlirrun
import pytest
from pytools.prefork import ExecError
def is_mlir_opt_present():
try:
mlirrun.get_mlir_opt_version()
return True
except ExecError:
return False
@pytest.mark.skipif(not is_mlir_opt_present(), reason="mlir-opt not found")
def test_add():
source = """
#identity = affine_map<(i,j) -> (i,j)>
#attrs = {
indexing_maps = [#identity, #identity, #identity],
iterator_types = ["parallel", "parallel"]
}
func @example(%A: memref<?x?xf64>, %B: memref<?x?xf64>, %C: memref<?x?xf64>) {
linalg.generic #attrs ins(%A, %B: memref<?x?xf64>, memref<?x?xf64>) outs(%C: memref<?x?xf64>) {
^bb0(%a: f64, %b: f64, %c: f64):
%d = addf %a, %b : f64
linalg.yield %d : f64
}
return
}"""
source = mlirrun.mlir_opt(source, ["-convert-linalg-to-loops",
"-convert-scf-to-std"])
a = np.random.rand(10, 10)
b = np.random.rand(10, 10)
c = np.empty_like(a)
mlirrun.call_function(source, "example", [a, b, c])
np.testing.assert_allclose(c, a+b)
@pytest.mark.skipif(not is_mlir_opt_present(), reason="mlir-opt not found")
def test_axpy():
source = """
func @saxpy(%a : f32, %x : memref<?xf32>, %y : memref<?xf32>) {
%c0 = constant 0: index
%n = dim %x, %c0 : memref<?xf32>
affine.for %i = 0 to %n {
%xi = affine.load %x[%i] : memref<?xf32>
%axi = mulf %a, %xi : f32
%yi = affine.load %y[%i] : memref<?xf32>
%axpyi = addf %yi, %axi : f32
affine.store %axpyi, %y[%i] : memref<?xf32>
}
return
}"""
source = mlirrun.mlir_opt(source, ["-lower-affine",
"-convert-scf-to-std"])
alpha = np.float32(np.random.rand())
x_in = np.random.rand(10).astype(np.float32)
y_in = np.random.rand(10).astype(np.float32)
y_out = y_in.copy()
mlirrun.call_function(source, "saxpy", [alpha, x_in, y_out])
np.testing.assert_allclose(y_out, alpha*x_in+y_in)
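@pytest.mark.skipif(not is_mlir_opt_present(), reason="mlir-opt not found")
def test_mul():
    # Added illustration (not part of the original test suite): the same lowering
    # pipeline as test_add, with `addf` swapped for `mulf`, should give elementwise
    # multiplication; the MLIR below assumes the same pre-`arith` std dialect as above.
    source = """
    #identity = affine_map<(i,j) -> (i,j)>
    #attrs = {
      indexing_maps = [#identity, #identity, #identity],
      iterator_types = ["parallel", "parallel"]
    }
    func @mul_example(%A: memref<?x?xf64>, %B: memref<?x?xf64>, %C: memref<?x?xf64>) {
      linalg.generic #attrs ins(%A, %B: memref<?x?xf64>, memref<?x?xf64>) outs(%C: memref<?x?xf64>) {
        ^bb0(%a: f64, %b: f64, %c: f64):
          %d = mulf %a, %b : f64
          linalg.yield %d : f64
      }
      return
    }"""
    source = mlirrun.mlir_opt(source, ["-convert-linalg-to-loops",
                                      "-convert-scf-to-std"])

    a = np.random.rand(10, 10)
    b = np.random.rand(10, 10)
    c = np.empty_like(a)

    mlirrun.call_function(source, "mul_example", [a, b, c])

    np.testing.assert_allclose(c, a * b)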
if __name__ == "__main__":
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
|
[
"mlir.run.call_function",
"numpy.empty_like",
"mlir.run.mlir_opt",
"pytest.main",
"mlir.run.get_mlir_opt_version",
"numpy.random.rand",
"numpy.testing.assert_allclose"
] |
[((2362, 2439), 'mlir.run.mlir_opt', 'mlirrun.mlir_opt', (['source', "['-convert-linalg-to-loops', '-convert-scf-to-std']"], {}), "(source, ['-convert-linalg-to-loops', '-convert-scf-to-std'])\n", (2378, 2439), True, 'import mlir.run as mlirrun\n'), ((2487, 2509), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (2501, 2509), True, 'import numpy as np\n'), ((2518, 2540), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (2532, 2540), True, 'import numpy as np\n'), ((2549, 2565), 'numpy.empty_like', 'np.empty_like', (['a'], {}), '(a)\n', (2562, 2565), True, 'import numpy as np\n'), ((2571, 2622), 'mlir.run.call_function', 'mlirrun.call_function', (['source', '"""example"""', '[a, b, c]'], {}), "(source, 'example', [a, b, c])\n", (2592, 2622), True, 'import mlir.run as mlirrun\n'), ((2628, 2664), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['c', '(a + b)'], {}), '(c, a + b)\n', (2654, 2664), True, 'import numpy as np\n'), ((3164, 3230), 'mlir.run.mlir_opt', 'mlirrun.mlir_opt', (['source', "['-lower-affine', '-convert-scf-to-std']"], {}), "(source, ['-lower-affine', '-convert-scf-to-std'])\n", (3180, 3230), True, 'import mlir.run as mlirrun\n'), ((3438, 3498), 'mlir.run.call_function', 'mlirrun.call_function', (['source', '"""saxpy"""', '[alpha, x_in, y_out]'], {}), "(source, 'saxpy', [alpha, x_in, y_out])\n", (3459, 3498), True, 'import mlir.run as mlirrun\n'), ((3504, 3558), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_out', '(alpha * x_in + y_in)'], {}), '(y_out, alpha * x_in + y_in)\n', (3530, 3558), True, 'import numpy as np\n'), ((1659, 1689), 'mlir.run.get_mlir_opt_version', 'mlirrun.get_mlir_opt_version', ([], {}), '()\n', (1687, 1689), True, 'import mlir.run as mlirrun\n'), ((3293, 3309), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3307, 3309), True, 'import numpy as np\n'), ((3686, 3702), 'pytest.main', 'main', (['[__file__]'], {}), '([__file__])\n', (3690, 3702), False, 'from pytest import main\n'), ((3322, 3340), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (3336, 3340), True, 'import numpy as np\n'), ((3371, 3389), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (3385, 3389), True, 'import numpy as np\n')]
|
import random
import numpy as np
import scipy.stats as sps
import torch
import torch.utils.data as tud
import torch.nn.utils as tnnu
import models.dataset as md
import utils.tensorboard as utb
import utils.scaffold as usc
class Action:
def __init__(self, logger=None):
"""
(Abstract) Initializes an action.
:param logger: An optional logger instance.
"""
self.logger = logger
def _log(self, level, msg, *args):
"""
Logs a message with the class logger.
:param level: Log level.
:param msg: Message to log.
        :param *args: Arguments to interpolate into the message.
:return:
"""
if self.logger:
getattr(self.logger, level)(msg, *args)
class TrainModelPostEpochHook(Action):
def __init__(self, logger=None):
"""
Initializes a training hook that runs after every epoch.
This hook enables to save the model, change LR, etc. during training.
:return:
"""
Action.__init__(self, logger)
def run(self, model, training_set, epoch): # pylint: disable=unused-argument
"""
Performs the post-epoch hook. Notice that model should be modified in-place.
:param model: Model instance trained up to that epoch.
:param training_set: List of SMILES used as the training set.
:param epoch: Epoch number (for logging purposes).
:return: Boolean that indicates whether the training should continue or not.
"""
return True # simply does nothing...
class TrainModel(Action):
def __init__(self, model, optimizer, training_sets, batch_size, clip_gradient,
epochs, post_epoch_hook=None, logger=None):
"""
Initializes the training of an epoch.
: param model: A model instance, not loaded in sampling mode.
: param optimizer: The optimizer instance already initialized on the model.
: param training_sets: An iterator with all the training sets (scaffold, decoration) pairs.
: param batch_size: Batch size to use.
: param clip_gradient: Clip the gradients after each backpropagation.
: return:
"""
Action.__init__(self, logger)
self.model = model
self.optimizer = optimizer
self.training_sets = training_sets
self.batch_size = batch_size
self.epochs = epochs
self.clip_gradient = clip_gradient
if not post_epoch_hook:
self.post_epoch_hook = TrainModelPostEpochHook(logger=self.logger)
else:
self.post_epoch_hook = post_epoch_hook
def run(self):
"""
Performs a training epoch with the parameters used in the constructor.
:return: An iterator of (total_batches, epoch_iterator), where the epoch iterator
returns the loss function at each batch in the epoch.
"""
for epoch, training_set in zip(range(1, self.epochs + 1), self.training_sets):
dataloader = self._initialize_dataloader(training_set)
epoch_iterator = self._epoch_iterator(dataloader)
yield len(dataloader), epoch_iterator
self.model.set_mode("eval")
post_epoch_status = self.post_epoch_hook.run(self.model, training_set, epoch)
self.model.set_mode("train")
if not post_epoch_status:
break
def _epoch_iterator(self, dataloader):
for scaffold_batch, decorator_batch in dataloader:
loss = self.model.likelihood(*scaffold_batch, *decorator_batch).mean()
self.optimizer.zero_grad()
loss.backward()
if self.clip_gradient > 0:
tnnu.clip_grad_norm_(self.model.network.parameters(), self.clip_gradient)
self.optimizer.step()
yield loss
def _initialize_dataloader(self, training_set):
dataset = md.DecoratorDataset(training_set, vocabulary=self.model.vocabulary)
return tud.DataLoader(dataset, batch_size=self.batch_size, shuffle=True,
collate_fn=md.DecoratorDataset.collate_fn, drop_last=True)
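# --- Hypothetical usage sketch (added for illustration; names are placeholders) ---
# TrainModel.run() yields one (num_batches, batch_loss_iterator) pair per epoch;
# draining the inner iterator performs the optimization steps, e.g.:
#
#     trainer = TrainModel(model, optimizer, training_sets, batch_size=128,
#                          clip_gradient=1.0, epochs=10)
#     for num_batches, epoch_iterator in trainer.run():
#         for loss in epoch_iterator:
#             pass  # backward() and optimizer.step() already ran inside the iterator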
class CollectStatsFromModel(Action):
"""Collects stats from an existing RNN model."""
def __init__(self, model, epoch, training_set, validation_set, writer, sample_size,
decoration_type="single", with_weights=False, other_values=None, logger=None):
"""
Creates an instance of CollectStatsFromModel.
: param model: A model instance initialized as sampling_mode.
: param epoch: Epoch number to be sampled(informative purposes).
: param training_set: Iterator with the training set.
: param validation_set: Iterator with the validation set.
: param writer: Writer object(Tensorboard writer).
: param other_values: Other values to save for the epoch.
: param sample_size: Number of molecules to sample from the training / validation / sample set.
: param decoration_type: Kind of decorations (single or all).
: param with_weights: To calculate or not the weights.
: return:
"""
Action.__init__(self, logger)
self.model = model
self.epoch = epoch
self.sample_size = sample_size
self.training_set = training_set
self.validation_set = validation_set
self.writer = writer
self.other_values = other_values
self.decoration_type = decoration_type
self.with_weights = with_weights
self.sample_size = max(sample_size, 1)
self.data = {}
self._calc_nlls_action = CalculateNLLsFromModel(self.model, 128, self.logger)
self._sample_model_action = SampleModel(self.model, 128, self.logger)
@torch.no_grad()
def run(self):
"""
Collects stats for a specific model object, epoch, validation set, training set and writer object.
: return: A dictionary with all the data saved for that given epoch.
"""
self._log("info", "Collecting data for epoch %s", self.epoch)
self.data = {}
self._log("debug", "Slicing training and validation sets")
sliced_training_set = list(random.sample(self.training_set, self.sample_size))
sliced_validation_set = list(random.sample(self.validation_set, self.sample_size))
self._log("debug", "Sampling decorations for both sets")
sampled_training_mols, sampled_training_nlls = self._sample_decorations(next(zip(*sliced_training_set)))
sampled_validation_mols, sampled_validation_nlls = self._sample_decorations(next(zip(*sliced_validation_set)))
self._log("debug", "Calculating NLLs for the validation and training sets")
training_nlls = np.array(list(self._calc_nlls_action.run(sliced_training_set)))
validation_nlls = np.array(list(self._calc_nlls_action.run(sliced_validation_set)))
if self.with_weights:
self._log("debug", "Calculating weight stats")
self._weight_stats()
self._log("debug", "Calculating nll stats")
self._nll_stats(sampled_training_nlls, sampled_validation_nlls, training_nlls, validation_nlls)
self._log("debug", "Calculating validity stats")
self._valid_stats(sampled_training_mols, "training")
self._valid_stats(sampled_validation_mols, "validation")
self._log("debug", "Drawing some molecules")
self._draw_mols(sampled_training_mols, "training")
self._draw_mols(sampled_validation_mols, "validation")
if self.other_values:
self._log("debug", "Adding other values")
for name, val in self.other_values.items():
self._add_scalar(name, val)
return self.data
def _sample_decorations(self, scaffold_list):
mols = []
nlls = []
for scaff, decoration, nll in self._sample_model_action.run(scaffold_list):
if self.decoration_type == "single":
mol = usc.join_first_attachment(scaff, decoration)
elif self.decoration_type == "all":
mol = usc.join_joined_attachments(scaff, decoration)
if mol:
mols.append(mol)
nlls.append(nll)
return (mols, np.array(nlls))
def _valid_stats(self, mols, name):
self._add_scalar("valid_{}".format(name), 100.0*len(mols)/self.sample_size)
def _weight_stats(self):
for name, weights in self.model.network.named_parameters():
self._add_histogram("weights/{}".format(name), weights.clone().cpu().data.numpy())
def _nll_stats(self, sampled_training_nlls, sampled_validation_nlls, training_nlls, validation_nlls):
self._add_histogram("nll_plot/sampled_training", sampled_training_nlls)
self._add_histogram("nll_plot/sampled_validation", sampled_validation_nlls)
self._add_histogram("nll_plot/validation", validation_nlls)
self._add_histogram("nll_plot/training", training_nlls)
self._add_scalars("nll/avg", {
"sampled_training": sampled_training_nlls.mean(),
"sampled_validation": sampled_validation_nlls.mean(),
"validation": validation_nlls.mean(),
"training": training_nlls.mean()
})
self._add_scalars("nll/var", {
"sampled_training": sampled_training_nlls.var(),
"sampled_validation": sampled_validation_nlls.var(),
"validation": validation_nlls.var(),
"training": training_nlls.var()
})
def bin_dist(dist, bins=1000, dist_range=(0, 100)):
bins = np.histogram(dist, bins=bins, range=dist_range, density=False)[0]
bins[bins == 0] = 1
return bins / bins.sum()
        def jsd(dists, binned=False):  # note: the dists may or may not already be binned
# get the min size of each dist
min_size = min(len(dist) for dist in dists)
dists = [dist[:min_size] for dist in dists]
if binned:
dists = [bin_dist(dist) for dist in dists]
num_dists = len(dists)
avg_dist = np.sum(dists, axis=0) / num_dists
return np.sum([sps.entropy(dist, avg_dist) for dist in dists]) / num_dists
self._add_scalar("nll_plot/jsd_joined_bins",
jsd([sampled_training_nlls, sampled_validation_nlls,
training_nlls, validation_nlls], binned=True))
self._add_scalar("nll_plot/jsd_joined_no_bins",
jsd([sampled_training_nlls, sampled_validation_nlls,
training_nlls, validation_nlls]))
def _draw_mols(self, mols, name):
try:
utb.add_mols(self.writer, "molecules_{}".format(name), random.sample(
mols, 16), mols_per_row=4, global_step=self.epoch)
except ValueError:
pass
def _add_scalar(self, key, val):
self.data[key] = val
self.writer.add_scalar(key, val, self.epoch)
def _add_scalars(self, key, dict_vals):
for k, val in dict_vals.items():
self.data["{}.{}".format(key, k)] = val
self.writer.add_scalars(key, dict_vals, self.epoch)
def _add_histogram(self, key, vals):
self.data[key] = vals
self.writer.add_histogram(key, vals, self.epoch)
class SampleModel(Action):
def __init__(self, model, batch_size, logger=None):
"""
Creates an instance of SampleModel.
:params model: A model instance (better in sampling mode).
:params batch_size: Batch size to use.
:return:
"""
Action.__init__(self, logger)
self.model = model
self.batch_size = batch_size
def run(self, scaffold_list):
"""
        Samples decorations from the model for the given scaffold SMILES.
:params scaffold_list: A list of scaffold SMILES.
:return: An iterator with each of the batches sampled in (scaffold, decoration, nll) triplets.
"""
dataset = md.Dataset(scaffold_list, self.model.vocabulary.scaffold_vocabulary,
self.model.vocabulary.scaffold_tokenizer)
dataloader = tud.DataLoader(dataset, batch_size=self.batch_size,
shuffle=False, collate_fn=md.Dataset.collate_fn)
for batch in dataloader:
for scaff, dec, nll in self.model.sample_decorations(*batch):
yield scaff, dec, nll
class CalculateNLLsFromModel(Action):
def __init__(self, model, batch_size, logger=None):
"""
Creates an instance of CalculateNLLsFromModel.
:param model: A model instance.
:param batch_size: Batch size to use.
:return:
"""
Action.__init__(self, logger)
self.model = model
self.batch_size = batch_size
def run(self, scaffold_decoration_list):
"""
Calculates the NLL for a set of SMILES strings.
:param scaffold_decoration_list: List with pairs of (scaffold, decoration) SMILES.
:return: An iterator with each NLLs in the same order as the list.
"""
dataset = md.DecoratorDataset(scaffold_decoration_list, self.model.vocabulary)
dataloader = tud.DataLoader(dataset, batch_size=self.batch_size, collate_fn=md.DecoratorDataset.collate_fn,
shuffle=False)
for scaffold_batch, decorator_batch in dataloader:
for nll in self.model.likelihood(*scaffold_batch, *decorator_batch).data.cpu().numpy():
yield nll
|
[
"numpy.sum",
"torch.utils.data.DataLoader",
"models.dataset.Dataset",
"random.sample",
"utils.scaffold.join_joined_attachments",
"scipy.stats.entropy",
"numpy.histogram",
"models.dataset.DecoratorDataset",
"numpy.array",
"utils.scaffold.join_first_attachment",
"torch.no_grad"
] |
[((5789, 5804), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5802, 5804), False, 'import torch\n'), ((3927, 3994), 'models.dataset.DecoratorDataset', 'md.DecoratorDataset', (['training_set'], {'vocabulary': 'self.model.vocabulary'}), '(training_set, vocabulary=self.model.vocabulary)\n', (3946, 3994), True, 'import models.dataset as md\n'), ((4010, 4138), 'torch.utils.data.DataLoader', 'tud.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'collate_fn': 'md.DecoratorDataset.collate_fn', 'drop_last': '(True)'}), '(dataset, batch_size=self.batch_size, shuffle=True,\n collate_fn=md.DecoratorDataset.collate_fn, drop_last=True)\n', (4024, 4138), True, 'import torch.utils.data as tud\n'), ((12077, 12192), 'models.dataset.Dataset', 'md.Dataset', (['scaffold_list', 'self.model.vocabulary.scaffold_vocabulary', 'self.model.vocabulary.scaffold_tokenizer'], {}), '(scaffold_list, self.model.vocabulary.scaffold_vocabulary, self.\n model.vocabulary.scaffold_tokenizer)\n', (12087, 12192), True, 'import models.dataset as md\n'), ((12238, 12342), 'torch.utils.data.DataLoader', 'tud.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'collate_fn': 'md.Dataset.collate_fn'}), '(dataset, batch_size=self.batch_size, shuffle=False,\n collate_fn=md.Dataset.collate_fn)\n', (12252, 12342), True, 'import torch.utils.data as tud\n'), ((13211, 13279), 'models.dataset.DecoratorDataset', 'md.DecoratorDataset', (['scaffold_decoration_list', 'self.model.vocabulary'], {}), '(scaffold_decoration_list, self.model.vocabulary)\n', (13230, 13279), True, 'import models.dataset as md\n'), ((13301, 13415), 'torch.utils.data.DataLoader', 'tud.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'collate_fn': 'md.DecoratorDataset.collate_fn', 'shuffle': '(False)'}), '(dataset, batch_size=self.batch_size, collate_fn=md.\n DecoratorDataset.collate_fn, shuffle=False)\n', (13315, 13415), True, 'import torch.utils.data as tud\n'), ((6228, 6278), 'random.sample', 'random.sample', (['self.training_set', 'self.sample_size'], {}), '(self.training_set, self.sample_size)\n', (6241, 6278), False, 'import random\n'), ((6317, 6369), 'random.sample', 'random.sample', (['self.validation_set', 'self.sample_size'], {}), '(self.validation_set, self.sample_size)\n', (6330, 6369), False, 'import random\n'), ((8293, 8307), 'numpy.array', 'np.array', (['nlls'], {}), '(nlls)\n', (8301, 8307), True, 'import numpy as np\n'), ((8027, 8071), 'utils.scaffold.join_first_attachment', 'usc.join_first_attachment', (['scaff', 'decoration'], {}), '(scaff, decoration)\n', (8052, 8071), True, 'import utils.scaffold as usc\n'), ((9654, 9716), 'numpy.histogram', 'np.histogram', (['dist'], {'bins': 'bins', 'range': 'dist_range', 'density': '(False)'}), '(dist, bins=bins, range=dist_range, density=False)\n', (9666, 9716), True, 'import numpy as np\n'), ((10173, 10194), 'numpy.sum', 'np.sum', (['dists'], {'axis': '(0)'}), '(dists, axis=0)\n', (10179, 10194), True, 'import numpy as np\n'), ((10821, 10844), 'random.sample', 'random.sample', (['mols', '(16)'], {}), '(mols, 16)\n', (10834, 10844), False, 'import random\n'), ((8142, 8188), 'utils.scaffold.join_joined_attachments', 'usc.join_joined_attachments', (['scaff', 'decoration'], {}), '(scaff, decoration)\n', (8169, 8188), True, 'import utils.scaffold as usc\n'), ((10234, 10261), 'scipy.stats.entropy', 'sps.entropy', (['dist', 'avg_dist'], {}), '(dist, avg_dist)\n', (10245, 10261), True, 'import scipy.stats as sps\n')]
|
import onnx
from onnx import numpy_helper
import numpy as np
# Filter
sobel = {
3: np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]], dtype='float32'),
5: np.array([[2, 1, 0, -1, -2],
[3, 2, 0, -2, -3],
[4, 3, 0, -3, -4],
[3, 2, 0, -2, -3],
[2, 1, 0, -1, -2]], dtype='float32'),
7: np.array([[3, 2, 1, 0, -1, -2, -3],
[4, 3, 2, 0, -2, -3, -4],
[5, 4, 3, 0, -3, -4, -5],
[6, 5, 4, 0, -4, -5, -6],
[5, 4, 3, 0, -3, -4, -5],
[4, 3, 2, 0, -2, -3, -4],
[3, 2, 1, 0, -1, -2, -3]], dtype='float32'),
9: np.array([[4, 3, 2, 1, 0, -1, -2, -3, -4],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[8, 7, 6, 5, 0, -5, -6, -7, -8],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[4, 3, 2, 1, 0, -1, -2, -3, -4]], dtype='float32')
}
def get_output_shape(i):
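    # Spatial size of a valid convolution of an i x i kernel over the 2048 x 2048 input: 2048 - i + 1.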
if i == 3:
return [1, 1, 2046, 2046]
elif i == 5:
return [1, 1, 2044, 2044]
elif i == 7:
return [1, 1, 2042, 2042]
elif i == 9:
return [1, 1, 2040, 2040]
def main():
for i in range(3, 10, 2):
# Filter
w = sobel[i].reshape((1, 1, i, i))
# Input
x = np.random.rand(1, 1, 2048, 2048).astype('float32')
# Initializer of the weight
initializer_w = numpy_helper.from_array(w, 'w')
tensor_w = onnx.helper.make_tensor_value_info('w', onnx.TensorProto.FLOAT, [1, 1, i, i])
tensor_x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [1, 1, 2048, 2048])
tensor_y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, get_output_shape(i))
# Create a node
node_def = onnx.helper.make_node(
'Conv',
inputs=['x', 'w'],
outputs=['y'],
kernel_shape=[i, i]
)
# Create the graph
graph_def = onnx.helper.make_graph(
[node_def],
f'conv_{i}x{i}',
[tensor_x],
[tensor_y],
[initializer_w]
)
# Create the model
model_def = onnx.helper.make_model(graph_def,
producer_name='python_script',
ir_version=6
)
model_def.opset_import[0].version = 10
# Check the model
onnx.checker.check_model(model_def)
# Save the model
onnx.save(model_def, f'conv_{i}x{i}.onnx')
if __name__ == "__main__":
main()
|
[
"onnx.helper.make_node",
"onnx.numpy_helper.from_array",
"onnx.save",
"onnx.helper.make_model",
"onnx.helper.make_tensor_value_info",
"numpy.array",
"numpy.random.rand",
"onnx.checker.check_model",
"onnx.helper.make_graph"
] |
[((86, 149), 'numpy.array', 'np.array', (['[[1, 0, -1], [2, 0, -2], [1, 0, -1]]'], {'dtype': '"""float32"""'}), "([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype='float32')\n", (94, 149), True, 'import numpy as np\n'), ((164, 290), 'numpy.array', 'np.array', (['[[2, 1, 0, -1, -2], [3, 2, 0, -2, -3], [4, 3, 0, -3, -4], [3, 2, 0, -2, -3],\n [2, 1, 0, -1, -2]]'], {'dtype': '"""float32"""'}), "([[2, 1, 0, -1, -2], [3, 2, 0, -2, -3], [4, 3, 0, -3, -4], [3, 2, 0,\n -2, -3], [2, 1, 0, -1, -2]], dtype='float32')\n", (172, 290), True, 'import numpy as np\n'), ((309, 527), 'numpy.array', 'np.array', (['[[3, 2, 1, 0, -1, -2, -3], [4, 3, 2, 0, -2, -3, -4], [5, 4, 3, 0, -3, -4, -\n 5], [6, 5, 4, 0, -4, -5, -6], [5, 4, 3, 0, -3, -4, -5], [4, 3, 2, 0, -2,\n -3, -4], [3, 2, 1, 0, -1, -2, -3]]'], {'dtype': '"""float32"""'}), "([[3, 2, 1, 0, -1, -2, -3], [4, 3, 2, 0, -2, -3, -4], [5, 4, 3, 0, \n -3, -4, -5], [6, 5, 4, 0, -4, -5, -6], [5, 4, 3, 0, -3, -4, -5], [4, 3,\n 2, 0, -2, -3, -4], [3, 2, 1, 0, -1, -2, -3]], dtype='float32')\n", (317, 527), True, 'import numpy as np\n'), ((549, 889), 'numpy.array', 'np.array', (['[[4, 3, 2, 1, 0, -1, -2, -3, -4], [5, 4, 3, 2, 0, -2, -3, -4, -5], [6, 5, 4,\n 3, 0, -3, -4, -5, -6], [7, 6, 5, 4, 0, -4, -5, -6, -7], [8, 7, 6, 5, 0,\n -5, -6, -7, -8], [7, 6, 5, 4, 0, -4, -5, -6, -7], [6, 5, 4, 3, 0, -3, -\n 4, -5, -6], [5, 4, 3, 2, 0, -2, -3, -4, -5], [4, 3, 2, 1, 0, -1, -2, -3,\n -4]]'], {'dtype': '"""float32"""'}), "([[4, 3, 2, 1, 0, -1, -2, -3, -4], [5, 4, 3, 2, 0, -2, -3, -4, -5],\n [6, 5, 4, 3, 0, -3, -4, -5, -6], [7, 6, 5, 4, 0, -4, -5, -6, -7], [8, 7,\n 6, 5, 0, -5, -6, -7, -8], [7, 6, 5, 4, 0, -4, -5, -6, -7], [6, 5, 4, 3,\n 0, -3, -4, -5, -6], [5, 4, 3, 2, 0, -2, -3, -4, -5], [4, 3, 2, 1, 0, -1,\n -2, -3, -4]], dtype='float32')\n", (557, 889), True, 'import numpy as np\n'), ((1330, 1361), 'onnx.numpy_helper.from_array', 'numpy_helper.from_array', (['w', '"""w"""'], {}), "(w, 'w')\n", (1353, 1361), False, 'from onnx import numpy_helper\n'), ((1378, 1455), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""w"""', 'onnx.TensorProto.FLOAT', '[1, 1, i, i]'], {}), "('w', onnx.TensorProto.FLOAT, [1, 1, i, i])\n", (1412, 1455), False, 'import onnx\n'), ((1471, 1558), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""x"""', 'onnx.TensorProto.FLOAT', '[1, 1, 2048, 2048]'], {}), "('x', onnx.TensorProto.FLOAT, [1, 1, 2048,\n 2048])\n", (1505, 1558), False, 'import onnx\n'), ((1691, 1779), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Conv"""'], {'inputs': "['x', 'w']", 'outputs': "['y']", 'kernel_shape': '[i, i]'}), "('Conv', inputs=['x', 'w'], outputs=['y'],\n kernel_shape=[i, i])\n", (1712, 1779), False, 'import onnx\n'), ((1846, 1942), 'onnx.helper.make_graph', 'onnx.helper.make_graph', (['[node_def]', 'f"""conv_{i}x{i}"""', '[tensor_x]', '[tensor_y]', '[initializer_w]'], {}), "([node_def], f'conv_{i}x{i}', [tensor_x], [tensor_y],\n [initializer_w])\n", (1868, 1942), False, 'import onnx\n'), ((2015, 2093), 'onnx.helper.make_model', 'onnx.helper.make_model', (['graph_def'], {'producer_name': '"""python_script"""', 'ir_version': '(6)'}), "(graph_def, producer_name='python_script', ir_version=6)\n", (2037, 2093), False, 'import onnx\n'), ((2181, 2216), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model_def'], {}), '(model_def)\n', (2205, 2216), False, 'import onnx\n'), ((2243, 2285), 'onnx.save', 'onnx.save', (['model_def', 'f"""conv_{i}x{i}.onnx"""'], {}), "(model_def, f'conv_{i}x{i}.onnx')\n", 
(2252, 2285), False, 'import onnx\n'), ((1226, 1258), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)', '(2048)', '(2048)'], {}), '(1, 1, 2048, 2048)\n', (1240, 1258), True, 'import numpy as np\n')]
|
import sys
sys.path.append("../ern/")
sys.path.append("../dies/")
import copy
import torch
import numpy as np
import pandas as pd
from dies.utils import listify
from sklearn.metrics import mean_squared_error as mse
from torch.utils.data.dataloader import DataLoader
from fastai.basic_data import DataBunch
from fastai.basic_data import DatasetType
import glob
def to_short_name(file):
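    # Strip the directory part and any known extension/suffix to obtain a bare identifier.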
return (
file.split("/")[-1]
.replace(".h5", "")
.replace(".csv", "")
.replace(".pkl", "")
.replace(".pth", "")
.replace("_config", "")
)
def create_databunch(
train_ds, val_ds, test_ds, batch_size, device,
):
train_ds.to_device(device)
tr = DataLoader(
train_ds,
batch_size,
drop_last=True,
shuffle=True,
# num_workers=6,
pin_memory=False,
)
val_ds.to_device(device)
val = DataLoader(val_ds, batch_size, pin_memory=False)
if test_ds is not None:
test_ds.to_device(device)
test = DataLoader(test_ds, batch_size, pin_memory=False)
else:
test = None
data_bunch = DataBunch(tr, val, test_dl=test)
return data_bunch
def get_config(file, include_rmse=False):
df = pd.read_csv(file, sep=",")
min_rmse_idx = df.root_mean_squared_error.idxmin()
relevant_cols = [c for c in df.columns if "config" in c]
rename_cols = {c: c.replace("config/", "") for c in relevant_cols}
if include_rmse:
relevant_cols += ["root_mean_squared_error"]
df = df[relevant_cols].loc[min_rmse_idx]
df = df.rename(rename_cols)
return df
def match_file_names(file_name, file_names):
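    # Return the entry of file_names whose short name matches that of file_name, or None if absent.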
res = None
file_name = to_short_name(file_name)
for f in file_names:
if file_name == to_short_name(f):
res = f
break
return res
def get_preds(learn, data_type=DatasetType.Test):
y_hats, y = learn.get_preds(data_type)
y_hats = np.clip(y_hats, 0, 1.05)
return y, y_hats
def get_rmse(learn, data_type=DatasetType.Test):
y, y_hats = get_preds(learn, data_type=data_type)
y_hats = np.clip(y_hats, 0, 1.05)
e = mse(y, y_hats) ** 0.5
return e
def get_ds_from_type(data_bunch, data_type):
if data_type == DatasetType.Train:
return data_bunch.train_ds
elif data_type == DatasetType.Valid:
return data_bunch.valid_ds
elif data_type == DatasetType.Test:
return data_bunch.test_ds
def create_rmse_df_lstm(y, y_hat, file, data_bunch, data_type=DatasetType.Test):
res_rmses, park_ids = [], []
pdfs = []
ds = get_ds_from_type(data_bunch, data_type)
y, y_hat = y.ravel(), y_hat.ravel()
res_rmse = mse(y, y_hat) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(file)
df_f = pd.DataFrame({"Y": y, "Yhat": y_hat, "Time": ds.index})
df_f["ParkId"] = to_short_name(file)
pdfs.append(df_f)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
pdfs = pd.concat(pdfs, axis=0)
return df_res, pdfs
def create_rmse_df_mtl(y, y_hat, files, data_bunch, data_type=DatasetType.Test):
res_rmses, park_ids = [], []
pdfs = []
ds = get_ds_from_type(data_bunch, data_type)
for i in range(y.shape[1]):
res_rmse = mse(y[:, i], y_hat[:, i]) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(files[i])
df_f = pd.DataFrame({"Y": y[:, i], "Yhat": y_hat[:, i], "Time": ds.index})
df_f["ParkId"] = to_short_name(data_bunch.files[i])
pdfs.append(df_f)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
pdfs = pd.concat(pdfs, axis=0)
return df_res, pdfs
def create_rmse_df_mlp(y, y_hat, park_ids, data_bunch, data_type=DatasetType.Test):
cat_park_ids = park_ids.ravel()
unique_park_ids = np.unique(park_ids)
ds = get_ds_from_type(data_bunch, data_type)
res_rmses, park_ids = [], []
dfs = []
for cur_park_id in unique_park_ids:
mask = cat_park_ids == cur_park_id
cy = y[mask]
cyh = y_hat[mask]
cid = ds.index[mask]
df_f = pd.DataFrame({"Y": cy.ravel(), "Yhat": cyh.ravel(), "Time": cid})
df_f["ParkId"] = to_short_name(data_bunch.files[cur_park_id])
dfs.append(df_f)
res_rmse = mse(cy, cyh) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(cur_park_id)
dfs = pd.concat(dfs, axis=0)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
return df_res, dfs
def get_test_results(test_folder):
files = glob.glob(test_folder + f"/*.csv")
dfs = []
for f in files:
dfs.append(pd.read_csv(f, sep=";"))
df = pd.concat(dfs, axis=0)
return df
def get_eval_results(base_folder, data_type):
forecast_folder = f"{base_folder}/mtl/"
files = glob.glob(forecast_folder + f"/{data_type}*error.csv")
forecast_folder = f"{base_folder}/lstm/"
files = files + glob.glob(forecast_folder + f"/{data_type}*error.csv")
forecast_folder = f"{base_folder}/mlp/"
files = files + glob.glob(forecast_folder + f"/{data_type}*error.csv")
dfs = []
for f in files:
dfs.append(pd.read_csv(f, sep=","))
df = pd.concat(dfs, axis=0)
return df
|
[
"sys.path.append",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"numpy.clip",
"glob.glob",
"torch.utils.data.dataloader.DataLoader",
"pandas.concat",
"numpy.unique",
"fastai.basic_data.DataBunch"
] |
[((12, 38), 'sys.path.append', 'sys.path.append', (['"""../ern/"""'], {}), "('../ern/')\n", (27, 38), False, 'import sys\n'), ((39, 66), 'sys.path.append', 'sys.path.append', (['"""../dies/"""'], {}), "('../dies/')\n", (54, 66), False, 'import sys\n'), ((701, 786), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['train_ds', 'batch_size'], {'drop_last': '(True)', 'shuffle': '(True)', 'pin_memory': '(False)'}), '(train_ds, batch_size, drop_last=True, shuffle=True, pin_memory=False\n )\n', (711, 786), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((896, 944), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['val_ds', 'batch_size'], {'pin_memory': '(False)'}), '(val_ds, batch_size, pin_memory=False)\n', (906, 944), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((1121, 1153), 'fastai.basic_data.DataBunch', 'DataBunch', (['tr', 'val'], {'test_dl': 'test'}), '(tr, val, test_dl=test)\n', (1130, 1153), False, 'from fastai.basic_data import DataBunch\n'), ((1230, 1256), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '""","""'}), "(file, sep=',')\n", (1241, 1256), True, 'import pandas as pd\n'), ((1945, 1969), 'numpy.clip', 'np.clip', (['y_hats', '(0)', '(1.05)'], {}), '(y_hats, 0, 1.05)\n', (1952, 1969), True, 'import numpy as np\n'), ((2109, 2133), 'numpy.clip', 'np.clip', (['y_hats', '(0)', '(1.05)'], {}), '(y_hats, 0, 1.05)\n', (2116, 2133), True, 'import numpy as np\n'), ((2775, 2830), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': y, 'Yhat': y_hat, 'Time': ds.index}"], {}), "({'Y': y, 'Yhat': y_hat, 'Time': ds.index})\n", (2787, 2830), True, 'import pandas as pd\n'), ((2908, 2961), 'pandas.DataFrame', 'pd.DataFrame', (["{'RMSE': res_rmses, 'ParkId': park_ids}"], {}), "({'RMSE': res_rmses, 'ParkId': park_ids})\n", (2920, 2961), True, 'import pandas as pd\n'), ((2973, 2996), 'pandas.concat', 'pd.concat', (['pdfs'], {'axis': '(0)'}), '(pdfs, axis=0)\n', (2982, 2996), True, 'import pandas as pd\n'), ((3539, 3592), 'pandas.DataFrame', 'pd.DataFrame', (["{'RMSE': res_rmses, 'ParkId': park_ids}"], {}), "({'RMSE': res_rmses, 'ParkId': park_ids})\n", (3551, 3592), True, 'import pandas as pd\n'), ((3604, 3627), 'pandas.concat', 'pd.concat', (['pdfs'], {'axis': '(0)'}), '(pdfs, axis=0)\n', (3613, 3627), True, 'import pandas as pd\n'), ((3797, 3816), 'numpy.unique', 'np.unique', (['park_ids'], {}), '(park_ids)\n', (3806, 3816), True, 'import numpy as np\n'), ((4372, 4394), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0)\n', (4381, 4394), True, 'import pandas as pd\n'), ((4408, 4461), 'pandas.DataFrame', 'pd.DataFrame', (["{'RMSE': res_rmses, 'ParkId': park_ids}"], {}), "({'RMSE': res_rmses, 'ParkId': park_ids})\n", (4420, 4461), True, 'import pandas as pd\n'), ((4534, 4568), 'glob.glob', 'glob.glob', (["(test_folder + f'/*.csv')"], {}), "(test_folder + f'/*.csv')\n", (4543, 4568), False, 'import glob\n'), ((4656, 4678), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0)\n', (4665, 4678), True, 'import pandas as pd\n'), ((4799, 4853), 'glob.glob', 'glob.glob', (["(forecast_folder + f'/{data_type}*error.csv')"], {}), "(forecast_folder + f'/{data_type}*error.csv')\n", (4808, 4853), False, 'import glob\n'), ((5182, 5204), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0)\n', (5191, 5204), True, 'import pandas as pd\n'), ((1023, 1072), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['test_ds', 'batch_size'], {'pin_memory': '(False)'}), '(test_ds, batch_size, 
pin_memory=False)\n', (1033, 1072), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2142, 2156), 'sklearn.metrics.mean_squared_error', 'mse', (['y', 'y_hats'], {}), '(y, y_hats)\n', (2145, 2156), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((2685, 2698), 'sklearn.metrics.mean_squared_error', 'mse', (['y', 'y_hat'], {}), '(y, y_hat)\n', (2688, 2698), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((3371, 3438), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': y[:, i], 'Yhat': y_hat[:, i], 'Time': ds.index}"], {}), "({'Y': y[:, i], 'Yhat': y_hat[:, i], 'Time': ds.index})\n", (3383, 3438), True, 'import pandas as pd\n'), ((4920, 4974), 'glob.glob', 'glob.glob', (["(forecast_folder + f'/{data_type}*error.csv')"], {}), "(forecast_folder + f'/{data_type}*error.csv')\n", (4929, 4974), False, 'import glob\n'), ((5040, 5094), 'glob.glob', 'glob.glob', (["(forecast_folder + f'/{data_type}*error.csv')"], {}), "(forecast_folder + f'/{data_type}*error.csv')\n", (5049, 5094), False, 'import glob\n'), ((3253, 3278), 'sklearn.metrics.mean_squared_error', 'mse', (['y[:, i]', 'y_hat[:, i]'], {}), '(y[:, i], y_hat[:, i])\n', (3256, 3278), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((4270, 4282), 'sklearn.metrics.mean_squared_error', 'mse', (['cy', 'cyh'], {}), '(cy, cyh)\n', (4273, 4282), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((4622, 4645), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""";"""'}), "(f, sep=';')\n", (4633, 4645), True, 'import pandas as pd\n'), ((5148, 5171), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""","""'}), "(f, sep=',')\n", (5159, 5171), True, 'import pandas as pd\n')]
|
import pytest
import numpy as np
from quantum_systems import BasisSet
def test_add_spin_spf():
spf = (np.arange(15) + 1).reshape(3, 5).T
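    # Each row of spf is a spatial function; adding spin duplicates every row, giving l = 10 spin-orbitals.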
n = 3
n_a = 2
n_b = n - n_a
l = 2 * spf.shape[0]
assert l == 10
m_a = l // 2 - n_a
assert m_a == 3
m_b = l // 2 - n_b
assert m_b == 4
new_spf = BasisSet.add_spin_spf(spf, np)
# Occupied spin-up
np.testing.assert_allclose(spf[0], new_spf[0])
np.testing.assert_allclose(spf[1], new_spf[2])
# Occupied spin-down
np.testing.assert_allclose(spf[0], new_spf[1])
# Virtual spin-up
np.testing.assert_allclose(spf[2], new_spf[4])
np.testing.assert_allclose(spf[3], new_spf[6])
np.testing.assert_allclose(spf[4], new_spf[8])
# Virtual spin-down
np.testing.assert_allclose(spf[1], new_spf[3])
np.testing.assert_allclose(spf[2], new_spf[5])
np.testing.assert_allclose(spf[3], new_spf[7])
np.testing.assert_allclose(spf[4], new_spf[9])
|
[
"numpy.testing.assert_allclose",
"numpy.arange",
"quantum_systems.BasisSet.add_spin_spf"
] |
[((333, 363), 'quantum_systems.BasisSet.add_spin_spf', 'BasisSet.add_spin_spf', (['spf', 'np'], {}), '(spf, np)\n', (354, 363), False, 'from quantum_systems import BasisSet\n'), ((392, 438), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[0]', 'new_spf[0]'], {}), '(spf[0], new_spf[0])\n', (418, 438), True, 'import numpy as np\n'), ((444, 490), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[1]', 'new_spf[2]'], {}), '(spf[1], new_spf[2])\n', (470, 490), True, 'import numpy as np\n'), ((521, 567), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[0]', 'new_spf[1]'], {}), '(spf[0], new_spf[1])\n', (547, 567), True, 'import numpy as np\n'), ((595, 641), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[2]', 'new_spf[4]'], {}), '(spf[2], new_spf[4])\n', (621, 641), True, 'import numpy as np\n'), ((647, 693), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[3]', 'new_spf[6]'], {}), '(spf[3], new_spf[6])\n', (673, 693), True, 'import numpy as np\n'), ((699, 745), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[4]', 'new_spf[8]'], {}), '(spf[4], new_spf[8])\n', (725, 745), True, 'import numpy as np\n'), ((775, 821), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[1]', 'new_spf[3]'], {}), '(spf[1], new_spf[3])\n', (801, 821), True, 'import numpy as np\n'), ((827, 873), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[2]', 'new_spf[5]'], {}), '(spf[2], new_spf[5])\n', (853, 873), True, 'import numpy as np\n'), ((879, 925), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[3]', 'new_spf[7]'], {}), '(spf[3], new_spf[7])\n', (905, 925), True, 'import numpy as np\n'), ((931, 977), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[4]', 'new_spf[9]'], {}), '(spf[4], new_spf[9])\n', (957, 977), True, 'import numpy as np\n'), ((109, 122), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (118, 122), True, 'import numpy as np\n')]
|
"""
Train a spiking Bayesian WTA network and plot weight changes, spike trains and log-likelihood live.
MIT License
Copyright (c) 2019 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import utility as ut
import network as nt
from tqdm import tqdm as tqdm
from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter
from collections import deque
from copy import deepcopy
from data_generator import DataGenerator
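# Simulation time step in seconds.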
delta_T = 1e-3
# parameters
spiking_input = False
labels = [0, 1, 2, 3]
n_outputs = 12
W, H = 24, 24
r_net = 50.0
t_max = 1000
n_inputs = W*H
m_k = 1.0/n_outputs
# load data
x, y = ut.load_mnist(h=H, w=W, labels=labels, train=False, frequencies=spiking_input)
net = nt.EventBasedBinaryWTANetwork(n_inputs=n_inputs, n_outputs=n_outputs,
r_net=r_net, m_k=m_k, eta_v=1e-2, eta_b=1e+0, max_trace_length=1000)
# train
pca_plotter = WeightPCAPlotter(x, y, n_outputs, labels)
weights_plotter = WeightPlotter(ut.sigmoid(net._V).reshape((-1, W, H)))
likelihood_plotter = CurvePlotter(x_label="Time [s]", y_label="$log[p(y)]$")
output_spiketrains = SpiketrainPlotter(n_outputs, 100)
likelihoods = []
def estimate_likelihood(estimation_duration=5.0):
log_likelihoods = deque([])
estimation_net = deepcopy(net)
estimation_net._current_time = 0
estimation_net._trace = deque([])
while estimation_net._current_time < estimation_duration:
estimation_net.step(lambda t: data_generator[t], update_weights=False)
pbar.n = int(net._current_time * 1000) / 1000
pbar.update(0)
# log likelihood
sample = estimation_net._trace[-1][1].reshape((1, -1))
pi = ut.sigmoid(net._V)
log_likelihoods.append(
np.log(1.0 / n_outputs) + np.log(np.sum(np.prod(sample * pi + (1 - sample) * (1 - pi), axis=-1))))
return np.mean(log_likelihoods), np.std(log_likelihoods)
data_generator = DataGenerator(x, 10000, t_image=0.250, delta_T=delta_T, spiking=spiking_input)
pbar = tqdm(total=t_max, unit='Time [s]')
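# Train event by event until t_max seconds of simulated time have elapsed.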
while net._current_time < t_max:
z = net.step(lambda t: data_generator[t])
if output_spiketrains is not None and net._current_time > 100:
output_spiketrains.update([z], [net._current_time])
pbar.n = int(net._current_time * 1000) / 1000
pbar.update(0)
# update plots
if int(pbar.n) > len(likelihoods):
likelihoods.append(estimate_likelihood())
weights_plotter.update(ut.sigmoid(net._V))
pca_plotter.update(ut.sigmoid(net._V))
likelihood_plotter.update(likelihoods)
likelihood = likelihoods[-1][0] if len(likelihoods) > 0 else np.nan
pbar.set_description(
f'<sigma(V)> = {np.mean(ut.sigmoid(net._V)):.4f}, <b> = {np.mean(net._b):.4f}, <L(y)> = {likelihood:.4f}')
pbar.close()
|
[
"utility.sigmoid",
"tqdm.tqdm",
"copy.deepcopy",
"numpy.log",
"utility.load_mnist",
"plot.SpiketrainPlotter",
"plot.WeightPCAPlotter",
"data_generator.DataGenerator",
"numpy.std",
"numpy.prod",
"numpy.mean",
"plot.CurvePlotter",
"network.EventBasedBinaryWTANetwork",
"collections.deque"
] |
[((1636, 1714), 'utility.load_mnist', 'ut.load_mnist', ([], {'h': 'H', 'w': 'W', 'labels': 'labels', 'train': '(False)', 'frequencies': 'spiking_input'}), '(h=H, w=W, labels=labels, train=False, frequencies=spiking_input)\n', (1649, 1714), True, 'import utility as ut\n'), ((1723, 1865), 'network.EventBasedBinaryWTANetwork', 'nt.EventBasedBinaryWTANetwork', ([], {'n_inputs': 'n_inputs', 'n_outputs': 'n_outputs', 'r_net': 'r_net', 'm_k': 'm_k', 'eta_v': '(0.01)', 'eta_b': '(1.0)', 'max_trace_length': '(1000)'}), '(n_inputs=n_inputs, n_outputs=n_outputs, r_net\n =r_net, m_k=m_k, eta_v=0.01, eta_b=1.0, max_trace_length=1000)\n', (1752, 1865), True, 'import network as nt\n'), ((1921, 1962), 'plot.WeightPCAPlotter', 'WeightPCAPlotter', (['x', 'y', 'n_outputs', 'labels'], {}), '(x, y, n_outputs, labels)\n', (1937, 1962), False, 'from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter\n'), ((2056, 2111), 'plot.CurvePlotter', 'CurvePlotter', ([], {'x_label': '"""Time [s]"""', 'y_label': '"""$log[p(y)]$"""'}), "(x_label='Time [s]', y_label='$log[p(y)]$')\n", (2068, 2111), False, 'from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter\n'), ((2133, 2166), 'plot.SpiketrainPlotter', 'SpiketrainPlotter', (['n_outputs', '(100)'], {}), '(n_outputs, 100)\n', (2150, 2166), False, 'from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter\n'), ((2947, 3024), 'data_generator.DataGenerator', 'DataGenerator', (['X', '(10000)'], {'t_image': '(0.25)', 'delta_T': 'delta_T', 'spiking': 'spiking_input'}), '(X, 10000, t_image=0.25, delta_T=delta_T, spiking=spiking_input)\n', (2960, 3024), False, 'from data_generator import DataGenerator\n'), ((3033, 3067), 'tqdm.tqdm', 'tqdm', ([], {'total': 't_max', 'unit': '"""Time [s]"""'}), "(total=t_max, unit='Time [s]')\n", (3037, 3067), True, 'from tqdm import tqdm as tqdm\n'), ((2259, 2268), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (2264, 2268), False, 'from collections import deque\n'), ((2291, 2304), 'copy.deepcopy', 'deepcopy', (['net'], {}), '(net)\n', (2299, 2304), False, 'from copy import deepcopy\n'), ((2370, 2379), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (2375, 2379), False, 'from collections import deque\n'), ((2704, 2722), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (2714, 2722), True, 'import utility as ut\n'), ((2878, 2902), 'numpy.mean', 'np.mean', (['log_likelihoods'], {}), '(log_likelihoods)\n', (2885, 2902), True, 'import numpy as np\n'), ((2904, 2927), 'numpy.std', 'np.std', (['log_likelihoods'], {}), '(log_likelihoods)\n', (2910, 2927), True, 'import numpy as np\n'), ((1995, 2013), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (2005, 2013), True, 'import utility as ut\n'), ((3486, 3504), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (3496, 3504), True, 'import utility as ut\n'), ((3533, 3551), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (3543, 3551), True, 'import utility as ut\n'), ((2767, 2790), 'numpy.log', 'np.log', (['(1.0 / n_outputs)'], {}), '(1.0 / n_outputs)\n', (2773, 2790), True, 'import numpy as np\n'), ((3764, 3779), 'numpy.mean', 'np.mean', (['net._b'], {}), '(net._b)\n', (3771, 3779), True, 'import numpy as np\n'), ((3731, 3749), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (3741, 3749), True, 'import utility as ut\n'), ((2807, 2862), 'numpy.prod', 'np.prod', (['(sample * pi + (1 - sample) * (1 - pi))'], {'axis': '(-1)'}), '(sample * pi + (1 - 
sample) * (1 - pi), axis=-1)\n', (2814, 2862), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Feb 26 15:15:37 2018
@author: <NAME>, <NAME>
"""
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from fnmatch import fnmatch
import sys
import os
import matplotlib.image as mpimg
import scipy
# Make sure that caffe is on the python path:
#set the caffe_FAST path
CAFFE_ROOT = '../path_to/caffe_FAST/'
sys.path.insert(0, CAFFE_ROOT + 'python')
import caffe
caffe.set_mode_gpu()
caffe.set_device(0)
#Set the path of deploy and trained model in the following line:
net = caffe.Net('/path_to/models_pose/model2_MPII_JHMDB/fcn-8s-pascal-deploy_300.prototxt', '/path_to/models_pose/model2_MPII_JHMDB/FCN_8S_snapshot_iter_300000.caffemodel', caffe.TEST)
def get_files_in_dir(DIR, pattern = None):
all_files = []
if os.path.isdir(DIR):
for path, subdirs, files in os.walk(DIR):
for name in files:
if pattern is not None:
if fnmatch(name, pattern):
all_files.append(os.path.join(path, name))
else:
all_files.append(os.path.join(path, name))
else:
print("{} DOES NOT EXIST!!".format(DIR))
return all_files
#
# redo later to properly do using mem_data_layer
#
def segment(in_file,path):
# load image, switch to BGR, subtract mean
im = Image.open(in_file)
#im = im.resize(( 240,320),Image.ANTIALIAS)
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
#in_ -= np.array((126.8420, 134.2887, 123.7515)) #NTU mean BGR values
in_ -= np.array((103.28, 105.99, 92.54)) #JHMDB mean BGR values
in_ = in_.transpose((2,0,1))
# load net
#net = caffe.Net('fcn-2s-pascal-DEPLOY.prototxt', 'fcn_2s_snapshot_iter_1400000.caffemodel', caffe.TEST)
#NEW NET ARCH
#net = caffe.Net('fcn-16s-pascal-deploy_300.prototxt', 'FCN_16S_snapshot_iter_300000.caffemodel', caffe.TEST)
# shape for input (data blob is N x C x H x W), set data
net.blobs['data'].reshape(1, 3, 240, 320)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
out = net.blobs['upscore'].data[0].argmax(axis=0)
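    # Recolour the input image in place: pixels predicted as class 0 are blacked out,
    # and each of the classes 1-15 is painted with a fixed RGB colour.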
for i in range(0, 240):
for j in range(0, 320):
if out[i,j] == 0:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=0
D[1]=0
D[2]=0
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 1:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=219
D[1]=112
D[2]=147
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 2:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=32
D[1]=178
D[2]=170
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 3:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=255
D[1]=182
D[2]=193
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 4:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=148
D[1]=0
D[2]=211
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 5:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=139
D[1]=0
D[2]=139
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 6:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=46
D[1]=139
D[2]=87
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 7:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=60
D[1]=179
D[2]=113
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 8:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=218
D[1]=112
D[2]=214
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 9:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=186
D[1]=85
D[2]=211
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 10:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=50
D[1]=205
D[2]=50
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 11:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=127
D[1]=255
D[2]=0
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 12:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=255
D[1]=69
D[2]=0
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 13:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=255
D[1]=127
D[2]=80
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 14:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=255
D[1]=215
D[2]=0
new_color=tuple(D)
im.putpixel( (j,i), new_color)
if out[i,j] == 15:
new_color = im.getpixel( (j,i))
D=list(new_color)
D[0]=218
D[1]=165
D[2]=32
new_color=tuple(D)
im.putpixel( (j,i), new_color)
SEGMENTED_DIR = path
#Set the path for saving results
ddBase='/path_to_save/test_d/'
#SEGMENTED_DIR=SEGMENTED_DIR
FILE = (in_file.rsplit('/', 1))[1]
FILE = FILE.replace(" ", "")
FILE = (FILE.rsplit(".",1))[0]
FILE1 = (in_file.rsplit('/', 2))[1]
FILE1 = FILE1.replace(" ", "")
FILE2 = (in_file.rsplit('/', 3))[1]
FILE2 = FILE2.replace(" ", "")
FILE3 = (in_file.rsplit('/', 4))[1]
FILE3 = FILE3.replace(" ", "")
Seg_save_Dir=ddBase + "/" + FILE3 + "/" + FILE2+ "/" + FILE1
if not os.path.exists(Seg_save_Dir):
os.makedirs(Seg_save_Dir)
save_file = Seg_save_Dir+ "/" + FILE + ".jpg"
#print "path %s." % (save_file)
#save_file = SEGMENTED_DIR + "/" + FILE + "_seg.png"
#fig = plt.figure()
#a=fig.add_subplot(121,aspect='equal')
#plt.axis('off')
##img = mpimg.imread(im)
#imgplot = plt.imshow(im)
#a=fig.add_subplot(122,aspect='equal')
#plt.axis('off')
#imgplot = plt.imshow(out)
#fig.savefig(save_file)
#plt.close(fig)
#Uncertainty
#scipy.misc.imsave(save_file, out)
scipy.misc.imsave(save_file, im)
#im = im.resize((1242,375),Image.ANTIALIAS)
#save_file = SEGMENTED_DIR + "/" + FILE + "_seg2.png"
#scipy.misc.imsave(save_file, out2)
if __name__ == '__main__':
tt=1
#read lines and remove \n
#lines = [line.rstrip('\r') for line in open('ntu_videos_crossSubject_woMissedSamples_train.txt')]
#print lines
# Open the file for reading.
#set the path of data list. each line in the list is the location of video frames.
with open('/path_to_list/data_list.txt', 'r') as infile:
data = infile.read() # Read the contents of the file into memory.
# Return a list of the lines, breaking at line boundaries.
my_list = data.splitlines()
for path in my_list:
#print path
DATASET_DIR = path
in_files = get_files_in_dir(DATASET_DIR, "*.jpg")
print(tt,"/",len(my_list))
tt=tt+1
#print in_files
for in_f in range(len(in_files)):
segment(in_files[in_f],path)
|
[
"caffe.set_mode_gpu",
"os.makedirs",
"os.path.isdir",
"os.walk",
"os.path.exists",
"sys.path.insert",
"PIL.Image.open",
"caffe.set_device",
"numpy.array",
"scipy.misc.imsave",
"caffe.Net",
"os.path.join",
"fnmatch.fnmatch"
] |
[((386, 427), 'sys.path.insert', 'sys.path.insert', (['(0)', "(CAFFE_ROOT + 'python')"], {}), "(0, CAFFE_ROOT + 'python')\n", (401, 427), False, 'import sys\n'), ((443, 463), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (461, 463), False, 'import caffe\n'), ((464, 483), 'caffe.set_device', 'caffe.set_device', (['(0)'], {}), '(0)\n', (480, 483), False, 'import caffe\n'), ((555, 747), 'caffe.Net', 'caffe.Net', (['"""/path_to/models_pose/model2_MPII_JHMDB/fcn-8s-pascal-deploy_300.prototxt"""', '"""/path_to/models_pose/model2_MPII_JHMDB/FCN_8S_snapshot_iter_300000.caffemodel"""', 'caffe.TEST'], {}), "(\n '/path_to/models_pose/model2_MPII_JHMDB/fcn-8s-pascal-deploy_300.prototxt',\n '/path_to/models_pose/model2_MPII_JHMDB/FCN_8S_snapshot_iter_300000.caffemodel'\n , caffe.TEST)\n", (564, 747), False, 'import caffe\n'), ((809, 827), 'os.path.isdir', 'os.path.isdir', (['DIR'], {}), '(DIR)\n', (822, 827), False, 'import os\n'), ((1372, 1391), 'PIL.Image.open', 'Image.open', (['in_file'], {}), '(in_file)\n', (1382, 1391), False, 'from PIL import Image\n'), ((1450, 1480), 'numpy.array', 'np.array', (['im'], {'dtype': 'np.float32'}), '(im, dtype=np.float32)\n', (1458, 1480), True, 'import numpy as np\n'), ((1595, 1628), 'numpy.array', 'np.array', (['(103.28, 105.99, 92.54)'], {}), '((103.28, 105.99, 92.54))\n', (1603, 1628), True, 'import numpy as np\n'), ((7885, 7917), 'scipy.misc.imsave', 'scipy.misc.imsave', (['save_file', 'im'], {}), '(save_file, im)\n', (7902, 7917), False, 'import scipy\n'), ((865, 877), 'os.walk', 'os.walk', (['DIR'], {}), '(DIR)\n', (872, 877), False, 'import os\n'), ((7299, 7327), 'os.path.exists', 'os.path.exists', (['Seg_save_Dir'], {}), '(Seg_save_Dir)\n', (7313, 7327), False, 'import os\n'), ((7337, 7362), 'os.makedirs', 'os.makedirs', (['Seg_save_Dir'], {}), '(Seg_save_Dir)\n', (7348, 7362), False, 'import os\n'), ((973, 995), 'fnmatch.fnmatch', 'fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (980, 995), False, 'from fnmatch import fnmatch\n'), ((1123, 1147), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1135, 1147), False, 'import os\n'), ((1038, 1062), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1050, 1062), False, 'import os\n')]
|
import numpy as np
from py_vbc.constants import *
from py_vbc.interpolations import interpolate_tf
def sigma(k, tf_spline, R=8.0/hconst):
"""Integrand to calculate the mass fluctuations in a sphere of radius
R, up to some constant of proportionality C, using transfer
functions. Uses the fact that
    sigma^2 = int dk/k Delta^2(k) w(kR)^2
and
Delta^2(k) = C k^(3+ns) T^2(k)
w(x) is the window function, defined as the Fourier transform of a
real-space top hat function.
:param k: should be in units of Mpc^-1
:param Tk: value of total matter transfer function at k and at z=0
:param R: radius of sphere to calculate fluctuations, for sigma_8 this is
8 h^-1 Mpc
:returns: dsigma^2/C where C is a normalization constant
:rtype: float
"""
def w(x):
return (3/x**3)*(np.sin(x) - x*np.cos(x))
x = k*R
Tk = tf_spline(k)
return k**(2+ns) * Tk**2 * w(x)**2
def calc_norm():
"""This calculates the value of the normalization constant, with
respect to sigma_8. The idea is that we already calculated
sigma_8/C from the transfer functions, so by dividing the
(specified) value of sigma_8 (at z=0) by our calculated sigma_8/C
we get sqrt(C), which we can use to go from transfer functions to
power spectra.
:returns: normalisation constant
:rtype: float
"""
from scipy.integrate import quad
tf0_spline = interpolate_tf(flag='t', z=0)
# Need to check limits on the spline
kmin = np.min(tf0_spline.x)
kmax = np.max(tf0_spline.x)
# Sigma is highly oscillatory above k ~ 5 Mpc^-1, so best to split
# the integral into two parts to improve the convergence --
# ideally kmid would be dynamically defined but 10 Mpc^-1 seems to
# work
kmid = 10.0
# Arguments for quad
epsrel = 1.0e-6
limit = int(1e6)
sigma_8c1 = np.sqrt(quad(lambda k: sigma(k, tf0_spline), kmin, kmid, limit=limit, epsrel=epsrel)[0])
sigma_8c2 = np.sqrt(quad(lambda k: sigma(k, tf0_spline), kmid, kmax, limit=limit, epsrel=epsrel)[0])
sigma_8c = sigma_8c1 + sigma_8c2
return sigma_8/sigma_8c
def calc_power_spec(k, g, zstart):
"""Calculates the power spectra at z=zinit. First evolves the z=1000
transfer functions forward using the linear growth factors
calculated earlier, then converts to power spectra by using the
normalization constant and
P(k) propto T(k)^2 k^ns
where ns is the tilt of the power spectrum.
:param k: (array) k-values for which the growth factors were calculated
:param g: (array) growth factors as produced by calc_derivs(), where the
first column is for CDM perturbations and the third column
is the baryon perturbations
:returns: the CDM and baryon power spectra
:rtype: arrays
"""
tf_b_spline = interpolate_tf('b', zstart)
tf_c_spline = interpolate_tf('c', zstart)
tf_b = tf_b_spline(k)
tf_c = tf_c_spline(k)
# In CICsASS the CDM TFs are used to calculate all of the power
# spectra -- I'm reproducing that here, but I'm not entirely
# convinced that's correct...
tf_spline = interpolate_tf('t', zstart)
tf = tf_spline(k)
# tf_c = tf
tf_b = tf_c
norm = calc_norm()
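    # P(k) = 2*pi^2 * norm^2 * D(z)^2 * T(k)^2 * k^ns, with norm fixed by sigma_8 at z=0.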
p_c = 2*np.pi**2 * norm**2 * g[:, 0]**2 * tf_c**2 * k**ns
p_b = 2*np.pi**2 * norm**2 * g[:, 2]**2 * tf_b**2 * k**ns
return p_c, p_b
def calc_delta(k, p):
"""Calculates the dimensionless power spectrum Delta^2 given a power
spectrum P(k)
:param k: k values of P(k)
:param p: power spectrum
:returns: Delta^2(k)
:rtype: array
"""
return p*k**3/(2*np.pi**2)
|
[
"py_vbc.interpolations.interpolate_tf",
"numpy.max",
"numpy.sin",
"numpy.min",
"numpy.cos"
] |
[((1468, 1497), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', ([], {'flag': '"""t"""', 'z': '(0)'}), "(flag='t', z=0)\n", (1482, 1497), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((1551, 1571), 'numpy.min', 'np.min', (['tf0_spline.x'], {}), '(tf0_spline.x)\n', (1557, 1571), True, 'import numpy as np\n'), ((1583, 1603), 'numpy.max', 'np.max', (['tf0_spline.x'], {}), '(tf0_spline.x)\n', (1589, 1603), True, 'import numpy as np\n'), ((2925, 2952), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', (['"""b"""', 'zstart'], {}), "('b', zstart)\n", (2939, 2952), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((2971, 2998), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', (['"""c"""', 'zstart'], {}), "('c', zstart)\n", (2985, 2998), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((3236, 3263), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', (['"""t"""', 'zstart'], {}), "('t', zstart)\n", (3250, 3263), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((868, 877), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (874, 877), True, 'import numpy as np\n'), ((882, 891), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (888, 891), True, 'import numpy as np\n')]
|
import os
import torch
import torch.nn.functional as F
import numpy as np
from collections import namedtuple
import time
import matplotlib.pyplot as plt
# Decide whether to use the GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def LpNormalize_cnn(input, p=2, cp=1, eps=1e-6):
r'''Calculate the unit vector on Lp sphere
:param input: tensor of weight, dims should be >= 2
:param p: the Lp parameter of weight
:param cp: the p power of current input, that means input = c*w^cp
:param eps:
:return: output = input/norm_d, norm_d = norm(input, p/cp)
'''
dim = input.dim()
norm_d = LpNorm_cnn(input, p, cp, eps)
inv_norm_d = 1 / norm_d
if dim == 2:
output = input.mul(inv_norm_d.view(input.size(0), 1))
elif dim == 3:
output = input.mul(inv_norm_d.view(input.size(0), 1, 1))
elif dim == 4:
output = input.mul(inv_norm_d.view(input.size(0), 1, 1, 1))
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output, norm_d
def LpNorm_cnn(input, p=2, cp=1, eps=1e-6):
r'''Calculate the Lp norm of weights
:param input: tensor of weight, dims should be >= 2, and the dim 0 is channels
:param p: the Lp parameter of weight
:param cp: the p power of current input, that means input = c*w^cp
:param eps:
:return: output = input/norm_d, norm_d = norm(input, p/cp)
'''
dim = input.dim()
if dim == 2:
norm_d = input.abs().pow(p / cp).sum(1).pow(cp / p).add(eps)
elif dim == 3:
norm_d = input.abs().pow(p / cp).sum(2).sum(1).pow(cp / p).add(eps)
elif dim == 4:
norm_d = input.abs().pow(p / cp).sum(3).sum(2).sum(1).pow(cp / p).add(eps)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return norm_d
def LpNormalize_layer(input, p=2, cp=1, eps=1e-6):
r'''Calculate the unit vector on Lp sphere (layer as a single vector)
:param input: tensor of weight, dims should be >= 2
:param p: the Lp parameter of weight
:param cp: the p power of current input, that means input = c*w^cp
:param eps:
:return: output = input/norm_d, norm_d = norm(input, p/cp)
'''
dim = input.dim()
if dim >= 2 and dim <= 4:
norm_d = input.abs().pow(p/cp).sum().pow(cp/p).add(eps)
inv_norm_d = 1/norm_d
output = input.mul(inv_norm_d)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output, inv_norm_d
def Hoyer_layer_sparsity(input, eps=1e-8):
# Hoyer’s sparsity of a layer's weight
# the average sparsity of weight in a layer
dim = input.dim()
abs_in = input.abs()
d = np.prod(input.size()[1:])
sqrt_d = np.sqrt(d)
if dim == 2:
output = abs_in.sum(1).div(abs_in.pow(2).sum(1).pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d).mean(0)
elif dim == 3:
output = abs_in.sum(2).sum(1).div(abs_in.pow(2).sum(2).sum(1).pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d).mean(0)
elif dim == 4:
output = abs_in.sum(3).sum(2).sum(1).div(abs_in.pow(2).sum(3).sum(2).sum(1).pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d).mean(0)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output
def Hoyer_layervec_sparsity(input, eps=1e-8):
# Hoyer’s sparsity of a layer's weight
# the average sparsity of weight in a layer
dim = input.dim()
abs_in = input.abs()
d = np.prod(input.size())
sqrt_d = np.sqrt(d)
if dim >= 2 and dim <= 4:
        output = abs_in.sum().div(abs_in.pow(2).sum().pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output
def Hoyer_net_sparsity(model):
# Hoyer’s sparsity of whole network
# the average sparsity of weight in whole network
weight_list = get_weight(model)
w_sparsity = []
num_w = [] # number of weight in each layer
for name, weight in weight_list:
if weight.dim() < 2:
continue
# sparsity
c_sparse = Hoyer_layer_sparsity(weight.data).item()
w_sparsity.append(c_sparse)
num_w.append(np.prod(weight.data.size()))
return np.average(w_sparsity, weights=num_w)
def Hoyer_net_ll_sparsity(model):
# Hoyer’s sparsity of whole network
# the average sparsity of weight in whole network
weight_list = get_weight(model)
w_sparsity = []
num_w = [] # number of weight in each layer
for name, weight in weight_list:
if weight.dim() < 2:
continue
# sparsity
c_sparse = Hoyer_layervec_sparsity(weight.data).item()
w_sparsity.append(c_sparse)
num_w.append(np.prod(weight.data.size()))
return np.average(w_sparsity, weights=num_w)
def Hoyer_activation_sparsity(input):
# Hoyer’s sparsity of a layer's activation
# the average sparsity of activation in a layer
return Hoyer_layer_sparsity(input)
def sparsify_weight(w, mask, h=0.1, eps=1e-8):
'''Weight sparsification by setting the small element to zero
Args:
w (torch.tensor): the weight for sparsification
mask (torch.tensor): the mask of no activated weight
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
:return: w (sparse), mask
'''
wa = w.abs()
nmask_f = (~mask).float()
dim = wa.dim()
if dim == 2:
hh = wa.mul(nmask_f).sum(1).div(nmask_f.sum(1).add(eps)).mul(h)
mask = wa < hh.view(wa.size(0), 1)
w.masked_fill_(mask, 0)
elif dim == 3:
hh = wa.mul(nmask_f).sum(2).sum(1).div(nmask_f.sum(2).sum(1).add(eps)).mul(h)
mask = wa < hh.view(wa.size(0), 1, 1)
w.masked_fill_(mask, 0)
elif dim == 4:
hh = wa.mul(nmask_f).sum(3).sum(2).sum(1).div(nmask_f.sum(3).sum(2).sum(1).add(eps)).mul(h)
mask = wa < hh.view(wa.size(0), 1, 1, 1)
w.masked_fill_(mask, 0)
else:
raise ValueError('Expected dimension of input 2 <= dims <=4, got {}'.format(dim))
return w, mask
def sparsify_weight_ll(w, h=0.1):
'''Weight sparsification by setting the small element to zero
Args:
w (torch.tensor): the weight for sparsification
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
:return: mask
'''
wa = w.abs()
ws = wa.mul(wa)
dim = ws.dim()
if dim >= 2 and dim <= 4:
hh = ws.mean().sqrt().mul(h)
mask = wa < hh
w.masked_fill_(mask, 0)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return w, mask
def sparsify_grad(g, mask, h=0.1, eps=1e-10):
'''grow connection by activate large gradient
Args:
g (torch.tensor): the gradient of weight
mask (torch.tensor): the mask of no activated weight
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
:return: mask
'''
ga = g.abs()
nmask_f = (~mask).float()
dim = ga.dim()
if dim == 2:
hh = ga.mul(nmask_f).sum(1).div(nmask_f.sum(1).add(eps)).mul(h)
mask = (ga < hh.view(ga.size(0), 1)) & mask
elif dim == 3:
hh = ga.mul(nmask_f).sum(2).sum(1).div(nmask_f.sum(2).sum(1).add(eps)).mul(h)
mask = (ga < hh.view(ga.size(0), 1, 1)) & mask
elif dim == 4:
hh = ga.mul(nmask_f).sum(3).sum(2).sum(1).div(nmask_f.sum(3).sum(2).sum(1).add(eps)).mul(h)
mask = (ga < hh.view(ga.size(0), 1, 1, 1)) & mask
else:
raise ValueError('Expected dimension of input 2 <= dims <=4, got {}'.format(dim))
return mask
def sparsify_grad_ll(g, mask, h=0.1, eps=1e-8):
'''grow connection by activate large gradient
Args:
g (torch.tensor): the gradient of weight
mask (torch.tensor): the mask of no activated weight
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
    :return: mask
'''
ga = g.abs()
mask_f = mask.float()
nmask_f = 1 - mask_f
dim = ga.dim()
if dim >= 2 and dim <= 4:
hh = ga.mul(nmask_f).sum().div(nmask_f.sum().add(eps)).mul(h)
mask = (ga < hh) & mask
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return mask
def orthogonalProjection(w, x):
# the projection of x orthogonal to the w
size_w = w.size()
size_x = x.size()
if size_w != size_x:
raise ValueError('Expected size of x should be same as w {}, got {}'.format(size_w, size_x))
dim = w.dim()
if dim == 2:
r = w.mul(x).sum(1)
p = x.sub(w.mul(r.view(size_w[0], 1)))
elif dim == 3:
r = w.mul(x).sum(2).sum(1)
elif dim == 4:
r = w.mul(x).sum(3).sum(2).sum(1)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return r
def get_weight(model):
'''
    Get the list of weights of the model
:param model:
:return:
'''
weight_list = []
for name, param in model.named_parameters():
if 'weight' in name:
weight = (name, param)
weight_list.append(weight)
return weight_list
def saveLists(lists, textname):
# save the lists to certain text
file = open(textname,'w')
for data in lists:
m = len(data)
for i, p in enumerate(data):
file.write(str(p))
if i<m-1:
file.write(',')
file.write('\n')
file.close()
|
[
"torch.cuda.is_available",
"numpy.average",
"numpy.sqrt"
] |
[((2774, 2784), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (2781, 2784), True, 'import numpy as np\n'), ((3545, 3555), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (3552, 3555), True, 'import numpy as np\n'), ((4278, 4315), 'numpy.average', 'np.average', (['w_sparsity'], {'weights': 'num_w'}), '(w_sparsity, weights=num_w)\n', (4288, 4315), True, 'import numpy as np\n'), ((4820, 4857), 'numpy.average', 'np.average', (['w_sparsity'], {'weights': 'num_w'}), '(w_sparsity, weights=num_w)\n', (4830, 4857), True, 'import numpy as np\n'), ((200, 225), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (223, 225), False, 'import torch\n')]
|
"""Plot 1d ovservables"""
from gna.ui import basecmd, append_typed, qualified
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from mpl_tools.helpers import savefig
import numpy as np
from gna.bindings import common
from gna.env import PartNotFoundError, env
class cmd(basecmd):
def __init__(self, *args, **kwargs):
basecmd.__init__(self, *args, **kwargs)
@classmethod
def initparser(cls, parser, env):
def observable(path):
try:
return env.ns('').getobservable(path)
except KeyError:
raise PartNotFoundError("observable", path)
parser.add_argument('data', metavar=('DATA',), type=observable, help='observable to store')
parser.add_argument('output', help='filename')
parser.add_argument('--header', default='', help='Header')
def run(self):
data = self.opts.data.data()
dt = self.opts.data.datatype()
header = self.opts.header
if dt.isHist():
if dt.shape.size()!=1:
raise Exception('2d histograms not yet implemented')
edges = np.array(dt.edges)
edges_left, edges_right = edges[:-1], edges[1:]
dump = edges_left, edges_right, data
if not header:
header = 'bin_left bin_right data'
elif dt.isPoints():
dump = data,
if not header:
header = 'data'
else:
raise Exception('DataType is undefined')
dump = np.array(dump).T
try:
np.savetxt(self.opts.output, dump, header=header)
except:
raise Exception('Unable to write data to: '+self.opts.output)
print(('Dump data to: '+self.opts.output))
|
[
"gna.ui.basecmd.__init__",
"numpy.savetxt",
"gna.env.PartNotFoundError",
"numpy.array",
"gna.env.env.ns"
] |
[((383, 422), 'gna.ui.basecmd.__init__', 'basecmd.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (399, 422), False, 'from gna.ui import basecmd, append_typed, qualified\n'), ((1174, 1192), 'numpy.array', 'np.array', (['dt.edges'], {}), '(dt.edges)\n', (1182, 1192), True, 'import numpy as np\n'), ((1577, 1591), 'numpy.array', 'np.array', (['dump'], {}), '(dump)\n', (1585, 1591), True, 'import numpy as np\n'), ((1620, 1669), 'numpy.savetxt', 'np.savetxt', (['self.opts.output', 'dump'], {'header': 'header'}), '(self.opts.output, dump, header=header)\n', (1630, 1669), True, 'import numpy as np\n'), ((631, 668), 'gna.env.PartNotFoundError', 'PartNotFoundError', (['"""observable"""', 'path'], {}), "('observable', path)\n", (648, 668), False, 'from gna.env import PartNotFoundError, env\n'), ((549, 559), 'gna.env.env.ns', 'env.ns', (['""""""'], {}), "('')\n", (555, 559), False, 'from gna.env import PartNotFoundError, env\n')]
|
import numpy as np
import pandas as pd
from tensorflow.keras import Input
from keras.layers.core import Dropout, Dense
from keras.layers import LSTM, Bidirectional, Concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
from tensorflow.keras.preprocessing.text import Tokenizer
from src.utils import *
from model import (do_padding,get_extra,preprocess_text,convert_cities,convert_countries)
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# Processing extra vars
keyword_bins = pd.read_csv("data/keyword_bins.csv", dtype={"keyword":str,
"keyword_bin":str})
locations = pd.read_csv("data/locations.csv")
def process_extra_vars(df, keyword_bins=keyword_bins, locations=locations):
df_plus = df.merge(keyword_bins, how="left", on = "keyword"). \
merge(locations, how="left", on="location")
df_plus.loc[df_plus["keyword_bin"].isna(), "keyword_bin"] = "missing"
df_plus = convert_cities(df_plus)
df_plus = convert_countries(df_plus)
df_plus = get_extra(df_plus)
dummies = pd.get_dummies(df_plus[["mention", "link", "hashtag",
"city", "country", "keyword_bin"]])
dummy_cols = dummies.columns
return dummies, dummy_cols
train_dummies, train_dummy_cols = process_extra_vars(train)
test_dummies, test_dummy_cols = process_extra_vars(test)
train_dummy_cols.difference(test_dummy_cols)
# Given that these countries don't exist in test, and we're building a new
# model, I'm going to drop these
train_dummies.drop(["country_south africa","country_spain"],axis=1,inplace=True)
# ensuring the same order
test_dummies = test_dummies[train_dummies.columns]
# Processing text
vocab_size = 8000
max_len = 25
"""
Preprocessing Text
"""
# Text
text_train = preprocess_text(train["text"])
text_test = preprocess_text(test["text"])
tokenizer = Tokenizer(num_words = vocab_size, oov_token = "<oov>")
tokenizer.fit_on_texts(text_train)
padded_train, _ = do_padding(text_train, tokenizer, max_len, "post", "post")
padded_test, _ = do_padding(text_test, tokenizer, max_len, "post", "post")
"""
Model
Concatenated tensorflow model
"""
dropout_rate = 0.5
input_1 = Input(shape=(max_len,))
input_2 = Input(shape=(len(train_dummies.columns),))
embedding_layer = Embedding(vocab_size, 36)(input_1)
lstm_1 = Bidirectional(LSTM(16, return_sequences=True, dropout=dropout_rate))(embedding_layer)
lstm_2 = Bidirectional(LSTM(16, return_sequences=True, dropout=dropout_rate))(lstm_1)
lstm_3 = Bidirectional(LSTM(16, dropout=dropout_rate))(lstm_2)
dense_1 = Dense(8, activation="relu")(lstm_3)
dense_2 = Dense(64, activation="relu")(input_2)
dropout_1 = Dropout(dropout_rate)(dense_2)
dense_3 = Dense(32, activation="relu")(dropout_1)
dropout_2 = Dropout(dropout_rate)(dense_3)
dense_4 = Dense(8, activation="relu")(dropout_2)
concat_layer = Concatenate()([dense_1, dense_4])
dropout_3 = Dropout(dropout_rate)(concat_layer)
dense_4 = Dense(20, activation="relu")(dropout_3)
dropout_6 = Dropout(dropout_rate)(dense_4)
output = Dense(1, activation='sigmoid')(dropout_6)
model = Model(inputs=[input_1, input_2], outputs=output)
model.compile(loss='binary_crossentropy',
# optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
optimizer="adam",
metrics=['accuracy'])
model.summary()
history = model.fit(x=[padded_train, train_dummies], y=train["target"],
epochs=5, verbose=1)
preds = model.predict([padded_test, test_dummies])
preds_target = np.where(preds>0.5, 1, 0).reshape(-1)
submission = pd.DataFrame({"id":test["id"],
"target":preds_target})
submission.to_csv("submission.csv", index=False)
|
[
"pandas.DataFrame",
"model.convert_cities",
"tensorflow.keras.preprocessing.text.Tokenizer",
"keras.layers.embeddings.Embedding",
"keras.layers.core.Dense",
"model.convert_countries",
"pandas.read_csv",
"pandas.get_dummies",
"tensorflow.keras.Input",
"keras.layers.LSTM",
"keras.models.Model",
"keras.layers.Concatenate",
"numpy.where",
"model.preprocess_text",
"keras.layers.core.Dropout",
"model.get_extra",
"model.do_padding"
] |
[((440, 469), 'pandas.read_csv', 'pd.read_csv', (['"""data/train.csv"""'], {}), "('data/train.csv')\n", (451, 469), True, 'import pandas as pd\n'), ((477, 505), 'pandas.read_csv', 'pd.read_csv', (['"""data/test.csv"""'], {}), "('data/test.csv')\n", (488, 505), True, 'import pandas as pd\n'), ((546, 631), 'pandas.read_csv', 'pd.read_csv', (['"""data/keyword_bins.csv"""'], {'dtype': "{'keyword': str, 'keyword_bin': str}"}), "('data/keyword_bins.csv', dtype={'keyword': str, 'keyword_bin': str}\n )\n", (557, 631), True, 'import pandas as pd\n'), ((697, 730), 'pandas.read_csv', 'pd.read_csv', (['"""data/locations.csv"""'], {}), "('data/locations.csv')\n", (708, 730), True, 'import pandas as pd\n'), ((1877, 1907), 'model.preprocess_text', 'preprocess_text', (["train['text']"], {}), "(train['text'])\n", (1892, 1907), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1920, 1949), 'model.preprocess_text', 'preprocess_text', (["test['text']"], {}), "(test['text'])\n", (1935, 1949), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1963, 2013), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'vocab_size', 'oov_token': '"""<oov>"""'}), "(num_words=vocab_size, oov_token='<oov>')\n", (1972, 2013), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2072, 2130), 'model.do_padding', 'do_padding', (['text_train', 'tokenizer', 'max_len', '"""post"""', '"""post"""'], {}), "(text_train, tokenizer, max_len, 'post', 'post')\n", (2082, 2130), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((2148, 2205), 'model.do_padding', 'do_padding', (['text_test', 'tokenizer', 'max_len', '"""post"""', '"""post"""'], {}), "(text_test, tokenizer, max_len, 'post', 'post')\n", (2158, 2205), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((2282, 2305), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (2287, 2305), False, 'from tensorflow.keras import Input\n'), ((3188, 3236), 'keras.models.Model', 'Model', ([], {'inputs': '[input_1, input_2]', 'outputs': 'output'}), '(inputs=[input_1, input_2], outputs=output)\n', (3193, 3236), False, 'from keras.models import Model\n'), ((3670, 3726), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': test['id'], 'target': preds_target}"], {}), "({'id': test['id'], 'target': preds_target})\n", (3682, 3726), True, 'import pandas as pd\n'), ((1026, 1049), 'model.convert_cities', 'convert_cities', (['df_plus'], {}), '(df_plus)\n', (1040, 1049), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1064, 1090), 'model.convert_countries', 'convert_countries', (['df_plus'], {}), '(df_plus)\n', (1081, 1090), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1105, 1123), 'model.get_extra', 'get_extra', (['df_plus'], {}), '(df_plus)\n', (1114, 1123), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1143, 1236), 'pandas.get_dummies', 'pd.get_dummies', (["df_plus[['mention', 'link', 'hashtag', 'city', 'country', 'keyword_bin']]"], {}), "(df_plus[['mention', 'link', 'hashtag', 'city', 'country',\n 'keyword_bin']])\n", (1157, 1236), True, 'import pandas as pd\n'), ((2378, 2403), 'keras.layers.embeddings.Embedding', 'Embedding', 
(['vocab_size', '(36)'], {}), '(vocab_size, 36)\n', (2387, 2403), False, 'from keras.layers.embeddings import Embedding\n'), ((2667, 2694), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (2672, 2694), False, 'from keras.layers.core import Dropout, Dense\n'), ((2714, 2742), 'keras.layers.core.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2719, 2742), False, 'from keras.layers.core import Dropout, Dense\n'), ((2764, 2785), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2771, 2785), False, 'from keras.layers.core import Dropout, Dense\n'), ((2805, 2833), 'keras.layers.core.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (2810, 2833), False, 'from keras.layers.core import Dropout, Dense\n'), ((2857, 2878), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2864, 2878), False, 'from keras.layers.core import Dropout, Dense\n'), ((2898, 2925), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (2903, 2925), False, 'from keras.layers.core import Dropout, Dense\n'), ((2953, 2966), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (2964, 2966), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((2999, 3020), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (3006, 3020), False, 'from keras.layers.core import Dropout, Dense\n'), ((3045, 3073), 'keras.layers.core.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (3050, 3073), False, 'from keras.layers.core import Dropout, Dense\n'), ((3097, 3118), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (3104, 3118), False, 'from keras.layers.core import Dropout, Dense\n'), ((3137, 3167), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3142, 3167), False, 'from keras.layers.core import Dropout, Dense\n'), ((2436, 2489), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'return_sequences': '(True)', 'dropout': 'dropout_rate'}), '(16, return_sequences=True, dropout=dropout_rate)\n', (2440, 2489), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((2531, 2584), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'return_sequences': '(True)', 'dropout': 'dropout_rate'}), '(16, return_sequences=True, dropout=dropout_rate)\n', (2535, 2584), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((2617, 2647), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'dropout': 'dropout_rate'}), '(16, dropout=dropout_rate)\n', (2621, 2647), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((3618, 3645), 'numpy.where', 'np.where', (['(preds > 0.5)', '(1)', '(0)'], {}), '(preds > 0.5, 1, 0)\n', (3626, 3645), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image
def save_image_array(img_array, fname, batch_size=100, class_num=10):
channels = img_array.shape[1]
resolution = img_array.shape[-1]
img_rows = 10
img_cols = batch_size//class_num
img = np.full([channels, resolution * img_rows, resolution * img_cols], 0.0)
for r in range(img_rows):
for c in range(img_cols):
img[:,
(resolution * r): (resolution * (r + 1)),
(resolution * (c % img_cols)): (resolution * ((c % img_cols) + 1))
] = img_array[c+(r*img_cols)]
img = (img * 255 + 0.5).clip(0, 255).astype(np.uint8)
if (img.shape[0] == 1):
img = img[0]
else:
img = np.rollaxis(img, 0, 3)
Image.fromarray(img).save(fname)
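# Minimal usage sketch (made-up sizes): a batch of 100 single-channel 8x8 "images"
# with values in [0, 1) is tiled into a 10 x 10 grid and written to disk.
if __name__ == '__main__':
    demo_batch = np.random.rand(100, 1, 8, 8)
    save_image_array(demo_batch, 'demo_grid.png', batch_size=100, class_num=10)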
|
[
"numpy.full",
"PIL.Image.fromarray",
"numpy.rollaxis"
] |
[((249, 319), 'numpy.full', 'np.full', (['[channels, resolution * img_rows, resolution * img_cols]', '(0.0)'], {}), '([channels, resolution * img_rows, resolution * img_cols], 0.0)\n', (256, 319), True, 'import numpy as np\n'), ((710, 732), 'numpy.rollaxis', 'np.rollaxis', (['img', '(0)', '(3)'], {}), '(img, 0, 3)\n', (721, 732), True, 'import numpy as np\n'), ((738, 758), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (753, 758), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python3
import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from pytorch_translate import rnn # noqa
logger = logging.getLogger(__name__)
def add_args(parser):
parser.add_argument(
"--char-rnn",
action="store_true",
default=False,
help="Assumes input is delimiter-separated character tokens "
"(configurable with --word-delimiter option). Encodes word "
"representations using bi-LSTM over char inputs.",
)
parser.add_argument(
"--char-rnn-units",
type=int,
default=256,
metavar="N",
help=("Number of units for Character LSTM."),
)
parser.add_argument(
"--char-rnn-layers",
type=int,
default=2,
metavar="N",
help=("Number of Character LSTM layers."),
)
parser.add_argument(
"--word-delimiter", type=str, default="@SPACE", help=("Token separating words.")
)
def set_arg_defaults(args):
if hasattr(args, "char_rnn_params"):
return args.char_rnn_params
args.char_rnn_params = None
char_rnn = getattr(args, "char_rnn", False)
if char_rnn:
args.char_rnn_params = {
"char_rnn_units": args.char_rnn_units,
"char_rnn_layers": args.char_rnn_layers,
"word_delimiter": args.word_delimiter,
}
class DelimiterSplit(nn.Module):
"""
nn.Module which takes batched sequence input where the tokens are assumed
to represent characters with a specified delimiter separating words, and
returns the same indices split into words.
Inputs:
src_tokens (batch_size, max_length): character indices
src_lengths (batch_size): lengths in total characters including delimiters
Outputs:
padded_char_inds (max_word_length, total_words)
        word_lengths (total_words,)
words_per_sent (batch_size,)
"""
def __init__(self, dictionary, word_delimiter="@SPACE"):
super().__init__()
self.dictionary = dictionary
self.padding_idx = dictionary.pad()
self.word_delim_index = self.dictionary.index(word_delimiter)
if self.word_delim_index == self.dictionary.unk():
raise RuntimeError(
f"Word delimiter {word_delimiter} not in source dictionary!"
)
def forward(self, src_tokens, src_lengths):
words = []
word_lengths = []
words_per_sent = []
src_tokens_numpy = src_tokens.cpu().numpy()
for sentence_array in src_tokens_numpy:
chars = []
words_in_sentence = 0
for idx in sentence_array:
if idx == self.dictionary.pad():
continue
elif idx == self.word_delim_index:
if len(chars) > 0:
word = torch.LongTensor(np.array(chars, dtype=np.int64))
words.append(word)
word_lengths.append(len(chars))
words_in_sentence += 1
chars = []
continue
else:
chars.append(idx)
if len(chars) > 0:
word = torch.LongTensor(np.array(chars, dtype=np.int64))
words.append(word)
word_lengths.append(len(chars))
words_in_sentence += 1
chars = []
words_per_sent.append(words_in_sentence)
max_word_length = max(word_lengths)
padded_char_inds = torch.LongTensor(max_word_length, len(words)).fill_(
self.padding_idx
)
for idx, length in enumerate(word_lengths):
padded_char_inds[:length, idx] = words[idx]
# copy to GPU if necessary
padded_char_inds = padded_char_inds.type_as(src_tokens)
word_lengths = torch.LongTensor(word_lengths).type_as(src_lengths)
words_per_sent = torch.LongTensor(words_per_sent).type_as(src_lengths)
return padded_char_inds, word_lengths, words_per_sent
class CharRNN(nn.Module):
"""
nn.Module to encode character sequences (with word delimiters) into
vectors representing each word with bi-directional RNNS.
"""
def __init__(
self,
dictionary,
embed_dim,
hidden_dim,
num_layers,
bidirectional=True,
word_delimiter="@SPACE",
):
super().__init__()
self.word_split = DelimiterSplit(dictionary, word_delimiter)
self.dictionary = dictionary
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_chars = rnn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embed_dim,
padding_idx=self.padding_idx,
freeze_embed=False,
)
self.bidirectional = bidirectional
if self.bidirectional:
assert hidden_dim % 2 == 0
self.lstm_encoder = rnn.LSTMSequenceEncoder.LSTM(
embed_dim,
hidden_dim // 2 if bidirectional else hidden_dim,
num_layers=num_layers,
bidirectional=bidirectional,
)
def forward(self, src_tokens, src_lengths):
padded_char_inds, word_lengths, words_per_sent = self.word_split(
src_tokens, src_lengths
)
# inputs to RNN must be in descending order of length
sorted_word_lengths, word_length_order = torch.sort(
word_lengths, descending=True
)
# shape: (max_word_len, total_words, embed_dim)
char_rnn_input = self.embed_chars(padded_char_inds[:, word_length_order])
packed_char_input = pack_padded_sequence(char_rnn_input, sorted_word_lengths)
_, (h_last, _) = self.lstm_encoder(packed_char_input)
# take last-layer output only (shape: (total_words, hidden_dim))
if self.bidirectional:
rnn_output = torch.cat((h_last[-2, :, :], h_last[-1, :, :]), dim=1)
else:
rnn_output = h_last[-1, :, :]
# "unsort"
_, inverted_word_length_order = torch.sort(word_length_order)
unsorted_rnn_output = rnn_output[inverted_word_length_order, :]
# (max_words_per_sent, batch_size, word_rep_dim)
output = torch.FloatTensor(
int(words_per_sent.max()), words_per_sent.shape[0], rnn_output.size(1)
).type_as(
rnn_output
)
sent_end_indices = words_per_sent.cumsum(0)
for sent_index in range(words_per_sent.shape[0]):
start = 0 if sent_index == 0 else sent_end_indices[sent_index - 1]
end = sent_end_indices[sent_index]
output[: words_per_sent[sent_index], sent_index, :] = unsorted_rnn_output[
start:end, :
]
return output, words_per_sent
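# Standalone sketch (made-up lengths) of the sort/unsort pattern used in CharRNN.forward:
# inputs are sorted by decreasing length before packing, and sorting the permutation
# indices again yields the inverse permutation that restores the original order.
if __name__ == '__main__':
    lengths = torch.LongTensor([3, 7, 2, 5])
    sorted_lengths, order = torch.sort(lengths, descending=True)
    _, inverse_order = torch.sort(order)
    assert torch.equal(sorted_lengths[inverse_order], lengths)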
|
[
"pytorch_translate.rnn.Embedding",
"torch.LongTensor",
"torch.cat",
"pytorch_translate.rnn.LSTMSequenceEncoder.LSTM",
"numpy.array",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.sort",
"logging.getLogger"
] |
[((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import logging\n'), ((4715, 4838), 'pytorch_translate.rnn.Embedding', 'rnn.Embedding', ([], {'num_embeddings': 'num_embeddings', 'embedding_dim': 'embed_dim', 'padding_idx': 'self.padding_idx', 'freeze_embed': '(False)'}), '(num_embeddings=num_embeddings, embedding_dim=embed_dim,\n padding_idx=self.padding_idx, freeze_embed=False)\n', (4728, 4838), False, 'from pytorch_translate import rnn\n'), ((5036, 5181), 'pytorch_translate.rnn.LSTMSequenceEncoder.LSTM', 'rnn.LSTMSequenceEncoder.LSTM', (['embed_dim', '(hidden_dim // 2 if bidirectional else hidden_dim)'], {'num_layers': 'num_layers', 'bidirectional': 'bidirectional'}), '(embed_dim, hidden_dim // 2 if bidirectional else\n hidden_dim, num_layers=num_layers, bidirectional=bidirectional)\n', (5064, 5181), False, 'from pytorch_translate import rnn\n'), ((5518, 5559), 'torch.sort', 'torch.sort', (['word_lengths'], {'descending': '(True)'}), '(word_lengths, descending=True)\n', (5528, 5559), False, 'import torch\n'), ((5750, 5807), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['char_rnn_input', 'sorted_word_lengths'], {}), '(char_rnn_input, sorted_word_lengths)\n', (5770, 5807), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((6172, 6201), 'torch.sort', 'torch.sort', (['word_length_order'], {}), '(word_length_order)\n', (6182, 6201), False, 'import torch\n'), ((6001, 6055), 'torch.cat', 'torch.cat', (['(h_last[-2, :, :], h_last[-1, :, :])'], {'dim': '(1)'}), '((h_last[-2, :, :], h_last[-1, :, :]), dim=1)\n', (6010, 6055), False, 'import torch\n'), ((3917, 3947), 'torch.LongTensor', 'torch.LongTensor', (['word_lengths'], {}), '(word_lengths)\n', (3933, 3947), False, 'import torch\n'), ((3994, 4026), 'torch.LongTensor', 'torch.LongTensor', (['words_per_sent'], {}), '(words_per_sent)\n', (4010, 4026), False, 'import torch\n'), ((3287, 3318), 'numpy.array', 'np.array', (['chars'], {'dtype': 'np.int64'}), '(chars, dtype=np.int64)\n', (3295, 3318), True, 'import numpy as np\n'), ((2909, 2940), 'numpy.array', 'np.array', (['chars'], {'dtype': 'np.int64'}), '(chars, dtype=np.int64)\n', (2917, 2940), True, 'import numpy as np\n')]
|
"""Multiview Random Gaussian Projection"""
# Authors: <NAME>
#
# License: MIT
import numpy as np
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.random_projection import GaussianRandomProjection
from .utils import check_n_views
class RandomGaussianProjection(TransformerMixin):
"""
Random Gaussian Projection method for constructing multiple views.
Each view is constructed using sklearn's random Gaussian projection.
Parameters
----------
n_views : int
Number of views to construct
n_components: int or 'auto', optional (default "auto")
Dimensionality of target projection space, see
sklearn.random_projection.GaussianRandomProjection for details.
eps: float, optional (default 0.1)
Parameter for controlling quality of embedding when
n_components = "auto" according to the Johnson-Lindenstrauss lemma
        A smaller value leads to a better embedding (see sklearn for details).
random_state : int or RandomState instance, optional (default None)
Controls the random sampling of Gaussian projections. Set for
reproducible results.
Attributes
----------
GaussianRandomProjections_ : list, length n_views
List of GaussianRandomProjection instances fitted to construct each
view.
Notes
-----
From an implementation perspective, this wraps GaussianRandomProjection
from `sklearn.random_projection <https://scikit-learn.org/stable/modules/
classes.html#module-sklearn.random_projection>`_ and creates multiple
projections.
Examples
--------
>>> from mvlearn.compose import RandomGaussianProjection
>>> import numpy as np
>>> X = np.random.rand(1000, 50)
>>> rgp = RandomGaussianProjection(n_views=3, n_components=10)
>>> Xs = rgp.fit_transform(X)
>>> print(len(Xs))
3
>>> print(Xs[0].shape)
(1000, 10)
"""
def __init__(self, n_views, n_components="auto", eps=0.1,
random_state=None):
check_n_views(n_views)
self.n_views = n_views
self.n_components = n_components
self.eps = eps
self.random_state = random_state
def fit(self, X, y=None):
r"""
Fit to the singleview data.
Parameters
----------
X : array of shape (n_samples, n_total_features)
Input dataset
y : Ignored
Returns
-------
self : object
The Transformer instance
"""
# set function level random state
np.random.seed(self.random_state)
self.GaussianRandomProjections_ = [
GaussianRandomProjection(
n_components=self.n_components, eps=self.eps).fit(X)
for _ in range(self.n_views)
]
return self
def transform(self, X):
r"""
Transforms the singleview dataset and into a multiview dataset.
Parameters
----------
X : array of shape (n_samples, n_features)
Input dataset
Returns
-------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_components)
"""
check_is_fitted(self)
Xs = [grp.transform(X) for grp in self.GaussianRandomProjections_]
return Xs
|
[
"numpy.random.seed",
"sklearn.random_projection.GaussianRandomProjection",
"sklearn.utils.validation.check_is_fitted"
] |
[((2611, 2644), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (2625, 2644), True, 'import numpy as np\n'), ((3287, 3308), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (3302, 3308), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((2701, 2771), 'sklearn.random_projection.GaussianRandomProjection', 'GaussianRandomProjection', ([], {'n_components': 'self.n_components', 'eps': 'self.eps'}), '(n_components=self.n_components, eps=self.eps)\n', (2725, 2771), False, 'from sklearn.random_projection import GaussianRandomProjection\n')]
|
import argparse
import numpy as np
import torch
from torch import optim
from torchvision import utils
from tqdm import tqdm
from model import Glow
from samplers import memory_mnist, memory_fashion
from utils import (
net_args,
calc_z_shapes,
calc_loss,
string_args,
create_deltas_sequence,
)
parser = net_args(argparse.ArgumentParser(description="Glow trainer"))
def train(args, model, optimizer):
if args.dataset == "mnist":
dataset_f = memory_mnist
elif args.dataset == "fashion_mnist":
dataset_f = memory_fashion
repr_args = string_args(args)
n_bins = 2.0 ** args.n_bits
z_sample = []
z_shapes = calc_z_shapes(args.n_channels, args.img_size, args.n_flow, args.n_block)
for z in z_shapes:
z_new = torch.randn(args.n_sample, *z) * args.temp
z_sample.append(z_new.to(device))
deltas = create_deltas_sequence(0.1, 0.005)
args.delta = deltas[0]
epoch_losses = []
f_train_loss = open(f"losses/seq_losses_train_{repr_args}_.txt", "w", buffering=1)
f_test_loss = open(f"losses/seq_losses_test_{repr_args}_.txt", "w", buffering=1)
with tqdm(range(200)) as pbar:
for i in pbar:
args.delta = deltas[i]
repr_args = string_args(args)
train_loader, val_loader, train_val_loader = dataset_f(
args.batch, args.img_size, args.n_channels
)
train_losses = []
for image in train_loader:
optimizer.zero_grad()
                image = image.to(device)
                noisy_image = image
                if args.tr_dq:
                    noisy_image += torch.rand_like(image) / n_bins
                noisy_image += torch.randn_like(image) * args.delta
log_p, logdet, _ = model(noisy_image)
logdet = logdet.mean()
loss, log_p, log_det = calc_loss(
log_p, logdet, args.img_size, n_bins, args.n_channels
)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
current_train_loss = np.mean(train_losses)
print(f"{current_train_loss},{args.delta},{i + 1}", file=f_train_loss)
with torch.no_grad():
utils.save_image(
model.reverse(z_sample).cpu().data,
f"sample/seq_sample_{repr_args}_{str(i + 1).zfill(6)}.png",
normalize=True,
nrow=10,
range=(-0.5, 0.5),
)
losses = []
logdets = []
logps = []
for image in val_loader:
image = image.to(device)
noisy_image = image
if args.te_dq:
noisy_image += torch.rand_like(image) / n_bins
if args.te_noise:
noisy_image += torch.randn_like(image) * args.delta
log_p, logdet, _ = model(noisy_image)
logdet = logdet.mean()
loss, log_p, log_det = calc_loss(
log_p, logdet, args.img_size, n_bins, args.n_channels
)
losses.append(loss.item())
logdets.append(log_det.item())
logps.append(log_p.item())
pbar.set_description(
f"Loss: {np.mean(losses):.5f}; logP: {np.mean(logps):.5f}; logdet: {np.mean(logdets):.5f}; delta: {args.delta:.5f}"
)
current_loss = np.mean(losses)
print(f"{current_loss},{args.delta},{i + 1}", file=f_test_loss)
epoch_losses.append(current_loss)
if (i + 1) % 10 == 0:
torch.save(
model.state_dict(),
f"checkpoint/seq_model_{repr_args}_{i + 1}_.pt",
)
f_ll = open(f"ll/seq_ll_{repr_args}_{i + 1}.txt", "w")
train_loader, val_loader, train_val_loader = dataset_f(
args.batch, args.img_size, args.n_channels
)
train_val_loader = iter(train_val_loader)
for image_val in val_loader:
image = image_val
                    image = image.to(device)
                    noisy_image = image
                    if args.te_dq:
                        noisy_image += torch.rand_like(image) / n_bins
                    if args.te_noise:
                        noisy_image += torch.randn_like(image) * args.delta
log_p_val, logdet_val, _ = model(noisy_image)
image = next(train_val_loader)
                    image = image.to(device)
                    noisy_image = image
                    if args.te_dq:
                        noisy_image += torch.rand_like(image) / n_bins
                    if args.te_noise:
                        noisy_image += torch.randn_like(image) * args.delta
log_p_train_val, logdet_train_val, _ = model(noisy_image)
for (
lpv,
ldv,
lptv,
ldtv,
) in zip(log_p_val, logdet_val, log_p_train_val, logdet_train_val):
print(
args.delta,
lpv.item(),
ldv.item(),
lptv.item(),
ldtv.item(),
file=f_ll,
)
f_ll.close()
f_train_loss.close()
f_test_loss.close()
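def perturb_input(image, n_bins, delta, dequantize=True):
    """Illustrative helper (not used above): the same input perturbation the training
    loop applies, i.e. uniform dequantization noise of width 1/n_bins plus Gaussian
    noise scaled by the current delta."""
    noisy = image
    if dequantize:
        noisy = noisy + torch.rand_like(image) / n_bins
    return noisy + torch.randn_like(image) * delta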
if __name__ == "__main__":
args = parser.parse_args()
print(string_args(args))
device = args.device
model = Glow(
args.n_channels,
args.n_flow,
args.n_block,
affine=args.affine,
conv_lu=not args.no_lu,
)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
train(args, model, optimizer)
|
[
"argparse.ArgumentParser",
"torch.randn_like",
"utils.create_deltas_sequence",
"utils.calc_z_shapes",
"model.Glow",
"torch.randn",
"torch.rand_like",
"numpy.mean",
"utils.calc_loss",
"utils.string_args",
"torch.no_grad"
] |
[((333, 384), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Glow trainer"""'}), "(description='Glow trainer')\n", (356, 384), False, 'import argparse\n'), ((582, 599), 'utils.string_args', 'string_args', (['args'], {}), '(args)\n', (593, 599), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((666, 738), 'utils.calc_z_shapes', 'calc_z_shapes', (['args.n_channels', 'args.img_size', 'args.n_flow', 'args.n_block'], {}), '(args.n_channels, args.img_size, args.n_flow, args.n_block)\n', (679, 738), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((877, 911), 'utils.create_deltas_sequence', 'create_deltas_sequence', (['(0.1)', '(0.005)'], {}), '(0.1, 0.005)\n', (899, 911), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((5772, 5868), 'model.Glow', 'Glow', (['args.n_channels', 'args.n_flow', 'args.n_block'], {'affine': 'args.affine', 'conv_lu': '(not args.no_lu)'}), '(args.n_channels, args.n_flow, args.n_block, affine=args.affine,\n conv_lu=not args.no_lu)\n', (5776, 5868), False, 'from model import Glow\n'), ((5716, 5733), 'utils.string_args', 'string_args', (['args'], {}), '(args)\n', (5727, 5733), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((778, 808), 'torch.randn', 'torch.randn', (['args.n_sample', '*z'], {}), '(args.n_sample, *z)\n', (789, 808), False, 'import torch\n'), ((1252, 1269), 'utils.string_args', 'string_args', (['args'], {}), '(args)\n', (1263, 1269), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((2107, 2128), 'numpy.mean', 'np.mean', (['train_losses'], {}), '(train_losses)\n', (2114, 2128), True, 'import numpy as np\n'), ((1857, 1921), 'utils.calc_loss', 'calc_loss', (['log_p', 'logdet', 'args.img_size', 'n_bins', 'args.n_channels'], {}), '(log_p, logdet, args.img_size, n_bins, args.n_channels)\n', (1866, 1921), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((2229, 2244), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2242, 2244), False, 'import torch\n'), ((3591, 3606), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3598, 3606), True, 'import numpy as np\n'), ((1688, 1711), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (1704, 1711), False, 'import torch\n'), ((3112, 3176), 'utils.calc_loss', 'calc_loss', (['log_p', 'logdet', 'args.img_size', 'n_bins', 'args.n_channels'], {}), '(log_p, logdet, args.img_size, n_bins, args.n_channels)\n', (3121, 3176), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((1625, 1647), 'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (1640, 1647), False, 'import torch\n'), ((2822, 2844), 'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (2837, 2844), False, 'import torch\n'), ((2931, 2954), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (2947, 2954), False, 'import torch\n'), ((3435, 3450), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3442, 3450), True, 'import numpy as np\n'), ((3464, 3478), 'numpy.mean', 'np.mean', (['logps'], {}), '(logps)\n', (3471, 3478), True, 'import numpy as np\n'), ((3494, 3510), 'numpy.mean', 'np.mean', (['logdets'], {}), '(logdets)\n', (3501, 3510), True, 'import numpy as np\n'), ((4431, 4453), 
'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (4446, 4453), False, 'import torch\n'), ((4540, 4563), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (4556, 4563), False, 'import torch\n'), ((4814, 4836), 'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (4829, 4836), False, 'import torch\n'), ((4923, 4946), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (4939, 4946), False, 'import torch\n')]
|
#!/usr/bin/env python3
import numpy as np
####################
# generate_stimuli #
####################
def generate_stimuli(arg, env):
"""
Function to generate the stimuli
Arguments
---------
arg: Argument for which to generate stimuli (either Argument or ArrayArgument)
env: Dict mapping the variable (SweepVariable or DynamicVariable) names to their value.
"""
# name = arg.name
# if name == "srcA":
# # generate and return stimuli for srcA
# if name == "srcB":
# # generate and return stimuli for srcB
# ...
##################
# compute_result #
##################
def compute_result(result_parameter, inputs, env, fix_point):
"""
    Function to generate the expected result of the testcase.
Arguments
---------
result_parameter: Either OutputArgument or ReturnValue (see pulp_dsp_test.py)
inputs: Dict mapping name to the Argument, with arg.value, arg.ctype (and arg.length)
env: Dict mapping the variable (SweepVariable or DynamicVariable) names to their value.
fix_point: None (if no fixpoint is used) or decimal point
"""
ctype = inputs['value'].ctype;
if ctype == 'int32_t':
my_type = np.int32
my_bits=32
fracBits=31
elif ctype == 'int16_t':
my_type = np.int16
my_bits=16
fracBits=15
    elif ctype == 'int8_t':
        my_type = np.int8
        my_bits = 8
        fracBits = 7
elif ctype == 'float' or ctype == 'float32_t':
my_type = np.float32
my_bits = 0;
else:
raise RuntimeError("Unrecognized result type: %s" % ctype)
if my_bits != 0:
input_number = inputs['value'].value
in_rad = 2*np.pi*float(input_number)/2**(my_bits-1)
return q_sat(int(2**(my_bits-1)*np.sin(in_rad)), my_bits)
    else:
return np.sin(inputs['value'].value).astype(my_type)
######################
# Fixpoint Functions #
######################
def q_sat(x, bits=32):
if x > 2**(bits-1) - 1:
return x - 2**bits
elif x < -2**(bits-1):
return x + 2**bits
else:
return x
def q_add(a, b):
return q_sat(a + b)
def q_sub(a, b):
return q_sat(a - b)
def q_mul(a, b, p):
return q_roundnorm(a * b, p)
def q_roundnorm(a, p):
rounding = 1 << (p - 1)
return q_sat((a + rounding) >> p)
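# Worked example (illustrative constants): multiplying 0.5 by 0.5 in Q1.15 fixed point
# should give 0.25, i.e. 16384 * 16384 rounded and normalised back to 8192.
if __name__ == '__main__':
    a = int(0.5 * 2 ** 15)        # 0.5 in Q1.15 -> 16384
    b = int(0.5 * 2 ** 15)
    prod = q_mul(a, b, 15)
    print(prod, prod / 2 ** 15)   # 8192 0.25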
|
[
"numpy.sin"
] |
[((1759, 1773), 'numpy.sin', 'np.sin', (['in_rad'], {}), '(in_rad)\n', (1765, 1773), True, 'import numpy as np\n'), ((1827, 1856), 'numpy.sin', 'np.sin', (["inputs['value'].value"], {}), "(inputs['value'].value)\n", (1833, 1856), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 22:33:41 2018
@author: Yulab
"""
import tensorflow as tf
import numpy as np
import math
#%%
def conv(layer_name, x, out_channels, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=True, seed=1):
'''Convolution op wrapper, use RELU activation after convolution
Args:
layer_name: e.g. conv1, pool1...
x: input tensor, [batch_size, height, width, channels]
        out_channels: number of output channels (or convolutional kernels)
kernel_size: the size of convolutional kernel, VGG paper used: [3,3]
stride: A list of ints. 1-D of length 4. VGG paper used: [1, 1, 1, 1]
        is_pretrain: if loading pretrained parameters, freeze all conv layers.
            Depending on the situation, you can set only part of the conv layers to be frozen;
            the parameters of frozen layers will not change during training.
Returns:
4D tensor
'''
in_channels = x.get_shape()[-1]
with tf.variable_scope(layer_name):
w = tf.get_variable(name='weights',
trainable=is_pretrain,
shape=[kernel_size[0], kernel_size[1], in_channels, out_channels],
initializer=tf.contrib.layers.xavier_initializer(seed=seed)) # default is uniform distribution initialization
b = tf.get_variable(name='biases',
trainable=is_pretrain,
shape=[out_channels],
initializer=tf.constant_initializer(0.0))
x = tf.nn.conv2d(x, w, stride, padding='SAME', name='conv')
x = tf.nn.bias_add(x, b, name='bias_add')
x = tf.nn.relu(x, name='relu')
return x
#%%
def pool(layer_name, x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True):
'''Pooling op
Args:
x: input tensor
kernel: pooling kernel, VGG paper used [1,2,2,1], the size of kernel is 2X2
stride: stride size, VGG paper used [1,2,2,1]
padding:
is_max_pool: boolen
if True: use max pooling
else: use avg pooling
'''
if is_max_pool:
x = tf.nn.max_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)
else:
x = tf.nn.avg_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)
return x
#%%
def FC_layer(layer_name, x, out_nodes, keep_prob=0.5, seed=1):
'''Wrapper for fully connected layers with RELU activation as default
Args:
layer_name: e.g. 'FC1', 'FC2'
x: input feature map
out_nodes: number of neurons for current FC layer
'''
shape = x.get_shape()
if len(shape) == 4:
size = shape[1].value * shape[2].value * shape[3].value
else:
size = shape[-1].value
with tf.variable_scope(layer_name):
w = tf.get_variable('weights',
shape=[size, out_nodes],
initializer=tf.contrib.layers.xavier_initializer(seed=seed))
b = tf.get_variable('biases',
shape=[out_nodes],
initializer=tf.constant_initializer(0.0))
flat_x = tf.reshape(x, [-1, size]) # flatten into 1D
flat_x = tf.nn.dropout(flat_x, keep_prob, seed=seed)
x = tf.nn.bias_add(tf.matmul(flat_x, w), b)
x = tf.nn.relu(x)
return x
#%%
def loss(logits, labels):
'''Compute loss
Args:
logits: logits tensor, [batch_size, n_classes]
labels: one-hot labels
'''
with tf.name_scope('loss') as scope:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels,name='cross-entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope+'/loss', loss)
return loss
#%%
def accuracy(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, one-hot - [batch_size, NUM_CLASSES].
"""
with tf.name_scope('accuracy') as scope:
correct = tf.equal(tf.arg_max(logits, 1), tf.argmax(labels, 1))
correct = tf.cast(correct, tf.float32)
accuracy = tf.reduce_mean(correct)*100.0
tf.summary.scalar(scope+'/accuracy', accuracy)
return accuracy
#%%
def num_correct_prediction(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Return:
the number of correct predictions
"""
correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
correct = tf.cast(correct, tf.int32)
n_correct = tf.reduce_sum(correct)
return n_correct
#%%
def optimize(loss, learning_rate, global_step):
'''optimization, use Gradient Descent as default
'''
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
#%%
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
    X -- input data, of shape (number of examples, input size)
    Y -- true "label" vector (one-hot), of shape (number of examples, number of classes)
mini_batch_size - size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your random minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
from coursera.org
"""
m = X.shape[0] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :]
shuffled_Y = Y[permutation, :]
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
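# Usage sketch (made-up sizes): split 10 examples with 3 features and one-hot labels
# into shuffled mini-batches of 4, 4 and 2 examples.
if __name__ == '__main__':
    X_demo = np.random.rand(10, 3)
    Y_demo = np.eye(2)[np.random.randint(0, 2, 10)]
    batches = random_mini_batches(X_demo, Y_demo, mini_batch_size=4, seed=0)
    print([mb_x.shape for mb_x, _ in batches])   # [(4, 3), (4, 3), (2, 3)]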
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reduce_sum",
"numpy.random.seed",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.nn.relu",
"tensorflow.variable_scope",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.cast",
"tensorflow.name_scope",
"tensorflow.nn.bias_add",
"tensorflow.summary.scalar",
"tensorflow.reduce_mean",
"tensorflow.arg_max",
"tensorflow.nn.max_pool",
"numpy.random.permutation",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.argmax",
"math.floor",
"tensorflow.nn.avg_pool",
"tensorflow.nn.dropout"
] |
[((4569, 4595), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.int32'], {}), '(correct, tf.int32)\n', (4576, 4595), True, 'import tensorflow as tf\n'), ((4610, 4632), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['correct'], {}), '(correct)\n', (4623, 4632), True, 'import tensorflow as tf\n'), ((5753, 5773), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5767, 5773), True, 'import numpy as np\n'), ((6024, 6055), 'math.floor', 'math.floor', (['(m / mini_batch_size)'], {}), '(m / mini_batch_size)\n', (6034, 6055), False, 'import math\n'), ((989, 1018), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), '(layer_name)\n', (1006, 1018), True, 'import tensorflow as tf\n'), ((1574, 1629), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'w', 'stride'], {'padding': '"""SAME"""', 'name': '"""conv"""'}), "(x, w, stride, padding='SAME', name='conv')\n", (1586, 1629), True, 'import tensorflow as tf\n'), ((1642, 1679), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {'name': '"""bias_add"""'}), "(x, b, name='bias_add')\n", (1656, 1679), True, 'import tensorflow as tf\n'), ((1692, 1718), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': '"""relu"""'}), "(x, name='relu')\n", (1702, 1718), True, 'import tensorflow as tf\n'), ((2182, 2256), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x', 'kernel'], {'strides': 'stride', 'padding': '"""SAME"""', 'name': 'layer_name'}), "(x, kernel, strides=stride, padding='SAME', name=layer_name)\n", (2196, 2256), True, 'import tensorflow as tf\n'), ((2279, 2353), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['x', 'kernel'], {'strides': 'stride', 'padding': '"""SAME"""', 'name': 'layer_name'}), "(x, kernel, strides=stride, padding='SAME', name=layer_name)\n", (2293, 2353), True, 'import tensorflow as tf\n'), ((2817, 2846), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), '(layer_name)\n', (2834, 2846), True, 'import tensorflow as tf\n'), ((3201, 3226), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, size]'], {}), '(x, [-1, size])\n', (3211, 3226), True, 'import tensorflow as tf\n'), ((3262, 3305), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['flat_x', 'keep_prob'], {'seed': 'seed'}), '(flat_x, keep_prob, seed=seed)\n', (3275, 3305), True, 'import tensorflow as tf\n'), ((3370, 3383), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3380, 3383), True, 'import tensorflow as tf\n'), ((3565, 3586), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (3578, 3586), True, 'import tensorflow as tf\n'), ((3621, 3719), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'labels', 'name': '"""cross-entropy"""'}), "(logits=logits, labels=labels,\n name='cross-entropy')\n", (3663, 3719), True, 'import tensorflow as tf\n'), ((3730, 3772), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""loss"""'}), "(cross_entropy, name='loss')\n", (3744, 3772), True, 'import tensorflow as tf\n'), ((3781, 3821), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(scope + '/loss')", 'loss'], {}), "(scope + '/loss', loss)\n", (3798, 3821), True, 'import tensorflow as tf\n'), ((4051, 4076), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (4064, 4076), True, 'import tensorflow as tf\n'), ((4173, 4201), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (4180, 4201), True, 'import tensorflow as tf\n'), ((4255, 
4303), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(scope + '/accuracy')", 'accuracy'], {}), "(scope + '/accuracy', accuracy)\n", (4272, 4303), True, 'import tensorflow as tf\n'), ((4511, 4532), 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(1)'], {}), '(logits, 1)\n', (4521, 4532), True, 'import tensorflow as tf\n'), ((4534, 4555), 'tensorflow.arg_max', 'tf.arg_max', (['labels', '(1)'], {}), '(labels, 1)\n', (4544, 4555), True, 'import tensorflow as tf\n'), ((4775, 4801), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (4788, 4801), True, 'import tensorflow as tf\n'), ((4823, 4885), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4856, 4885), True, 'import tensorflow as tf\n'), ((5826, 5850), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (5847, 5850), True, 'import numpy as np\n'), ((3333, 3353), 'tensorflow.matmul', 'tf.matmul', (['flat_x', 'w'], {}), '(flat_x, w)\n', (3342, 3353), True, 'import tensorflow as tf\n'), ((4112, 4133), 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(1)'], {}), '(logits, 1)\n', (4122, 4133), True, 'import tensorflow as tf\n'), ((4135, 4155), 'tensorflow.argmax', 'tf.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (4144, 4155), True, 'import tensorflow as tf\n'), ((4219, 4242), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['correct'], {}), '(correct)\n', (4233, 4242), True, 'import tensorflow as tf\n'), ((1250, 1297), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (1286, 1297), True, 'import tensorflow as tf\n'), ((1532, 1560), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1555, 1560), True, 'import tensorflow as tf\n'), ((2980, 3027), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (3016, 3027), True, 'import tensorflow as tf\n'), ((3154, 3182), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3177, 3182), True, 'import tensorflow as tf\n')]
|
from hoomd_periodic import simulate
from md_nnps_periodic import MDNNPSSolverPeriodic
import numpy as np
import matplotlib.pyplot as plt
def run_simulations(num_particles, tf, dt):
# run hoomd simulation
simulate(num_particles, dt, tf, log=True)
# run compyle simulation
solver = MDNNPSSolverPeriodic(num_particles)
solver.solve(tf, dt, log_output=True)
solver.write_log('compyle-output.log')
def plot_props(hoomd_fname, comp_fname):
data_hoomd = np.genfromtxt(fname=hoomd_fname, skip_header=True)
data_compyle = np.genfromtxt(fname=comp_fname)
plt.plot(data_hoomd[:,0], data_hoomd[:,1], label="HooMD")
plt.plot(data_hoomd[:,0], data_compyle[:,1], label="Compyle")
plt.xlabel("Timestep")
plt.ylabel("Potential Energy")
plt.legend()
plt.savefig("hoomd_pe.png", dpi=300)
plt.clf()
plt.plot(data_hoomd[:,0], data_hoomd[:,2], label="HooMD")
plt.plot(data_hoomd[:,0], data_compyle[:,2], label="Compyle")
plt.xlabel("Timestep")
plt.ylabel("Kinetic Energy")
plt.legend()
plt.savefig("hoomd_ke.png", dpi=300)
if __name__ == '__main__':
run_simulations(2000, 200, 0.02)
plot_props('hoomd-output.log', 'compyle-output.log')
|
[
"hoomd_periodic.simulate",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.genfromtxt",
"matplotlib.pyplot.ylabel",
"md_nnps_periodic.MDNNPSSolverPeriodic",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] |
[((213, 254), 'hoomd_periodic.simulate', 'simulate', (['num_particles', 'dt', 'tf'], {'log': '(True)'}), '(num_particles, dt, tf, log=True)\n', (221, 254), False, 'from hoomd_periodic import simulate\n'), ((298, 333), 'md_nnps_periodic.MDNNPSSolverPeriodic', 'MDNNPSSolverPeriodic', (['num_particles'], {}), '(num_particles)\n', (318, 333), False, 'from md_nnps_periodic import MDNNPSSolverPeriodic\n'), ((479, 529), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'hoomd_fname', 'skip_header': '(True)'}), '(fname=hoomd_fname, skip_header=True)\n', (492, 529), True, 'import numpy as np\n'), ((549, 580), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'comp_fname'}), '(fname=comp_fname)\n', (562, 580), True, 'import numpy as np\n'), ((587, 646), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_hoomd[:, 1]'], {'label': '"""HooMD"""'}), "(data_hoomd[:, 0], data_hoomd[:, 1], label='HooMD')\n", (595, 646), True, 'import matplotlib.pyplot as plt\n'), ((649, 712), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_compyle[:, 1]'], {'label': '"""Compyle"""'}), "(data_hoomd[:, 0], data_compyle[:, 1], label='Compyle')\n", (657, 712), True, 'import matplotlib.pyplot as plt\n'), ((715, 737), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timestep"""'], {}), "('Timestep')\n", (725, 737), True, 'import matplotlib.pyplot as plt\n'), ((742, 772), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Potential Energy"""'], {}), "('Potential Energy')\n", (752, 772), True, 'import matplotlib.pyplot as plt\n'), ((777, 789), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (787, 789), True, 'import matplotlib.pyplot as plt\n'), ((794, 830), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hoomd_pe.png"""'], {'dpi': '(300)'}), "('hoomd_pe.png', dpi=300)\n", (805, 830), True, 'import matplotlib.pyplot as plt\n'), ((836, 845), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (843, 845), True, 'import matplotlib.pyplot as plt\n'), ((851, 910), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_hoomd[:, 2]'], {'label': '"""HooMD"""'}), "(data_hoomd[:, 0], data_hoomd[:, 2], label='HooMD')\n", (859, 910), True, 'import matplotlib.pyplot as plt\n'), ((913, 976), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_compyle[:, 2]'], {'label': '"""Compyle"""'}), "(data_hoomd[:, 0], data_compyle[:, 2], label='Compyle')\n", (921, 976), True, 'import matplotlib.pyplot as plt\n'), ((979, 1001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timestep"""'], {}), "('Timestep')\n", (989, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Kinetic Energy"""'], {}), "('Kinetic Energy')\n", (1016, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1051), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1049, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1092), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hoomd_ke.png"""'], {'dpi': '(300)'}), "('hoomd_ke.png', dpi=300)\n", (1067, 1092), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generalized doubles factorization
This will go into OpenFermion. Putting here until I write something up
or decide to just publish the code.
"""
from typing import List, Tuple
from itertools import product, groupby
import numpy as np
import scipy as sp
from scipy.linalg import block_diag, sqrtm, polar, schur
import openfermion as of
from fqe.algorithm.brillouin_calculator import get_fermion_op
def doubles_factorization_svd(generator_tensor: np.ndarray, eig_cutoff=None):
"""
Given an antisymmetric antihermitian tensor perform a double factorized
low-rank decomposition.
Given:
A = sum_{pqrs}A^{pq}_{sr}p^ q^ r s
with A^{pq}_{sr} = -A^{qp}_{sr} = -A^{pq}_{rs} = -A^{sr}_{pq}
    Rewrite A as a sum of squares such that
A = sum_{l}Y_{l}^2
where Y_{l} are normal operator one-body operators such that the spectral
theorem holds and we can use the double factorization to implement an
approximate evolution.
"""
if not np.allclose(generator_tensor.imag, 0):
raise TypeError("generator_tensor must be a real matrix")
if eig_cutoff is not None:
if eig_cutoff % 2 != 0:
raise ValueError("eig_cutoff must be an even number")
nso = generator_tensor.shape[0]
generator_tensor = generator_tensor.real
generator_mat = np.zeros((nso**2, nso**2))
for row_gem, col_gem in product(range(nso**2), repeat=2):
p, s = row_gem // nso, row_gem % nso
q, r = col_gem // nso, col_gem % nso
generator_mat[row_gem, col_gem] = generator_tensor[p, q, r, s]
test_generator_mat = np.reshape(
np.transpose(generator_tensor, [0, 3, 1, 2]),
        (nso**2, nso**2)).astype(float)
assert np.allclose(test_generator_mat, generator_mat)
if not np.allclose(generator_mat, generator_mat.T):
raise ValueError("generator tensor does not correspond to four-fold"
" antisymmetry")
one_body_residual = -np.einsum('pqrq->pr', generator_tensor)
u, sigma, vh = np.linalg.svd(generator_mat)
ul = []
ul_ops = []
vl = []
vl_ops = []
if eig_cutoff is None:
max_sigma = len(sigma)
else:
max_sigma = eig_cutoff
for ll in range(max_sigma):
ul.append(np.sqrt(sigma[ll]) * u[:, ll].reshape((nso, nso)))
ul_ops.append(
get_fermion_op(np.sqrt(sigma[ll]) * u[:, ll].reshape((nso, nso))))
vl.append(np.sqrt(sigma[ll]) * vh[ll, :].reshape((nso, nso)))
vl_ops.append(
get_fermion_op(np.sqrt(sigma[ll]) * vh[ll, :].reshape((nso, nso))))
S = ul_ops[ll] + vl_ops[ll]
D = ul_ops[ll] - vl_ops[ll]
op1 = S + 1j * of.hermitian_conjugated(S)
op2 = S - 1j * of.hermitian_conjugated(S)
op3 = D + 1j * of.hermitian_conjugated(D)
op4 = D - 1j * of.hermitian_conjugated(D)
assert np.isclose(
of.normal_ordered(of.commutator(
op1, of.hermitian_conjugated(op1))).induced_norm(), 0)
assert np.isclose(
of.normal_ordered(of.commutator(
op2, of.hermitian_conjugated(op2))).induced_norm(), 0)
assert np.isclose(
of.normal_ordered(of.commutator(
op3, of.hermitian_conjugated(op3))).induced_norm(), 0)
assert np.isclose(
of.normal_ordered(of.commutator(
op4, of.hermitian_conjugated(op4))).induced_norm(), 0)
one_body_op = of.FermionOperator()
for p, q in product(range(nso), repeat=2):
tfop = ((p, 1), (q, 0))
one_body_op += of.FermionOperator(tfop,
coefficient=one_body_residual[p, q])
return ul, vl, one_body_residual, ul_ops, vl_ops, one_body_op
def takagi(N, tol=1e-13, rounding=13):
r"""Autonne-Takagi decomposition of a complex symmetric (not Hermitian!) matrix.
Note that singular values of N are considered equal if they are equal after np.round(values, tol).
Taken from Strawberry Fields
[https://github.com/XanaduAI/strawberryfields/blob/master/strawberryfields/decompositions.py#L28]
Args:
N (array[complex]): square, symmetric matrix N
rounding (int): the number of decimal places to use when rounding the singular values of N
tol (float): the tolerance used when checking if the input matrix is symmetric: :math:`|N-N^T| <` tol
Returns:
tuple[array, array]: (rl, U), where rl are the (rounded) singular values,
and U is the Takagi unitary, such that :math:`N = U \diag(rl) U^T`.
"""
(n, m) = N.shape
if n != m:
raise ValueError("The input matrix must be square")
if np.linalg.norm(N - np.transpose(N)) >= tol:
raise ValueError("The input matrix is not symmetric")
N = np.real_if_close(N)
if np.allclose(N, 0):
return np.zeros(n), np.eye(n)
if np.isrealobj(N):
# If the matrix N is real one can be more clever and use its eigendecomposition
l, U = np.linalg.eigh(N)
vals = np.abs(l) # These are the Takagi eigenvalues
phases = np.sqrt(np.complex128([1 if i > 0 else -1 for i in l]))
Uc = U @ np.diag(phases) # One needs to readjust the phases
list_vals = [(vals[i], i) for i in range(len(vals))]
list_vals.sort(reverse=True)
sorted_l, permutation = zip(*list_vals)
permutation = np.array(permutation)
Uc = Uc[:, permutation]
# And also rearrange the unitary and values so that they are decreasingly ordered
return np.array(sorted_l), Uc
v, l, ws = np.linalg.svd(N)
w = np.transpose(np.conjugate(ws))
rl = np.round(l, rounding)
    # Generate list with degeneracies
result = []
for k, g in groupby(rl):
result.append(list(g))
    # Generate lists containing the columns that correspond to degeneracies
kk = 0
for k in result:
for ind, j in enumerate(k): # pylint: disable=unused-variable
k[ind] = kk
kk = kk + 1
# Generate the lists with the degenerate column subspaces
vas = []
was = []
for i in result:
vas.append(v[:, i])
was.append(w[:, i])
# Generate the matrices qs of the degenerate subspaces
qs = []
for i in range(len(result)):
qs.append(sqrtm(np.transpose(vas[i]) @ was[i]))
# Construct the Takagi unitary
qb = block_diag(*qs)
U = v @ np.conj(qb)
return rl, U
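# Illustrative sanity check (not part of the original code): for a complex
# symmetric matrix N, `rl, U = takagi(N)` should satisfy
# np.allclose(N, U @ np.diag(rl) @ U.T).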
def doubles_factorization_takagi(generator_tensor: np.ndarray, eig_cutoff=None):
"""
    Given an antisymmetric, antihermitian tensor, perform a double factorized
low-rank decomposition. This uses the Takagi decomposition of a complex
    symmetric matrix. This reduces the number of tensors from 4 to 2 when
    compared against the SVD approach.
Given:
A = sum_{pqrs}A^{pq}_{sr}p^ q^ r s
with A^{pq}_{sr} = -A^{qp}_{sr} = -A^{pq}_{rs} = -A^{sr}_{pq}
    Rewrite A as a sum of squares such that
A = sum_{l}Y_{l}^2
    where the Y_{l} are normal one-body operators such that the spectral
theorem holds and we can use the double factorization to implement an
approximate evolution.
"""
if eig_cutoff is not None:
if eig_cutoff % 2 != 0:
raise ValueError("eig_cutoff must be an even number")
nso = generator_tensor.shape[0]
generator_mat = np.reshape(np.transpose(generator_tensor, [0, 3, 1, 2]),
(nso**2, nso**2))
assert np.allclose(generator_mat, generator_mat.T)
one_body_residual = -np.einsum('pqrq->pr', generator_tensor)
# complex symmetric matrices give Q S Q^T with S diagonal and real
# and Q is unitary.
T, Z = takagi(generator_mat)
nonzero_idx = np.where(T > 1.0E-12)[0]
if eig_cutoff is None:
max_sigma = len(nonzero_idx)
else:
max_sigma = eig_cutoff
Zl = []
Zlp = []
Zlm = []
for idx in nonzero_idx[:max_sigma]:
Zl.append(np.sqrt(T[idx]) * Z[:, idx].reshape((nso, nso)))
Zlp.append(Zl[-1] + 1j * Zl[-1].conj().T)
Zlm.append(Zl[-1] - 1j * Zl[-1].conj().T)
return Zlp, Zlm, Zl, one_body_residual
|
[
"numpy.abs",
"numpy.allclose",
"numpy.einsum",
"numpy.linalg.svd",
"numpy.diag",
"numpy.conjugate",
"numpy.round",
"openfermion.hermitian_conjugated",
"numpy.transpose",
"numpy.isrealobj",
"openfermion.FermionOperator",
"numpy.conj",
"numpy.complex128",
"scipy.linalg.block_diag",
"numpy.real_if_close",
"itertools.groupby",
"numpy.zeros",
"numpy.linalg.eigh",
"numpy.where",
"numpy.array",
"numpy.eye",
"numpy.sqrt"
] |
[((1906, 1936), 'numpy.zeros', 'np.zeros', (['(nso ** 2, nso ** 2)'], {}), '((nso ** 2, nso ** 2))\n', (1914, 1936), True, 'import numpy as np\n'), ((2302, 2348), 'numpy.allclose', 'np.allclose', (['test_generator_mat', 'generator_mat'], {}), '(test_generator_mat, generator_mat)\n', (2313, 2348), True, 'import numpy as np\n'), ((2610, 2638), 'numpy.linalg.svd', 'np.linalg.svd', (['generator_mat'], {}), '(generator_mat)\n', (2623, 2638), True, 'import numpy as np\n'), ((4035, 4055), 'openfermion.FermionOperator', 'of.FermionOperator', ([], {}), '()\n', (4053, 4055), True, 'import openfermion as of\n'), ((5372, 5391), 'numpy.real_if_close', 'np.real_if_close', (['N'], {}), '(N)\n', (5388, 5391), True, 'import numpy as np\n'), ((5400, 5417), 'numpy.allclose', 'np.allclose', (['N', '(0)'], {}), '(N, 0)\n', (5411, 5417), True, 'import numpy as np\n'), ((5465, 5480), 'numpy.isrealobj', 'np.isrealobj', (['N'], {}), '(N)\n', (5477, 5480), True, 'import numpy as np\n'), ((6172, 6188), 'numpy.linalg.svd', 'np.linalg.svd', (['N'], {}), '(N)\n', (6185, 6188), True, 'import numpy as np\n'), ((6237, 6258), 'numpy.round', 'np.round', (['l', 'rounding'], {}), '(l, rounding)\n', (6245, 6258), True, 'import numpy as np\n'), ((6331, 6342), 'itertools.groupby', 'groupby', (['rl'], {}), '(rl)\n', (6338, 6342), False, 'from itertools import product, groupby\n'), ((6976, 6991), 'scipy.linalg.block_diag', 'block_diag', (['*qs'], {}), '(*qs)\n', (6986, 6991), False, 'from scipy.linalg import block_diag, sqrtm, polar, schur\n'), ((8064, 8107), 'numpy.allclose', 'np.allclose', (['generator_mat', 'generator_mat.T'], {}), '(generator_mat, generator_mat.T)\n', (8075, 8107), True, 'import numpy as np\n'), ((1569, 1606), 'numpy.allclose', 'np.allclose', (['generator_tensor.imag', '(0)'], {}), '(generator_tensor.imag, 0)\n', (1580, 1606), True, 'import numpy as np\n'), ((2361, 2404), 'numpy.allclose', 'np.allclose', (['generator_mat', 'generator_mat.T'], {}), '(generator_mat, generator_mat.T)\n', (2372, 2404), True, 'import numpy as np\n'), ((2551, 2590), 'numpy.einsum', 'np.einsum', (['"""pqrq->pr"""', 'generator_tensor'], {}), "('pqrq->pr', generator_tensor)\n", (2560, 2590), True, 'import numpy as np\n'), ((4158, 4219), 'openfermion.FermionOperator', 'of.FermionOperator', (['tfop'], {'coefficient': 'one_body_residual[p, q]'}), '(tfop, coefficient=one_body_residual[p, q])\n', (4176, 4219), True, 'import openfermion as of\n'), ((5585, 5602), 'numpy.linalg.eigh', 'np.linalg.eigh', (['N'], {}), '(N)\n', (5599, 5602), True, 'import numpy as np\n'), ((5618, 5627), 'numpy.abs', 'np.abs', (['l'], {}), '(l)\n', (5624, 5627), True, 'import numpy as np\n'), ((5974, 5995), 'numpy.array', 'np.array', (['permutation'], {}), '(permutation)\n', (5982, 5995), True, 'import numpy as np\n'), ((6210, 6226), 'numpy.conjugate', 'np.conjugate', (['ws'], {}), '(ws)\n', (6222, 6226), True, 'import numpy as np\n'), ((7005, 7016), 'numpy.conj', 'np.conj', (['qb'], {}), '(qb)\n', (7012, 7016), True, 'import numpy as np\n'), ((7958, 8002), 'numpy.transpose', 'np.transpose', (['generator_tensor', '[0, 3, 1, 2]'], {}), '(generator_tensor, [0, 3, 1, 2])\n', (7970, 8002), True, 'import numpy as np\n'), ((8134, 8173), 'numpy.einsum', 'np.einsum', (['"""pqrq->pr"""', 'generator_tensor'], {}), "('pqrq->pr', generator_tensor)\n", (8143, 8173), True, 'import numpy as np\n'), ((8322, 8341), 'numpy.where', 'np.where', (['(T > 1e-12)'], {}), '(T > 1e-12)\n', (8330, 8341), True, 'import numpy as np\n'), ((5434, 5445), 'numpy.zeros', 'np.zeros', (['n'], {}), 
'(n)\n', (5442, 5445), True, 'import numpy as np\n'), ((5447, 5456), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (5453, 5456), True, 'import numpy as np\n'), ((5689, 5737), 'numpy.complex128', 'np.complex128', (['[(1 if i > 0 else -1) for i in l]'], {}), '([(1 if i > 0 else -1) for i in l])\n', (5702, 5737), True, 'import numpy as np\n'), ((5754, 5769), 'numpy.diag', 'np.diag', (['phases'], {}), '(phases)\n', (5761, 5769), True, 'import numpy as np\n'), ((6133, 6151), 'numpy.array', 'np.array', (['sorted_l'], {}), '(sorted_l)\n', (6141, 6151), True, 'import numpy as np\n'), ((2201, 2245), 'numpy.transpose', 'np.transpose', (['generator_tensor', '[0, 3, 1, 2]'], {}), '(generator_tensor, [0, 3, 1, 2])\n', (2213, 2245), True, 'import numpy as np\n'), ((2846, 2864), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (2853, 2864), True, 'import numpy as np\n'), ((3017, 3035), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (3024, 3035), True, 'import numpy as np\n'), ((3267, 3293), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['S'], {}), '(S)\n', (3290, 3293), True, 'import openfermion as of\n'), ((3317, 3343), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['S'], {}), '(S)\n', (3340, 3343), True, 'import openfermion as of\n'), ((3367, 3393), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['D'], {}), '(D)\n', (3390, 3393), True, 'import openfermion as of\n'), ((3417, 3443), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['D'], {}), '(D)\n', (3440, 3443), True, 'import openfermion as of\n'), ((5276, 5291), 'numpy.transpose', 'np.transpose', (['N'], {}), '(N)\n', (5288, 5291), True, 'import numpy as np\n'), ((8549, 8564), 'numpy.sqrt', 'np.sqrt', (['T[idx]'], {}), '(T[idx])\n', (8556, 8564), True, 'import numpy as np\n'), ((2947, 2965), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (2954, 2965), True, 'import numpy as np\n'), ((3119, 3137), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (3126, 3137), True, 'import numpy as np\n'), ((6899, 6919), 'numpy.transpose', 'np.transpose', (['vas[i]'], {}), '(vas[i])\n', (6911, 6919), True, 'import numpy as np\n'), ((3537, 3565), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op1'], {}), '(op1)\n', (3560, 3565), True, 'import openfermion as of\n'), ((3680, 3708), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op2'], {}), '(op2)\n', (3703, 3708), True, 'import openfermion as of\n'), ((3823, 3851), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op3'], {}), '(op3)\n', (3846, 3851), True, 'import openfermion as of\n'), ((3966, 3994), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op4'], {}), '(op4)\n', (3989, 3994), True, 'import openfermion as of\n')]
|
import numpy as np
def to_categorical(y, num_classes=None):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=np.float32)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
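# Example: to_categorical([0, 2, 1], num_classes=3) returns the float32 array
# [[1, 0, 0], [0, 0, 1], [0, 1, 0]].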
def map_label(y, map_dict=None, if_binary=False):
"""Convert a class vector (all types) to a class vector (integers)
E.g. for use with to_categorical. ['a', 'b', 4, 7, 'a'] -> [0, 1, 2, 3, 0],
['a', 'b', 'a', 'a'] -> [0, 1, 0, 0]
# Arguments
y: class vector to be converted
map_dict: mapping relations
# Returns:
A converted class vector and two dictionaries of mapping relations.
"""
    if map_dict is None:
        map_dict = {}
    assert isinstance(map_dict, dict)
y = np.array(y)
y = y.ravel()
if not map_dict:
if_validate = False
else:
if if_binary and len(map_dict) != 2:
raise ValueError(
"Expected a dictionary of 2 elements in map_dict while received %d elements!" % len(map_dict))
if_validate = True
rev_map_dict = {}
class_idx = 0
int_y = []
for label_element in y:
if label_element not in map_dict:
if if_validate:
raise ValueError("Invalid label %s!" % str(label_element))
map_dict[label_element] = class_idx
rev_map_dict[class_idx] = label_element
class_idx += 1
            if if_binary and class_idx > 2:
raise ValueError("Found more than 2 classes in label inputs!")
int_y.append(map_dict[label_element])
int_y = np.array(int_y, dtype='int')
return int_y, map_dict, rev_map_dict
def get_classnum(y):
"""Get classnum from one-hot label inputs 'y'. Note that this function will not validate the label inputs
# Arguments
y: label inputs
# Returns:
The number of classes in 'y'
"""
assert isinstance(y, np.ndarray)
inputshape = y.shape
if len(inputshape) == 2:
return inputshape[-1]
else:
raise ValueError("Input labels should be a 2-dim one-hot vector!")
|
[
"numpy.zeros",
"numpy.max",
"numpy.arange",
"numpy.array",
"numpy.reshape"
] |
[((465, 489), 'numpy.array', 'np.array', (['y'], {'dtype': '"""int"""'}), "(y, dtype='int')\n", (473, 489), True, 'import numpy as np\n'), ((747, 791), 'numpy.zeros', 'np.zeros', (['(n, num_classes)'], {'dtype': 'np.float32'}), '((n, num_classes), dtype=np.float32)\n', (755, 791), True, 'import numpy as np\n'), ((895, 932), 'numpy.reshape', 'np.reshape', (['categorical', 'output_shape'], {}), '(categorical, output_shape)\n', (905, 932), True, 'import numpy as np\n'), ((1443, 1454), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1451, 1454), True, 'import numpy as np\n'), ((2281, 2309), 'numpy.array', 'np.array', (['int_y'], {'dtype': '"""int"""'}), "(int_y, dtype='int')\n", (2289, 2309), True, 'import numpy as np\n'), ((696, 705), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (702, 705), True, 'import numpy as np\n'), ((808, 820), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (817, 820), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Functions relating velocity trend extrapolation
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = "yuhao"
import numpy as np
from pygeopressure.basic.well_log import Log
# from ..well_log import Log
v0 = 1600 # take values larger than 1500
def set_v0(v):
"""
set global variable v0 for slotnick()
"""
global v0
v0 = v
def normal(x, a, b):
r"""
Extrapolate velocity using normal trend.
Parameters
----------
x : 1-d ndarray
depth to convert
a, b : scalar
        coefficients
Returns
-------
out : 1-d ndarray
        estimated velocity
Notes
-----
.. math:: \log d{t}_{Normal}=a-bz
is transformed to
.. math:: v={e}^{bz-a}
    **Note** that the exponential relation is unphysical, especially at depths
    below the interval within which the equation is calibrated.
References
----------
.. [1] <NAME>, <NAME>, and others, "Estimation of formation
pressures from log-derived shale properties," Journal of Petroleum
Technology, vol. 17, no. 6, pp. 717-722, 1965.
"""
return np.exp(x*b - a)
def normal_log(vel_log, a, b):
"""
Returns
-------
Log
normal velocity log
"""
normal_vel = normal(np.array(vel_log.depth), a, b)
mask = np.isnan(np.array(vel_log.data))
normal_vel[mask] = np.nan
log = Log()
log.depth = np.array(vel_log.depth)
log.data = normal_vel
log.name = 'normal_vel_log'
log.descr = "Velocity_normal"
log.units = "m/s"
return log
def slotnick(x, k):
"""
Relation between velocity and depth
Parameters
----------
x : 1-d ndarray
Depth to convert
k : scalar
velocity gradient
Notes
-----
    Typical values of the velocity gradient k fall in the range 0.6-1.0 s^-1.
References
----------
.. [1] <NAME>, "On seismic computations, with applications, I,"
Geophysics, vol. 1, no. 1, pp. 9-22, 1936.
"""
global v0
return v0 + k*x
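# Example: with the module default v0 = 1600 m/s and k = 0.6 1/s,
# slotnick(1000, 0.6) gives 1600 + 0.6 * 1000 = 2200 m/s.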
def normal_dt(x, a, b):
"""
normal trend of transit time
Parameters
----------
x : 1-d ndarray
depth to convert
"""
return a - b * x
|
[
"numpy.array",
"numpy.exp",
"pygeopressure.basic.well_log.Log"
] |
[((1209, 1226), 'numpy.exp', 'np.exp', (['(x * b - a)'], {}), '(x * b - a)\n', (1215, 1226), True, 'import numpy as np\n'), ((1474, 1479), 'pygeopressure.basic.well_log.Log', 'Log', ([], {}), '()\n', (1477, 1479), False, 'from pygeopressure.basic.well_log import Log\n'), ((1496, 1519), 'numpy.array', 'np.array', (['vel_log.depth'], {}), '(vel_log.depth)\n', (1504, 1519), True, 'import numpy as np\n'), ((1359, 1382), 'numpy.array', 'np.array', (['vel_log.depth'], {}), '(vel_log.depth)\n', (1367, 1382), True, 'import numpy as np\n'), ((1410, 1432), 'numpy.array', 'np.array', (['vel_log.data'], {}), '(vel_log.data)\n', (1418, 1432), True, 'import numpy as np\n')]
|
import time
import numpy as np
import matplotlib.pyplot as plt
from test_farfield import make_meshes
from tectosaur.ops.sparse_integral_op import SparseIntegralOp, RegularizedSparseIntegralOp
from tectosaur.ops.dense_integral_op import RegularizedDenseIntegralOp
# The PtToPt* farfield ops used below are assumed to live in sparse_farfield_op
# alongside TriToTriDirectFarfieldOp.
from tectosaur.ops.sparse_farfield_op import (
    TriToTriDirectFarfieldOp, PtToPtDirectFarfieldOp, PtToPtFMMFarfieldOp)
from tectosaur.ops.mass_op import MassOp
from tectosaur.ops.neg_op import MultOp
from tectosaur.ops.sum_op import SumOp
from tectosaur.nearfield.nearfield_op import any_nearfield
from tectosaur.util.test_decorators import kernel
from tectosaur.util.timer import Timer
def plot_fnc(m, surf1_idxs, surf2_idxs, x, outs):
def plot_at_pts(idxs, f):
pts_f = np.full(m[0].shape[0], np.nan)
pts_f[m[1][idxs]] = f
pts_f_not_nan = pts_f[np.logical_not(np.isnan(pts_f))]
min_f = np.min(pts_f_not_nan)
max_f = np.max(pts_f_not_nan)
plt.figure()
plt.tricontourf(
m[0][:,0], m[0][:,2], m[1], pts_f,
levels = np.linspace(min_f, max_f, 21),
extend = 'both'
)
plt.colorbar()
for d in range(3):
plot_at_pts(surf2_idxs, x[:,d].reshape((-1,3)))
for o in outs:
plot_at_pts(surf1_idxs, o.reshape(-1,3,3)[:,:,d])
plt.show()
def build_x_field(m, surf1_idxs, surf2_idxs):
dof_pts = m[0][m[1][surf2_idxs]]
dof_pts[:,:,1] -= dof_pts[0,0,1]
def gaussian(a, b, c, x):
return a * np.exp(-((x - b) ** 2) / (2 * c ** 2))
dist = np.linalg.norm(dof_pts.reshape(-1,3), axis = 1)
x = np.zeros((dof_pts.shape[0] * 3, 3))
for d in range(3):
x[:,d] = gaussian(0.1 * (d + 1), 0.0, 0.3, dist)
return x
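# build_x_field produces a smooth synthetic input field: each component d is a
# Gaussian of amplitude 0.1 * (d + 1) and width 0.3, evaluated at the norm of
# each (shifted) surface point.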
def x_ones_field(m, surf1_idxs, surf2_idxs):
x = np.ones(surf2_idxs.shape[0] * 9).reshape((-1,3,3))
from tectosaur.constraint_builders import find_free_edges
free_edges = find_free_edges(m[1])
first_tri_idx = np.min(surf2_idxs)
last_tri_idx = np.max(surf2_idxs)
for tri_idx, edge_idx in free_edges:
if tri_idx < first_tri_idx or tri_idx > last_tri_idx:
continue
for v in range(2):
pt_idx = m[1][tri_idx, (edge_idx + v) % 3]
tris_touching = np.where(m[1] == pt_idx)
x[tris_touching[0] - first_tri_idx, tris_touching[1], :] = 0.0
return x
def regularized_tester(K, sep, continuity, mass_op_factor = 0.0, which = None):
if which is None:
raise Exception('select some operators!')
n_m = 30
full_K_name = f'elastic{K}3'
full_RK_name = f'elasticR{K}3'
m, surf1_idxs, surf2_idxs = make_meshes(n_m = n_m, sep = sep)
if sep == 0.0:
surf2_idxs = surf1_idxs
near_threshold = 2.0
nq_near = 5
nq_far = 2
if any_nearfield(m[0], m[1], surf1_idxs, surf2_idxs, near_threshold):
nearfield = True
else:
nearfield = False
def sparse_unregularized(far_op, Kn):
return SparseIntegralOp(
6, nq_far, nq_near, near_threshold, Kn, [1.0, 0.25], m[0], m[1],
np.float32, farfield_op_type = far_op, obs_subset = surf1_idxs,
src_subset = surf2_idxs
)
def change_K_tri_tri(to):
def f(*args, to = to):
args = list(args)
args[1] = to
return TriToTriDirectFarfieldOp(*args)
return f
def add_sparse_reg(farfield_K, farfield_type):
ops.append(SumOp([
RegularizedSparseIntegralOp(
10, 10, 6, nq_far, nq_near, near_threshold, full_RK_name,
farfield_K, [1.0, 0.25], m[0], m[1],
np.float32, farfield_type,
obs_subset = surf1_idxs, src_subset = surf2_idxs
),
MultOp(MassOp(3, m[0], m[1][surf1_idxs]), mass_op_factor)
]))
ops = [
sparse_unregularized(PtToPtDirectFarfieldOp, full_K_name)
]
if 'pt_to_pt_fmm' in which:
ops.append(sparse_unregularized(PtToPtFMMFarfieldOp(150, 2.5, 5), full_K_name))
if 'tri_farfield_regularized' in which:
ops.append(sparse_unregularized(change_K_tri_tri(full_RK_name), full_K_name))
if 'dense_regularized' in which:
ops.append(SumOp([
RegularizedDenseIntegralOp(
10, 10, 6, nq_far, nq_near, near_threshold, full_RK_name, full_RK_name,
[1.0, 0.25], m[0], m[1], np.float32,
obs_subset = surf1_idxs, src_subset = surf2_idxs
),
MultOp(MassOp(3, m[0], m[1][surf1_idxs]), mass_op_factor)
]))
if 'sparse_regularized' in which:
add_sparse_reg(full_RK_name, TriToTriDirectFarfieldOp)
if 'sparse_regularized_fmm' in which:
add_sparse_reg(full_K_name, PtToPtFMMFarfieldOp(150, 2.5, 5))
    if 'sparse_regularized_but_unregularized_far' in which:
add_sparse_reg(full_K_name, change_K_tri_tri(full_K_name))
print('built ops')
x = build_x_field(m, surf1_idxs, surf2_idxs)
x_flat = x.flatten()
outs = [o.dot(x_flat) for o in ops]
if continuity:
from tectosaur.constraint_builders import continuity_constraints, \
free_edge_constraints
from tectosaur.constraints import build_constraint_matrix
cs = continuity_constraints(m[1][surf1_idxs], np.array([]))
cs.extend(free_edge_constraints(m[1][surf1_idxs]))
cm, c_rhs = build_constraint_matrix(cs, outs[0].shape[0])
final_outs = [cm.T.dot(v) for v in outs]
plot_outs = [cm.dot(v) for v in final_outs]
else:
plot_outs = outs
final_outs = outs
should_plot = True
if should_plot:
plot_fnc(m, surf1_idxs, surf2_idxs, x, plot_outs)
for i in range(len(final_outs)):
for j in range(i + 1, len(final_outs)):
print(i,j,final_outs[i] / final_outs[j])
np.testing.assert_almost_equal(final_outs[i], final_outs[j], 6)
def test_regularized_T_farfield():
regularized_tester('T', 2.0, False, which = ['tri_farfield_regularized'])
def test_regularized_A_farfield():
regularized_tester('A', 2.0, True, which = ['tri_farfield_regularized'])
def test_regularized_H_farfield():
regularized_tester('H', 4.0, True, which = ['tri_farfield_regularized'])
def test_regularized_T_nearfield():
regularized_tester(
'T', 0.4, False,
which = ['dense_regularized', 'sparse_regularized']
)
def test_regularized_A_nearfield():
regularized_tester(
'A', 0.4, True,
which = ['dense_regularized', 'sparse_regularized']
)
def test_regularized_H_nearfield():
regularized_tester(
'H', 0.4, True,
which = ['dense_regularized', 'sparse_regularized']
)
def test_regularized_T_self():
regularized_tester(
'T', 0.0, False, -0.5,
which = [
'dense_regularized', 'sparse_regularized',
# 'sparse_regularized_fmm',
# 'sparse_regularized_but_unregularized_far'
]
)
def test_regularized_A_self():
regularized_tester(
'A', 0.0, True, 0.5,
which = ['dense_regularized', 'sparse_regularized']
)
def test_regularized_H_self():
regularized_tester(
'H', 0.0, True,
which = ['dense_regularized', 'sparse_regularized']
)
def test_benchmark_far_tris():
n = 100
m, surf1_idxs, surf2_idxs = make_meshes(n_m = n, sep = 4.0)
op = TriToTriDirectFarfieldOp(
2, 'elasticRH3', [1.0, 0.25], m[0], m[1], np.float32,
surf1_idxs, surf2_idxs
)
x = build_x_field(m, surf1_idxs, surf2_idxs)
x_flat = x.flatten()
op.dot(x_flat)
import tectosaur, logging
tectosaur.logger.setLevel(logging.INFO)
n = 2
for j in range(n):
start = time.time()
print(op.dot(x_flat)[0])
took = time.time() - start
print('op.dot took', took)
total_interactions = surf1_idxs.shape[0] * surf2_idxs.shape[0]
inter_per_sec = total_interactions / took
print('total interactions', total_interactions)
print('billion interactions/sec', inter_per_sec / 1e9)
if __name__ == "__main__":
test_benchmark_far_tris()
|
[
"numpy.ones",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.exp",
"tectosaur.ops.sparse_integral_op.RegularizedSparseIntegralOp",
"tectosaur.logger.setLevel",
"numpy.full",
"tectosaur.ops.dense_integral_op.RegularizedDenseIntegralOp",
"numpy.testing.assert_almost_equal",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.show",
"tectosaur.constraint_builders.find_free_edges",
"numpy.min",
"tectosaur.nearfield.nearfield_op.any_nearfield",
"test_farfield.make_meshes",
"numpy.zeros",
"tectosaur.constraints.build_constraint_matrix",
"tectosaur.ops.mass_op.MassOp",
"time.time",
"numpy.where",
"numpy.array",
"tectosaur.ops.sparse_farfield_op.TriToTriDirectFarfieldOp",
"tectosaur.constraint_builders.free_edge_constraints"
] |
[((1550, 1585), 'numpy.zeros', 'np.zeros', (['(dof_pts.shape[0] * 3, 3)'], {}), '((dof_pts.shape[0] * 3, 3))\n', (1558, 1585), True, 'import numpy as np\n'), ((1863, 1884), 'tectosaur.constraint_builders.find_free_edges', 'find_free_edges', (['m[1]'], {}), '(m[1])\n', (1878, 1884), False, 'from tectosaur.constraint_builders import find_free_edges\n'), ((1905, 1923), 'numpy.min', 'np.min', (['surf2_idxs'], {}), '(surf2_idxs)\n', (1911, 1923), True, 'import numpy as np\n'), ((1943, 1961), 'numpy.max', 'np.max', (['surf2_idxs'], {}), '(surf2_idxs)\n', (1949, 1961), True, 'import numpy as np\n'), ((2576, 2605), 'test_farfield.make_meshes', 'make_meshes', ([], {'n_m': 'n_m', 'sep': 'sep'}), '(n_m=n_m, sep=sep)\n', (2587, 2605), False, 'from test_farfield import make_meshes\n'), ((2726, 2791), 'tectosaur.nearfield.nearfield_op.any_nearfield', 'any_nearfield', (['m[0]', 'm[1]', 'surf1_idxs', 'surf2_idxs', 'near_threshold'], {}), '(m[0], m[1], surf1_idxs, surf2_idxs, near_threshold)\n', (2739, 2791), False, 'from tectosaur.nearfield.nearfield_op import any_nearfield\n'), ((7292, 7319), 'test_farfield.make_meshes', 'make_meshes', ([], {'n_m': 'n', 'sep': '(4.0)'}), '(n_m=n, sep=4.0)\n', (7303, 7319), False, 'from test_farfield import make_meshes\n'), ((7334, 7441), 'tectosaur.ops.sparse_farfield_op.TriToTriDirectFarfieldOp', 'TriToTriDirectFarfieldOp', (['(2)', '"""elasticRH3"""', '[1.0, 0.25]', 'm[0]', 'm[1]', 'np.float32', 'surf1_idxs', 'surf2_idxs'], {}), "(2, 'elasticRH3', [1.0, 0.25], m[0], m[1], np.\n float32, surf1_idxs, surf2_idxs)\n", (7358, 7441), False, 'from tectosaur.ops.sparse_farfield_op import TriToTriDirectFarfieldOp\n'), ((7587, 7626), 'tectosaur.logger.setLevel', 'tectosaur.logger.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (7612, 7626), False, 'import tectosaur, logging\n'), ((680, 710), 'numpy.full', 'np.full', (['m[0].shape[0]', 'np.nan'], {}), '(m[0].shape[0], np.nan)\n', (687, 710), True, 'import numpy as np\n'), ((821, 842), 'numpy.min', 'np.min', (['pts_f_not_nan'], {}), '(pts_f_not_nan)\n', (827, 842), True, 'import numpy as np\n'), ((859, 880), 'numpy.max', 'np.max', (['pts_f_not_nan'], {}), '(pts_f_not_nan)\n', (865, 880), True, 'import numpy as np\n'), ((890, 902), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (900, 902), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1087), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1085, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1270, 1272), True, 'import matplotlib.pyplot as plt\n'), ((5327, 5372), 'tectosaur.constraints.build_constraint_matrix', 'build_constraint_matrix', (['cs', 'outs[0].shape[0]'], {}), '(cs, outs[0].shape[0])\n', (5350, 5372), False, 'from tectosaur.constraints import build_constraint_matrix\n'), ((7676, 7687), 'time.time', 'time.time', ([], {}), '()\n', (7685, 7687), False, 'import time\n'), ((1444, 1480), 'numpy.exp', 'np.exp', (['(-(x - b) ** 2 / (2 * c ** 2))'], {}), '(-(x - b) ** 2 / (2 * c ** 2))\n', (1450, 1480), True, 'import numpy as np\n'), ((1733, 1765), 'numpy.ones', 'np.ones', (['(surf2_idxs.shape[0] * 9)'], {}), '(surf2_idxs.shape[0] * 9)\n', (1740, 1765), True, 'import numpy as np\n'), ((2196, 2220), 'numpy.where', 'np.where', (['(m[1] == pt_idx)'], {}), '(m[1] == pt_idx)\n', (2204, 2220), True, 'import numpy as np\n'), ((3265, 3296), 'tectosaur.ops.sparse_farfield_op.TriToTriDirectFarfieldOp', 'TriToTriDirectFarfieldOp', (['*args'], {}), '(*args)\n', (3289, 3296), 
False, 'from tectosaur.ops.sparse_farfield_op import TriToTriDirectFarfieldOp\n'), ((5234, 5246), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5242, 5246), True, 'import numpy as np\n'), ((5266, 5305), 'tectosaur.constraint_builders.free_edge_constraints', 'free_edge_constraints', (['m[1][surf1_idxs]'], {}), '(m[1][surf1_idxs])\n', (5287, 5305), False, 'from tectosaur.constraint_builders import continuity_constraints, free_edge_constraints\n'), ((5788, 5851), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['final_outs[i]', 'final_outs[j]', '(6)'], {}), '(final_outs[i], final_outs[j], 6)\n', (5818, 5851), True, 'import numpy as np\n'), ((7736, 7747), 'time.time', 'time.time', ([], {}), '()\n', (7745, 7747), False, 'import time\n'), ((787, 802), 'numpy.isnan', 'np.isnan', (['pts_f'], {}), '(pts_f)\n', (795, 802), True, 'import numpy as np\n'), ((996, 1025), 'numpy.linspace', 'np.linspace', (['min_f', 'max_f', '(21)'], {}), '(min_f, max_f, 21)\n', (1007, 1025), True, 'import numpy as np\n'), ((3405, 3608), 'tectosaur.ops.sparse_integral_op.RegularizedSparseIntegralOp', 'RegularizedSparseIntegralOp', (['(10)', '(10)', '(6)', 'nq_far', 'nq_near', 'near_threshold', 'full_RK_name', 'farfield_K', '[1.0, 0.25]', 'm[0]', 'm[1]', 'np.float32', 'farfield_type'], {'obs_subset': 'surf1_idxs', 'src_subset': 'surf2_idxs'}), '(10, 10, 6, nq_far, nq_near, near_threshold,\n full_RK_name, farfield_K, [1.0, 0.25], m[0], m[1], np.float32,\n farfield_type, obs_subset=surf1_idxs, src_subset=surf2_idxs)\n', (3432, 3608), False, 'from tectosaur.ops.sparse_integral_op import RegularizedSparseIntegralOp\n'), ((4180, 4369), 'tectosaur.ops.dense_integral_op.RegularizedDenseIntegralOp', 'RegularizedDenseIntegralOp', (['(10)', '(10)', '(6)', 'nq_far', 'nq_near', 'near_threshold', 'full_RK_name', 'full_RK_name', '[1.0, 0.25]', 'm[0]', 'm[1]', 'np.float32'], {'obs_subset': 'surf1_idxs', 'src_subset': 'surf2_idxs'}), '(10, 10, 6, nq_far, nq_near, near_threshold,\n full_RK_name, full_RK_name, [1.0, 0.25], m[0], m[1], np.float32,\n obs_subset=surf1_idxs, src_subset=surf2_idxs)\n', (4206, 4369), False, 'from tectosaur.ops.dense_integral_op import RegularizedDenseIntegralOp\n'), ((3703, 3736), 'tectosaur.ops.mass_op.MassOp', 'MassOp', (['(3)', 'm[0]', 'm[1][surf1_idxs]'], {}), '(3, m[0], m[1][surf1_idxs])\n', (3709, 3736), False, 'from tectosaur.ops.mass_op import MassOp\n'), ((4448, 4481), 'tectosaur.ops.mass_op.MassOp', 'MassOp', (['(3)', 'm[0]', 'm[1][surf1_idxs]'], {}), '(3, m[0], m[1][surf1_idxs])\n', (4454, 4481), False, 'from tectosaur.ops.mass_op import MassOp\n')]
|
from rh_logger.api import logger
import logging
import numpy as np
from scipy.optimize import least_squares
import pickle
import os
import time
import scipy.sparse as spp
from scipy.sparse.linalg import lsqr
import scipy.optimize
from rh_renderer.models import RigidModel
#import common
EPS = 0.000001
class Rigid2DOptimizer(object):
# TODO - make it a class
def __init__(self, **kwargs):
self._damping = float(kwargs.get("damping", 0.0))
self._huber_delta = float(kwargs.get("huber_delta", 15))
self._max_iterations = int(kwargs.get("max_iterations", 1000))
self._init_gamma = float(kwargs.get("init_gamma", 0.00000000001))
self._min_gamma = float(kwargs.get("min_gamma", 1e-30))
self._eps = float(kwargs.get("eps", 1e-9))
self._pre_translate = "pre_translate" in kwargs
@staticmethod
def apply_rigid_transform(pts, theta, t_x, t_y):
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
return np.dot([[cos_theta, -sin_theta],
[sin_theta, cos_theta]],
pts.T).T + np.array([t_x, t_y])
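    # Illustrative example: with theta = pi / 2 and (t_x, t_y) = (0, 0), the
    # point (1, 0) maps to (0, 1); the translation is added after the rotation.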
@staticmethod
def optimize_func(params, tile_names_map, matches, matches_num):
# Compute the residuals of all the matches
residuals = np.empty((matches_num, ), dtype=np.float32)
start_idx = 0
for pair_name, pair_matches in matches.items():
pair_matches_len = len(pair_matches[0])
tile1_params_start_idx = tile_names_map[pair_name[0]] * 3
tile2_params_start_idx = tile_names_map[pair_name[1]] * 3
pts1_transformed = Rigid2DOptimizer.apply_rigid_transform(pair_matches[0], *params[tile1_params_start_idx:tile1_params_start_idx+3])
pts2_transformed = Rigid2DOptimizer.apply_rigid_transform(pair_matches[1], *params[tile2_params_start_idx:tile2_params_start_idx+3])
# compute the L2 distance between the two sets of points
deltas = pts1_transformed - pts2_transformed
residuals[start_idx:start_idx + pair_matches_len] = np.sqrt(np.sum(deltas**2, axis=1))
start_idx += pair_matches_len
# Normalize the residuals by 2*median
#med_residual = np.median(residuals)
#residuals = residuals / (2*med_residual + EPS)
return residuals
@staticmethod
def compute_all_dists(matches, transforms, matches_num):
dists = np.empty((matches_num, ), dtype=np.float32)
start_idx = 0
for pair_name, pair_matches in matches.items():
pair_matches_len = len(pair_matches[0])
transform1 = transforms[pair_name[0]]
transform2 = transforms[pair_name[1]]
pts1_transformed = Rigid2DOptimizer.apply_rigid_transform(pair_matches[0], *transform1)
pts2_transformed = Rigid2DOptimizer.apply_rigid_transform(pair_matches[1], *transform2)
# compute the L2 distance between the two sets of points
deltas = pts1_transformed - pts2_transformed
dists[start_idx:start_idx + pair_matches_len] = np.sqrt(np.sum(deltas**2, axis=1))
start_idx += pair_matches_len
return dists
@staticmethod
def grad_F_huber(huber_delta, params, tile_names_map, matches, matches_num):
# Compute the residuals of all the matches
grad_f_result = np.zeros_like(params)
#start_idx = 0
for pair_name, pair_matches in matches.items():
#pair_matches_len = len(pair_matches[0])
tile1_params_start_idx = tile_names_map[pair_name[0]] * 3
tile2_params_start_idx = tile_names_map[pair_name[1]] * 3
pts1_transformed = Rigid2DOptimizer.apply_rigid_transform(pair_matches[0], *params[tile1_params_start_idx:tile1_params_start_idx+3])
pts2_transformed = Rigid2DOptimizer.apply_rigid_transform(pair_matches[1], *params[tile2_params_start_idx:tile2_params_start_idx+3])
deltas = pts1_transformed - pts2_transformed
delta_x = deltas[:, 0]
delta_y = deltas[:, 1]
residuals = np.sqrt(np.sum(deltas**2, axis=1))
residuals_huber_mask = residuals <= huber_delta
# The gradient coefficient for anything that is below the huber delta is 1, and anything above should be:
# (delta / R), where R is the distance between the two points
grad_f_multiplier = np.ones_like(residuals)
grad_f_multiplier[~residuals_huber_mask] = huber_delta / residuals[~residuals_huber_mask]
# The current matches only add values to the gradient at the indices of the relevant parameters (don't change anything else)
theta1_idx = tile1_params_start_idx
tx1_idx = tile1_params_start_idx + 1
ty1_idx = tile1_params_start_idx + 2
theta2_idx = tile2_params_start_idx
tx2_idx = tile2_params_start_idx + 1
ty2_idx = tile2_params_start_idx + 2
# Update grad(Theta_tile1) , grad(Tx_tile1), grad(Ty_tile1)
grad_f_result[theta1_idx] += np.sum(
np.dot(delta_x * (-pair_matches[0][:, 0]*params[theta1_idx] - pair_matches[0][:, 1]),
grad_f_multiplier)
) + np.sum(
np.dot(delta_y * (pair_matches[0][:, 0] - pair_matches[0][:, 1]*params[theta1_idx]),
grad_f_multiplier)
)
grad_f_result[tx1_idx] += np.sum(np.dot(delta_x, grad_f_multiplier))
grad_f_result[ty1_idx] += np.sum(np.dot(delta_y, grad_f_multiplier))
# Update grad(Theta_tile2) , grad(Tx_tile2), grad(Ty_tile2)
grad_f_result[theta2_idx] += np.sum(
np.dot(delta_x * (pair_matches[1][:, 0]*params[theta2_idx] + pair_matches[1][:, 1]),
grad_f_multiplier)
) + np.sum(
np.dot(delta_y * (-pair_matches[1][:, 0] + pair_matches[1][:, 1]*params[theta2_idx]),
grad_f_multiplier)
)
grad_f_result[tx2_idx] += -np.sum(np.dot(delta_x, grad_f_multiplier))
grad_f_result[ty2_idx] += -np.sum(np.dot(delta_y, grad_f_multiplier))
return grad_f_result
def _gradient_descent(self, optimize_func, p0, grad_F_huber, args=None):
def compute_cost_huber(optimize_func, cur_p, params, huber_delta):
residuals = optimize_func(cur_p, *params)
cost = np.empty_like(residuals)
residuals_huber_mask = residuals <= huber_delta
cost[residuals_huber_mask] = 0.5 * residuals[residuals_huber_mask]**2
cost[~residuals_huber_mask] = huber_delta * residuals[~residuals_huber_mask] - (0.5 * huber_delta**2)
return np.sum(cost)
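        # The loop below is plain gradient descent with a simple adaptive step:
        # a step that increases the Huber cost is undone and gamma is halved,
        # a successful step grows gamma by 10%, and iteration stops once the
        # parameter change is below self._eps or gamma falls below self._min_gamma.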
cur_p = p0
#cur_cost = np.sum(optimize_func(cur_p, *args))
cur_cost = compute_cost_huber(optimize_func, cur_p, args, self._huber_delta)
logger.report_event("Initial cost: {}".format(cur_cost), log_level=logging.INFO)
gamma = self._init_gamma
for it in range(self._max_iterations):
#print("Iteration {}".format(it))
prev_p = cur_p
prev_cost = cur_cost
cur_p = prev_p - gamma * grad_F_huber(self._huber_delta, prev_p, *args)
#print("New params: {}".format(cur_p))
#cur_cost = np.sum(optimize_func(cur_p, *args))
cur_cost = compute_cost_huber(optimize_func, cur_p, args, self._huber_delta)
#print("New cost: {}".format(cur_cost))
if it % 100 == 0:
logger.report_event("iter {}: C: {}".format(it, cur_cost), log_level=logging.INFO)
if cur_cost > prev_cost: # we took a bad step: undo it, scale down gamma, and start over
#print("Backtracking step")
cur_p = prev_p
cur_cost = prev_cost
gamma *= 0.5
elif np.all(np.abs(cur_p - prev_p) <= self._eps): # We took a good step, but the change to the parameters vector is negligible
break
else: # We took a good step, try to increase the step size a bit
gamma *= 1.1
if gamma < self._min_gamma:
break
#print("The local minimum occurs at", cur_p)
logger.report_event("Post-opt cost: {}".format(cur_cost), log_level=logging.INFO)
return cur_p
def optimize(self, orig_locs, matches):
"""
        The aim is to find for each tile a triplet: theta, t_x, and t_y that will define the
rigid transformation that needs to be applied to that tile.
The transformation needs to minimize the L2 distance between the matches of pairs of tiles.
To this end, we define our optimizations as a non-linear least squares problem.
Given that the number of tiles is N, and the total number of matches is M,
we want to find the values for 3*N parameters, s.t., the sum of all distances is minimized.
Note that due to outliers, we would like to use a more robust method, such as huber loss.
"""
tile_names = sorted(list(orig_locs.keys()))
tile_names_map = {name:idx for idx, name in enumerate(tile_names)}
matches_num = np.sum([len(m[0]) for m in matches.values()])
p0 = np.empty((len(orig_locs)*3, ), dtype=np.float32) # all triplets [theta1, t_x1, t_y1, theta2, t_x2, t_y2, ...]
if self._pre_translate:
# For debug:
solution1 = {name:[0, orig_locs[name][0], orig_locs[name][1]] for name, idx in tile_names_map.items()}
dists = Rigid2DOptimizer.compute_all_dists(matches, solution1, matches_num)
logger.report_event("pre optimization distances: min={}, mean={}, median={}, max={}".format(np.min(dists), np.mean(dists), np.median(dists), np.max(dists)), log_level=logging.INFO)
st_time = time.time()
# Find an initial translation only transformation for each tile (better than the initial assumption)
# solve for X
# Create a matrix A that is made of 1's, 0's and -1's of size matches_num*tiles_num,
# and a vector b s.t. b = - matches[0].x + matches[1].x (actually b will be a matches_num*2 matrix, one column for x and the other for y)
# We'll try to find x, s.t. A*x=b, and therefore each row (corresponding to a single match of a pair of tiles),
# will have 1 for the first tile of the match, -1 for the second tile of the match, and 0 elsewhere
#A = spp.csc_matrix( (matches_num, len(orig_locs)), dtype=np.float32 )
A = spp.lil_matrix( (matches_num, len(orig_locs)), dtype=np.float32 )
b = np.empty((matches_num, 2), dtype=np.float32)
start_idx = 0
for pair_name, pair_matches in matches.items():
pair_matches_len = len(pair_matches[0])
tile1_params_idx = tile_names_map[pair_name[0]]
tile2_params_idx = tile_names_map[pair_name[1]]
A[start_idx:start_idx + pair_matches_len, tile1_params_idx] = 1
A[start_idx:start_idx + pair_matches_len, tile2_params_idx] = -1
b[start_idx:start_idx + pair_matches_len] = - pair_matches[0] + pair_matches[1]
start_idx += pair_matches_len
# convert A to row sparse matrix, for faster computations
A = A.tocsr()
#p0_translate_x = np.array([orig_locs[k][0] for k in tile_names]) # [t_x1, t_x2, ...] with the original locations
Tx = lsqr(A, b[:, 0], damp=self._damping)[0]
Ty = lsqr(A, b[:, 1], damp=self._damping)[0]
logger.report_event("translation-only optimization time: {} seconds".format(time.time() - st_time), log_level=logging.INFO)
# Normalize all deltas to (0, 0)
Tx -= np.min(Tx)
Ty -= np.min(Ty)
p0[1::3] = Tx
p0[2::3] = Ty
# For debug:
#solution2 = {name:[0, p0[1::3][idx], p0[2::3][idx]] for name, idx in tile_names_map.items()}
solution2 = {name:[0, Tx[idx], Ty[idx]] for name, idx in tile_names_map.items()}
dists = Rigid2DOptimizer.compute_all_dists(matches, solution2, matches_num)
logger.report_event("post translation optimization distances: min={}, mean={}, median={}, max={}".format(np.min(dists), np.mean(dists), np.median(dists), np.max(dists)), log_level=logging.INFO)
else:
p0[1::3] = [orig_locs[k][0] for k in tile_names] # set default X to original location's X
p0[2::3] = [orig_locs[k][1] for k in tile_names] # set default Y to original location's Y
p0[::3] = 0 # Set default theta to 0
# Create a sparse matrix that has
st_time = time.time()
#res = least_squares(optimize_func, p0, args=(tile_names_map, matches, matches_num), verbose=2)
#res = least_squares(optimize_func, p0, loss='huber', f_scale=15, args=(tile_names_map, matches, matches_num), verbose=2)
#res = least_squares(optimize_func, p0, loss='soft_l1', f_scale=15, args=(tile_names_map, matches, matches_num), verbose=2)
# stepsize = 0.0001
# max_iterations = 1000
# res = gradient_descent(optimize_func, p0, max_iterations, stepsize, args=(tile_names_map, matches, matches_num))
huber_delta = 15 # Maximal L2 distance for a match to be considered inlier
res = self._gradient_descent(Rigid2DOptimizer.optimize_func, p0, Rigid2DOptimizer.grad_F_huber, args=(tile_names_map, matches, matches_num))
end_time = time.time()
logger.report_event("non-linear optimization time: {} seconds".format(end_time - st_time), log_level=logging.INFO)
solution = {}
if res is not None:
for name, idx in tile_names_map.items():
solution[name] = np.array(res[idx * 3:idx*3 + 3]) # Stores [theta, t_x, t_y] of the tile
else:
raise Exception("Could not find a valid solution to the optimization problem")
dists = Rigid2DOptimizer.compute_all_dists(matches, solution, matches_num)
logger.report_event("post optimization distances: min={}, mean={}, median={}, max={}".format(np.min(dists), np.mean(dists), np.median(dists), np.max(dists)), log_level=logging.INFO)
# create the optimized models for each tile
optimized_models = {name:RigidModel(res[idx*3], res[idx*3+1:idx*3+3]) for name, idx in tile_names_map.items()}
return optimized_models
# def fix_matches(orig_locs, matches, new_matches_num=4):
# # # Create "false matches" in case non are there
# # for pair_name, pair_matches in matches.values():
# # if len(pair_matches[0]) < 2:
# # print("Creating made up matches for pair: {} -> {}".format(os.path.basename(pair_name[0]), os.path.basename(pair_name[1])))
# # pair_matches[0] = np.zeros((new_matches_num, 2))
# # pair_matches[1] = np.zeros((new_matches_num, 2))
# # Remove any pair of matched tiles that don't have matches
# to_remove_keys = []
# for pair_name, pair_matches in matches.items():
# if len(pair_matches[0]) == 0:
# print("Removing no matches for pair: {} -> {}".format(os.path.basename(pair_name[0]), os.path.basename(pair_name[1])))
# to_remove_keys.append(pair_name)
#
# for k in to_remove_keys:
# del matches[k]
if __name__ == '__main__':
# in_orig_locs_fname = 'data/W05_Sec001_ROI466_mfovs_475_476_orig_locs.pkl'
# in_matches_fname = 'data/W05_Sec001_ROI466_mfovs_475_476.pkl'
# in_ts_fname = 'data/W05_Sec001_ROI466_mfovs_475_476.json'
# out_ts_fname = 'montaged_optimize3_W05_Sec001_ROI466_mfovs_475_476.json'
in_orig_locs_fname = 'data/W05_Sec001_ROI466_orig_locs.pkl'
in_matches_fname = 'data/W05_Sec001_ROI466.pkl'
in_ts_fname = 'data/W05_Sec001_ROI466.json'
out_ts_fname = 'montaged_optimize3_W05_Sec001_ROI466.json'
# Read the files
with open(in_orig_locs_fname, 'rb') as in_f:
orig_locs = pickle.load(in_f)
with open(in_matches_fname, 'rb') as in_f:
matches = pickle.load(in_f)
    #fix_matches(orig_locs, matches)  # fix_matches is commented out above
    optimizer = Rigid2DOptimizer(pre_translate=True)
    solution = optimizer.optimize(orig_locs, matches)
#common.export_rigid_tilespec(in_ts_fname, out_ts_fname, solution)
|
[
"scipy.sparse.linalg.lsqr",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.sum",
"numpy.abs",
"numpy.median",
"numpy.empty",
"numpy.empty_like",
"time.time",
"numpy.min",
"numpy.sin",
"pickle.load",
"numpy.array",
"numpy.cos",
"rh_renderer.models.RigidModel",
"numpy.mean",
"numpy.dot",
"numpy.max"
] |
[((939, 952), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (945, 952), True, 'import numpy as np\n'), ((973, 986), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (979, 986), True, 'import numpy as np\n'), ((1289, 1331), 'numpy.empty', 'np.empty', (['(matches_num,)'], {'dtype': 'np.float32'}), '((matches_num,), dtype=np.float32)\n', (1297, 1331), True, 'import numpy as np\n'), ((2441, 2483), 'numpy.empty', 'np.empty', (['(matches_num,)'], {'dtype': 'np.float32'}), '((matches_num,), dtype=np.float32)\n', (2449, 2483), True, 'import numpy as np\n'), ((3391, 3412), 'numpy.zeros_like', 'np.zeros_like', (['params'], {}), '(params)\n', (3404, 3412), True, 'import numpy as np\n'), ((13219, 13230), 'time.time', 'time.time', ([], {}), '()\n', (13228, 13230), False, 'import time\n'), ((14032, 14043), 'time.time', 'time.time', ([], {}), '()\n', (14041, 14043), False, 'import time\n'), ((16577, 16594), 'pickle.load', 'pickle.load', (['in_f'], {}), '(in_f)\n', (16588, 16594), False, 'import pickle\n'), ((16660, 16677), 'pickle.load', 'pickle.load', (['in_f'], {}), '(in_f)\n', (16671, 16677), False, 'import pickle\n'), ((1109, 1129), 'numpy.array', 'np.array', (['[t_x, t_y]'], {}), '([t_x, t_y])\n', (1117, 1129), True, 'import numpy as np\n'), ((4449, 4472), 'numpy.ones_like', 'np.ones_like', (['residuals'], {}), '(residuals)\n', (4461, 4472), True, 'import numpy as np\n'), ((6835, 6859), 'numpy.empty_like', 'np.empty_like', (['residuals'], {}), '(residuals)\n', (6848, 6859), True, 'import numpy as np\n'), ((7135, 7147), 'numpy.sum', 'np.sum', (['cost'], {}), '(cost)\n', (7141, 7147), True, 'import numpy as np\n'), ((10284, 10295), 'time.time', 'time.time', ([], {}), '()\n', (10293, 10295), False, 'import time\n'), ((11099, 11143), 'numpy.empty', 'np.empty', (['(matches_num, 2)'], {'dtype': 'np.float32'}), '((matches_num, 2), dtype=np.float32)\n', (11107, 11143), True, 'import numpy as np\n'), ((12268, 12278), 'numpy.min', 'np.min', (['Tx'], {}), '(Tx)\n', (12274, 12278), True, 'import numpy as np\n'), ((12297, 12307), 'numpy.min', 'np.min', (['Ty'], {}), '(Ty)\n', (12303, 12307), True, 'import numpy as np\n'), ((14842, 14896), 'rh_renderer.models.RigidModel', 'RigidModel', (['res[idx * 3]', 'res[idx * 3 + 1:idx * 3 + 3]'], {}), '(res[idx * 3], res[idx * 3 + 1:idx * 3 + 3])\n', (14852, 14896), False, 'from rh_renderer.models import RigidModel\n'), ((1002, 1066), 'numpy.dot', 'np.dot', (['[[cos_theta, -sin_theta], [sin_theta, cos_theta]]', 'pts.T'], {}), '([[cos_theta, -sin_theta], [sin_theta, cos_theta]], pts.T)\n', (1008, 1066), True, 'import numpy as np\n'), ((2093, 2120), 'numpy.sum', 'np.sum', (['(deltas ** 2)'], {'axis': '(1)'}), '(deltas ** 2, axis=1)\n', (2099, 2120), True, 'import numpy as np\n'), ((3111, 3138), 'numpy.sum', 'np.sum', (['(deltas ** 2)'], {'axis': '(1)'}), '(deltas ** 2, axis=1)\n', (3117, 3138), True, 'import numpy as np\n'), ((4138, 4165), 'numpy.sum', 'np.sum', (['(deltas ** 2)'], {'axis': '(1)'}), '(deltas ** 2, axis=1)\n', (4144, 4165), True, 'import numpy as np\n'), ((5671, 5705), 'numpy.dot', 'np.dot', (['delta_x', 'grad_f_multiplier'], {}), '(delta_x, grad_f_multiplier)\n', (5677, 5705), True, 'import numpy as np\n'), ((5752, 5786), 'numpy.dot', 'np.dot', (['delta_y', 'grad_f_multiplier'], {}), '(delta_y, grad_f_multiplier)\n', (5758, 5786), True, 'import numpy as np\n'), ((11959, 11995), 'scipy.sparse.linalg.lsqr', 'lsqr', (['A', 'b[:, 0]'], {'damp': 'self._damping'}), '(A, b[:, 0], damp=self._damping)\n', (11963, 11995), False, 'from scipy.sparse.linalg 
import lsqr\n'), ((12016, 12052), 'scipy.sparse.linalg.lsqr', 'lsqr', (['A', 'b[:, 1]'], {'damp': 'self._damping'}), '(A, b[:, 1], damp=self._damping)\n', (12020, 12052), False, 'from scipy.sparse.linalg import lsqr\n'), ((14305, 14339), 'numpy.array', 'np.array', (['res[idx * 3:idx * 3 + 3]'], {}), '(res[idx * 3:idx * 3 + 3])\n', (14313, 14339), True, 'import numpy as np\n'), ((14667, 14680), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (14673, 14680), True, 'import numpy as np\n'), ((14682, 14696), 'numpy.mean', 'np.mean', (['dists'], {}), '(dists)\n', (14689, 14696), True, 'import numpy as np\n'), ((14698, 14714), 'numpy.median', 'np.median', (['dists'], {}), '(dists)\n', (14707, 14714), True, 'import numpy as np\n'), ((14716, 14729), 'numpy.max', 'np.max', (['dists'], {}), '(dists)\n', (14722, 14729), True, 'import numpy as np\n'), ((5175, 5285), 'numpy.dot', 'np.dot', (['(delta_x * (-pair_matches[0][:, 0] * params[theta1_idx] - pair_matches[0][:,\n 1]))', 'grad_f_multiplier'], {}), '(delta_x * (-pair_matches[0][:, 0] * params[theta1_idx] -\n pair_matches[0][:, 1]), grad_f_multiplier)\n', (5181, 5285), True, 'import numpy as np\n'), ((5428, 5538), 'numpy.dot', 'np.dot', (['(delta_y * (pair_matches[0][:, 0] - pair_matches[0][:, 1] * params[theta1_idx])\n )', 'grad_f_multiplier'], {}), '(delta_y * (pair_matches[0][:, 0] - pair_matches[0][:, 1] * params[\n theta1_idx]), grad_f_multiplier)\n', (5434, 5538), True, 'import numpy as np\n'), ((5953, 6063), 'numpy.dot', 'np.dot', (['(delta_x * (pair_matches[1][:, 0] * params[theta2_idx] + pair_matches[1][:, 1])\n )', 'grad_f_multiplier'], {}), '(delta_x * (pair_matches[1][:, 0] * params[theta2_idx] + pair_matches\n [1][:, 1]), grad_f_multiplier)\n', (5959, 6063), True, 'import numpy as np\n'), ((6205, 6316), 'numpy.dot', 'np.dot', (['(delta_y * (-pair_matches[1][:, 0] + pair_matches[1][:, 1] * params[\n theta2_idx]))', 'grad_f_multiplier'], {}), '(delta_y * (-pair_matches[1][:, 0] + pair_matches[1][:, 1] * params[\n theta2_idx]), grad_f_multiplier)\n', (6211, 6316), True, 'import numpy as np\n'), ((6450, 6484), 'numpy.dot', 'np.dot', (['delta_x', 'grad_f_multiplier'], {}), '(delta_x, grad_f_multiplier)\n', (6456, 6484), True, 'import numpy as np\n'), ((6532, 6566), 'numpy.dot', 'np.dot', (['delta_y', 'grad_f_multiplier'], {}), '(delta_y, grad_f_multiplier)\n', (6538, 6566), True, 'import numpy as np\n'), ((10172, 10185), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (10178, 10185), True, 'import numpy as np\n'), ((10187, 10201), 'numpy.mean', 'np.mean', (['dists'], {}), '(dists)\n', (10194, 10201), True, 'import numpy as np\n'), ((10203, 10219), 'numpy.median', 'np.median', (['dists'], {}), '(dists)\n', (10212, 10219), True, 'import numpy as np\n'), ((10221, 10234), 'numpy.max', 'np.max', (['dists'], {}), '(dists)\n', (10227, 10234), True, 'import numpy as np\n'), ((12790, 12803), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (12796, 12803), True, 'import numpy as np\n'), ((12805, 12819), 'numpy.mean', 'np.mean', (['dists'], {}), '(dists)\n', (12812, 12819), True, 'import numpy as np\n'), ((12821, 12837), 'numpy.median', 'np.median', (['dists'], {}), '(dists)\n', (12830, 12837), True, 'import numpy as np\n'), ((12839, 12852), 'numpy.max', 'np.max', (['dists'], {}), '(dists)\n', (12845, 12852), True, 'import numpy as np\n'), ((8316, 8338), 'numpy.abs', 'np.abs', (['(cur_p - prev_p)'], {}), '(cur_p - prev_p)\n', (8322, 8338), True, 'import numpy as np\n'), ((12144, 12155), 'time.time', 'time.time', ([], {}), '()\n', (12153, 
12155), False, 'import time\n')]
|
from abc import ABC, abstractmethod, abstractclassmethod
from typing import Dict, Optional
import pandas as pd
import numpy as np
from wiseml.models.types.task_type import TaskType
from wiseml.models.types.model_type import ModelType
class TrainSet:
def __init__(self, X: pd.DataFrame, y: pd.Series):
if X.shape[0] != y.shape[0]:
raise ValueError("Len of X and y should be equal")
self.indices = np.arange(X.shape[0])
self.X = X
self.y = y
def __iter__(self):
for i in self.indices:
yield self.X.iloc[i], self.y.iloc[i]
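# Illustrative usage: iterating a TrainSet yields (features_row, label) pairs,
# e.g. ts = TrainSet(pd.DataFrame({'a': [1, 2]}), pd.Series([0, 1]));
# list(ts) gives two such pairs.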
class Model(ABC):
def __init__(self):
self.task_type: Optional[TaskType] = None
self.model_type: Optional[ModelType] = None
@abstractmethod
def fit(self, *args, **kwargs): pass
@abstractmethod
def predict(self, y_true, y_pred, *args, **kwargs): pass
@classmethod
@abstractmethod
def save(cls, path): pass
@classmethod
@abstractmethod
def load(cls, path): pass
|
[
"numpy.arange"
] |
[((434, 455), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (443, 455), True, 'import numpy as np\n')]
|
"""Code for AMS 2019 short course."""
import copy
import glob
import errno
import random
import os.path
import json
import pickle
import time
import calendar
import numpy
import netCDF4
import keras
from keras import backend as K
from sklearn.metrics import auc as scikit_learn_auc
import matplotlib.colors
import matplotlib.pyplot as pyplot
from module_4 import keras_metrics
from module_4 import roc_curves
from module_4 import performance_diagrams
from module_4 import attributes_diagrams
# Directories.
# MODULE4_DIR_NAME = '.'
# SHORT_COURSE_DIR_NAME = '..'
MODULE4_DIR_NAME = os.path.dirname(__file__)
SHORT_COURSE_DIR_NAME = os.path.dirname(MODULE4_DIR_NAME)
DEFAULT_IMAGE_DIR_NAME = '{0:s}/data/track_data_ncar_ams_3km_nc_small'.format(
SHORT_COURSE_DIR_NAME)
# Plotting constants.
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
FIGURE_RESOLUTION_DPI = 300
BAR_GRAPH_FACE_COLOUR = numpy.array([166, 206, 227], dtype=float) / 255
BAR_GRAPH_EDGE_COLOUR = numpy.full(3, 0.)
BAR_GRAPH_EDGE_WIDTH = 2.
SALIENCY_COLOUR_MAP_OBJECT = pyplot.cm.Greys
FONT_SIZE = 30
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
# Naming constants.
CSV_METADATA_COLUMNS = [
'Step_ID', 'Track_ID', 'Ensemble_Name', 'Ensemble_Member', 'Run_Date',
'Valid_Date', 'Forecast_Hour', 'Valid_Hour_UTC'
]
CSV_EXTRANEOUS_COLUMNS = [
'Duration', 'Centroid_Lon', 'Centroid_Lat', 'Centroid_X', 'Centroid_Y',
'Storm_Motion_U', 'Storm_Motion_V', 'Matched', 'Max_Hail_Size',
'Num_Matches', 'Shape', 'Location', 'Scale'
]
CSV_TARGET_NAME = 'RVORT1_MAX-future_max'
TARGET_NAME = 'max_future_vorticity_s01'
NETCDF_REFL_NAME = 'REFL_COM_curr'
NETCDF_TEMP_NAME = 'T2_curr'
NETCDF_U_WIND_NAME = 'U10_curr'
NETCDF_V_WIND_NAME = 'V10_curr'
NETCDF_PREDICTOR_NAMES = [
NETCDF_REFL_NAME, NETCDF_TEMP_NAME, NETCDF_U_WIND_NAME, NETCDF_V_WIND_NAME
]
REFLECTIVITY_NAME = 'reflectivity_dbz'
TEMPERATURE_NAME = 'temperature_kelvins'
U_WIND_NAME = 'u_wind_m_s01'
V_WIND_NAME = 'v_wind_m_s01'
PREDICTOR_NAMES = [
REFLECTIVITY_NAME, TEMPERATURE_NAME, U_WIND_NAME, V_WIND_NAME
]
NETCDF_TRACK_ID_NAME = 'track_id'
NETCDF_TRACK_STEP_NAME = 'track_step'
NETCDF_TARGET_NAME = 'RVORT1_MAX_future'
NUM_VALUES_KEY = 'num_values'
MEAN_VALUE_KEY = 'mean_value'
MEAN_OF_SQUARES_KEY = 'mean_of_squares'
STORM_IDS_KEY = 'storm_ids'
STORM_STEPS_KEY = 'storm_steps'
PREDICTOR_NAMES_KEY = 'predictor_names'
PREDICTOR_MATRIX_KEY = 'predictor_matrix'
TARGET_NAME_KEY = 'target_name'
TARGET_MATRIX_KEY = 'target_matrix'
TRAINING_FILES_KEY = 'training_file_names'
NORMALIZATION_DICT_KEY = 'normalization_dict'
BINARIZATION_THRESHOLD_KEY = 'binarization_threshold'
NUM_EXAMPLES_PER_BATCH_KEY = 'num_examples_per_batch'
NUM_TRAINING_BATCHES_KEY = 'num_training_batches_per_epoch'
VALIDATION_FILES_KEY = 'validation_file_names'
NUM_VALIDATION_BATCHES_KEY = 'num_validation_batches_per_epoch'
CNN_FILE_KEY = 'cnn_file_name'
CNN_FEATURE_LAYER_KEY = 'cnn_feature_layer_name'
PERMUTED_PREDICTORS_KEY = 'permuted_predictor_name_by_step'
HIGHEST_COSTS_KEY = 'highest_cost_by_step'
ORIGINAL_COST_KEY = 'original_cost'
STEP1_PREDICTORS_KEY = 'predictor_names_step1'
STEP1_COSTS_KEY = 'costs_step1'
EOF_MATRIX_KEY = 'eof_matrix'
FEATURE_MEANS_KEY = 'feature_means'
FEATURE_STDEVS_KEY = 'feature_standard_deviations'
NOVEL_IMAGES_ACTUAL_KEY = 'novel_image_matrix_actual'
NOVEL_IMAGES_UPCONV_KEY = 'novel_image_matrix_upconv'
NOVEL_IMAGES_UPCONV_SVD_KEY = 'novel_image_matrix_upconv_svd'
# More plotting constants.
THIS_COLOUR_LIST = [
numpy.array([4, 233, 231]), numpy.array([1, 159, 244]),
numpy.array([3, 0, 244]), numpy.array([2, 253, 2]),
numpy.array([1, 197, 1]), numpy.array([0, 142, 0]),
numpy.array([253, 248, 2]), numpy.array([229, 188, 0]),
numpy.array([253, 149, 0]), numpy.array([253, 0, 0]),
numpy.array([212, 0, 0]), numpy.array([188, 0, 0]),
numpy.array([248, 0, 253]), numpy.array([152, 84, 198])
]
for p in range(len(THIS_COLOUR_LIST)):
THIS_COLOUR_LIST[p] = THIS_COLOUR_LIST[p].astype(float) / 255
REFL_COLOUR_MAP_OBJECT = matplotlib.colors.ListedColormap(THIS_COLOUR_LIST)
REFL_COLOUR_MAP_OBJECT.set_under(numpy.ones(3))
PREDICTOR_TO_COLOUR_MAP_DICT = {
TEMPERATURE_NAME: pyplot.cm.YlOrRd,
REFLECTIVITY_NAME: REFL_COLOUR_MAP_OBJECT,
U_WIND_NAME: pyplot.cm.seismic,
V_WIND_NAME: pyplot.cm.seismic
}
THESE_COLOUR_BOUNDS = numpy.array(
[0.1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70])
REFL_COLOUR_NORM_OBJECT = matplotlib.colors.BoundaryNorm(
THESE_COLOUR_BOUNDS, REFL_COLOUR_MAP_OBJECT.N)
# Deep-learning constants.
L1_WEIGHT = 0.
L2_WEIGHT = 0.001
NUM_PREDICTORS_TO_FIRST_NUM_FILTERS = 8
NUM_CONV_LAYER_SETS = 2
NUM_CONV_LAYERS_PER_SET = 2
NUM_CONV_FILTER_ROWS = 3
NUM_CONV_FILTER_COLUMNS = 3
CONV_LAYER_DROPOUT_FRACTION = None
USE_BATCH_NORMALIZATION = True
SLOPE_FOR_RELU = 0.2
NUM_POOLING_ROWS = 2
NUM_POOLING_COLUMNS = 2
NUM_DENSE_LAYERS = 3
DENSE_LAYER_DROPOUT_FRACTION = 0.5
NUM_SMOOTHING_FILTER_ROWS = 5
NUM_SMOOTHING_FILTER_COLUMNS = 5
MIN_XENTROPY_DECREASE_FOR_EARLY_STOP = 0.005
MIN_MSE_DECREASE_FOR_EARLY_STOP = 0.005
NUM_EPOCHS_FOR_EARLY_STOPPING = 5
LIST_OF_METRIC_FUNCTIONS = [
keras_metrics.accuracy, keras_metrics.binary_accuracy,
keras_metrics.binary_csi, keras_metrics.binary_frequency_bias,
keras_metrics.binary_pod, keras_metrics.binary_pofd,
keras_metrics.binary_peirce_score, keras_metrics.binary_success_ratio,
keras_metrics.binary_focn
]
METRIC_FUNCTION_DICT = {
'accuracy': keras_metrics.accuracy,
'binary_accuracy': keras_metrics.binary_accuracy,
'binary_csi': keras_metrics.binary_csi,
'binary_frequency_bias': keras_metrics.binary_frequency_bias,
'binary_pod': keras_metrics.binary_pod,
'binary_pofd': keras_metrics.binary_pofd,
'binary_peirce_score': keras_metrics.binary_peirce_score,
'binary_success_ratio': keras_metrics.binary_success_ratio,
'binary_focn': keras_metrics.binary_focn
}
DEFAULT_NUM_BWO_ITERATIONS = 200
DEFAULT_BWO_LEARNING_RATE = 0.01
# Misc constants.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
MINOR_SEPARATOR_STRING = '\n\n' + '-' * 50 + '\n\n'
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
MIN_PROBABILITY = 1e-15
MAX_PROBABILITY = 1. - MIN_PROBABILITY
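# 1 knot = 1.852 km/h and 1 m s^-1 = 3.6 km/h, so multiplying by 3.6 / 1.852
# converts metres per second to knots.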
METRES_PER_SECOND_TO_KT = 3.6 / 1.852
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
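# Minimal round-trip sketch for the two time-conversion methods above: a date
# string converted to Unix time and back should come out unchanged.
def time_conversion_example():
    """Converts a date string to Unix time and back again."""
    unix_time_sec = time_string_to_unix(
        time_string='20150701', time_format=DATE_FORMAT)
    time_string = time_unix_to_string(
        unix_time_sec=unix_time_sec, time_format=DATE_FORMAT)
    print('"20150701" -> {0:d} seconds -> "{1:s}"'.format(
        unix_time_sec, time_string))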
def _image_file_name_to_date(netcdf_file_name):
"""Parses date from name of image (NetCDF) file.
:param netcdf_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
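    Example: "NCARSTORM_20150701-0000_d01_model_patches.nc" yields "20150701".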
"""
pathless_file_name = os.path.split(netcdf_file_name)[-1]
date_string = pathless_file_name.replace(
'NCARSTORM_', '').replace('-0000_d01_model_patches.nc', '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
def find_many_image_files(first_date_string, last_date_string,
image_dir_name=DEFAULT_IMAGE_DIR_NAME):
"""Finds image (NetCDF) files in the given date range.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:param image_dir_name: Name of directory with image (NetCDF) files.
:return: netcdf_file_names: 1-D list of paths to image files.
"""
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT)
netcdf_file_pattern = (
'{0:s}/NCARSTORM_{1:s}-0000_d01_model_patches.nc'
).format(image_dir_name, DATE_FORMAT_REGEX)
netcdf_file_names = glob.glob(netcdf_file_pattern)
netcdf_file_names.sort()
file_date_strings = [_image_file_name_to_date(f) for f in netcdf_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
return [netcdf_file_names[k] for k in good_indices]
def read_image_file(netcdf_file_name):
"""Reads storm-centered images from NetCDF file.
E = number of examples (storm objects) in file
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param netcdf_file_name: Path to input file.
:return: image_dict: Dictionary with the following keys.
    image_dict['storm_ids']: length-E numpy array of storm IDs (integers).
image_dict['storm_steps']: length-E numpy array of storm steps (integers).
image_dict['predictor_names']: length-C list of predictor names.
image_dict['predictor_matrix']: E-by-M-by-N-by-C numpy array of predictor
values.
image_dict['target_name']: Name of target variable.
image_dict['target_matrix']: E-by-M-by-N numpy array of target values.
"""
dataset_object = netCDF4.Dataset(netcdf_file_name)
storm_ids = numpy.array(
dataset_object.variables[NETCDF_TRACK_ID_NAME][:], dtype=int)
storm_steps = numpy.array(
dataset_object.variables[NETCDF_TRACK_STEP_NAME][:], dtype=int)
predictor_matrix = None
for this_predictor_name in NETCDF_PREDICTOR_NAMES:
this_predictor_matrix = numpy.array(
dataset_object.variables[this_predictor_name][:], dtype=float)
this_predictor_matrix = numpy.expand_dims(
this_predictor_matrix, axis=-1)
if predictor_matrix is None:
predictor_matrix = this_predictor_matrix + 0.
else:
predictor_matrix = numpy.concatenate(
(predictor_matrix, this_predictor_matrix), axis=-1)
target_matrix = numpy.array(
dataset_object.variables[NETCDF_TARGET_NAME][:], dtype=float)
return {
STORM_IDS_KEY: storm_ids,
STORM_STEPS_KEY: storm_steps,
PREDICTOR_NAMES_KEY: PREDICTOR_NAMES,
PREDICTOR_MATRIX_KEY: predictor_matrix,
TARGET_NAME_KEY: TARGET_NAME,
TARGET_MATRIX_KEY: target_matrix
}
def read_many_image_files(netcdf_file_names):
"""Reads storm-centered images from many NetCDF files.
:param netcdf_file_names: 1-D list of paths to input files.
:return: image_dict: See doc for `read_image_file`.
"""
image_dict = None
keys_to_concat = [
STORM_IDS_KEY, STORM_STEPS_KEY, PREDICTOR_MATRIX_KEY, TARGET_MATRIX_KEY
]
for this_file_name in netcdf_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_image_file(this_file_name)
if image_dict is None:
image_dict = copy.deepcopy(this_image_dict)
continue
for this_key in keys_to_concat:
image_dict[this_key] = numpy.concatenate(
(image_dict[this_key], this_image_dict[this_key]), axis=0)
return image_dict
def image_files_example1():
"""Runs Example 1 for feature files."""
image_file_names = find_many_image_files(
first_date_string='20150701', last_date_string='20150731')
image_dict = read_many_image_files(image_file_names)
print(MINOR_SEPARATOR_STRING)
print('Variables in dictionary are as follows:')
for this_key in image_dict.keys():
print(this_key)
print('\nPredictor variables are as follows:')
predictor_names = image_dict[PREDICTOR_NAMES_KEY]
for this_name in predictor_names:
print(this_name)
these_predictor_values = image_dict[PREDICTOR_MATRIX_KEY][0, :5, :5, 0]
print(
('\nSome values of predictor variable "{0:s}" for first storm object:'
'\n{1:s}'
).format(predictor_names[0], str(these_predictor_values))
)
these_target_values = image_dict[TARGET_MATRIX_KEY][0, :5, :5]
print(
('\nSome values of target variable "{0:s}" for first storm object:'
'\n{1:s}'
).format(image_dict[TARGET_NAME_KEY], str(these_target_values))
)
def find_training_files_example():
"""Finds training files."""
training_file_names = find_many_image_files(
first_date_string='20100101', last_date_string='20141231')
validation_file_names = find_many_image_files(
first_date_string='20150101', last_date_string='20151231')
def _init_figure_panels(num_rows, num_columns, horizontal_space_fraction=0.1,
vertical_space_fraction=0.1):
"""Initializes paneled figure.
:param num_rows: Number of panel rows.
:param num_columns: Number of panel columns.
:param horizontal_space_fraction: Horizontal space between panels (as
fraction of panel size).
:param vertical_space_fraction: Vertical space between panels (as fraction
of panel size).
:return: figure_object: Instance of `matplotlib.figure.Figure`.
:return: axes_objects_2d_list: 2-D list, where axes_objects_2d_list[i][j] is
the handle (instance of `matplotlib.axes._subplots.AxesSubplot`) for the
[i]th row and [j]th column.
"""
figure_object, axes_objects_2d_list = pyplot.subplots(
num_rows, num_columns, sharex=False, sharey=False,
figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
if num_rows == num_columns == 1:
axes_objects_2d_list = [[axes_objects_2d_list]]
elif num_columns == 1:
axes_objects_2d_list = [[a] for a in axes_objects_2d_list]
elif num_rows == 1:
axes_objects_2d_list = [axes_objects_2d_list]
pyplot.subplots_adjust(
left=0.02, bottom=0.02, right=0.98, top=0.95,
hspace=vertical_space_fraction, wspace=horizontal_space_fraction)
return figure_object, axes_objects_2d_list
def _add_colour_bar(
axes_object, colour_map_object, values_to_colour, min_colour_value,
max_colour_value, colour_norm_object=None,
orientation_string='vertical', extend_min=True, extend_max=True):
"""Adds colour bar to existing axes.
:param axes_object: Existing axes (instance of
`matplotlib.axes._subplots.AxesSubplot`).
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param values_to_colour: numpy array of values to colour.
:param min_colour_value: Minimum value in colour map.
:param max_colour_value: Max value in colour map.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map. If `colour_norm_object is None`,
will assume that scale is linear.
:param orientation_string: Orientation of colour bar ("vertical" or
"horizontal").
:param extend_min: Boolean flag. If True, the bottom of the colour bar will
have an arrow. If False, it will be a flat line, suggesting that lower
values are not possible.
:param extend_max: Same but for top of colour bar.
:return: colour_bar_object: Colour bar (instance of
`matplotlib.pyplot.colorbar`) created by this method.
"""
if colour_norm_object is None:
colour_norm_object = matplotlib.colors.Normalize(
vmin=min_colour_value, vmax=max_colour_value, clip=False)
scalar_mappable_object = pyplot.cm.ScalarMappable(
cmap=colour_map_object, norm=colour_norm_object)
scalar_mappable_object.set_array(values_to_colour)
if extend_min and extend_max:
extend_string = 'both'
elif extend_min:
extend_string = 'min'
elif extend_max:
extend_string = 'max'
else:
extend_string = 'neither'
if orientation_string == 'horizontal':
padding = 0.075
else:
padding = 0.05
colour_bar_object = pyplot.colorbar(
ax=axes_object, mappable=scalar_mappable_object,
orientation=orientation_string, pad=padding, extend=extend_string)
colour_bar_object.ax.tick_params(labelsize=FONT_SIZE)
return colour_bar_object
def plot_predictor_2d(
predictor_matrix, colour_map_object, colour_norm_object=None,
min_colour_value=None, max_colour_value=None, axes_object=None):
"""Plots predictor variable on 2-D grid.
If `colour_norm_object is None`, both `min_colour_value` and
`max_colour_value` must be specified.
M = number of rows in grid
N = number of columns in grid
:param predictor_matrix: M-by-N numpy array of predictor values.
:param colour_map_object: Instance of `matplotlib.pyplot.cm`.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
Will plot on these axes.
:return: colour_bar_object: Colour bar (instance of
`matplotlib.pyplot.colorbar`) created by this method.
"""
if axes_object is None:
_, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
if colour_norm_object is not None:
min_colour_value = colour_norm_object.boundaries[0]
max_colour_value = colour_norm_object.boundaries[-1]
axes_object.pcolormesh(
predictor_matrix, cmap=colour_map_object, norm=colour_norm_object,
vmin=min_colour_value, vmax=max_colour_value, shading='flat',
edgecolors='None')
axes_object.set_xticks([])
axes_object.set_yticks([])
return _add_colour_bar(
axes_object=axes_object, colour_map_object=colour_map_object,
values_to_colour=predictor_matrix, min_colour_value=min_colour_value,
max_colour_value=max_colour_value)
def plot_wind_2d(u_wind_matrix_m_s01, v_wind_matrix_m_s01, axes_object=None):
"""Plots wind velocity on 2-D grid.
M = number of rows in grid
N = number of columns in grid
:param u_wind_matrix_m_s01: M-by-N numpy array of eastward components
(metres per second).
:param v_wind_matrix_m_s01: M-by-N numpy array of northward components
(metres per second).
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
Will plot on these axes.
"""
if axes_object is None:
_, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
num_grid_rows = u_wind_matrix_m_s01.shape[0]
num_grid_columns = u_wind_matrix_m_s01.shape[1]
x_coords_unique = numpy.linspace(
0, num_grid_columns, num=num_grid_columns + 1, dtype=float)
x_coords_unique = x_coords_unique[:-1]
x_coords_unique = x_coords_unique + numpy.diff(x_coords_unique[:2]) / 2
y_coords_unique = numpy.linspace(
0, num_grid_rows, num=num_grid_rows + 1, dtype=float)
y_coords_unique = y_coords_unique[:-1]
y_coords_unique = y_coords_unique + numpy.diff(y_coords_unique[:2]) / 2
x_coord_matrix, y_coord_matrix = numpy.meshgrid(x_coords_unique,
y_coords_unique)
speed_matrix_m_s01 = numpy.sqrt(u_wind_matrix_m_s01 ** 2
+ v_wind_matrix_m_s01 ** 2)
axes_object.barbs(
x_coord_matrix, y_coord_matrix,
u_wind_matrix_m_s01 * METRES_PER_SECOND_TO_KT,
v_wind_matrix_m_s01 * METRES_PER_SECOND_TO_KT,
speed_matrix_m_s01 * METRES_PER_SECOND_TO_KT, color='k', length=6,
sizes={'emptybarb': 0.1}, fill_empty=True, rounding=False)
axes_object.set_xlim(0, num_grid_columns)
axes_object.set_ylim(0, num_grid_rows)
def plot_many_predictors_with_barbs(
predictor_matrix, predictor_names, min_colour_temp_kelvins,
max_colour_temp_kelvins):
"""Plots many predictor variables on 2-D grid with wind barbs overlain.
M = number of rows in grid
N = number of columns in grid
C = number of predictors
:param predictor_matrix: M-by-N-by-C numpy array of predictor values.
:param predictor_names: length-C list of predictor names.
:param min_colour_temp_kelvins: Minimum value in temperature colour scheme.
:param max_colour_temp_kelvins: Max value in temperature colour scheme.
:return: figure_object: See doc for `_init_figure_panels`.
:return: axes_objects_2d_list: Same.
"""
u_wind_matrix_m_s01 = predictor_matrix[
..., predictor_names.index(U_WIND_NAME)]
v_wind_matrix_m_s01 = predictor_matrix[
..., predictor_names.index(V_WIND_NAME)]
non_wind_predictor_names = [
p for p in predictor_names if p not in [U_WIND_NAME, V_WIND_NAME]
]
figure_object, axes_objects_2d_list = _init_figure_panels(
num_rows=len(non_wind_predictor_names), num_columns=1)
for m in range(len(non_wind_predictor_names)):
this_predictor_index = predictor_names.index(
non_wind_predictor_names[m])
if non_wind_predictor_names[m] == REFLECTIVITY_NAME:
this_colour_norm_object = REFL_COLOUR_NORM_OBJECT
this_min_colour_value = None
this_max_colour_value = None
else:
this_colour_norm_object = None
this_min_colour_value = min_colour_temp_kelvins + 0.
this_max_colour_value = max_colour_temp_kelvins + 0.
this_colour_bar_object = plot_predictor_2d(
predictor_matrix=predictor_matrix[..., this_predictor_index],
colour_map_object=PREDICTOR_TO_COLOUR_MAP_DICT[
non_wind_predictor_names[m]],
colour_norm_object=this_colour_norm_object,
min_colour_value=this_min_colour_value,
max_colour_value=this_max_colour_value,
axes_object=axes_objects_2d_list[m][0])
plot_wind_2d(u_wind_matrix_m_s01=u_wind_matrix_m_s01,
v_wind_matrix_m_s01=v_wind_matrix_m_s01,
axes_object=axes_objects_2d_list[m][0])
this_colour_bar_object.set_label(non_wind_predictor_names[m])
return figure_object, axes_objects_2d_list
def plot_many_predictors_sans_barbs(
predictor_matrix, predictor_names, min_colour_temp_kelvins,
max_colour_temp_kelvins, max_colour_wind_speed_m_s01):
"""Plots many predictor variables on 2-D grid; no wind barbs overlain.
In this case, both u-wind and v-wind are plotted as separate maps.
M = number of rows in grid
N = number of columns in grid
C = number of predictors
:param predictor_matrix: M-by-N-by-C numpy array of predictor values.
:param predictor_names: length-C list of predictor names.
:param min_colour_temp_kelvins: Minimum value in temperature colour scheme.
:param max_colour_temp_kelvins: Max value in temperature colour scheme.
:param max_colour_wind_speed_m_s01: Max wind speed (metres per second) in
        colour maps for both u- and v-components. The minimum wind speed will be
`-1 * max_colour_wind_speed_m_s01`, so the diverging colour scheme will
be zero-centered.
:return: figure_object: See doc for `_init_figure_panels`.
:return: axes_objects_2d_list: Same.
"""
num_predictors = len(predictor_names)
num_panel_rows = int(numpy.floor(numpy.sqrt(num_predictors)))
num_panel_columns = int(numpy.ceil(float(num_predictors) / num_panel_rows))
figure_object, axes_objects_2d_list = _init_figure_panels(
num_rows=num_panel_rows, num_columns=num_panel_columns)
for i in range(num_panel_rows):
for j in range(num_panel_columns):
this_linear_index = i * num_panel_columns + j
if this_linear_index >= num_predictors:
break
this_colour_map_object = PREDICTOR_TO_COLOUR_MAP_DICT[
predictor_names[this_linear_index]]
if predictor_names[this_linear_index] == REFLECTIVITY_NAME:
this_colour_norm_object = REFL_COLOUR_NORM_OBJECT
this_min_colour_value = None
this_max_colour_value = None
elif predictor_names[this_linear_index] == TEMPERATURE_NAME:
this_colour_norm_object = None
this_min_colour_value = min_colour_temp_kelvins + 0.
this_max_colour_value = max_colour_temp_kelvins + 0.
else:
this_colour_norm_object = None
this_min_colour_value = -1 * max_colour_wind_speed_m_s01
this_max_colour_value = max_colour_wind_speed_m_s01 + 0.
this_colour_bar_object = plot_predictor_2d(
predictor_matrix=predictor_matrix[..., this_linear_index],
colour_map_object=this_colour_map_object,
colour_norm_object=this_colour_norm_object,
min_colour_value=this_min_colour_value,
max_colour_value=this_max_colour_value,
axes_object=axes_objects_2d_list[i][j])
this_colour_bar_object.set_label(predictor_names[this_linear_index])
return figure_object, axes_objects_2d_list
def plot_predictors_example1(validation_file_names):
"""Plots all predictors for random example (storm object).
:param validation_file_names: 1-D list of paths to input files.
"""
validation_image_dict = read_many_image_files(validation_file_names)
print(SEPARATOR_STRING)
predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][0, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
temperature_matrix_kelvins = predictor_matrix[
..., predictor_names.index(TEMPERATURE_NAME)]
plot_many_predictors_with_barbs(
predictor_matrix=predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=numpy.percentile(temperature_matrix_kelvins, 1),
max_colour_temp_kelvins=numpy.percentile(temperature_matrix_kelvins, 99)
)
pyplot.show()
def plot_predictors_example2(validation_image_dict):
"""Plots all predictors for example with greatest future vorticity.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
example_index = numpy.unravel_index(
numpy.argmax(target_matrix_s01), target_matrix_s01.shape
)[0]
predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][
example_index, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
temperature_matrix_kelvins = predictor_matrix[
..., predictor_names.index(TEMPERATURE_NAME)]
plot_many_predictors_with_barbs(
predictor_matrix=predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=numpy.percentile(temperature_matrix_kelvins, 1),
max_colour_temp_kelvins=numpy.percentile(temperature_matrix_kelvins, 99)
)
pyplot.show()
def _update_normalization_params(intermediate_normalization_dict, new_values):
"""Updates normalization params for one predictor.
:param intermediate_normalization_dict: Dictionary with the following keys.
intermediate_normalization_dict['num_values']: Number of values on which
current estimates are based.
intermediate_normalization_dict['mean_value']: Current estimate for mean.
intermediate_normalization_dict['mean_of_squares']: Current mean of squared
values.
:param new_values: numpy array of new values (will be used to update
`intermediate_normalization_dict`).
:return: intermediate_normalization_dict: Same as input but with updated
values.
"""
if MEAN_VALUE_KEY not in intermediate_normalization_dict:
intermediate_normalization_dict = {
NUM_VALUES_KEY: 0,
MEAN_VALUE_KEY: 0.,
MEAN_OF_SQUARES_KEY: 0.
}
these_means = numpy.array([
intermediate_normalization_dict[MEAN_VALUE_KEY], numpy.mean(new_values)
])
these_weights = numpy.array([
intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size
])
intermediate_normalization_dict[MEAN_VALUE_KEY] = numpy.average(
these_means, weights=these_weights)
these_means = numpy.array([
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY],
numpy.mean(new_values ** 2)
])
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] = numpy.average(
these_means, weights=these_weights)
intermediate_normalization_dict[NUM_VALUES_KEY] += new_values.size
return intermediate_normalization_dict
def _get_standard_deviation(intermediate_normalization_dict):
"""Computes stdev from intermediate normalization params.
:param intermediate_normalization_dict: See doc for
`_update_normalization_params`.
:return: standard_deviation: Standard deviation.
"""
num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])
multiplier = num_values / (num_values - 1)
return numpy.sqrt(multiplier * (
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -
intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2
))
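# Minimal sanity-check sketch for the two helper methods above: updating the
# intermediate params with two chunks of a synthetic array should reproduce the
# pooled mean and pooled standard deviation (ddof=1) of the full array.
def incremental_normalization_example():
    """Checks incremental normalization params against pooled numpy values."""
    all_values = numpy.random.normal(loc=5., scale=2., size=1000)
    intermediate_dict = _update_normalization_params(
        intermediate_normalization_dict={}, new_values=all_values[:400])
    intermediate_dict = _update_normalization_params(
        intermediate_normalization_dict=intermediate_dict,
        new_values=all_values[400:])
    print('Incremental mean = {0:.4f} ... pooled mean = {1:.4f}'.format(
        intermediate_dict[MEAN_VALUE_KEY], numpy.mean(all_values)))
    print('Incremental stdev = {0:.4f} ... pooled stdev = {1:.4f}'.format(
        _get_standard_deviation(intermediate_dict),
        numpy.std(all_values, ddof=1)))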
def get_image_normalization_params(netcdf_file_names):
"""Computes normalization params (mean and stdev) for each predictor.
:param netcdf_file_names: 1-D list of paths to input files.
:return: normalization_dict: See input doc for `normalize_images`.
"""
predictor_names = None
norm_dict_by_predictor = None
for this_file_name in netcdf_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_image_file(this_file_name)
if predictor_names is None:
predictor_names = this_image_dict[PREDICTOR_NAMES_KEY]
norm_dict_by_predictor = [{}] * len(predictor_names)
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor[m],
new_values=this_image_dict[PREDICTOR_MATRIX_KEY][..., m])
print('\n')
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev])
print(
('Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
)
return normalization_dict
def get_norm_params_example(training_file_names):
"""Gets normalization parameters.
:param training_file_names: 1-D list of paths to input files.
"""
normalization_dict = get_image_normalization_params(training_file_names)
def normalize_images(
predictor_matrix, predictor_names, normalization_dict=None):
"""Normalizes images to z-scores.
E = number of examples (storm objects) in file
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param predictor_matrix: E-by-M-by-N-by-C numpy array of predictor values.
:param predictor_names: length-C list of predictor names.
:param normalization_dict: Dictionary. Each key is the name of a predictor
value, and the corresponding value is a length-2 numpy array with
[mean, standard deviation]. If `normalization_dict is None`, mean and
standard deviation will be computed for each predictor.
:return: predictor_matrix: Normalized version of input.
:return: normalization_dict: See doc for input variable. If input was None,
this will be a newly created dictionary. Otherwise, this will be the
same dictionary passed as input.
"""
num_predictors = len(predictor_names)
if normalization_dict is None:
normalization_dict = {}
for m in range(num_predictors):
this_mean = numpy.mean(predictor_matrix[..., m])
this_stdev = numpy.std(predictor_matrix[..., m], ddof=1)
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev])
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
predictor_matrix[..., m] = (
(predictor_matrix[..., m] - this_mean) / float(this_stdev)
)
return predictor_matrix, normalization_dict
def denormalize_images(predictor_matrix, predictor_names, normalization_dict):
"""Denormalizes images from z-scores back to original scales.
:param predictor_matrix: See doc for `normalize_images`.
:param predictor_names: Same.
:param normalization_dict: Same.
:return: predictor_matrix: Denormalized version of input.
"""
num_predictors = len(predictor_names)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
predictor_matrix[..., m] = (
this_mean + this_stdev * predictor_matrix[..., m]
)
return predictor_matrix
def norm_denorm_example(training_file_names, normalization_dict):
"""Normalizes and denormalizes images.
:param training_file_names: 1-D list of paths to input files.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
"""
image_dict = read_image_file(training_file_names[0])
predictor_names = image_dict[PREDICTOR_NAMES_KEY]
these_predictor_values = image_dict[PREDICTOR_MATRIX_KEY][0, :5, :5, 0]
print('\nOriginal values of "{0:s}" for first storm object:\n{1:s}'.format(
predictor_names[0], str(these_predictor_values)
))
image_dict[PREDICTOR_MATRIX_KEY], _ = normalize_images(
predictor_matrix=image_dict[PREDICTOR_MATRIX_KEY],
predictor_names=predictor_names, normalization_dict=normalization_dict)
these_predictor_values = image_dict[PREDICTOR_MATRIX_KEY][0, :5, :5, 0]
print(
'\nNormalized values of "{0:s}" for first storm object:\n{1:s}'.format(
predictor_names[0], str(these_predictor_values))
)
image_dict[PREDICTOR_MATRIX_KEY] = denormalize_images(
predictor_matrix=image_dict[PREDICTOR_MATRIX_KEY],
predictor_names=predictor_names, normalization_dict=normalization_dict)
these_predictor_values = image_dict[PREDICTOR_MATRIX_KEY][0, :5, :5, 0]
print(
('\nDenormalized values of "{0:s}" for first storm object:\n{1:s}'
).format(predictor_names[0], str(these_predictor_values))
)
def get_binarization_threshold(netcdf_file_names, percentile_level):
"""Computes binarization threshold for target variable.
Binarization threshold will be [q]th percentile of all image maxima, where
q = `percentile_level`.
:param netcdf_file_names: 1-D list of paths to input files.
:param percentile_level: q in the above discussion.
:return: binarization_threshold: Binarization threshold (used to turn each
target image into a yes-or-no label).
"""
max_target_values = numpy.array([])
for this_file_name in netcdf_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_image_dict = read_image_file(this_file_name)
this_target_matrix = this_image_dict[TARGET_MATRIX_KEY]
this_num_examples = this_target_matrix.shape[0]
these_max_target_values = numpy.full(this_num_examples, numpy.nan)
for i in range(this_num_examples):
these_max_target_values[i] = numpy.max(this_target_matrix[i, ...])
max_target_values = numpy.concatenate((
max_target_values, these_max_target_values))
binarization_threshold = numpy.percentile(
max_target_values, percentile_level)
print('\nBinarization threshold for "{0:s}" = {1:.4e}'.format(
TARGET_NAME, binarization_threshold))
return binarization_threshold
def find_binarization_threshold_example(training_file_names):
"""Finds binarization threshold for target variable.
:param training_file_names: 1-D list of paths to input files.
"""
binarization_threshold = get_binarization_threshold(
netcdf_file_names=training_file_names, percentile_level=90.)
def binarize_target_images(target_matrix, binarization_threshold):
"""Binarizes target images.
Specifically, this method turns each target image into a binary label,
depending on whether or not (max value in image) >= binarization_threshold.
E = number of examples (storm objects) in file
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
:param target_matrix: E-by-M-by-N numpy array of floats.
:param binarization_threshold: Binarization threshold.
:return: target_values: length-E numpy array of target values (integers in
0...1).
"""
num_examples = target_matrix.shape[0]
target_values = numpy.full(num_examples, -1, dtype=int)
for i in range(num_examples):
target_values[i] = (
numpy.max(target_matrix[i, ...]) >= binarization_threshold
)
return target_values
def binarization_example(training_file_names, binarization_threshold):
"""Binarizes target images.
:param training_file_names: 1-D list of paths to input files.
:param binarization_threshold: Binarization threshold.
"""
image_dict = read_image_file(training_file_names[0])
these_max_target_values = numpy.array(
[numpy.max(image_dict[TARGET_MATRIX_KEY][i, ...]) for i in range(10)]
)
print(
('\nSpatial maxima of "{0:s}" for the first few storm objects:\n{1:s}'
).format(image_dict[TARGET_NAME_KEY], str(these_max_target_values))
)
target_values = binarize_target_images(
target_matrix=image_dict[TARGET_MATRIX_KEY],
binarization_threshold=binarization_threshold)
print(
('\nBinarized target values for the first few storm objects:\n{0:s}'
).format(str(target_values[:10]))
)
def _get_dense_layer_dimensions(num_input_units, num_classes, num_dense_layers):
"""Returns dimensions (number of input and output units) for each dense lyr.
D = number of dense layers
:param num_input_units: Number of input units (features created by
flattening layer).
:param num_classes: Number of output classes (possible values of target
variable).
:param num_dense_layers: Number of dense layers.
:return: num_inputs_by_layer: length-D numpy array with number of input
units by dense layer.
:return: num_outputs_by_layer: length-D numpy array with number of output
units by dense layer.
"""
if num_classes == 2:
num_output_units = 1
else:
num_output_units = num_classes + 0
e_folding_param = (
float(-1 * num_dense_layers) /
numpy.log(float(num_output_units) / num_input_units)
)
dense_layer_indices = numpy.linspace(
0, num_dense_layers - 1, num=num_dense_layers, dtype=float)
num_inputs_by_layer = num_input_units * numpy.exp(
-1 * dense_layer_indices / e_folding_param)
num_inputs_by_layer = numpy.round(num_inputs_by_layer).astype(int)
num_outputs_by_layer = numpy.concatenate((
num_inputs_by_layer[1:],
numpy.array([num_output_units], dtype=int)
))
return num_inputs_by_layer, num_outputs_by_layer
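# Minimal sketch of how the dense-layer dimensions taper: with 1024 input
# features, 2 classes, and 3 dense layers, layer sizes decay geometrically
# towards the single output unit (roughly 1024 -> 102 -> 10 -> 1).
def dense_layer_dimensions_example():
    """Shows dense-layer dimensions for a hypothetical 1024-feature input."""
    num_inputs_by_layer, num_outputs_by_layer = _get_dense_layer_dimensions(
        num_input_units=1024, num_classes=2, num_dense_layers=3)
    print('Number of input units by dense layer: {0:s}'.format(
        str(num_inputs_by_layer)))
    print('Number of output units by dense layer: {0:s}'.format(
        str(num_outputs_by_layer)))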
def setup_cnn(num_grid_rows, num_grid_columns):
"""Sets up (but does not train) CNN (convolutional neural net).
:param num_grid_rows: Number of rows in each predictor image.
:param num_grid_columns: Number of columns in each predictor image.
:return: cnn_model_object: Untrained instance of `keras.models.Model`.
"""
regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)
num_predictors = len(NETCDF_PREDICTOR_NAMES)
input_layer_object = keras.layers.Input(
shape=(num_grid_rows, num_grid_columns, num_predictors)
)
current_num_filters = None
current_layer_object = None
# Add convolutional layers.
for _ in range(NUM_CONV_LAYER_SETS):
for _ in range(NUM_CONV_LAYERS_PER_SET):
if current_num_filters is None:
current_num_filters = (
num_predictors * NUM_PREDICTORS_TO_FIRST_NUM_FILTERS)
this_input_layer_object = input_layer_object
else:
current_num_filters *= 2
this_input_layer_object = current_layer_object
current_layer_object = keras.layers.Conv2D(
filters=current_num_filters,
kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),
strides=(1, 1), padding='valid', data_format='channels_last',
dilation_rate=(1, 1), activation=None, use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
kernel_regularizer=regularizer_object
)(this_input_layer_object)
current_layer_object = keras.layers.LeakyReLU(
alpha=SLOPE_FOR_RELU
)(current_layer_object)
if CONV_LAYER_DROPOUT_FRACTION is not None:
current_layer_object = keras.layers.Dropout(
rate=CONV_LAYER_DROPOUT_FRACTION
)(current_layer_object)
if USE_BATCH_NORMALIZATION:
current_layer_object = keras.layers.BatchNormalization(
axis=-1, center=True, scale=True
)(current_layer_object)
current_layer_object = keras.layers.MaxPooling2D(
pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),
strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),
padding='valid', data_format='channels_last'
)(current_layer_object)
these_dimensions = numpy.array(
current_layer_object.get_shape().as_list()[1:], dtype=int)
num_features = numpy.prod(these_dimensions)
current_layer_object = keras.layers.Flatten()(current_layer_object)
# Add intermediate dense layers.
_, num_outputs_by_dense_layer = _get_dense_layer_dimensions(
num_input_units=num_features, num_classes=2,
num_dense_layers=NUM_DENSE_LAYERS)
for k in range(NUM_DENSE_LAYERS - 1):
current_layer_object = keras.layers.Dense(
num_outputs_by_dense_layer[k], activation=None, use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
kernel_regularizer=regularizer_object
)(current_layer_object)
current_layer_object = keras.layers.LeakyReLU(
alpha=SLOPE_FOR_RELU
)(current_layer_object)
if DENSE_LAYER_DROPOUT_FRACTION is not None:
current_layer_object = keras.layers.Dropout(
rate=DENSE_LAYER_DROPOUT_FRACTION
)(current_layer_object)
if USE_BATCH_NORMALIZATION:
current_layer_object = keras.layers.BatchNormalization(
axis=-1, center=True, scale=True
)(current_layer_object)
# Add output layer (also dense).
current_layer_object = keras.layers.Dense(
1, activation=None, use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
kernel_regularizer=regularizer_object
)(current_layer_object)
current_layer_object = keras.layers.Activation(
'sigmoid'
)(current_layer_object)
if DENSE_LAYER_DROPOUT_FRACTION is not None and NUM_DENSE_LAYERS == 1:
current_layer_object = keras.layers.Dropout(
rate=DENSE_LAYER_DROPOUT_FRACTION
)(current_layer_object)
# Put the whole thing together and compile.
cnn_model_object = keras.models.Model(
inputs=input_layer_object, outputs=current_layer_object)
cnn_model_object.compile(
loss=keras.losses.binary_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=LIST_OF_METRIC_FUNCTIONS)
cnn_model_object.summary()
return cnn_model_object
def setup_cnn_example(training_file_names):
"""Sets up CNN.
:param training_file_names: 1-D list of paths to input files.
"""
this_image_dict = read_image_file(training_file_names[0])
cnn_model_object = setup_cnn(
num_grid_rows=this_image_dict[PREDICTOR_MATRIX_KEY].shape[1],
num_grid_columns=this_image_dict[PREDICTOR_MATRIX_KEY].shape[2])
def deep_learning_generator(netcdf_file_names, num_examples_per_batch,
normalization_dict, binarization_threshold):
"""Generates training examples for deep-learning model on the fly.
E = number of examples (storm objects)
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param netcdf_file_names: 1-D list of paths to input (NetCDF) files.
:param num_examples_per_batch: Number of examples per training batch.
:param normalization_dict: See doc for `normalize_images`. You cannot leave
this as None.
:param binarization_threshold: Binarization threshold for target variable.
See `binarize_target_images` for details on what this does.
:return: predictor_matrix: E-by-M-by-N-by-C numpy array of predictor values.
:return: target_values: length-E numpy array of target values (integers in
0...1).
:raises: TypeError: if `normalization_dict is None`.
"""
# TODO(thunderhoser): Maybe add upsampling or downsampling.
if normalization_dict is None:
error_string = 'normalization_dict cannot be None. Must be specified.'
raise TypeError(error_string)
random.shuffle(netcdf_file_names)
num_files = len(netcdf_file_names)
file_index = 0
num_examples_in_memory = 0
full_predictor_matrix = None
full_target_matrix = None
predictor_names = None
while True:
while num_examples_in_memory < num_examples_per_batch:
print('Reading data from: "{0:s}"...'.format(
netcdf_file_names[file_index]))
this_image_dict = read_image_file(netcdf_file_names[file_index])
predictor_names = this_image_dict[PREDICTOR_NAMES_KEY]
file_index += 1
if file_index >= num_files:
file_index = 0
if full_target_matrix is None or full_target_matrix.size == 0:
full_predictor_matrix = (
this_image_dict[PREDICTOR_MATRIX_KEY] + 0.
)
full_target_matrix = this_image_dict[TARGET_MATRIX_KEY] + 0.
else:
full_predictor_matrix = numpy.concatenate(
(full_predictor_matrix,
this_image_dict[PREDICTOR_MATRIX_KEY]),
axis=0)
full_target_matrix = numpy.concatenate(
(full_target_matrix, this_image_dict[TARGET_MATRIX_KEY]),
axis=0)
num_examples_in_memory = full_target_matrix.shape[0]
batch_indices = numpy.linspace(
0, num_examples_in_memory - 1, num=num_examples_in_memory,
dtype=int)
batch_indices = numpy.random.choice(
batch_indices, size=num_examples_per_batch, replace=False)
predictor_matrix, _ = normalize_images(
predictor_matrix=full_predictor_matrix[batch_indices, ...],
predictor_names=predictor_names,
normalization_dict=normalization_dict)
predictor_matrix = predictor_matrix.astype('float32')
target_values = binarize_target_images(
target_matrix=full_target_matrix[batch_indices, ...],
binarization_threshold=binarization_threshold)
print('Fraction of examples in positive class: {0:.4f}'.format(
numpy.mean(target_values)))
num_examples_in_memory = 0
full_predictor_matrix = None
full_target_matrix = None
yield (predictor_matrix, target_values)
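# Minimal usage sketch for the generator above. This assumes that
# `training_file_names`, `normalization_dict`, and `binarization_threshold`
# have already been created (e.g., by `find_many_image_files`,
# `get_image_normalization_params`, and `get_binarization_threshold`).
def generator_usage_example(training_file_names, normalization_dict,
                            binarization_threshold):
    """Pulls one batch of training examples from the generator."""
    training_generator = deep_learning_generator(
        netcdf_file_names=training_file_names, num_examples_per_batch=256,
        normalization_dict=normalization_dict,
        binarization_threshold=binarization_threshold)
    predictor_matrix, target_values = next(training_generator)
    print('Shape of predictor matrix for one batch: {0:s}'.format(
        str(predictor_matrix.shape)))
    print('Number of target values in one batch: {0:d}'.format(
        len(target_values)))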
def train_cnn(
cnn_model_object, training_file_names, normalization_dict,
binarization_threshold, num_examples_per_batch, num_epochs,
num_training_batches_per_epoch, output_model_file_name,
validation_file_names=None, num_validation_batches_per_epoch=None):
"""Trains CNN (convolutional neural net).
:param cnn_model_object: Untrained instance of `keras.models.Model` (may be
created by `setup_cnn`).
:param training_file_names: 1-D list of paths to training files (must be
readable by `read_image_file`).
:param normalization_dict: See doc for `deep_learning_generator`.
:param binarization_threshold: Same.
:param num_examples_per_batch: Same.
:param num_epochs: Number of epochs.
:param num_training_batches_per_epoch: Number of training batches furnished
to model in each epoch.
:param output_model_file_name: Path to output file. The model will be saved
as an HDF5 file (extension should be ".h5", but this is not enforced).
    :param validation_file_names: 1-D list of paths to validation files (must be
readable by `read_image_file`). If `validation_file_names is None`,
will omit on-the-fly validation.
:param num_validation_batches_per_epoch:
[used only if `validation_file_names is not None`]
Number of validation batches furnished to model in each epoch.
:return: cnn_metadata_dict: Dictionary with the following keys.
cnn_metadata_dict['training_file_names']: See input doc.
cnn_metadata_dict['normalization_dict']: Same.
cnn_metadata_dict['binarization_threshold']: Same.
cnn_metadata_dict['num_examples_per_batch']: Same.
cnn_metadata_dict['num_training_batches_per_epoch']: Same.
cnn_metadata_dict['validation_file_names']: Same.
cnn_metadata_dict['num_validation_batches_per_epoch']: Same.
"""
_create_directory(file_name=output_model_file_name)
if validation_file_names is None:
checkpoint_object = keras.callbacks.ModelCheckpoint(
filepath=output_model_file_name, monitor='loss', verbose=1,
save_best_only=False, save_weights_only=False, mode='min',
period=1)
else:
checkpoint_object = keras.callbacks.ModelCheckpoint(
filepath=output_model_file_name, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=False, mode='min',
period=1)
list_of_callback_objects = [checkpoint_object]
cnn_metadata_dict = {
TRAINING_FILES_KEY: training_file_names,
NORMALIZATION_DICT_KEY: normalization_dict,
BINARIZATION_THRESHOLD_KEY: binarization_threshold,
NUM_EXAMPLES_PER_BATCH_KEY: num_examples_per_batch,
NUM_TRAINING_BATCHES_KEY: num_training_batches_per_epoch,
VALIDATION_FILES_KEY: validation_file_names,
NUM_VALIDATION_BATCHES_KEY: num_validation_batches_per_epoch
}
training_generator = deep_learning_generator(
netcdf_file_names=training_file_names,
num_examples_per_batch=num_examples_per_batch,
normalization_dict=normalization_dict,
binarization_threshold=binarization_threshold)
if validation_file_names is None:
cnn_model_object.fit_generator(
generator=training_generator,
steps_per_epoch=num_training_batches_per_epoch, epochs=num_epochs,
verbose=1, callbacks=list_of_callback_objects, workers=0)
return cnn_metadata_dict
early_stopping_object = keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=MIN_XENTROPY_DECREASE_FOR_EARLY_STOP,
patience=NUM_EPOCHS_FOR_EARLY_STOPPING, verbose=1, mode='min')
list_of_callback_objects.append(early_stopping_object)
validation_generator = deep_learning_generator(
netcdf_file_names=validation_file_names,
num_examples_per_batch=num_examples_per_batch,
normalization_dict=normalization_dict,
binarization_threshold=binarization_threshold)
cnn_model_object.fit_generator(
generator=training_generator,
steps_per_epoch=num_training_batches_per_epoch, epochs=num_epochs,
verbose=1, callbacks=list_of_callback_objects, workers=0,
validation_data=validation_generator,
validation_steps=num_validation_batches_per_epoch)
return cnn_metadata_dict
def _create_directory(directory_name=None, file_name=None):
"""Creates directory (along with parents if necessary).
This method creates directories only when necessary, so you don't have to
worry about it overwriting anything.
:param directory_name: Name of desired directory.
:param file_name: [used only if `directory_name is None`]
Path to desired file. All directories in path will be created.
"""
if directory_name is None:
directory_name = os.path.split(file_name)[0]
try:
os.makedirs(directory_name)
except OSError as this_error:
if this_error.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
def read_keras_model(hdf5_file_name):
"""Reads Keras model from HDF5 file.
:param hdf5_file_name: Path to input file.
:return: model_object: Instance of `keras.models.Model`.
"""
return keras.models.load_model(
hdf5_file_name, custom_objects=METRIC_FUNCTION_DICT)
def find_model_metafile(model_file_name, raise_error_if_missing=False):
"""Finds metafile for machine-learning model.
:param model_file_name: Path to file with trained model.
:param raise_error_if_missing: Boolean flag. If True and metafile is not
found, this method will error out.
:return: model_metafile_name: Path to file with metadata. If file is not
found and `raise_error_if_missing = False`, this will be the expected
path.
:raises: ValueError: if metafile is not found and
`raise_error_if_missing = True`.
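    Example: for a hypothetical model file "/output_dir/cnn_model.h5", the
        metafile is expected at "/output_dir/cnn_model_metadata.json".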
"""
model_directory_name, pathless_model_file_name = os.path.split(
model_file_name)
model_metafile_name = '{0:s}/{1:s}_metadata.json'.format(
model_directory_name, os.path.splitext(pathless_model_file_name)[0]
)
if not os.path.isfile(model_metafile_name) and raise_error_if_missing:
error_string = 'Cannot find file. Expected at: "{0:s}"'.format(
model_metafile_name)
raise ValueError(error_string)
return model_metafile_name
def _metadata_numpy_to_list(model_metadata_dict):
"""Converts numpy arrays in model metadata to lists.
This is needed so that the metadata can be written to a JSON file (JSON does
not handle numpy arrays).
This method does not overwrite the original dictionary.
:param model_metadata_dict: Dictionary created by `train_cnn` or
`train_ucn`.
:return: new_metadata_dict: Same but with lists instead of numpy arrays.
"""
new_metadata_dict = copy.deepcopy(model_metadata_dict)
if NORMALIZATION_DICT_KEY in new_metadata_dict.keys():
this_norm_dict = new_metadata_dict[NORMALIZATION_DICT_KEY]
for this_key in this_norm_dict.keys():
if isinstance(this_norm_dict[this_key], numpy.ndarray):
this_norm_dict[this_key] = this_norm_dict[this_key].tolist()
return new_metadata_dict
def _metadata_list_to_numpy(model_metadata_dict):
"""Converts lists in model metadata to numpy arrays.
This method is the inverse of `_metadata_numpy_to_list`.
This method overwrites the original dictionary.
:param model_metadata_dict: Dictionary created by `train_cnn` or
`train_ucn`.
:return: model_metadata_dict: Same but numpy arrays instead of lists.
"""
if NORMALIZATION_DICT_KEY in model_metadata_dict.keys():
this_norm_dict = model_metadata_dict[NORMALIZATION_DICT_KEY]
for this_key in this_norm_dict.keys():
this_norm_dict[this_key] = numpy.array(this_norm_dict[this_key])
return model_metadata_dict
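# Minimal round-trip sketch for the two metadata converters above, using a toy
# normalization dictionary (the mean and standard deviation below are made up
# for illustration).
def metadata_round_trip_example():
    """Converts metadata from numpy to list (for JSON) and back to numpy."""
    toy_metadata_dict = {
        NORMALIZATION_DICT_KEY: {
            TEMPERATURE_NAME: numpy.array([290., 5.]),
            REFLECTIVITY_NAME: numpy.array([20., 10.])
        }
    }
    json_ready_dict = _metadata_numpy_to_list(toy_metadata_dict)
    print('JSON-ready metadata: {0:s}'.format(str(json_ready_dict)))
    recovered_dict = _metadata_list_to_numpy(json_ready_dict)
    print('Recovered metadata: {0:s}'.format(str(recovered_dict)))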
def write_model_metadata(model_metadata_dict, json_file_name):
"""Writes metadata for machine-learning model to JSON file.
:param model_metadata_dict: Dictionary created by `train_cnn` or
`train_ucn`.
:param json_file_name: Path to output file.
"""
_create_directory(file_name=json_file_name)
new_metadata_dict = _metadata_numpy_to_list(model_metadata_dict)
with open(json_file_name, 'w') as this_file:
json.dump(new_metadata_dict, this_file)
def read_model_metadata(json_file_name):
"""Reads metadata for machine-learning model from JSON file.
    :param json_file_name: Path to input file.
:return: model_metadata_dict: Dictionary with keys listed in doc for
`train_cnn` or `train_ucn`.
"""
with open(json_file_name) as this_file:
model_metadata_dict = json.load(this_file)
return _metadata_list_to_numpy(model_metadata_dict)
def train_cnn_example(
cnn_model_object, training_file_names, validation_file_names,
normalization_dict, binarization_threshold):
"""Actually trains the CNN.
:param cnn_model_object: See doc for `train_cnn`.
:param training_file_names: Same.
:param validation_file_names: Same.
:param normalization_dict: Same.
:param binarization_threshold: Same.
"""
cnn_file_name = '{0:s}/cnn_model.h5'.format(MODULE4_DIR_NAME)
cnn_metadata_dict = train_cnn(
cnn_model_object=cnn_model_object,
training_file_names=training_file_names,
normalization_dict=normalization_dict,
binarization_threshold=binarization_threshold,
num_examples_per_batch=256, num_epochs=10,
num_training_batches_per_epoch=10,
validation_file_names=validation_file_names,
num_validation_batches_per_epoch=10,
output_model_file_name=cnn_file_name)
def _apply_cnn(cnn_model_object, predictor_matrix, verbose=True,
output_layer_name=None):
"""Applies trained CNN (convolutional neural net) to new data.
E = number of examples (storm objects) in file
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param predictor_matrix: E-by-M-by-N-by-C numpy array of predictor values.
:param verbose: Boolean flag. If True, progress messages will be printed.
:param output_layer_name: Name of output layer. If
`output_layer_name is None`, this method will use the actual output
layer, so will return predictions. If `output_layer_name is not None`,
will return "features" (outputs from the given layer).
If `output_layer_name is None`...
:return: forecast_probabilities: length-E numpy array with forecast
probabilities of positive class (label = 1).
If `output_layer_name is not None`...
:return: feature_matrix: numpy array of features (outputs from the given
layer). There is no guarantee on the shape of this array, except that
the first axis has length E.
"""
num_examples = predictor_matrix.shape[0]
num_examples_per_batch = 1000
if output_layer_name is None:
model_object_to_use = cnn_model_object
else:
model_object_to_use = keras.models.Model(
inputs=cnn_model_object.input,
outputs=cnn_model_object.get_layer(name=output_layer_name).output)
output_array = None
for i in range(0, num_examples, num_examples_per_batch):
this_first_index = i
this_last_index = min(
[i + num_examples_per_batch - 1, num_examples - 1]
)
if verbose:
print('Applying model to examples {0:d}-{1:d} of {2:d}...'.format(
this_first_index, this_last_index, num_examples))
these_indices = numpy.linspace(
this_first_index, this_last_index,
num=this_last_index - this_first_index + 1, dtype=int)
this_output_array = model_object_to_use.predict(
predictor_matrix[these_indices, ...],
batch_size=num_examples_per_batch)
if output_layer_name is None:
this_output_array = this_output_array[:, -1]
if output_array is None:
output_array = this_output_array + 0.
else:
output_array = numpy.concatenate(
(output_array, this_output_array), axis=0)
return output_array
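# Minimal usage sketch for _apply_cnn. This assumes a trained
# `cnn_model_object` and a normalized `predictor_matrix`; the layer name
# "flatten_1" is hypothetical and depends on the actual architecture.
def apply_cnn_example(cnn_model_object, predictor_matrix):
    """Gets predictions and intermediate features from a trained CNN."""
    forecast_probabilities = _apply_cnn(
        cnn_model_object=cnn_model_object, predictor_matrix=predictor_matrix)
    print('Forecast probabilities for first few examples: {0:s}'.format(
        str(forecast_probabilities[:5])))
    feature_matrix = _apply_cnn(
        cnn_model_object=cnn_model_object, predictor_matrix=predictor_matrix,
        output_layer_name='flatten_1')  # hypothetical layer name
    print('Shape of feature matrix: {0:s}'.format(str(feature_matrix.shape)))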
def evaluate_cnn(
cnn_model_object, image_dict, cnn_metadata_dict, output_dir_name):
"""Evaluates trained CNN (convolutional neural net).
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param image_dict: Dictionary created by `read_image_file` or
`read_many_image_files`. Should contain validation or testing data (not
training data), but this is not enforced.
:param cnn_metadata_dict: Dictionary created by `train_cnn`. This will
ensure that data in `image_dict` are processed the exact same way as the
training data for `cnn_model_object`.
:param output_dir_name: Path to output directory. Figures will be saved
here.
"""
predictor_matrix, _ = normalize_images(
predictor_matrix=image_dict[PREDICTOR_MATRIX_KEY] + 0.,
predictor_names=image_dict[PREDICTOR_NAMES_KEY],
normalization_dict=cnn_metadata_dict[NORMALIZATION_DICT_KEY])
predictor_matrix = predictor_matrix.astype('float32')
target_values = binarize_target_images(
target_matrix=image_dict[TARGET_MATRIX_KEY],
binarization_threshold=cnn_metadata_dict[BINARIZATION_THRESHOLD_KEY])
forecast_probabilities = _apply_cnn(cnn_model_object=cnn_model_object,
predictor_matrix=predictor_matrix)
print(MINOR_SEPARATOR_STRING)
pofd_by_threshold, pod_by_threshold = roc_curves.plot_roc_curve(
observed_labels=target_values,
forecast_probabilities=forecast_probabilities)
area_under_roc_curve = scikit_learn_auc(pofd_by_threshold, pod_by_threshold)
title_string = 'Area under ROC curve: {0:.4f}'.format(area_under_roc_curve)
pyplot.title(title_string)
pyplot.show()
_create_directory(directory_name=output_dir_name)
roc_curve_file_name = '{0:s}/roc_curve.jpg'.format(output_dir_name)
print('Saving figure to: "{0:s}"...'.format(roc_curve_file_name))
pyplot.savefig(roc_curve_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
performance_diagrams.plot_performance_diagram(
observed_labels=target_values,
forecast_probabilities=forecast_probabilities)
pyplot.show()
perf_diagram_file_name = '{0:s}/performance_diagram.jpg'.format(
output_dir_name)
print('Saving figure to: "{0:s}"...'.format(perf_diagram_file_name))
pyplot.savefig(perf_diagram_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
attributes_diagrams.plot_attributes_diagram(
observed_labels=target_values,
forecast_probabilities=forecast_probabilities, num_bins=20)
pyplot.show()
attr_diagram_file_name = '{0:s}/attributes_diagram.jpg'.format(
output_dir_name)
print('Saving figure to: "{0:s}"...'.format(attr_diagram_file_name))
pyplot.savefig(attr_diagram_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
def evaluate_cnn_example(validation_image_dict):
"""Evaluates CNN on validation data.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
"""
cnn_file_name = '{0:s}/pretrained_cnn/pretrained_cnn.h5'.format(
MODULE4_DIR_NAME)
cnn_metafile_name = find_model_metafile(model_file_name=cnn_file_name)
cnn_model_object = read_keras_model(cnn_file_name)
cnn_metadata_dict = read_model_metadata(cnn_metafile_name)
validation_dir_name = '{0:s}/validation'.format(MODULE4_DIR_NAME)
evaluate_cnn(
cnn_model_object=cnn_model_object, image_dict=validation_image_dict,
cnn_metadata_dict=cnn_metadata_dict,
output_dir_name=validation_dir_name)
print(SEPARATOR_STRING)
def _get_binary_xentropy(target_values, forecast_probabilities):
"""Computes binary cross-entropy.
This function satisfies the requirements for `cost_function` in the input to
`run_permutation_test`.
E = number of examples
    :param target_values: length-E numpy array of target values (integer class
labels).
    :param forecast_probabilities: length-E numpy array with predicted
probabilities of positive class (target value = 1).
:return: cross_entropy: Cross-entropy.
"""
forecast_probabilities[
forecast_probabilities < MIN_PROBABILITY] = MIN_PROBABILITY
forecast_probabilities[
forecast_probabilities > MAX_PROBABILITY] = MAX_PROBABILITY
return -1 * numpy.nanmean(
target_values * numpy.log2(forecast_probabilities) +
(1 - target_values) * numpy.log2(1 - forecast_probabilities)
)
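# Minimal worked example for the cost function above (note the base-2 logs, so
# the result is in bits): with target values [1, 0] and forecast probabilities
# [0.8, 0.2], the cost is -mean(log2(0.8), log2(0.8)), or roughly 0.32 bits.
def binary_xentropy_example():
    """Computes binary cross-entropy for toy targets and probabilities."""
    toy_target_values = numpy.array([1, 0], dtype=int)
    toy_probabilities = numpy.array([0.8, 0.2])
    print('Binary cross-entropy = {0:.4f}'.format(_get_binary_xentropy(
        target_values=toy_target_values,
        forecast_probabilities=toy_probabilities)))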
def permutation_test_for_cnn(
cnn_model_object, image_dict, cnn_metadata_dict,
output_pickle_file_name, cost_function=_get_binary_xentropy):
"""Runs permutation test on CNN (convolutional neural net).
E = number of examples (storm objects)
C = number of channels (predictor variables)
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param image_dict: Dictionary created by `read_image_file` or
`read_many_image_files`. Should contain validation data (rather than
training data), but this is not enforced.
:param cnn_metadata_dict: Dictionary created by `train_cnn`. This will
ensure that data in `image_dict` are processed the exact same way as the
training data for `cnn_model_object`.
:param output_pickle_file_name: Path to output file. `result_dict` (the
output variable) will be saved here.
:param cost_function: Cost function (used to evaluate model predictions).
Must be negatively oriented (lower values are better). Must have the
following inputs and outputs.
Input: target_values: length-E numpy array of target values (integer class
labels).
Input: forecast_probabilities: length-E numpy array with predicted
probabilities of positive class (target value = 1).
Output: cost: Scalar value.
:return: result_dict: Dictionary with the following keys.
result_dict['permuted_predictor_name_by_step']: length-C list with name of
predictor permuted at each step.
result_dict['highest_cost_by_step']: length-C numpy array with corresponding
cost at each step. highest_cost_by_step[m] = cost after permuting
permuted_predictor_name_by_step[m].
result_dict['original_cost']: Original cost (before any permutation).
result_dict['predictor_names_step1']: length-C list of predictor names.
result_dict['costs_step1']: length-C numpy array of corresponding costs.
costs_step1[m] = cost after permuting only predictor_names_step1[m].
This key and "predictor_names_step1" correspond to the Breiman version
of the permutation test, while "permuted_predictor_name_by_step" and
"highest_cost_by_step" correspond to the Lakshmanan version.
"""
predictor_names = image_dict[PREDICTOR_NAMES_KEY]
predictor_matrix, _ = normalize_images(
predictor_matrix=image_dict[PREDICTOR_MATRIX_KEY] + 0.,
predictor_names=image_dict[PREDICTOR_NAMES_KEY],
normalization_dict=cnn_metadata_dict[NORMALIZATION_DICT_KEY])
predictor_matrix = predictor_matrix.astype('float32')
target_values = binarize_target_images(
target_matrix=image_dict[TARGET_MATRIX_KEY],
binarization_threshold=cnn_metadata_dict[BINARIZATION_THRESHOLD_KEY])
# Get original cost (before permutation).
these_probabilities = _apply_cnn(cnn_model_object=cnn_model_object,
predictor_matrix=predictor_matrix)
print(MINOR_SEPARATOR_STRING)
original_cost = cost_function(target_values, these_probabilities)
print('Original cost (no permutation): {0:.4e}\n'.format(original_cost))
num_examples = len(target_values)
remaining_predictor_names = predictor_names + []
current_step_num = 0
permuted_predictor_name_by_step = []
highest_cost_by_step = []
predictor_names_step1 = []
costs_step1 = []
while len(remaining_predictor_names) > 0:
current_step_num += 1
highest_cost = -numpy.inf
best_predictor_name = None
best_predictor_permuted_values = None
for this_predictor_name in remaining_predictor_names:
print(
('Trying predictor "{0:s}" at step {1:d} of permutation test...'
).format(this_predictor_name, current_step_num)
)
this_predictor_index = predictor_names.index(this_predictor_name)
this_predictor_matrix = predictor_matrix + 0.
for i in range(num_examples):
this_predictor_matrix[i, ..., this_predictor_index] = (
numpy.random.permutation(
this_predictor_matrix[i, ..., this_predictor_index])
)
print(MINOR_SEPARATOR_STRING)
these_probabilities = _apply_cnn(
cnn_model_object=cnn_model_object,
predictor_matrix=this_predictor_matrix)
print(MINOR_SEPARATOR_STRING)
this_cost = cost_function(target_values, these_probabilities)
print('Resulting cost = {0:.4e}'.format(this_cost))
if current_step_num == 1:
predictor_names_step1.append(this_predictor_name)
costs_step1.append(this_cost)
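# Track the predictor whose permutation yields the highest cost so far in
# this step; it will be selected as the most important remaining predictor
# for this step (Lakshmanan version of the test).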
if this_cost < highest_cost:
continue
highest_cost = this_cost + 0.
best_predictor_name = this_predictor_name + ''
best_predictor_permuted_values = this_predictor_matrix[
..., this_predictor_index]
permuted_predictor_name_by_step.append(best_predictor_name)
highest_cost_by_step.append(highest_cost)
# Remove best predictor from list.
remaining_predictor_names.remove(best_predictor_name)
# Leave values of best predictor permuted.
this_predictor_index = predictor_names.index(best_predictor_name)
predictor_matrix[
..., this_predictor_index] = best_predictor_permuted_values
print('\nBest predictor = "{0:s}" ... new cost = {1:.4e}\n'.format(
best_predictor_name, highest_cost))
result_dict = {
PERMUTED_PREDICTORS_KEY: permuted_predictor_name_by_step,
HIGHEST_COSTS_KEY: numpy.array(highest_cost_by_step),
ORIGINAL_COST_KEY: original_cost,
STEP1_PREDICTORS_KEY: predictor_names_step1,
STEP1_COSTS_KEY: numpy.array(costs_step1)
}
_create_directory(file_name=output_pickle_file_name)
print('Writing results to: "{0:s}"...'.format(output_pickle_file_name))
file_handle = open(output_pickle_file_name, 'wb')
pickle.dump(result_dict, file_handle)
file_handle.close()
return result_dict
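# The `cost_function` argument above only needs to accept target values and
# forecast probabilities and return a scalar that is lower for better
# predictions.  The following sketch (a hypothetical helper, not part of the
# original module) shows a Brier-score cost function that satisfies this
# interface and could be passed to `permutation_test_for_cnn` in place of
# `_get_binary_xentropy`.
def _get_brier_score(target_values, forecast_probabilities):
    """Computes Brier score (negatively oriented, like cross-entropy).

    :param target_values: length-E numpy array of integer class labels (0 or 1).
    :param forecast_probabilities: length-E numpy array with predicted
        probabilities of positive class.
    :return: brier_score: Scalar.
    """
    return numpy.mean((forecast_probabilities - target_values) ** 2)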
def permutation_test_example(cnn_model_object, validation_image_dict,
cnn_metadata_dict):
"""Runs the permutation test on validation data.
:param cnn_model_object: See doc for `permutation_test_for_cnn`.
:param validation_image_dict: Same.
:param cnn_metadata_dict: Same.
"""
permutation_dir_name = '{0:s}/permutation_test'.format(MODULE4_DIR_NAME)
main_permutation_file_name = '{0:s}/permutation_results.p'.format(
permutation_dir_name)
permutation_dict = permutation_test_for_cnn(
cnn_model_object=cnn_model_object, image_dict=validation_image_dict,
cnn_metadata_dict=cnn_metadata_dict,
output_pickle_file_name=main_permutation_file_name)
def _label_bars_in_graph(axes_object, y_coords, y_strings):
"""Labels bars in graph.
J = number of bars
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
Will plot on these axes.
:param y_coords: length-J numpy array with y-coordinates of bars.
:param y_strings: length-J list of labels.
"""
x_min, x_max = pyplot.xlim()
x_coord_for_text = x_min + 0.01 * (x_max - x_min)
for j in range(len(y_coords)):
axes_object.text(
x_coord_for_text, y_coords[j], y_strings[j], color='k',
horizontalalignment='left', verticalalignment='center')
def plot_breiman_results(
result_dict, output_file_name, plot_percent_increase=False):
"""Plots results of Breiman (single-pass) permutation test.
:param result_dict: Dictionary created by `permutation_test_for_cnn`.
:param output_file_name: Path to output file. Figure will be saved here.
:param plot_percent_increase: Boolean flag. If True, x-axis will be
percentage of original cost (before permutation). If False, will be
actual cost.
"""
cost_values = result_dict[STEP1_COSTS_KEY]
predictor_names = result_dict[STEP1_PREDICTORS_KEY]
sort_indices = numpy.argsort(cost_values)
cost_values = cost_values[sort_indices]
predictor_names = [predictor_names[k] for k in sort_indices]
x_coords = numpy.concatenate((
numpy.array([result_dict[ORIGINAL_COST_KEY]]), cost_values
))
if plot_percent_increase:
x_coords = 100 * x_coords / x_coords[0]
y_strings = ['No permutation'] + predictor_names
y_coords = numpy.linspace(
0, len(y_strings) - 1, num=len(y_strings), dtype=float)
_, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
axes_object.barh(
y_coords, x_coords, color=BAR_GRAPH_FACE_COLOUR,
edgecolor=BAR_GRAPH_EDGE_COLOUR, linewidth=BAR_GRAPH_EDGE_WIDTH)
pyplot.yticks([], [])
pyplot.ylabel('Predictor permuted')
if plot_percent_increase:
pyplot.xlabel('Cost (percentage of original)')
else:
pyplot.xlabel('Cost')
_label_bars_in_graph(
axes_object=axes_object, y_coords=y_coords, y_strings=y_strings)
pyplot.show()
_create_directory(file_name=output_file_name)
print('Saving figure to: "{0:s}"...'.format(output_file_name))
pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
def plot_lakshmanan_results(
result_dict, output_file_name, plot_percent_increase=False):
"""Plots results of Lakshmanan (multi-pass) permutation test.
:param result_dict: See doc for `plot_breiman_results`.
:param output_file_name: Same.
:param plot_percent_increase: Same.
"""
x_coords = numpy.concatenate((
numpy.array([result_dict[ORIGINAL_COST_KEY]]),
result_dict[HIGHEST_COSTS_KEY]
))
if plot_percent_increase:
x_coords = 100 * x_coords / x_coords[0]
y_strings = ['No permutation'] + result_dict[PERMUTED_PREDICTORS_KEY]
y_coords = numpy.linspace(
0, len(y_strings) - 1, num=len(y_strings), dtype=float
)[::-1]
_, axes_object = pyplot.subplots(
1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)
)
axes_object.barh(
y_coords, x_coords, color=BAR_GRAPH_FACE_COLOUR,
edgecolor=BAR_GRAPH_EDGE_COLOUR, linewidth=BAR_GRAPH_EDGE_WIDTH)
pyplot.yticks([], [])
pyplot.ylabel('Predictor permuted')
if plot_percent_increase:
pyplot.xlabel('Cost (percentage of original)')
else:
pyplot.xlabel('Cost')
_label_bars_in_graph(
axes_object=axes_object, y_coords=y_coords, y_strings=y_strings)
pyplot.show()
_create_directory(file_name=output_file_name)
print('Saving figure to: "{0:s}"...'.format(output_file_name))
pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)
pyplot.close()
def plot_breiman_results_example(permutation_dir_name, permutation_dict):
"""Plots results of Breiman permutation test.
:param permutation_dir_name: Name of output directory.
:param permutation_dict: Dictionary created by `permutation_test_for_cnn`.
"""
breiman_file_name = '{0:s}/breiman_results.jpg'.format(permutation_dir_name)
plot_breiman_results(
result_dict=permutation_dict, output_file_name=breiman_file_name,
plot_percent_increase=False)
def plot_lakshmanan_results_example(permutation_dir_name, permutation_dict):
"""Plots results of Lakshmanan permutation test.
:param permutation_dir_name: Name of output directory.
:param permutation_dict: Dictionary created by `permutation_test_for_cnn`.
"""
lakshmanan_file_name = '{0:s}/lakshmanan_results.jpg'.format(
permutation_dir_name)
plot_lakshmanan_results(
result_dict=permutation_dict, output_file_name=lakshmanan_file_name,
plot_percent_increase=False)
def _gradient_descent_for_bwo(
cnn_model_object, loss_tensor, init_function_or_matrices,
num_iterations, learning_rate):
"""Does gradient descent (the nitty-gritty part) for backwards optimization.
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param loss_tensor: Keras tensor, defining the loss function to be
minimized.
:param init_function_or_matrices: Either a function or list of numpy arrays.
If function, will be used to initialize input matrices. See
`create_gaussian_initializer` for an example.
If list of numpy arrays, these are the input matrices themselves. Matrices
should be processed in the exact same way that training data were processed
(e.g., normalization method). Matrices must also be in the same order as
training matrices, and the [q]th matrix in this list must have the same
shape as the [q]th training matrix.
:param num_iterations: Number of gradient-descent iterations (number of
times that the input matrices are adjusted).
:param learning_rate: Learning rate. At each iteration, each input value x
will be decremented by `learning_rate * gradient`, where `gradient` is
the gradient of the loss function with respect to x.
:return: list_of_optimized_input_matrices: length-T list of optimized input
matrices (numpy arrays), where T = number of input tensors to the model.
If the input arg `init_function_or_matrices` is a list of numpy arrays
(rather than a function), `list_of_optimized_input_matrices` will have
the exact same shape, just with different values.
"""
if isinstance(cnn_model_object.input, list):
list_of_input_tensors = cnn_model_object.input
else:
list_of_input_tensors = [cnn_model_object.input]
num_input_tensors = len(list_of_input_tensors)
list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)
for i in range(num_input_tensors):
list_of_gradient_tensors[i] /= K.maximum(
K.sqrt(K.mean(list_of_gradient_tensors[i] ** 2)),
K.epsilon()
)
inputs_to_loss_and_gradients = K.function(
list_of_input_tensors + [K.learning_phase()],
([loss_tensor] + list_of_gradient_tensors)
)
if isinstance(init_function_or_matrices, list):
list_of_optimized_input_matrices = copy.deepcopy(
init_function_or_matrices)
else:
list_of_optimized_input_matrices = [None] * num_input_tensors
for i in range(num_input_tensors):
these_dimensions = numpy.array(
[1] + list_of_input_tensors[i].get_shape().as_list()[1:],
dtype=int)
list_of_optimized_input_matrices[i] = init_function_or_matrices(
these_dimensions)
for j in range(num_iterations):
these_outputs = inputs_to_loss_and_gradients(
list_of_optimized_input_matrices + [0])
if numpy.mod(j, 100) == 0:
print('Loss after {0:d} of {1:d} iterations: {2:.2e}'.format(
j, num_iterations, these_outputs[0]))
for i in range(num_input_tensors):
list_of_optimized_input_matrices[i] -= (
these_outputs[i + 1] * learning_rate)
print('Loss after {0:d} iterations: {1:.2e}'.format(
num_iterations, these_outputs[0]))
return list_of_optimized_input_matrices
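# When `init_function_or_matrices` is a function, it must map an array of
# matrix dimensions to an initial matrix with those dimensions (the docstring
# above points to `create_gaussian_initializer` for the full version).  The
# following minimal sketch (a hypothetical helper, shown only to illustrate
# the expected signature) returns standard Gaussian noise of the requested
# shape.
def _example_init_function(matrix_dimensions):
    """Creates initial input matrix filled with standard Gaussian noise.

    :param matrix_dimensions: numpy array of dimensions, e.g., [1, M, N, C].
    :return: initial_matrix: numpy array of random values with said dimensions.
    """
    return numpy.random.normal(loc=0., scale=1., size=tuple(matrix_dimensions))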
def bwo_for_class(
cnn_model_object, target_class, init_function_or_matrices,
num_iterations=DEFAULT_NUM_BWO_ITERATIONS,
learning_rate=DEFAULT_BWO_LEARNING_RATE):
"""Does backwards optimization to maximize probability of target class.
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param target_class: Synthetic input data will be created to maximize
probability of this class.
:param init_function_or_matrices: See doc for `_gradient_descent_for_bwo`.
:param num_iterations: Same.
:param learning_rate: Same.
:return: list_of_optimized_input_matrices: Same.
"""
target_class = int(numpy.round(target_class))
num_iterations = int(numpy.round(num_iterations))
assert target_class >= 0
assert num_iterations > 0
assert learning_rate > 0.
assert learning_rate < 1.
num_output_neurons = (
cnn_model_object.layers[-1].output.get_shape().as_list()[-1]
)
if num_output_neurons == 1:
assert target_class <= 1
if target_class == 1:
loss_tensor = K.mean(
(cnn_model_object.layers[-1].output[..., 0] - 1) ** 2
)
else:
loss_tensor = K.mean(
cnn_model_object.layers[-1].output[..., 0] ** 2
)
else:
assert target_class < num_output_neurons
loss_tensor = K.mean(
(cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2
)
return _gradient_descent_for_bwo(
cnn_model_object=cnn_model_object, loss_tensor=loss_tensor,
init_function_or_matrices=init_function_or_matrices,
num_iterations=num_iterations, learning_rate=learning_rate)
def bwo_example1(validation_image_dict, normalization_dict, cnn_model_object):
"""Optimizes random example (storm object) for positive class.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
orig_predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][0, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
orig_predictor_matrix_norm, _ = normalize_images(
predictor_matrix=orig_predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
orig_predictor_matrix_norm = numpy.expand_dims(
orig_predictor_matrix_norm, axis=0)
optimized_predictor_matrix_norm = bwo_for_class(
cnn_model_object=cnn_model_object, target_class=1,
init_function_or_matrices=[orig_predictor_matrix_norm]
)[0][0, ...]
optimized_predictor_matrix = denormalize_images(
predictor_matrix=optimized_predictor_matrix_norm,
predictor_names=predictor_names, normalization_dict=normalization_dict)
temperature_index = predictor_names.index(TEMPERATURE_NAME)
combined_temp_matrix_kelvins = numpy.concatenate(
(orig_predictor_matrix[..., temperature_index],
optimized_predictor_matrix[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 99)
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=orig_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Real example (before optimization)')
pyplot.show()
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=optimized_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Synthetic example (after optimization)')
pyplot.show()
def bwo_example2(validation_image_dict, normalization_dict, cnn_model_object):
"""Optimizes random example (storm object) for negative class.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
orig_predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][0, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
orig_predictor_matrix_norm, _ = normalize_images(
predictor_matrix=orig_predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
orig_predictor_matrix_norm = numpy.expand_dims(
orig_predictor_matrix_norm, axis=0)
optimized_predictor_matrix_norm = bwo_for_class(
cnn_model_object=cnn_model_object, target_class=0,
init_function_or_matrices=[orig_predictor_matrix_norm]
)[0][0, ...]
optimized_predictor_matrix = denormalize_images(
predictor_matrix=optimized_predictor_matrix_norm,
predictor_names=predictor_names, normalization_dict=normalization_dict)
temperature_index = predictor_names.index(TEMPERATURE_NAME)
combined_temp_matrix_kelvins = numpy.concatenate(
(orig_predictor_matrix[..., temperature_index],
optimized_predictor_matrix[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 99)
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=orig_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Real example (before optimization)')
pyplot.show()
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=optimized_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Synthetic example (after optimization)')
pyplot.show()
def bwo_example3(validation_image_dict, normalization_dict, cnn_model_object):
"""Optimizes extreme example (storm object) for positive class.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
example_index = numpy.unravel_index(
numpy.argmax(target_matrix_s01), target_matrix_s01.shape
)[0]
orig_predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][
example_index, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
orig_predictor_matrix_norm, _ = normalize_images(
predictor_matrix=orig_predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
orig_predictor_matrix_norm = numpy.expand_dims(
orig_predictor_matrix_norm, axis=0)
optimized_predictor_matrix_norm = bwo_for_class(
cnn_model_object=cnn_model_object, target_class=1,
init_function_or_matrices=[orig_predictor_matrix_norm]
)[0][0, ...]
optimized_predictor_matrix = denormalize_images(
predictor_matrix=optimized_predictor_matrix_norm,
predictor_names=predictor_names, normalization_dict=normalization_dict)
temperature_index = predictor_names.index(TEMPERATURE_NAME)
combined_temp_matrix_kelvins = numpy.concatenate(
(orig_predictor_matrix[..., temperature_index],
optimized_predictor_matrix[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 99)
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=orig_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Real example (before optimization)')
pyplot.show()
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=optimized_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Synthetic example (after optimization)')
pyplot.show()
def bwo_example4(validation_image_dict, normalization_dict, cnn_model_object):
"""Optimizes extreme example (storm object) for negative class.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
example_index = numpy.unravel_index(
numpy.argmax(target_matrix_s01), target_matrix_s01.shape
)[0]
orig_predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][
example_index, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
orig_predictor_matrix_norm, _ = normalize_images(
predictor_matrix=orig_predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
orig_predictor_matrix_norm = numpy.expand_dims(
orig_predictor_matrix_norm, axis=0)
optimized_predictor_matrix_norm = bwo_for_class(
cnn_model_object=cnn_model_object, target_class=0,
init_function_or_matrices=[orig_predictor_matrix_norm]
)[0][0, ...]
optimized_predictor_matrix = denormalize_images(
predictor_matrix=optimized_predictor_matrix_norm,
predictor_names=predictor_names, normalization_dict=normalization_dict)
temperature_index = predictor_names.index(TEMPERATURE_NAME)
combined_temp_matrix_kelvins = numpy.concatenate(
(orig_predictor_matrix[..., temperature_index],
optimized_predictor_matrix[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 99)
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=orig_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Real example (before optimization)')
pyplot.show()
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=optimized_predictor_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Synthetic example (after optimization)')
pyplot.show()
def _do_saliency_calculations(
cnn_model_object, loss_tensor, list_of_input_matrices):
"""Does the nitty-gritty part of computing saliency maps.
T = number of input tensors to the model
E = number of examples (storm objects)
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param loss_tensor: Keras tensor defining the loss function.
:param list_of_input_matrices: length-T list of numpy arrays, comprising one
or more examples (storm objects). list_of_input_matrices[i] must have
the same dimensions as the [i]th input tensor to the model.
:return: list_of_saliency_matrices: length-T list of numpy arrays,
comprising the saliency map for each example.
list_of_saliency_matrices[i] has the same dimensions as
list_of_input_matrices[i] and defines the "saliency" of each value x,
which is the gradient of the loss function with respect to x.
"""
if isinstance(cnn_model_object.input, list):
list_of_input_tensors = cnn_model_object.input
else:
list_of_input_tensors = [cnn_model_object.input]
list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)
num_input_tensors = len(list_of_input_tensors)
for i in range(num_input_tensors):
list_of_gradient_tensors[i] /= K.maximum(
K.std(list_of_gradient_tensors[i]), K.epsilon()
)
inputs_to_gradients_function = K.function(
list_of_input_tensors + [K.learning_phase()],
list_of_gradient_tensors)
list_of_saliency_matrices = inputs_to_gradients_function(
list_of_input_matrices + [0])
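# Flip the sign so that positive saliency means that increasing the predictor
# value increases the probability of the target class (i.e., decreases the
# loss defined by `loss_tensor`).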
for i in range(num_input_tensors):
list_of_saliency_matrices[i] *= -1
return list_of_saliency_matrices
def saliency_for_class(cnn_model_object, target_class, list_of_input_matrices):
"""For each input example, creates saliency map for prob of given class.
:param cnn_model_object: Trained instance of `keras.models.Model`.
:param target_class: Saliency maps will be created for probability of this
class.
:param list_of_input_matrices: See doc for `_do_saliency_calculations`.
:return: list_of_saliency_matrices: Same.
"""
target_class = int(numpy.round(target_class))
assert target_class >= 0
num_output_neurons = (
cnn_model_object.layers[-1].output.get_shape().as_list()[-1]
)
if num_output_neurons == 1:
assert target_class <= 1
if target_class == 1:
loss_tensor = K.mean(
(cnn_model_object.layers[-1].output[..., 0] - 1) ** 2
)
else:
loss_tensor = K.mean(
cnn_model_object.layers[-1].output[..., 0] ** 2
)
else:
assert target_class < num_output_neurons
loss_tensor = K.mean(
(cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2
)
return _do_saliency_calculations(
cnn_model_object=cnn_model_object, loss_tensor=loss_tensor,
list_of_input_matrices=list_of_input_matrices)
def plot_saliency_2d(
saliency_matrix, axes_object, colour_map_object,
max_absolute_contour_level, contour_interval, line_width=2):
"""Plots saliency map over 2-D grid (for one predictor).
M = number of rows in grid
N = number of columns in grid
:param saliency_matrix: M-by-N numpy array of saliency values.
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
Will plot on these axes.
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param max_absolute_contour_level: Max absolute saliency value to plot. The
min and max values plotted will be `-1 * max_absolute_contour_level` and
`max_absolute_contour_level`, respectively.
:param contour_interval: Saliency interval between successive contours.
:param line_width: Width of contour lines.
"""
num_grid_rows = saliency_matrix.shape[0]
num_grid_columns = saliency_matrix.shape[1]
x_coords_unique = numpy.linspace(
0, num_grid_columns, num=num_grid_columns + 1, dtype=float)
x_coords_unique = x_coords_unique[:-1]
x_coords_unique = x_coords_unique + numpy.diff(x_coords_unique[:2]) / 2
y_coords_unique = numpy.linspace(
0, num_grid_rows, num=num_grid_rows + 1, dtype=float)
y_coords_unique = y_coords_unique[:-1]
y_coords_unique = y_coords_unique + numpy.diff(y_coords_unique[:2]) / 2
x_coord_matrix, y_coord_matrix = numpy.meshgrid(x_coords_unique,
y_coords_unique)
half_num_contours = int(numpy.round(
1 + max_absolute_contour_level / contour_interval
))
# Plot positive values.
these_contour_levels = numpy.linspace(
0., max_absolute_contour_level, num=half_num_contours)
axes_object.contour(
x_coord_matrix, y_coord_matrix, saliency_matrix,
these_contour_levels, cmap=colour_map_object,
vmin=numpy.min(these_contour_levels),
vmax=numpy.max(these_contour_levels), linewidths=line_width,
linestyles='solid', zorder=1e6)
# Plot negative values.
these_contour_levels = these_contour_levels[1:]
axes_object.contour(
x_coord_matrix, y_coord_matrix, -saliency_matrix,
these_contour_levels, cmap=colour_map_object,
vmin=numpy.min(these_contour_levels),
vmax=numpy.max(these_contour_levels), linewidths=line_width,
linestyles='dashed', zorder=1e6)
def plot_many_saliency_maps(
saliency_matrix, axes_objects_2d_list, colour_map_object,
max_absolute_contour_level, contour_interval, line_width=2):
"""Plots 2-D saliency map for each predictor.
M = number of rows in grid
N = number of columns in grid
C = number of predictors
:param saliency_matrix: M-by-N-by-C numpy array of saliency values.
:param axes_objects_2d_list: See doc for `_init_figure_panels`.
:param colour_map_object: See doc for `plot_saliency_2d`.
:param max_absolute_contour_level: Same.
:param contour_interval: Same.
:param line_width: Same.
"""
num_predictors = saliency_matrix.shape[-1]
num_panel_rows = len(axes_objects_2d_list)
num_panel_columns = len(axes_objects_2d_list[0])
for m in range(num_predictors):
this_panel_row, this_panel_column = numpy.unravel_index(
m, (num_panel_rows, num_panel_columns)
)
plot_saliency_2d(
saliency_matrix=saliency_matrix[..., m],
axes_object=axes_objects_2d_list[this_panel_row][this_panel_column],
colour_map_object=colour_map_object,
max_absolute_contour_level=max_absolute_contour_level,
contour_interval=contour_interval, line_width=line_width)
def saliency_example1(validation_image_dict, normalization_dict,
cnn_model_object):
"""Computes saliency map for random example wrt positive-class probability.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][0, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
predictor_matrix_norm, _ = normalize_images(
predictor_matrix=predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
predictor_matrix_norm = numpy.expand_dims(predictor_matrix_norm, axis=0)
saliency_matrix = saliency_for_class(
cnn_model_object=cnn_model_object, target_class=1,
list_of_input_matrices=[predictor_matrix_norm]
)[0][0, ...]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
min_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 1)
max_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 99)
wind_indices = numpy.array([
predictor_names.index(U_WIND_NAME), predictor_names.index(V_WIND_NAME)
], dtype=int)
max_colour_wind_speed_m_s01 = numpy.percentile(
numpy.absolute(predictor_matrix[..., wind_indices]), 99)
_, axes_objects_2d_list = plot_many_predictors_sans_barbs(
predictor_matrix=predictor_matrix, predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins,
max_colour_wind_speed_m_s01=max_colour_wind_speed_m_s01)
max_absolute_contour_level = numpy.percentile(
numpy.absolute(saliency_matrix), 99)
contour_interval = max_absolute_contour_level / 10
plot_many_saliency_maps(
saliency_matrix=saliency_matrix,
axes_objects_2d_list=axes_objects_2d_list,
colour_map_object=SALIENCY_COLOUR_MAP_OBJECT,
max_absolute_contour_level=max_absolute_contour_level,
contour_interval=contour_interval)
pyplot.show()
def saliency_example2(validation_image_dict, normalization_dict,
cnn_model_object):
"""Computes saliency map for random example wrt negative-class probability.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][0, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
predictor_matrix_norm, _ = normalize_images(
predictor_matrix=predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
predictor_matrix_norm = numpy.expand_dims(predictor_matrix_norm, axis=0)
saliency_matrix = saliency_for_class(
cnn_model_object=cnn_model_object, target_class=0,
list_of_input_matrices=[predictor_matrix_norm]
)[0][0, ...]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
min_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 1)
max_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 99)
wind_indices = numpy.array([
predictor_names.index(U_WIND_NAME), predictor_names.index(V_WIND_NAME)
], dtype=int)
max_colour_wind_speed_m_s01 = numpy.percentile(
numpy.absolute(predictor_matrix[..., wind_indices]), 99)
_, axes_objects_2d_list = plot_many_predictors_sans_barbs(
predictor_matrix=predictor_matrix, predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins,
max_colour_wind_speed_m_s01=max_colour_wind_speed_m_s01)
max_absolute_contour_level = numpy.percentile(
numpy.absolute(saliency_matrix), 99)
contour_interval = max_absolute_contour_level / 10
plot_many_saliency_maps(
saliency_matrix=saliency_matrix,
axes_objects_2d_list=axes_objects_2d_list,
colour_map_object=SALIENCY_COLOUR_MAP_OBJECT,
max_absolute_contour_level=max_absolute_contour_level,
contour_interval=contour_interval)
pyplot.show()
def saliency_example3(validation_image_dict, normalization_dict,
cnn_model_object):
"""Computes saliency map for extreme example wrt positive-class probability.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
example_index = numpy.unravel_index(
numpy.argmax(target_matrix_s01), target_matrix_s01.shape
)[0]
predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][
example_index, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
predictor_matrix_norm, _ = normalize_images(
predictor_matrix=predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
predictor_matrix_norm = numpy.expand_dims(predictor_matrix_norm, axis=0)
saliency_matrix = saliency_for_class(
cnn_model_object=cnn_model_object, target_class=1,
list_of_input_matrices=[predictor_matrix_norm]
)[0][0, ...]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
min_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 1)
max_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 99)
wind_indices = numpy.array([
predictor_names.index(U_WIND_NAME), predictor_names.index(V_WIND_NAME)
], dtype=int)
max_colour_wind_speed_m_s01 = numpy.percentile(
numpy.absolute(predictor_matrix[..., wind_indices]), 99)
_, axes_objects_2d_list = plot_many_predictors_sans_barbs(
predictor_matrix=predictor_matrix, predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins,
max_colour_wind_speed_m_s01=max_colour_wind_speed_m_s01)
max_absolute_contour_level = numpy.percentile(
numpy.absolute(saliency_matrix), 99)
contour_interval = max_absolute_contour_level / 10
plot_many_saliency_maps(
saliency_matrix=saliency_matrix,
axes_objects_2d_list=axes_objects_2d_list,
colour_map_object=SALIENCY_COLOUR_MAP_OBJECT,
max_absolute_contour_level=max_absolute_contour_level,
contour_interval=contour_interval)
pyplot.show()
def saliency_example4(validation_image_dict, normalization_dict,
cnn_model_object):
"""Computes saliency map for extreme example wrt negative-class probability.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`.
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
example_index = numpy.unravel_index(
numpy.argmax(target_matrix_s01), target_matrix_s01.shape
)[0]
predictor_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][
example_index, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
predictor_matrix_norm, _ = normalize_images(
predictor_matrix=predictor_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
predictor_matrix_norm = numpy.expand_dims(predictor_matrix_norm, axis=0)
saliency_matrix = saliency_for_class(
cnn_model_object=cnn_model_object, target_class=0,
list_of_input_matrices=[predictor_matrix_norm]
)[0][0, ...]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
min_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 1)
max_colour_temp_kelvins = numpy.percentile(
predictor_matrix[..., temperature_index], 99)
wind_indices = numpy.array([
predictor_names.index(U_WIND_NAME), predictor_names.index(V_WIND_NAME)
], dtype=int)
max_colour_wind_speed_m_s01 = numpy.percentile(
numpy.absolute(predictor_matrix[..., wind_indices]), 99)
_, axes_objects_2d_list = plot_many_predictors_sans_barbs(
predictor_matrix=predictor_matrix, predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins,
max_colour_wind_speed_m_s01=max_colour_wind_speed_m_s01)
max_absolute_contour_level = numpy.percentile(
numpy.absolute(saliency_matrix), 99)
contour_interval = max_absolute_contour_level / 10
plot_many_saliency_maps(
saliency_matrix=saliency_matrix,
axes_objects_2d_list=axes_objects_2d_list,
colour_map_object=SALIENCY_COLOUR_MAP_OBJECT,
max_absolute_contour_level=max_absolute_contour_level,
contour_interval=contour_interval)
pyplot.show()
def _create_smoothing_filter(
smoothing_radius_px, num_half_filter_rows, num_half_filter_columns,
num_channels):
"""Creates convolution filter for Gaussian smoothing.
M = number of rows in filter
N = number of columns in filter
C = number of channels (or "variables" or "features") to smooth. Each
channel will be smoothed independently.
:param smoothing_radius_px: e-folding radius (pixels).
:param num_half_filter_rows: Number of rows in one half of filter. Total
number of rows will be 2 * `num_half_filter_rows` + 1.
:param num_half_filter_columns: Same but for columns.
:param num_channels: C in the above discussion.
:return: weight_matrix: M-by-N-by-C-by-C numpy array of convolution weights.
"""
num_filter_rows = 2 * num_half_filter_rows + 1
num_filter_columns = 2 * num_half_filter_columns + 1
row_offsets_unique = numpy.linspace(
-num_half_filter_rows, num_half_filter_rows, num=num_filter_rows,
dtype=float)
column_offsets_unique = numpy.linspace(
-num_half_filter_columns, num_half_filter_columns,
num=num_filter_columns, dtype=float)
column_offset_matrix, row_offset_matrix = numpy.meshgrid(
column_offsets_unique, row_offsets_unique)
pixel_offset_matrix = numpy.sqrt(
row_offset_matrix ** 2 + column_offset_matrix ** 2)
small_weight_matrix = numpy.exp(
-pixel_offset_matrix ** 2 / (2 * smoothing_radius_px ** 2)
)
small_weight_matrix = small_weight_matrix / numpy.sum(small_weight_matrix)
weight_matrix = numpy.zeros(
(num_filter_rows, num_filter_columns, num_channels, num_channels)
)
for k in range(num_channels):
weight_matrix[..., k, k] = small_weight_matrix
return weight_matrix
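# Quick sanity check for the smoothing filter (a hypothetical usage sketch,
# not part of the original training code): with 2 half-rows, 2 half-columns,
# and 3 channels, the weight matrix should be 5 x 5 x 3 x 3, each channel's
# 5-by-5 kernel should sum to 1, and all cross-channel weights should be zero.
def _create_smoothing_filter_example():
    """Sanity-checks `_create_smoothing_filter` on a small case."""
    this_weight_matrix = _create_smoothing_filter(
        smoothing_radius_px=2., num_half_filter_rows=2,
        num_half_filter_columns=2, num_channels=3)
    print(this_weight_matrix.shape)
    print(numpy.sum(this_weight_matrix[..., 0, 0]))
    print(numpy.sum(this_weight_matrix[..., 0, 1]))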
def setup_ucn(
num_input_features, first_num_rows, first_num_columns,
upsampling_factors, num_output_channels,
use_activation_for_out_layer=False, use_bn_for_out_layer=True,
use_transposed_conv=False, smoothing_radius_px=None):
"""Creates (but does not train) upconvnet.
L = number of conv or deconv layers
:param num_input_features: Number of input features.
:param first_num_rows: Number of rows in input to first deconv layer. The
input features will be reshaped into a grid with this many rows.
:param first_num_columns: Same but for columns.
:param upsampling_factors: length-L numpy array of upsampling factors. Must
all be positive integers.
:param num_output_channels: Number of channels in output images.
:param use_activation_for_out_layer: Boolean flag. If True, activation will
be applied to output layer.
:param use_bn_for_out_layer: Boolean flag. If True, batch normalization
will be applied to output layer.
:param use_transposed_conv: Boolean flag. If True, upsampling will be done
with transposed-convolution layers. If False, each upsampling will be
done with an upsampling layer followed by a conv layer.
:param smoothing_radius_px: Smoothing radius (pixels). Gaussian smoothing
with this e-folding radius will be done after each upsampling. If
`smoothing_radius_px is None`, no smoothing will be done.
:return: ucn_model_object: Untrained instance of `keras.models.Model`.
"""
if smoothing_radius_px is not None:
num_half_smoothing_rows = int(numpy.round(
(NUM_SMOOTHING_FILTER_ROWS - 1) / 2
))
num_half_smoothing_columns = int(numpy.round(
(NUM_SMOOTHING_FILTER_COLUMNS - 1) / 2
))
regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)
input_layer_object = keras.layers.Input(shape=(num_input_features,))
current_num_filters = int(numpy.round(
num_input_features / (first_num_rows * first_num_columns)
))
layer_object = keras.layers.Reshape(
target_shape=(first_num_rows, first_num_columns, current_num_filters)
)(input_layer_object)
num_main_layers = len(upsampling_factors)
for i in range(num_main_layers):
this_upsampling_factor = upsampling_factors[i]
if i == num_main_layers - 1:
current_num_filters = num_output_channels + 0
elif this_upsampling_factor == 1:
current_num_filters = int(numpy.round(current_num_filters / 2))
if use_transposed_conv:
if this_upsampling_factor > 1:
this_padding_arg = 'same'
else:
this_padding_arg = 'valid'
layer_object = keras.layers.Conv2DTranspose(
filters=current_num_filters,
kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),
strides=(this_upsampling_factor, this_upsampling_factor),
padding=this_padding_arg, data_format='channels_last',
dilation_rate=(1, 1), activation=None, use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
kernel_regularizer=regularizer_object
)(layer_object)
else:
if this_upsampling_factor > 1:
try:
layer_object = keras.layers.UpSampling2D(
size=(this_upsampling_factor, this_upsampling_factor),
data_format='channels_last', interpolation='nearest'
)(layer_object)
except:
layer_object = keras.layers.UpSampling2D(
size=(this_upsampling_factor, this_upsampling_factor),
data_format='channels_last'
)(layer_object)
layer_object = keras.layers.Conv2D(
filters=current_num_filters,
kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),
strides=(1, 1), padding='same', data_format='channels_last',
dilation_rate=(1, 1), activation=None, use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
kernel_regularizer=regularizer_object
)(layer_object)
if this_upsampling_factor == 1:
layer_object = keras.layers.ZeroPadding2D(
padding=(1, 1), data_format='channels_last'
)(layer_object)
if smoothing_radius_px is not None:
this_weight_matrix = _create_smoothing_filter(
smoothing_radius_px=smoothing_radius_px,
num_half_filter_rows=num_half_smoothing_rows,
num_half_filter_columns=num_half_smoothing_columns,
num_channels=current_num_filters)
this_bias_vector = numpy.zeros(current_num_filters)
layer_object = keras.layers.Conv2D(
filters=current_num_filters,
kernel_size=(NUM_SMOOTHING_FILTER_ROWS,
NUM_SMOOTHING_FILTER_COLUMNS),
strides=(1, 1), padding='same', data_format='channels_last',
dilation_rate=(1, 1), activation=None, use_bias=True,
kernel_initializer='glorot_uniform', bias_initializer='zeros',
kernel_regularizer=regularizer_object, trainable=False,
weights=[this_weight_matrix, this_bias_vector]
)(layer_object)
if i < num_main_layers - 1 or use_activation_for_out_layer:
layer_object = keras.layers.LeakyReLU(
alpha=SLOPE_FOR_RELU
)(layer_object)
if i < num_main_layers - 1 or use_bn_for_out_layer:
layer_object = keras.layers.BatchNormalization(
axis=-1, center=True, scale=True
)(layer_object)
ucn_model_object = keras.models.Model(
inputs=input_layer_object, outputs=layer_object)
ucn_model_object.compile(
loss=keras.losses.mean_squared_error, optimizer=keras.optimizers.Adam())
ucn_model_object.summary()
return ucn_model_object
def get_cnn_flatten_layer(cnn_model_object):
"""Finds flattening layer in CNN.
This method assumes that there is only one flattening layer. If there are
several, this method will return the first (shallowest).
:param cnn_model_object: Instance of `keras.models.Model`.
:return: layer_name: Name of flattening layer.
:raises: TypeError: if flattening layer cannot be found.
"""
layer_names = [lyr.name for lyr in cnn_model_object.layers]
flattening_flags = numpy.array(
['flatten' in n for n in layer_names], dtype=bool)
flattening_indices = numpy.where(flattening_flags)[0]
if len(flattening_indices) == 0:
error_string = (
'Cannot find flattening layer in model. Layer names are listed '
'below.\n{0:s}'
).format(str(layer_names))
raise TypeError(error_string)
return layer_names[flattening_indices[0]]
def setup_ucn_example(cnn_model_object):
"""Example of UCN architecture (with transposed conv, no smoothing).
:param cnn_model_object: Trained CNN (instance of `keras.models.Model`).
"""
cnn_feature_layer_name = get_cnn_flatten_layer(cnn_model_object)
cnn_feature_layer_object = cnn_model_object.get_layer(
name=cnn_feature_layer_name)
cnn_feature_dimensions = numpy.array(
cnn_feature_layer_object.input.shape[1:], dtype=int)
num_input_features = numpy.prod(cnn_feature_dimensions)
first_num_rows = cnn_feature_dimensions[0]
first_num_columns = cnn_feature_dimensions[1]
num_output_channels = numpy.array(
cnn_model_object.input.shape[1:], dtype=int
)[-1]
upsampling_factors = numpy.array([2, 1, 1, 2, 1, 1], dtype=int)
ucn_model_object = setup_ucn(
num_input_features=num_input_features, first_num_rows=first_num_rows,
first_num_columns=first_num_columns,
upsampling_factors=upsampling_factors,
num_output_channels=num_output_channels,
use_transposed_conv=True, smoothing_radius_px=None)
def ucn_generator(netcdf_file_names, num_examples_per_batch, normalization_dict,
cnn_model_object, cnn_feature_layer_name):
"""Generates training examples for UCN (upconvolutional network) on the fly.
E = number of examples (storm objects)
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
Z = number of scalar features (neurons in layer `cnn_feature_layer_name` of
the CNN specified by `cnn_model_object`)
:param netcdf_file_names: 1-D list of paths to input (NetCDF) files.
:param num_examples_per_batch: Number of examples per training batch.
:param normalization_dict: See doc for `normalize_images`. You cannot leave
this as None.
:param cnn_model_object: Trained CNN model (instance of
`keras.models.Model`). This will be used to turn images stored in
`netcdf_file_names` into scalar features.
:param cnn_feature_layer_name: The "scalar features" will be the set of
activations from this layer.
:return: feature_matrix: E-by-Z numpy array of scalar features. These are
the "predictors" for the upconv network.
:return: target_matrix: E-by-M-by-N-by-C numpy array of target images.
These are the predictors for the CNN and the targets for the upconv
network.
:raises: TypeError: if `normalization_dict is None`.
"""
if normalization_dict is None:
error_string = 'normalization_dict cannot be None. Must be specified.'
raise TypeError(error_string)
random.shuffle(netcdf_file_names)
num_files = len(netcdf_file_names)
file_index = 0
num_examples_in_memory = 0
full_target_matrix = None
predictor_names = None
while True:
while num_examples_in_memory < num_examples_per_batch:
print('Reading data from: "{0:s}"...'.format(
netcdf_file_names[file_index]))
this_image_dict = read_image_file(netcdf_file_names[file_index])
predictor_names = this_image_dict[PREDICTOR_NAMES_KEY]
file_index += 1
if file_index >= num_files:
file_index = 0
if full_target_matrix is None or full_target_matrix.size == 0:
full_target_matrix = this_image_dict[PREDICTOR_MATRIX_KEY] + 0.
else:
full_target_matrix = numpy.concatenate(
(full_target_matrix, this_image_dict[PREDICTOR_MATRIX_KEY]),
axis=0)
num_examples_in_memory = full_target_matrix.shape[0]
batch_indices = numpy.linspace(
0, num_examples_in_memory - 1, num=num_examples_in_memory,
dtype=int)
batch_indices = numpy.random.choice(
batch_indices, size=num_examples_per_batch, replace=False)
target_matrix, _ = normalize_images(
predictor_matrix=full_target_matrix[batch_indices, ...],
predictor_names=predictor_names,
normalization_dict=normalization_dict)
target_matrix = target_matrix.astype('float32')
feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object, predictor_matrix=target_matrix,
verbose=False, output_layer_name=cnn_feature_layer_name)
num_examples_in_memory = 0
full_target_matrix = None
yield (feature_matrix, target_matrix)
def train_ucn(
ucn_model_object, training_file_names, normalization_dict,
cnn_model_object, cnn_file_name, cnn_feature_layer_name,
num_examples_per_batch, num_epochs, num_training_batches_per_epoch,
output_model_file_name, validation_file_names=None,
num_validation_batches_per_epoch=None):
"""Trains UCN (upconvolutional network).
:param ucn_model_object: Untrained instance of `keras.models.Model` (may be
created by `setup_ucn`), representing the upconv network.
:param training_file_names: 1-D list of paths to training files (must be
readable by `read_image_file`).
:param normalization_dict: See doc for `ucn_generator`.
:param cnn_model_object: Same.
:param cnn_file_name: Path to file with trained CNN (represented by
`cnn_model_object`). This is needed only for the output dictionary
(metadata).
:param cnn_feature_layer_name: Same.
:param num_examples_per_batch: Same.
:param num_epochs: Number of epochs.
:param num_training_batches_per_epoch: Number of training batches furnished
to model in each epoch.
:param output_model_file_name: Path to output file. The model will be saved
as an HDF5 file (extension should be ".h5", but this is not enforced).
:param validation_file_names: 1-D list of paths to validation files (must be
readable by `read_image_file`). If `validation_file_names is None`,
will omit on-the-fly validation.
:param num_validation_batches_per_epoch:
[used only if `validation_file_names is not None`]
Number of validation batches furnished to model in each epoch.
:return: ucn_metadata_dict: Dictionary with the following keys.
ucn_metadata_dict['training_file_names']: See input doc.
ucn_metadata_dict['normalization_dict']: Same.
ucn_metadata_dict['cnn_file_name']: Same.
ucn_metadata_dict['cnn_feature_layer_name']: Same.
ucn_metadata_dict['num_examples_per_batch']: Same.
ucn_metadata_dict['num_training_batches_per_epoch']: Same.
ucn_metadata_dict['validation_file_names']: Same.
ucn_metadata_dict['num_validation_batches_per_epoch']: Same.
"""
_create_directory(file_name=output_model_file_name)
if validation_file_names is None:
checkpoint_object = keras.callbacks.ModelCheckpoint(
filepath=output_model_file_name, monitor='loss', verbose=1,
save_best_only=False, save_weights_only=False, mode='min',
period=1)
else:
checkpoint_object = keras.callbacks.ModelCheckpoint(
filepath=output_model_file_name, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=False, mode='min',
period=1)
list_of_callback_objects = [checkpoint_object]
ucn_metadata_dict = {
TRAINING_FILES_KEY: training_file_names,
NORMALIZATION_DICT_KEY: normalization_dict,
CNN_FILE_KEY: cnn_file_name,
CNN_FEATURE_LAYER_KEY: cnn_feature_layer_name,
NUM_EXAMPLES_PER_BATCH_KEY: num_examples_per_batch,
NUM_TRAINING_BATCHES_KEY: num_training_batches_per_epoch,
VALIDATION_FILES_KEY: validation_file_names,
NUM_VALIDATION_BATCHES_KEY: num_validation_batches_per_epoch
}
training_generator = ucn_generator(
netcdf_file_names=training_file_names,
num_examples_per_batch=num_examples_per_batch,
normalization_dict=normalization_dict,
cnn_model_object=cnn_model_object,
cnn_feature_layer_name=cnn_feature_layer_name)
if validation_file_names is None:
ucn_model_object.fit_generator(
generator=training_generator,
steps_per_epoch=num_training_batches_per_epoch, epochs=num_epochs,
verbose=1, callbacks=list_of_callback_objects, workers=0)
return ucn_metadata_dict
early_stopping_object = keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=MIN_MSE_DECREASE_FOR_EARLY_STOP,
patience=NUM_EPOCHS_FOR_EARLY_STOPPING, verbose=1, mode='min')
list_of_callback_objects.append(early_stopping_object)
validation_generator = ucn_generator(
netcdf_file_names=validation_file_names,
num_examples_per_batch=num_examples_per_batch,
normalization_dict=normalization_dict,
cnn_model_object=cnn_model_object,
cnn_feature_layer_name=cnn_feature_layer_name)
ucn_model_object.fit_generator(
generator=training_generator,
steps_per_epoch=num_training_batches_per_epoch, epochs=num_epochs,
verbose=1, callbacks=list_of_callback_objects, workers=0,
validation_data=validation_generator,
validation_steps=num_validation_batches_per_epoch)
return ucn_metadata_dict
def train_ucn_example(ucn_model_object, training_file_names, normalization_dict,
cnn_model_object, cnn_file_name):
"""Actually trains the UCN (upconvolutional network).
:param ucn_model_object: See doc for `train_ucn`.
:param training_file_names: Same.
:param normalization_dict: Same.
:param cnn_model_object: See doc for `cnn_model_object` in `train_ucn`.
:param cnn_file_name: See doc for `train_ucn`.
"""
validation_file_names = find_many_image_files(
first_date_string='20150101', last_date_string='20151231')
ucn_file_name = '{0:s}/ucn_model.h5'.format(MODULE4_DIR_NAME)
ucn_metadata_dict = train_ucn(
ucn_model_object=ucn_model_object,
training_file_names=training_file_names,
normalization_dict=normalization_dict,
cnn_model_object=cnn_model_object, cnn_file_name=cnn_file_name,
cnn_feature_layer_name=get_cnn_flatten_layer(cnn_model_object),
num_examples_per_batch=100, num_epochs=10,
num_training_batches_per_epoch=10, output_model_file_name=ucn_file_name,
validation_file_names=validation_file_names,
num_validation_batches_per_epoch=10)
def apply_ucn_example1(
validation_image_dict, normalization_dict, cnn_model_object):
"""Uses upconvnet to reconstruct random validation example.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`,
representing the CNN that goes with the upconvnet.
"""
ucn_file_name = '{0:s}/pretrained_cnn/pretrained_ucn.h5'.format(
MODULE4_DIR_NAME)
ucn_metafile_name = find_model_metafile(model_file_name=ucn_file_name)
ucn_model_object = read_keras_model(ucn_file_name)
ucn_metadata_dict = read_model_metadata(ucn_metafile_name)
image_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][0, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
image_matrix_norm, _ = normalize_images(
predictor_matrix=image_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
image_matrix_norm = numpy.expand_dims(image_matrix_norm, axis=0)
feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object, predictor_matrix=image_matrix_norm,
output_layer_name=get_cnn_flatten_layer(cnn_model_object),
verbose=False)
reconstructed_image_matrix_norm = ucn_model_object.predict(
feature_matrix, batch_size=1)
reconstructed_image_matrix = denormalize_images(
predictor_matrix=reconstructed_image_matrix_norm,
predictor_names=predictor_names, normalization_dict=normalization_dict
)[0, ...]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
combined_temp_matrix_kelvins = numpy.concatenate(
(image_matrix[..., temperature_index],
reconstructed_image_matrix[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 99)
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=image_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Original image (CNN input)')
pyplot.show()
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=reconstructed_image_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Reconstructed image (upconvnet output)')
pyplot.show()
def apply_ucn_example2(
validation_image_dict, normalization_dict, ucn_model_object,
cnn_model_object):
"""Uses upconvnet to reconstruct extreme validation example.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param ucn_model_object: Trained instance of `keras.models.Model`,
representing the upconvnet.
:param cnn_model_object: Trained instance of `keras.models.Model`,
representing the CNN that goes with the upconvnet.
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
example_index = numpy.unravel_index(
numpy.argmax(target_matrix_s01), target_matrix_s01.shape
)[0]
image_matrix = validation_image_dict[PREDICTOR_MATRIX_KEY][
example_index, ...]
predictor_names = validation_image_dict[PREDICTOR_NAMES_KEY]
image_matrix_norm, _ = normalize_images(
predictor_matrix=image_matrix + 0.,
predictor_names=predictor_names, normalization_dict=normalization_dict)
image_matrix_norm = numpy.expand_dims(image_matrix_norm, axis=0)
feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object, predictor_matrix=image_matrix_norm,
output_layer_name=get_cnn_flatten_layer(cnn_model_object),
verbose=False)
reconstructed_image_matrix_norm = ucn_model_object.predict(
feature_matrix, batch_size=1)
reconstructed_image_matrix = denormalize_images(
predictor_matrix=reconstructed_image_matrix_norm,
predictor_names=predictor_names, normalization_dict=normalization_dict
)[0, ...]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
combined_temp_matrix_kelvins = numpy.concatenate(
(image_matrix[..., temperature_index],
reconstructed_image_matrix[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_temp_matrix_kelvins, 99)
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=image_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Original image (CNN input)')
pyplot.show()
figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=reconstructed_image_matrix,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
figure_object.suptitle('Reconstructed image (upconvnet output)')
pyplot.show()
def _normalize_features(feature_matrix, feature_means=None,
feature_standard_deviations=None):
"""Normalizes scalar features to z-scores.
E = number of examples (storm objects)
Z = number of features
:param feature_matrix: E-by-Z numpy array of features.
:param feature_means: length-Z numpy array of mean values. If
`feature_means is None`, these will be computed on the fly from
`feature_matrix`.
:param feature_standard_deviations: Same but with standard deviations.
:return: feature_matrix: Normalized version of input.
:return: feature_means: See input doc.
:return: feature_standard_deviations: See input doc.
"""
if feature_means is None or feature_standard_deviations is None:
feature_means = numpy.mean(feature_matrix, axis=0)
feature_standard_deviations = numpy.std(feature_matrix, axis=0, ddof=1)
num_examples = feature_matrix.shape[0]
num_features = feature_matrix.shape[1]
mean_matrix = numpy.reshape(feature_means, (1, num_features))
mean_matrix = numpy.repeat(mean_matrix, repeats=num_examples, axis=0)
stdev_matrix = numpy.reshape(feature_standard_deviations, (1, num_features))
stdev_matrix = numpy.repeat(stdev_matrix, repeats=num_examples, axis=0)
feature_matrix = (feature_matrix - mean_matrix) / stdev_matrix
return feature_matrix, feature_means, feature_standard_deviations
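# Usage sketch for `_normalize_features` (a hypothetical example with
# synthetic features, not part of the original module): normalization params
# computed from one feature matrix can be reused to normalize another matrix
# with the same features.
def _normalize_features_example():
    """Shows reuse of normalization params on a second feature matrix."""
    first_feature_matrix = numpy.random.normal(loc=10., scale=5., size=(100, 4))
    second_feature_matrix = numpy.random.normal(loc=10., scale=5., size=(20, 4))
    _, these_means, these_stdevs = _normalize_features(
        feature_matrix=first_feature_matrix)
    second_matrix_norm, _, _ = _normalize_features(
        feature_matrix=second_feature_matrix, feature_means=these_means,
        feature_standard_deviations=these_stdevs)
    print(numpy.mean(second_matrix_norm, axis=0))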
def _fit_svd(baseline_feature_matrix, test_feature_matrix,
percent_variance_to_keep):
"""Fits SVD (singular-value decomposition) model.
B = number of baseline examples (storm objects)
T = number of testing examples (storm objects)
Z = number of scalar features (produced by dense layer of a CNN)
K = number of modes (top eigenvectors) retained
The SVD model will be fit only to the baseline set, but both the baseline
and testing sets will be used to compute normalization parameters (means and
    standard deviations).  Previously, when only the baseline set was used to
    compute normalization params, the testing set had very large standard
    deviations, which made the novelty-detection results physically unrealistic.
:param baseline_feature_matrix: B-by-Z numpy array of features.
:param test_feature_matrix: T-by-Z numpy array of features.
:param percent_variance_to_keep: Percentage of variance to keep. Determines
how many eigenvectors (K in the above discussion) will be used in the
SVD model.
:return: svd_dictionary: Dictionary with the following keys.
svd_dictionary['eof_matrix']: Z-by-K numpy array, where each column is an
EOF (empirical orthogonal function).
svd_dictionary['feature_means']: length-Z numpy array with mean value of
each feature (before transformation).
svd_dictionary['feature_standard_deviations']: length-Z numpy array with
standard deviation of each feature (before transformation).
"""
combined_feature_matrix = numpy.concatenate(
(baseline_feature_matrix, test_feature_matrix), axis=0)
combined_feature_matrix, feature_means, feature_standard_deviations = (
_normalize_features(feature_matrix=combined_feature_matrix)
)
num_baseline_examples = baseline_feature_matrix.shape[0]
baseline_feature_matrix = combined_feature_matrix[
:num_baseline_examples, ...]
eigenvalues, eof_matrix = numpy.linalg.svd(baseline_feature_matrix)[1:]
eigenvalues = eigenvalues ** 2
explained_variances = eigenvalues / numpy.sum(eigenvalues)
cumulative_explained_variances = numpy.cumsum(explained_variances)
fraction_of_variance_to_keep = 0.01 * percent_variance_to_keep
num_modes_to_keep = 1 + numpy.where(
cumulative_explained_variances >= fraction_of_variance_to_keep
)[0][0]
print(
('Number of modes required to explain {0:f}% of variance: {1:d}'
).format(percent_variance_to_keep, num_modes_to_keep)
)
return {
EOF_MATRIX_KEY: numpy.transpose(eof_matrix)[..., :num_modes_to_keep],
FEATURE_MEANS_KEY: feature_means,
FEATURE_STDEVS_KEY: feature_standard_deviations
}
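# --- Illustrative sketch (not part of the original module) -------------------
# Fits the SVD model above to random baseline/test feature matrices and
# inspects how many modes (K) were retained.  The matrix sizes and the helper
# name `_fit_svd_example` are arbitrary assumptions for illustration.
def _fit_svd_example():
    """Sketch only: fit SVD to random features and check the EOF matrix."""
    baseline_feature_matrix = numpy.random.normal(size=(200, 64))
    test_feature_matrix = numpy.random.normal(size=(100, 64))
    svd_dictionary = _fit_svd(
        baseline_feature_matrix=baseline_feature_matrix,
        test_feature_matrix=test_feature_matrix,
        percent_variance_to_keep=97.5)
    # The EOF matrix is Z-by-K, so its second dimension is the number of
    # modes retained.
    print(svd_dictionary[EOF_MATRIX_KEY].shape)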
def _apply_svd(feature_vector, svd_dictionary):
"""Applies SVD (singular-value decomposition) model to new example.
Z = number of features
:param feature_vector: length-Z numpy array with feature values for one
example (storm object).
:param svd_dictionary: Dictionary created by `_fit_svd`.
:return: reconstructed_feature_vector: Reconstructed version of input.
"""
this_matrix = numpy.dot(
svd_dictionary[EOF_MATRIX_KEY],
numpy.transpose(svd_dictionary[EOF_MATRIX_KEY])
)
feature_vector_norm = (
(feature_vector - svd_dictionary[FEATURE_MEANS_KEY]) /
svd_dictionary[FEATURE_STDEVS_KEY]
)
reconstructed_feature_vector_norm = numpy.dot(
this_matrix, feature_vector_norm)
return (
svd_dictionary[FEATURE_MEANS_KEY] +
reconstructed_feature_vector_norm * svd_dictionary[FEATURE_STDEVS_KEY]
)
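# --- Illustrative sketch (not part of the original module) -------------------
# The novelty score used below for each test example is the Euclidean distance
# between its feature vector and the SVD reconstruction thereof.  This helper
# (`_svd_reconstruction_errors`, an assumed name) mirrors the inner loop of
# `do_novelty_detection`.
def _svd_reconstruction_errors(test_feature_matrix, svd_dictionary):
    """Sketch only: one SVD-reconstruction error per test example."""
    num_examples = test_feature_matrix.shape[0]
    svd_errors = numpy.full(num_examples, numpy.nan)
    for i in range(num_examples):
        this_reconstruction = _apply_svd(
            feature_vector=test_feature_matrix[i, ...],
            svd_dictionary=svd_dictionary)
        svd_errors[i] = numpy.linalg.norm(
            this_reconstruction - test_feature_matrix[i, ...])
    return svd_errors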
def do_novelty_detection(
baseline_image_matrix, test_image_matrix, image_normalization_dict,
predictor_names, cnn_model_object, cnn_feature_layer_name,
ucn_model_object, num_novel_test_images,
percent_svd_variance_to_keep=97.5):
"""Does novelty detection.
Specifically, this method follows the procedure in Wagstaff et al. (2018)
to determine which images in the test set are most novel with respect to the
baseline set.
NOTE: Both input and output images are (assumed to be) denormalized.
B = number of baseline examples (storm objects)
T = number of test examples (storm objects)
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param baseline_image_matrix: B-by-M-by-N-by-C numpy array of baseline
images.
:param test_image_matrix: T-by-M-by-N-by-C numpy array of test images.
:param image_normalization_dict: See doc for `normalize_images`.
:param predictor_names: length-C list of predictor names.
:param cnn_model_object: Trained CNN model (instance of
`keras.models.Model`). Will be used to turn images into scalar
features.
:param cnn_feature_layer_name: The "scalar features" will be the set of
activations from this layer.
:param ucn_model_object: Trained UCN model (instance of
`keras.models.Model`). Will be used to turn scalar features into
images.
:param num_novel_test_images: Number of novel test images to find.
:param percent_svd_variance_to_keep: See doc for `_fit_svd`.
:return: novelty_dict: Dictionary with the following keys. In the following
discussion, Q = number of novel test images found.
novelty_dict['novel_image_matrix_actual']: Q-by-M-by-N-by-C numpy array of
novel test images.
novelty_dict['novel_image_matrix_upconv']: Same as
"novel_image_matrix_actual" but reconstructed by the upconvnet.
novelty_dict['novel_image_matrix_upconv_svd']: Same as
"novel_image_matrix_actual" but reconstructed by SVD (singular-value
decomposition) and the upconvnet.
:raises: TypeError: if `image_normalization_dict is None`.
"""
if image_normalization_dict is None:
error_string = (
'image_normalization_dict cannot be None. Must be specified.')
raise TypeError(error_string)
num_test_examples = test_image_matrix.shape[0]
baseline_image_matrix_norm, _ = normalize_images(
predictor_matrix=baseline_image_matrix + 0.,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
test_image_matrix_norm, _ = normalize_images(
predictor_matrix=test_image_matrix + 0.,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
baseline_feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object,
predictor_matrix=baseline_image_matrix_norm, verbose=False,
output_layer_name=cnn_feature_layer_name)
test_feature_matrix = _apply_cnn(
cnn_model_object=cnn_model_object,
predictor_matrix=test_image_matrix_norm, verbose=False,
output_layer_name=cnn_feature_layer_name)
novel_indices = []
novel_image_matrix_upconv = None
novel_image_matrix_upconv_svd = None
for k in range(num_novel_test_images):
print('Finding {0:d}th of {1:d} novel test images...'.format(
k + 1, num_novel_test_images))
if len(novel_indices) == 0:
this_baseline_feature_matrix = baseline_feature_matrix + 0.
this_test_feature_matrix = test_feature_matrix + 0.
else:
novel_indices_numpy = numpy.array(novel_indices, dtype=int)
this_baseline_feature_matrix = numpy.concatenate(
(baseline_feature_matrix,
test_feature_matrix[novel_indices_numpy, ...]),
axis=0)
this_test_feature_matrix = numpy.delete(
test_feature_matrix, obj=novel_indices_numpy, axis=0)
svd_dictionary = _fit_svd(
baseline_feature_matrix=this_baseline_feature_matrix,
test_feature_matrix=this_test_feature_matrix,
percent_variance_to_keep=percent_svd_variance_to_keep)
svd_errors = numpy.full(num_test_examples, numpy.nan)
test_feature_matrix_svd = numpy.full(
test_feature_matrix.shape, numpy.nan)
for i in range(num_test_examples):
            print('Processing test example {0:d} of {1:d}...'.format(
                i + 1, num_test_examples))
if i in novel_indices:
continue
test_feature_matrix_svd[i, ...] = _apply_svd(
feature_vector=test_feature_matrix[i, ...],
svd_dictionary=svd_dictionary)
svd_errors[i] = numpy.linalg.norm(
test_feature_matrix_svd[i, ...] - test_feature_matrix[i, ...]
)
new_novel_index = numpy.nanargmax(svd_errors)
novel_indices.append(new_novel_index)
new_image_matrix_upconv = ucn_model_object.predict(
test_feature_matrix[[new_novel_index], ...], batch_size=1)
new_image_matrix_upconv_svd = ucn_model_object.predict(
test_feature_matrix_svd[[new_novel_index], ...], batch_size=1)
if novel_image_matrix_upconv is None:
novel_image_matrix_upconv = new_image_matrix_upconv + 0.
novel_image_matrix_upconv_svd = new_image_matrix_upconv_svd + 0.
else:
novel_image_matrix_upconv = numpy.concatenate(
(novel_image_matrix_upconv, new_image_matrix_upconv), axis=0)
novel_image_matrix_upconv_svd = numpy.concatenate(
(novel_image_matrix_upconv_svd, new_image_matrix_upconv_svd),
axis=0)
novel_indices = numpy.array(novel_indices, dtype=int)
novel_image_matrix_upconv = denormalize_images(
predictor_matrix=novel_image_matrix_upconv,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
novel_image_matrix_upconv_svd = denormalize_images(
predictor_matrix=novel_image_matrix_upconv_svd,
predictor_names=predictor_names,
normalization_dict=image_normalization_dict)
return {
NOVEL_IMAGES_ACTUAL_KEY: test_image_matrix[novel_indices, ...],
NOVEL_IMAGES_UPCONV_KEY: novel_image_matrix_upconv,
NOVEL_IMAGES_UPCONV_SVD_KEY: novel_image_matrix_upconv_svd
}
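# --- Illustrative sketch (not part of the original module) -------------------
# The "novelty" field plotted later is the difference between the two
# reconstructions returned above: the upconvnet reconstruction minus the
# upconvnet-plus-SVD reconstruction (X_up - X_up,svd).  The helper name
# `_novelty_field_example` is an assumed name for illustration.
def _novelty_field_example(novelty_dict, test_index=0):
    """Sketch only: novelty field for the [i]th-most novel test example."""
    return (
        novelty_dict[NOVEL_IMAGES_UPCONV_KEY][test_index, ...] -
        novelty_dict[NOVEL_IMAGES_UPCONV_SVD_KEY][test_index, ...]
    )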
def _plot_novelty_for_many_predictors(
novelty_matrix, predictor_names, max_absolute_temp_kelvins,
max_absolute_refl_dbz):
"""Plots novelty for each predictor on 2-D grid with wind barbs overlain.
M = number of rows in grid
N = number of columns in grid
C = number of predictors
:param novelty_matrix: M-by-N-by-C numpy array of novelty values.
:param predictor_names: length-C list of predictor names.
:param max_absolute_temp_kelvins: Max absolute temperature in colour scheme.
Minimum temperature in colour scheme will be
-1 * `max_absolute_temp_kelvins`, and this will be a diverging scheme
centered at zero.
:param max_absolute_refl_dbz: Same but for reflectivity.
:return: figure_object: See doc for `_init_figure_panels`.
:return: axes_objects_2d_list: Same.
"""
u_wind_matrix_m_s01 = novelty_matrix[
..., predictor_names.index(U_WIND_NAME)]
v_wind_matrix_m_s01 = novelty_matrix[
..., predictor_names.index(V_WIND_NAME)]
non_wind_predictor_names = [
p for p in predictor_names if p not in [U_WIND_NAME, V_WIND_NAME]
]
figure_object, axes_objects_2d_list = _init_figure_panels(
num_rows=len(non_wind_predictor_names), num_columns=1)
for m in range(len(non_wind_predictor_names)):
this_predictor_index = predictor_names.index(
non_wind_predictor_names[m])
if non_wind_predictor_names[m] == REFLECTIVITY_NAME:
this_min_colour_value = -1 * max_absolute_refl_dbz
this_max_colour_value = max_absolute_refl_dbz + 0.
this_colour_map_object = pyplot.cm.PuOr
else:
this_min_colour_value = -1 * max_absolute_temp_kelvins
this_max_colour_value = max_absolute_temp_kelvins + 0.
this_colour_map_object = pyplot.cm.bwr
this_colour_bar_object = plot_predictor_2d(
predictor_matrix=novelty_matrix[..., this_predictor_index],
colour_map_object=this_colour_map_object, colour_norm_object=None,
min_colour_value=this_min_colour_value,
max_colour_value=this_max_colour_value,
axes_object=axes_objects_2d_list[m][0])
plot_wind_2d(u_wind_matrix_m_s01=u_wind_matrix_m_s01,
v_wind_matrix_m_s01=v_wind_matrix_m_s01,
axes_object=axes_objects_2d_list[m][0])
this_colour_bar_object.set_label(non_wind_predictor_names[m])
return figure_object, axes_objects_2d_list
def plot_novelty_detection(image_dict, novelty_dict, test_index):
"""Plots results of novelty detection.
:param image_dict: Dictionary created by `read_many_image_files`, containing
input data for novelty detection.
:param novelty_dict: Dictionary created by `do_novelty_detection`,
containing results.
:param test_index: Array index. The [i]th-most novel test example will be
plotted, where i = `test_index`.
"""
predictor_names = image_dict[PREDICTOR_NAMES_KEY]
temperature_index = predictor_names.index(TEMPERATURE_NAME)
reflectivity_index = predictor_names.index(REFLECTIVITY_NAME)
image_matrix_actual = novelty_dict[NOVEL_IMAGES_ACTUAL_KEY][test_index, ...]
image_matrix_upconv = novelty_dict[NOVEL_IMAGES_UPCONV_KEY][test_index, ...]
image_matrix_upconv_svd = novelty_dict[
NOVEL_IMAGES_UPCONV_SVD_KEY][test_index, ...]
combined_matrix_kelvins = numpy.concatenate(
(image_matrix_actual[..., temperature_index],
image_matrix_upconv[..., temperature_index]),
axis=0)
min_colour_temp_kelvins = numpy.percentile(combined_matrix_kelvins, 1)
max_colour_temp_kelvins = numpy.percentile(combined_matrix_kelvins, 99)
this_figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=image_matrix_actual, predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
base_title_string = '{0:d}th-most novel example'.format(test_index + 1)
this_title_string = '{0:s}: actual'.format(base_title_string)
this_figure_object.suptitle(this_title_string)
pyplot.show()
this_figure_object, _ = plot_many_predictors_with_barbs(
predictor_matrix=image_matrix_upconv,
predictor_names=predictor_names,
min_colour_temp_kelvins=min_colour_temp_kelvins,
max_colour_temp_kelvins=max_colour_temp_kelvins)
this_title_string = r'{0:s}: upconvnet reconstruction'.format(
base_title_string)
this_title_string += r' ($\mathbf{X}_{up}$)'
this_figure_object.suptitle(this_title_string)
pyplot.show()
novelty_matrix = image_matrix_upconv - image_matrix_upconv_svd
max_absolute_temp_kelvins = numpy.percentile(
numpy.absolute(novelty_matrix[..., temperature_index]), 99)
max_absolute_refl_dbz = numpy.percentile(
numpy.absolute(novelty_matrix[..., reflectivity_index]), 99)
this_figure_object, _ = _plot_novelty_for_many_predictors(
novelty_matrix=novelty_matrix, predictor_names=predictor_names,
max_absolute_temp_kelvins=max_absolute_temp_kelvins,
max_absolute_refl_dbz=max_absolute_refl_dbz)
this_title_string = r'{0:s}: novelty'.format(
base_title_string)
this_title_string += r' ($\mathbf{X}_{up} - \mathbf{X}_{up,svd}$)'
this_figure_object.suptitle(this_title_string)
pyplot.show()
def do_novelty_detection_example(
validation_image_dict, normalization_dict, cnn_model_object,
ucn_model_object):
"""Runs novelty detection.
    The baseline images are the first 100 examples in the validation set, and
    the test images are the storm objects with the greatest maximum vorticity
    in the validation set (excluding the baseline examples).
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param normalization_dict: Dictionary created by
`get_image_normalization_params`.
:param cnn_model_object: Trained instance of `keras.models.Model`,
representing the CNN or "encoder".
:param ucn_model_object: Trained instance of `keras.models.Model`,
representing the UCN or "decoder".
"""
target_matrix_s01 = validation_image_dict[TARGET_MATRIX_KEY]
num_examples = target_matrix_s01.shape[0]
max_target_by_example_s01 = numpy.array(
[numpy.max(target_matrix_s01[i, ...]) for i in range(num_examples)]
)
test_indices = numpy.argsort(-1 * max_target_by_example_s01)[:100]
test_indices = test_indices[test_indices >= 100]
    baseline_indices = numpy.linspace(0, 99, num=100, dtype=int)
novelty_dict = do_novelty_detection(
baseline_image_matrix=validation_image_dict[
PREDICTOR_MATRIX_KEY][baseline_indices, ...],
test_image_matrix=validation_image_dict[
PREDICTOR_MATRIX_KEY][test_indices, ...],
image_normalization_dict=normalization_dict,
predictor_names=validation_image_dict[PREDICTOR_NAMES_KEY],
cnn_model_object=cnn_model_object,
cnn_feature_layer_name=get_cnn_flatten_layer(cnn_model_object),
ucn_model_object=ucn_model_object,
num_novel_test_images=4)
def plot_novelty_detection_example1(validation_image_dict, novelty_dict):
"""Plots first-most novel example, selon novelty detection.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param novelty_dict: Dictionary created by `do_novelty_detection`.
"""
plot_novelty_detection(image_dict=validation_image_dict,
novelty_dict=novelty_dict, test_index=0)
def plot_novelty_detection_example2(validation_image_dict, novelty_dict):
"""Plots second-most novel example, selon novelty detection.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param novelty_dict: Dictionary created by `do_novelty_detection`.
"""
plot_novelty_detection(image_dict=validation_image_dict,
novelty_dict=novelty_dict, test_index=1)
def plot_novelty_detection_example3(validation_image_dict, novelty_dict):
"""Plots third-most novel example, selon novelty detection.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param novelty_dict: Dictionary created by `do_novelty_detection`.
"""
plot_novelty_detection(image_dict=validation_image_dict,
novelty_dict=novelty_dict, test_index=2)
def plot_novelty_detection_example4(validation_image_dict, novelty_dict):
"""Plots fourth-most novel example, selon novelty detection.
:param validation_image_dict: Dictionary created by `read_many_image_files`.
:param novelty_dict: Dictionary created by `do_novelty_detection`.
"""
plot_novelty_detection(image_dict=validation_image_dict,
novelty_dict=novelty_dict, test_index=3)
|
[
"numpy.sum",
"keras.backend.epsilon",
"numpy.ones",
"keras.models.Model",
"numpy.argsort",
"numpy.linalg.svd",
"numpy.exp",
"keras.layers.Input",
"netCDF4.Dataset",
"keras.layers.Flatten",
"numpy.max",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots",
"keras.backend.gradients",
"matplotlib.pyplot.show",
"keras.backend.learning_phase",
"keras.optimizers.Adam",
"numpy.percentile",
"numpy.min",
"keras.layers.Conv2D",
"numpy.delete",
"time.gmtime",
"numpy.unravel_index",
"numpy.array",
"keras.backend.std",
"time.strptime",
"pickle.dump",
"module_4.attributes_diagrams.plot_attributes_diagram",
"module_4.roc_curves.plot_roc_curve",
"numpy.linalg.norm",
"glob.glob",
"keras.layers.Reshape",
"numpy.full",
"numpy.meshgrid",
"matplotlib.pyplot.close",
"numpy.transpose",
"copy.deepcopy",
"numpy.average",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"numpy.mod",
"matplotlib.pyplot.subplots_adjust",
"numpy.nanargmax",
"numpy.expand_dims",
"keras.layers.Dense",
"module_4.performance_diagrams.plot_performance_diagram",
"numpy.diff",
"matplotlib.pyplot.xlabel",
"keras.models.load_model",
"matplotlib.pyplot.title",
"numpy.argmax",
"numpy.mean",
"keras.layers.ZeroPadding2D",
"numpy.prod",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.cumsum",
"numpy.linspace",
"keras.layers.MaxPooling2D",
"numpy.repeat",
"json.dump",
"keras.layers.LeakyReLU",
"keras.layers.Conv2DTranspose",
"keras.layers.UpSampling2D",
"numpy.dot",
"keras.layers.BatchNormalization",
"keras.layers.Activation",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.absolute",
"random.shuffle",
"keras.regularizers.l1_l2",
"numpy.round",
"numpy.std",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.colorbar",
"numpy.reshape",
"numpy.random.choice",
"numpy.log2",
"numpy.random.permutation",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.xlim",
"json.load",
"numpy.logical_and",
"sklearn.metrics.auc",
"keras.callbacks.EarlyStopping",
"keras.backend.mean",
"numpy.where",
"numpy.sqrt"
] |
[((974, 992), 'numpy.full', 'numpy.full', (['(3)', '(0.0)'], {}), '(3, 0.0)\n', (984, 992), False, 'import numpy\n'), ((1080, 1113), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""font"""'], {'size': 'FONT_SIZE'}), "('font', size=FONT_SIZE)\n", (1089, 1113), True, 'import matplotlib.pyplot as pyplot\n'), ((1114, 1152), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'titlesize': 'FONT_SIZE'}), "('axes', titlesize=FONT_SIZE)\n", (1123, 1152), True, 'import matplotlib.pyplot as pyplot\n'), ((1153, 1191), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'labelsize': 'FONT_SIZE'}), "('axes', labelsize=FONT_SIZE)\n", (1162, 1191), True, 'import matplotlib.pyplot as pyplot\n'), ((1192, 1231), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""xtick"""'], {'labelsize': 'FONT_SIZE'}), "('xtick', labelsize=FONT_SIZE)\n", (1201, 1231), True, 'import matplotlib.pyplot as pyplot\n'), ((1232, 1271), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""ytick"""'], {'labelsize': 'FONT_SIZE'}), "('ytick', labelsize=FONT_SIZE)\n", (1241, 1271), True, 'import matplotlib.pyplot as pyplot\n'), ((1272, 1311), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""legend"""'], {'fontsize': 'FONT_SIZE'}), "('legend', fontsize=FONT_SIZE)\n", (1281, 1311), True, 'import matplotlib.pyplot as pyplot\n'), ((1312, 1352), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'titlesize': 'FONT_SIZE'}), "('figure', titlesize=FONT_SIZE)\n", (1321, 1352), True, 'import matplotlib.pyplot as pyplot\n'), ((4586, 4659), 'numpy.array', 'numpy.array', (['[0.1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70]'], {}), '([0.1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70])\n', (4597, 4659), False, 'import numpy\n'), ((902, 943), 'numpy.array', 'numpy.array', (['[166, 206, 227]'], {'dtype': 'float'}), '([166, 206, 227], dtype=float)\n', (913, 943), False, 'import numpy\n'), ((3734, 3760), 'numpy.array', 'numpy.array', (['[4, 233, 231]'], {}), '([4, 233, 231])\n', (3745, 3760), False, 'import numpy\n'), ((3762, 3788), 'numpy.array', 'numpy.array', (['[1, 159, 244]'], {}), '([1, 159, 244])\n', (3773, 3788), False, 'import numpy\n'), ((3794, 3818), 'numpy.array', 'numpy.array', (['[3, 0, 244]'], {}), '([3, 0, 244])\n', (3805, 3818), False, 'import numpy\n'), ((3820, 3844), 'numpy.array', 'numpy.array', (['[2, 253, 2]'], {}), '([2, 253, 2])\n', (3831, 3844), False, 'import numpy\n'), ((3850, 3874), 'numpy.array', 'numpy.array', (['[1, 197, 1]'], {}), '([1, 197, 1])\n', (3861, 3874), False, 'import numpy\n'), ((3876, 3900), 'numpy.array', 'numpy.array', (['[0, 142, 0]'], {}), '([0, 142, 0])\n', (3887, 3900), False, 'import numpy\n'), ((3906, 3932), 'numpy.array', 'numpy.array', (['[253, 248, 2]'], {}), '([253, 248, 2])\n', (3917, 3932), False, 'import numpy\n'), ((3934, 3960), 'numpy.array', 'numpy.array', (['[229, 188, 0]'], {}), '([229, 188, 0])\n', (3945, 3960), False, 'import numpy\n'), ((3966, 3992), 'numpy.array', 'numpy.array', (['[253, 149, 0]'], {}), '([253, 149, 0])\n', (3977, 3992), False, 'import numpy\n'), ((3994, 4018), 'numpy.array', 'numpy.array', (['[253, 0, 0]'], {}), '([253, 0, 0])\n', (4005, 4018), False, 'import numpy\n'), ((4024, 4048), 'numpy.array', 'numpy.array', (['[212, 0, 0]'], {}), '([212, 0, 0])\n', (4035, 4048), False, 'import numpy\n'), ((4050, 4074), 'numpy.array', 'numpy.array', (['[188, 0, 0]'], {}), '([188, 0, 0])\n', (4061, 4074), False, 'import numpy\n'), ((4080, 4106), 'numpy.array', 'numpy.array', (['[248, 0, 253]'], {}), '([248, 0, 253])\n', (4091, 4106), False, 'import numpy\n'), ((4108, 4135), 
'numpy.array', 'numpy.array', (['[152, 84, 198]'], {}), '([152, 84, 198])\n', (4119, 4135), False, 'import numpy\n'), ((4354, 4367), 'numpy.ones', 'numpy.ones', (['(3)'], {}), '(3)\n', (4364, 4367), False, 'import numpy\n'), ((8723, 8753), 'glob.glob', 'glob.glob', (['netcdf_file_pattern'], {}), '(netcdf_file_pattern)\n', (8732, 8753), False, 'import glob\n'), ((10128, 10161), 'netCDF4.Dataset', 'netCDF4.Dataset', (['netcdf_file_name'], {}), '(netcdf_file_name)\n', (10143, 10161), False, 'import netCDF4\n'), ((10179, 10252), 'numpy.array', 'numpy.array', (['dataset_object.variables[NETCDF_TRACK_ID_NAME][:]'], {'dtype': 'int'}), '(dataset_object.variables[NETCDF_TRACK_ID_NAME][:], dtype=int)\n', (10190, 10252), False, 'import numpy\n'), ((10280, 10355), 'numpy.array', 'numpy.array', (['dataset_object.variables[NETCDF_TRACK_STEP_NAME][:]'], {'dtype': 'int'}), '(dataset_object.variables[NETCDF_TRACK_STEP_NAME][:], dtype=int)\n', (10291, 10355), False, 'import numpy\n'), ((10914, 10987), 'numpy.array', 'numpy.array', (['dataset_object.variables[NETCDF_TARGET_NAME][:]'], {'dtype': 'float'}), '(dataset_object.variables[NETCDF_TARGET_NAME][:], dtype=float)\n', (10925, 10987), False, 'import numpy\n'), ((14277, 14401), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['num_rows', 'num_columns'], {'sharex': '(False)', 'sharey': '(False)', 'figsize': '(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)'}), '(num_rows, num_columns, sharex=False, sharey=False, figsize=\n (FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n', (14292, 14401), True, 'import matplotlib.pyplot as pyplot\n'), ((14690, 14829), 'matplotlib.pyplot.subplots_adjust', 'pyplot.subplots_adjust', ([], {'left': '(0.02)', 'bottom': '(0.02)', 'right': '(0.98)', 'top': '(0.95)', 'hspace': 'vertical_space_fraction', 'wspace': 'horizontal_space_fraction'}), '(left=0.02, bottom=0.02, right=0.98, top=0.95, hspace\n =vertical_space_fraction, wspace=horizontal_space_fraction)\n', (14712, 14829), True, 'import matplotlib.pyplot as pyplot\n'), ((16384, 16457), 'matplotlib.pyplot.cm.ScalarMappable', 'pyplot.cm.ScalarMappable', ([], {'cmap': 'colour_map_object', 'norm': 'colour_norm_object'}), '(cmap=colour_map_object, norm=colour_norm_object)\n', (16408, 16457), True, 'import matplotlib.pyplot as pyplot\n'), ((16860, 16995), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', ([], {'ax': 'axes_object', 'mappable': 'scalar_mappable_object', 'orientation': 'orientation_string', 'pad': 'padding', 'extend': 'extend_string'}), '(ax=axes_object, mappable=scalar_mappable_object,\n orientation=orientation_string, pad=padding, extend=extend_string)\n', (16875, 16995), True, 'import matplotlib.pyplot as pyplot\n'), ((19558, 19632), 'numpy.linspace', 'numpy.linspace', (['(0)', 'num_grid_columns'], {'num': '(num_grid_columns + 1)', 'dtype': 'float'}), '(0, num_grid_columns, num=num_grid_columns + 1, dtype=float)\n', (19572, 19632), False, 'import numpy\n'), ((19784, 19852), 'numpy.linspace', 'numpy.linspace', (['(0)', 'num_grid_rows'], {'num': '(num_grid_rows + 1)', 'dtype': 'float'}), '(0, num_grid_rows, num=num_grid_rows + 1, dtype=float)\n', (19798, 19852), False, 'import numpy\n'), ((20019, 20067), 'numpy.meshgrid', 'numpy.meshgrid', (['x_coords_unique', 'y_coords_unique'], {}), '(x_coords_unique, y_coords_unique)\n', (20033, 20067), False, 'import numpy\n'), ((20146, 20209), 'numpy.sqrt', 'numpy.sqrt', (['(u_wind_matrix_m_s01 ** 2 + v_wind_matrix_m_s01 ** 2)'], {}), '(u_wind_matrix_m_s01 ** 2 + v_wind_matrix_m_s01 ** 2)\n', (20156, 20209), False, 'import numpy\n'), 
((26898, 26911), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (26909, 26911), True, 'import matplotlib.pyplot as pyplot\n'), ((27872, 27885), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (27883, 27885), True, 'import matplotlib.pyplot as pyplot\n'), ((28966, 29045), 'numpy.array', 'numpy.array', (['[intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size]'], {}), '([intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size])\n', (28977, 29045), False, 'import numpy\n'), ((29115, 29164), 'numpy.average', 'numpy.average', (['these_means'], {'weights': 'these_weights'}), '(these_means, weights=these_weights)\n', (29128, 29164), False, 'import numpy\n'), ((29372, 29421), 'numpy.average', 'numpy.average', (['these_means'], {'weights': 'these_weights'}), '(these_means, weights=these_weights)\n', (29385, 29421), False, 'import numpy\n'), ((29962, 30105), 'numpy.sqrt', 'numpy.sqrt', (['(multiplier * (intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] - \n intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2))'], {}), '(multiplier * (intermediate_normalization_dict[\n MEAN_OF_SQUARES_KEY] - intermediate_normalization_dict[MEAN_VALUE_KEY] **\n 2))\n', (29972, 30105), False, 'import numpy\n'), ((36284, 36299), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (36295, 36299), False, 'import numpy\n'), ((36929, 36982), 'numpy.percentile', 'numpy.percentile', (['max_target_values', 'percentile_level'], {}), '(max_target_values, percentile_level)\n', (36945, 36982), False, 'import numpy\n'), ((38165, 38204), 'numpy.full', 'numpy.full', (['num_examples', '(-1)'], {'dtype': 'int'}), '(num_examples, -1, dtype=int)\n', (38175, 38204), False, 'import numpy\n'), ((40196, 40270), 'numpy.linspace', 'numpy.linspace', (['(0)', '(num_dense_layers - 1)'], {'num': 'num_dense_layers', 'dtype': 'float'}), '(0, num_dense_layers - 1, num=num_dense_layers, dtype=float)\n', (40210, 40270), False, 'import numpy\n'), ((41017, 41069), 'keras.regularizers.l1_l2', 'keras.regularizers.l1_l2', ([], {'l1': 'L1_WEIGHT', 'l2': 'L2_WEIGHT'}), '(l1=L1_WEIGHT, l2=L2_WEIGHT)\n', (41041, 41069), False, 'import keras\n'), ((41145, 41220), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(num_grid_rows, num_grid_columns, num_predictors)'}), '(shape=(num_grid_rows, num_grid_columns, num_predictors))\n', (41163, 41220), False, 'import keras\n'), ((43209, 43237), 'numpy.prod', 'numpy.prod', (['these_dimensions'], {}), '(these_dimensions)\n', (43219, 43237), False, 'import numpy\n'), ((44995, 45070), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layer_object', 'outputs': 'current_layer_object'}), '(inputs=input_layer_object, outputs=current_layer_object)\n', (45013, 45070), False, 'import keras\n'), ((46961, 46994), 'random.shuffle', 'random.shuffle', (['netcdf_file_names'], {}), '(netcdf_file_names)\n', (46975, 46994), False, 'import random\n'), ((52825, 52995), 'keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': 'MIN_XENTROPY_DECREASE_FOR_EARLY_STOP', 'patience': 'NUM_EPOCHS_FOR_EARLY_STOPPING', 'verbose': '(1)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=\n MIN_XENTROPY_DECREASE_FOR_EARLY_STOP, patience=\n NUM_EPOCHS_FOR_EARLY_STOPPING, verbose=1, mode='min')\n", (52854, 52995), False, 'import keras\n'), ((54615, 54691), 'keras.models.load_model', 'keras.models.load_model', (['hdf5_file_name'], {'custom_objects': 'METRIC_FUNCTION_DICT'}), '(hdf5_file_name, 
custom_objects=METRIC_FUNCTION_DICT)\n', (54638, 54691), False, 'import keras\n'), ((56255, 56289), 'copy.deepcopy', 'copy.deepcopy', (['model_metadata_dict'], {}), '(model_metadata_dict)\n', (56268, 56289), False, 'import copy\n'), ((63260, 63367), 'module_4.roc_curves.plot_roc_curve', 'roc_curves.plot_roc_curve', ([], {'observed_labels': 'target_values', 'forecast_probabilities': 'forecast_probabilities'}), '(observed_labels=target_values,\n forecast_probabilities=forecast_probabilities)\n', (63285, 63367), False, 'from module_4 import roc_curves\n'), ((63409, 63462), 'sklearn.metrics.auc', 'scikit_learn_auc', (['pofd_by_threshold', 'pod_by_threshold'], {}), '(pofd_by_threshold, pod_by_threshold)\n', (63425, 63462), True, 'from sklearn.metrics import auc as scikit_learn_auc\n'), ((63548, 63574), 'matplotlib.pyplot.title', 'pyplot.title', (['title_string'], {}), '(title_string)\n', (63560, 63574), True, 'import matplotlib.pyplot as pyplot\n'), ((63579, 63592), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (63590, 63592), True, 'import matplotlib.pyplot as pyplot\n'), ((63795, 63857), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['roc_curve_file_name'], {'dpi': 'FIGURE_RESOLUTION_DPI'}), '(roc_curve_file_name, dpi=FIGURE_RESOLUTION_DPI)\n', (63809, 63857), True, 'import matplotlib.pyplot as pyplot\n'), ((63862, 63876), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (63874, 63876), True, 'import matplotlib.pyplot as pyplot\n'), ((63882, 64009), 'module_4.performance_diagrams.plot_performance_diagram', 'performance_diagrams.plot_performance_diagram', ([], {'observed_labels': 'target_values', 'forecast_probabilities': 'forecast_probabilities'}), '(observed_labels=target_values,\n forecast_probabilities=forecast_probabilities)\n', (63927, 64009), False, 'from module_4 import performance_diagrams\n'), ((64027, 64040), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (64038, 64040), True, 'import matplotlib.pyplot as pyplot\n'), ((64214, 64279), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['perf_diagram_file_name'], {'dpi': 'FIGURE_RESOLUTION_DPI'}), '(perf_diagram_file_name, dpi=FIGURE_RESOLUTION_DPI)\n', (64228, 64279), True, 'import matplotlib.pyplot as pyplot\n'), ((64284, 64298), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (64296, 64298), True, 'import matplotlib.pyplot as pyplot\n'), ((64304, 64442), 'module_4.attributes_diagrams.plot_attributes_diagram', 'attributes_diagrams.plot_attributes_diagram', ([], {'observed_labels': 'target_values', 'forecast_probabilities': 'forecast_probabilities', 'num_bins': '(20)'}), '(observed_labels=target_values,\n forecast_probabilities=forecast_probabilities, num_bins=20)\n', (64347, 64442), False, 'from module_4 import attributes_diagrams\n'), ((64460, 64473), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (64471, 64473), True, 'import matplotlib.pyplot as pyplot\n'), ((64646, 64711), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['attr_diagram_file_name'], {'dpi': 'FIGURE_RESOLUTION_DPI'}), '(attr_diagram_file_name, dpi=FIGURE_RESOLUTION_DPI)\n', (64660, 64711), True, 'import matplotlib.pyplot as pyplot\n'), ((64716, 64730), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (64728, 64730), True, 'import matplotlib.pyplot as pyplot\n'), ((72495, 72532), 'pickle.dump', 'pickle.dump', (['result_dict', 'file_handle'], {}), '(result_dict, file_handle)\n', (72506, 72532), False, 'import pickle\n'), ((73691, 73704), 'matplotlib.pyplot.xlim', 'pyplot.xlim', ([], {}), '()\n', 
(73702, 73704), True, 'import matplotlib.pyplot as pyplot\n'), ((74574, 74600), 'numpy.argsort', 'numpy.argsort', (['cost_values'], {}), '(cost_values)\n', (74587, 74600), False, 'import numpy\n'), ((75070, 75144), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)'}), '(1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n', (75085, 75144), True, 'import matplotlib.pyplot as pyplot\n'), ((75317, 75338), 'matplotlib.pyplot.yticks', 'pyplot.yticks', (['[]', '[]'], {}), '([], [])\n', (75330, 75338), True, 'import matplotlib.pyplot as pyplot\n'), ((75343, 75378), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Predictor permuted"""'], {}), "('Predictor permuted')\n", (75356, 75378), True, 'import matplotlib.pyplot as pyplot\n'), ((75609, 75622), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (75620, 75622), True, 'import matplotlib.pyplot as pyplot\n'), ((75745, 75804), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['output_file_name'], {'dpi': 'FIGURE_RESOLUTION_DPI'}), '(output_file_name, dpi=FIGURE_RESOLUTION_DPI)\n', (75759, 75804), True, 'import matplotlib.pyplot as pyplot\n'), ((75809, 75823), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (75821, 75823), True, 'import matplotlib.pyplot as pyplot\n'), ((76553, 76627), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)'}), '(1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n', (76568, 76627), True, 'import matplotlib.pyplot as pyplot\n'), ((76800, 76821), 'matplotlib.pyplot.yticks', 'pyplot.yticks', (['[]', '[]'], {}), '([], [])\n', (76813, 76821), True, 'import matplotlib.pyplot as pyplot\n'), ((76826, 76861), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Predictor permuted"""'], {}), "('Predictor permuted')\n", (76839, 76861), True, 'import matplotlib.pyplot as pyplot\n'), ((77092, 77105), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (77103, 77105), True, 'import matplotlib.pyplot as pyplot\n'), ((77228, 77287), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['output_file_name'], {'dpi': 'FIGURE_RESOLUTION_DPI'}), '(output_file_name, dpi=FIGURE_RESOLUTION_DPI)\n', (77242, 77287), True, 'import matplotlib.pyplot as pyplot\n'), ((77292, 77306), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (77304, 77306), True, 'import matplotlib.pyplot as pyplot\n'), ((80242, 80289), 'keras.backend.gradients', 'K.gradients', (['loss_tensor', 'list_of_input_tensors'], {}), '(loss_tensor, list_of_input_tensors)\n', (80253, 80289), True, 'from keras import backend as K\n'), ((84274, 84327), 'numpy.expand_dims', 'numpy.expand_dims', (['orig_predictor_matrix_norm'], {'axis': '(0)'}), '(orig_predictor_matrix_norm, axis=0)\n', (84291, 84327), False, 'import numpy\n'), ((84822, 84952), 'numpy.concatenate', 'numpy.concatenate', (['(orig_predictor_matrix[..., temperature_index], optimized_predictor_matrix[\n ..., temperature_index])'], {'axis': '(0)'}), '((orig_predictor_matrix[..., temperature_index],\n optimized_predictor_matrix[..., temperature_index]), axis=0)\n', (84839, 84952), False, 'import numpy\n'), ((85006, 85055), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(1)'], {}), '(combined_temp_matrix_kelvins, 1)\n', (85022, 85055), False, 'import numpy\n'), ((85086, 85136), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(99)'], {}), '(combined_temp_matrix_kelvins, 99)\n', (85102, 
85136), False, 'import numpy\n'), ((85467, 85480), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (85478, 85480), True, 'import matplotlib.pyplot as pyplot\n'), ((85820, 85833), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (85831, 85833), True, 'import matplotlib.pyplot as pyplot\n'), ((86605, 86658), 'numpy.expand_dims', 'numpy.expand_dims', (['orig_predictor_matrix_norm'], {'axis': '(0)'}), '(orig_predictor_matrix_norm, axis=0)\n', (86622, 86658), False, 'import numpy\n'), ((87153, 87283), 'numpy.concatenate', 'numpy.concatenate', (['(orig_predictor_matrix[..., temperature_index], optimized_predictor_matrix[\n ..., temperature_index])'], {'axis': '(0)'}), '((orig_predictor_matrix[..., temperature_index],\n optimized_predictor_matrix[..., temperature_index]), axis=0)\n', (87170, 87283), False, 'import numpy\n'), ((87337, 87386), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(1)'], {}), '(combined_temp_matrix_kelvins, 1)\n', (87353, 87386), False, 'import numpy\n'), ((87417, 87467), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(99)'], {}), '(combined_temp_matrix_kelvins, 99)\n', (87433, 87467), False, 'import numpy\n'), ((87798, 87811), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (87809, 87811), True, 'import matplotlib.pyplot as pyplot\n'), ((88151, 88164), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (88162, 88164), True, 'import matplotlib.pyplot as pyplot\n'), ((89139, 89192), 'numpy.expand_dims', 'numpy.expand_dims', (['orig_predictor_matrix_norm'], {'axis': '(0)'}), '(orig_predictor_matrix_norm, axis=0)\n', (89156, 89192), False, 'import numpy\n'), ((89687, 89817), 'numpy.concatenate', 'numpy.concatenate', (['(orig_predictor_matrix[..., temperature_index], optimized_predictor_matrix[\n ..., temperature_index])'], {'axis': '(0)'}), '((orig_predictor_matrix[..., temperature_index],\n optimized_predictor_matrix[..., temperature_index]), axis=0)\n', (89704, 89817), False, 'import numpy\n'), ((89871, 89920), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(1)'], {}), '(combined_temp_matrix_kelvins, 1)\n', (89887, 89920), False, 'import numpy\n'), ((89951, 90001), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(99)'], {}), '(combined_temp_matrix_kelvins, 99)\n', (89967, 90001), False, 'import numpy\n'), ((90332, 90345), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (90343, 90345), True, 'import matplotlib.pyplot as pyplot\n'), ((90685, 90698), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (90696, 90698), True, 'import matplotlib.pyplot as pyplot\n'), ((91673, 91726), 'numpy.expand_dims', 'numpy.expand_dims', (['orig_predictor_matrix_norm'], {'axis': '(0)'}), '(orig_predictor_matrix_norm, axis=0)\n', (91690, 91726), False, 'import numpy\n'), ((92221, 92351), 'numpy.concatenate', 'numpy.concatenate', (['(orig_predictor_matrix[..., temperature_index], optimized_predictor_matrix[\n ..., temperature_index])'], {'axis': '(0)'}), '((orig_predictor_matrix[..., temperature_index],\n optimized_predictor_matrix[..., temperature_index]), axis=0)\n', (92238, 92351), False, 'import numpy\n'), ((92405, 92454), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(1)'], {}), '(combined_temp_matrix_kelvins, 1)\n', (92421, 92454), False, 'import numpy\n'), ((92485, 92535), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(99)'], {}), '(combined_temp_matrix_kelvins, 
99)\n', (92501, 92535), False, 'import numpy\n'), ((92866, 92879), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (92877, 92879), True, 'import matplotlib.pyplot as pyplot\n'), ((93219, 93232), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (93230, 93232), True, 'import matplotlib.pyplot as pyplot\n'), ((94395, 94442), 'keras.backend.gradients', 'K.gradients', (['loss_tensor', 'list_of_input_tensors'], {}), '(loss_tensor, list_of_input_tensors)\n', (94406, 94442), True, 'from keras import backend as K\n'), ((97472, 97546), 'numpy.linspace', 'numpy.linspace', (['(0)', 'num_grid_columns'], {'num': '(num_grid_columns + 1)', 'dtype': 'float'}), '(0, num_grid_columns, num=num_grid_columns + 1, dtype=float)\n', (97486, 97546), False, 'import numpy\n'), ((97698, 97766), 'numpy.linspace', 'numpy.linspace', (['(0)', 'num_grid_rows'], {'num': '(num_grid_rows + 1)', 'dtype': 'float'}), '(0, num_grid_rows, num=num_grid_rows + 1, dtype=float)\n', (97712, 97766), False, 'import numpy\n'), ((97933, 97981), 'numpy.meshgrid', 'numpy.meshgrid', (['x_coords_unique', 'y_coords_unique'], {}), '(x_coords_unique, y_coords_unique)\n', (97947, 97981), False, 'import numpy\n'), ((98197, 98267), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'max_absolute_contour_level'], {'num': 'half_num_contours'}), '(0.0, max_absolute_contour_level, num=half_num_contours)\n', (98211, 98267), False, 'import numpy\n'), ((101068, 101116), 'numpy.expand_dims', 'numpy.expand_dims', (['predictor_matrix_norm'], {'axis': '(0)'}), '(predictor_matrix_norm, axis=0)\n', (101085, 101116), False, 'import numpy\n'), ((101386, 101447), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(1)'], {}), '(predictor_matrix[..., temperature_index], 1)\n', (101402, 101447), False, 'import numpy\n'), ((101487, 101549), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(99)'], {}), '(predictor_matrix[..., temperature_index], 99)\n', (101503, 101549), False, 'import numpy\n'), ((102566, 102579), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (102577, 102579), True, 'import matplotlib.pyplot as pyplot\n'), ((103371, 103419), 'numpy.expand_dims', 'numpy.expand_dims', (['predictor_matrix_norm'], {'axis': '(0)'}), '(predictor_matrix_norm, axis=0)\n', (103388, 103419), False, 'import numpy\n'), ((103689, 103750), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(1)'], {}), '(predictor_matrix[..., temperature_index], 1)\n', (103705, 103750), False, 'import numpy\n'), ((103790, 103852), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(99)'], {}), '(predictor_matrix[..., temperature_index], 99)\n', (103806, 103852), False, 'import numpy\n'), ((104869, 104882), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (104880, 104882), True, 'import matplotlib.pyplot as pyplot\n'), ((105877, 105925), 'numpy.expand_dims', 'numpy.expand_dims', (['predictor_matrix_norm'], {'axis': '(0)'}), '(predictor_matrix_norm, axis=0)\n', (105894, 105925), False, 'import numpy\n'), ((106195, 106256), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(1)'], {}), '(predictor_matrix[..., temperature_index], 1)\n', (106211, 106256), False, 'import numpy\n'), ((106296, 106358), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(99)'], {}), '(predictor_matrix[..., temperature_index], 99)\n', (106312, 106358), False, 'import 
numpy\n'), ((107375, 107388), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (107386, 107388), True, 'import matplotlib.pyplot as pyplot\n'), ((108383, 108431), 'numpy.expand_dims', 'numpy.expand_dims', (['predictor_matrix_norm'], {'axis': '(0)'}), '(predictor_matrix_norm, axis=0)\n', (108400, 108431), False, 'import numpy\n'), ((108701, 108762), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(1)'], {}), '(predictor_matrix[..., temperature_index], 1)\n', (108717, 108762), False, 'import numpy\n'), ((108802, 108864), 'numpy.percentile', 'numpy.percentile', (['predictor_matrix[..., temperature_index]', '(99)'], {}), '(predictor_matrix[..., temperature_index], 99)\n', (108818, 108864), False, 'import numpy\n'), ((109881, 109894), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (109892, 109894), True, 'import matplotlib.pyplot as pyplot\n'), ((110812, 110910), 'numpy.linspace', 'numpy.linspace', (['(-num_half_filter_rows)', 'num_half_filter_rows'], {'num': 'num_filter_rows', 'dtype': 'float'}), '(-num_half_filter_rows, num_half_filter_rows, num=\n num_filter_rows, dtype=float)\n', (110826, 110910), False, 'import numpy\n'), ((110951, 111058), 'numpy.linspace', 'numpy.linspace', (['(-num_half_filter_columns)', 'num_half_filter_columns'], {'num': 'num_filter_columns', 'dtype': 'float'}), '(-num_half_filter_columns, num_half_filter_columns, num=\n num_filter_columns, dtype=float)\n', (110965, 111058), False, 'import numpy\n'), ((111118, 111175), 'numpy.meshgrid', 'numpy.meshgrid', (['column_offsets_unique', 'row_offsets_unique'], {}), '(column_offsets_unique, row_offsets_unique)\n', (111132, 111175), False, 'import numpy\n'), ((111212, 111274), 'numpy.sqrt', 'numpy.sqrt', (['(row_offset_matrix ** 2 + column_offset_matrix ** 2)'], {}), '(row_offset_matrix ** 2 + column_offset_matrix ** 2)\n', (111222, 111274), False, 'import numpy\n'), ((111311, 111380), 'numpy.exp', 'numpy.exp', (['(-pixel_offset_matrix ** 2 / (2 * smoothing_radius_px ** 2))'], {}), '(-pixel_offset_matrix ** 2 / (2 * smoothing_radius_px ** 2))\n', (111320, 111380), False, 'import numpy\n'), ((111495, 111573), 'numpy.zeros', 'numpy.zeros', (['(num_filter_rows, num_filter_columns, num_channels, num_channels)'], {}), '((num_filter_rows, num_filter_columns, num_channels, num_channels))\n', (111506, 111573), False, 'import numpy\n'), ((113554, 113606), 'keras.regularizers.l1_l2', 'keras.regularizers.l1_l2', ([], {'l1': 'L1_WEIGHT', 'l2': 'L2_WEIGHT'}), '(l1=L1_WEIGHT, l2=L2_WEIGHT)\n', (113578, 113606), False, 'import keras\n'), ((113632, 113679), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(num_input_features,)'}), '(shape=(num_input_features,))\n', (113650, 113679), False, 'import keras\n'), ((117709, 117776), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layer_object', 'outputs': 'layer_object'}), '(inputs=input_layer_object, outputs=layer_object)\n', (117727, 117776), False, 'import keras\n'), ((118456, 118520), 'numpy.array', 'numpy.array', (["[('flatten' in n) for n in layer_names]"], {'dtype': 'bool'}), "([('flatten' in n) for n in layer_names], dtype=bool)\n", (118467, 118520), False, 'import numpy\n'), ((119273, 119337), 'numpy.array', 'numpy.array', (['cnn_feature_layer_object.input.shape[1:]'], {'dtype': 'int'}), '(cnn_feature_layer_object.input.shape[1:], dtype=int)\n', (119284, 119337), False, 'import numpy\n'), ((119373, 119407), 'numpy.prod', 'numpy.prod', (['cnn_feature_dimensions'], {}), '(cnn_feature_dimensions)\n', 
(119383, 119407), False, 'import numpy\n'), ((119632, 119674), 'numpy.array', 'numpy.array', (['[2, 1, 1, 2, 1, 1]'], {'dtype': 'int'}), '([2, 1, 1, 2, 1, 1], dtype=int)\n', (119643, 119674), False, 'import numpy\n'), ((121610, 121643), 'random.shuffle', 'random.shuffle', (['netcdf_file_names'], {}), '(netcdf_file_names)\n', (121624, 121643), False, 'import random\n'), ((127355, 127519), 'keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': 'MIN_MSE_DECREASE_FOR_EARLY_STOP', 'patience': 'NUM_EPOCHS_FOR_EARLY_STOPPING', 'verbose': '(1)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=\n MIN_MSE_DECREASE_FOR_EARLY_STOP, patience=NUM_EPOCHS_FOR_EARLY_STOPPING,\n verbose=1, mode='min')\n", (127384, 127519), False, 'import keras\n'), ((130523, 130567), 'numpy.expand_dims', 'numpy.expand_dims', (['image_matrix_norm'], {'axis': '(0)'}), '(image_matrix_norm, axis=0)\n', (130540, 130567), False, 'import numpy\n'), ((131179, 131300), 'numpy.concatenate', 'numpy.concatenate', (['(image_matrix[..., temperature_index], reconstructed_image_matrix[...,\n temperature_index])'], {'axis': '(0)'}), '((image_matrix[..., temperature_index],\n reconstructed_image_matrix[..., temperature_index]), axis=0)\n', (131196, 131300), False, 'import numpy\n'), ((131354, 131403), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(1)'], {}), '(combined_temp_matrix_kelvins, 1)\n', (131370, 131403), False, 'import numpy\n'), ((131434, 131484), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(99)'], {}), '(combined_temp_matrix_kelvins, 99)\n', (131450, 131484), False, 'import numpy\n'), ((131798, 131811), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (131809, 131811), True, 'import matplotlib.pyplot as pyplot\n'), ((132151, 132164), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (132162, 132164), True, 'import matplotlib.pyplot as pyplot\n'), ((133307, 133351), 'numpy.expand_dims', 'numpy.expand_dims', (['image_matrix_norm'], {'axis': '(0)'}), '(image_matrix_norm, axis=0)\n', (133324, 133351), False, 'import numpy\n'), ((133963, 134084), 'numpy.concatenate', 'numpy.concatenate', (['(image_matrix[..., temperature_index], reconstructed_image_matrix[...,\n temperature_index])'], {'axis': '(0)'}), '((image_matrix[..., temperature_index],\n reconstructed_image_matrix[..., temperature_index]), axis=0)\n', (133980, 134084), False, 'import numpy\n'), ((134138, 134187), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(1)'], {}), '(combined_temp_matrix_kelvins, 1)\n', (134154, 134187), False, 'import numpy\n'), ((134218, 134268), 'numpy.percentile', 'numpy.percentile', (['combined_temp_matrix_kelvins', '(99)'], {}), '(combined_temp_matrix_kelvins, 99)\n', (134234, 134268), False, 'import numpy\n'), ((134582, 134595), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (134593, 134595), True, 'import matplotlib.pyplot as pyplot\n'), ((134935, 134948), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (134946, 134948), True, 'import matplotlib.pyplot as pyplot\n'), ((135969, 136016), 'numpy.reshape', 'numpy.reshape', (['feature_means', '(1, num_features)'], {}), '(feature_means, (1, num_features))\n', (135982, 136016), False, 'import numpy\n'), ((136035, 136090), 'numpy.repeat', 'numpy.repeat', (['mean_matrix'], {'repeats': 'num_examples', 'axis': '(0)'}), '(mean_matrix, repeats=num_examples, axis=0)\n', (136047, 136090), False, 'import numpy\n'), 
((136111, 136172), 'numpy.reshape', 'numpy.reshape', (['feature_standard_deviations', '(1, num_features)'], {}), '(feature_standard_deviations, (1, num_features))\n', (136124, 136172), False, 'import numpy\n'), ((136192, 136248), 'numpy.repeat', 'numpy.repeat', (['stdev_matrix'], {'repeats': 'num_examples', 'axis': '(0)'}), '(stdev_matrix, repeats=num_examples, axis=0)\n', (136204, 136248), False, 'import numpy\n'), ((137967, 138040), 'numpy.concatenate', 'numpy.concatenate', (['(baseline_feature_matrix, test_feature_matrix)'], {'axis': '(0)'}), '((baseline_feature_matrix, test_feature_matrix), axis=0)\n', (137984, 138040), False, 'import numpy\n'), ((138568, 138601), 'numpy.cumsum', 'numpy.cumsum', (['explained_variances'], {}), '(explained_variances)\n', (138580, 138601), False, 'import numpy\n'), ((139861, 139904), 'numpy.dot', 'numpy.dot', (['this_matrix', 'feature_vector_norm'], {}), '(this_matrix, feature_vector_norm)\n', (139870, 139904), False, 'import numpy\n'), ((145914, 145951), 'numpy.array', 'numpy.array', (['novel_indices'], {'dtype': 'int'}), '(novel_indices, dtype=int)\n', (145925, 145951), False, 'import numpy\n'), ((150049, 150170), 'numpy.concatenate', 'numpy.concatenate', (['(image_matrix_actual[..., temperature_index], image_matrix_upconv[...,\n temperature_index])'], {'axis': '(0)'}), '((image_matrix_actual[..., temperature_index],\n image_matrix_upconv[..., temperature_index]), axis=0)\n', (150066, 150170), False, 'import numpy\n'), ((150224, 150268), 'numpy.percentile', 'numpy.percentile', (['combined_matrix_kelvins', '(1)'], {}), '(combined_matrix_kelvins, 1)\n', (150240, 150268), False, 'import numpy\n'), ((150299, 150344), 'numpy.percentile', 'numpy.percentile', (['combined_matrix_kelvins', '(99)'], {}), '(combined_matrix_kelvins, 99)\n', (150315, 150344), False, 'import numpy\n'), ((150798, 150811), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (150809, 150811), True, 'import matplotlib.pyplot as pyplot\n'), ((151274, 151287), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (151285, 151287), True, 'import matplotlib.pyplot as pyplot\n'), ((152043, 152056), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (152054, 152056), True, 'import matplotlib.pyplot as pyplot\n'), ((153196, 153238), 'numpy.linspace', 'numpy.linspace', (['(0)', '(100)'], {'num': '(100)', 'dtype': 'int'}), '(0, 100, num=100, dtype=int)\n', (153210, 153238), False, 'import numpy\n'), ((6913, 6952), 'time.strptime', 'time.strptime', (['time_string', 'time_format'], {}), '(time_string, time_format)\n', (6926, 6952), False, 'import time\n'), ((7350, 7376), 'time.gmtime', 'time.gmtime', (['unix_time_sec'], {}), '(unix_time_sec)\n', (7361, 7376), False, 'import time\n'), ((10482, 10556), 'numpy.array', 'numpy.array', (['dataset_object.variables[this_predictor_name][:]'], {'dtype': 'float'}), '(dataset_object.variables[this_predictor_name][:], dtype=float)\n', (10493, 10556), False, 'import numpy\n'), ((10602, 10651), 'numpy.expand_dims', 'numpy.expand_dims', (['this_predictor_matrix'], {'axis': '(-1)'}), '(this_predictor_matrix, axis=-1)\n', (10619, 10651), False, 'import numpy\n'), ((18028, 18102), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)'}), '(1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n', (18043, 18102), True, 'import matplotlib.pyplot as pyplot\n'), ((19336, 19410), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(1)'], {'figsize': '(FIGURE_WIDTH_INCHES, 
FIGURE_HEIGHT_INCHES)'}), '(1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n', (19351, 19410), True, 'import matplotlib.pyplot as pyplot\n'), ((31336, 31372), 'numpy.array', 'numpy.array', (['[this_mean, this_stdev]'], {}), '([this_mean, this_stdev])\n', (31347, 31372), False, 'import numpy\n'), ((36629, 36669), 'numpy.full', 'numpy.full', (['this_num_examples', 'numpy.nan'], {}), '(this_num_examples, numpy.nan)\n', (36639, 36669), False, 'import numpy\n'), ((36822, 36885), 'numpy.concatenate', 'numpy.concatenate', (['(max_target_values, these_max_target_values)'], {}), '((max_target_values, these_max_target_values))\n', (36839, 36885), False, 'import numpy\n'), ((40324, 40377), 'numpy.exp', 'numpy.exp', (['(-1 * dense_layer_indices / e_folding_param)'], {}), '(-1 * dense_layer_indices / e_folding_param)\n', (40333, 40377), False, 'import numpy\n'), ((43266, 43288), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (43286, 43288), False, 'import keras\n'), ((44409, 44574), 'keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': 'None', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'bias_initializer': '"""zeros"""', 'kernel_regularizer': 'regularizer_object'}), "(1, activation=None, use_bias=True, kernel_initializer=\n 'glorot_uniform', bias_initializer='zeros', kernel_regularizer=\n regularizer_object)\n", (44427, 44574), False, 'import keras\n'), ((44645, 44679), 'keras.layers.Activation', 'keras.layers.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (44668, 44679), False, 'import keras\n'), ((48347, 48435), 'numpy.linspace', 'numpy.linspace', (['(0)', '(num_examples_in_memory - 1)'], {'num': 'num_examples_in_memory', 'dtype': 'int'}), '(0, num_examples_in_memory - 1, num=num_examples_in_memory,\n dtype=int)\n', (48361, 48435), False, 'import numpy\n'), ((48481, 48559), 'numpy.random.choice', 'numpy.random.choice', (['batch_indices'], {'size': 'num_examples_per_batch', 'replace': '(False)'}), '(batch_indices, size=num_examples_per_batch, replace=False)\n', (48500, 48559), False, 'import numpy\n'), ((51306, 51476), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'output_model_file_name', 'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(False)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(filepath=output_model_file_name, monitor=\n 'loss', verbose=1, save_best_only=False, save_weights_only=False, mode=\n 'min', period=1)\n", (51337, 51476), False, 'import keras\n'), ((51542, 51714), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'output_model_file_name', 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(filepath=output_model_file_name, monitor=\n 'val_loss', verbose=1, save_best_only=True, save_weights_only=False,\n mode='min', period=1)\n", (51573, 51714), False, 'import keras\n'), ((57777, 57816), 'json.dump', 'json.dump', (['new_metadata_dict', 'this_file'], {}), '(new_metadata_dict, this_file)\n', (57786, 57816), False, 'import json\n'), ((58166, 58186), 'json.load', 'json.load', (['this_file'], {}), '(this_file)\n', (58175, 58186), False, 'import json\n'), ((61230, 61338), 'numpy.linspace', 'numpy.linspace', (['this_first_index', 'this_last_index'], {'num': '(this_last_index - this_first_index + 1)', 'dtype': 'int'}), '(this_first_index, this_last_index, num=this_last_index -\n this_first_index + 
1, dtype=int)\n', (61244, 61338), False, 'import numpy\n'), ((72116, 72149), 'numpy.array', 'numpy.array', (['highest_cost_by_step'], {}), '(highest_cost_by_step)\n', (72127, 72149), False, 'import numpy\n'), ((72271, 72295), 'numpy.array', 'numpy.array', (['costs_step1'], {}), '(costs_step1)\n', (72282, 72295), False, 'import numpy\n'), ((75418, 75464), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Cost (percentage of original)"""'], {}), "('Cost (percentage of original)')\n", (75431, 75464), True, 'import matplotlib.pyplot as pyplot\n'), ((75483, 75504), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Cost"""'], {}), "('Cost')\n", (75496, 75504), True, 'import matplotlib.pyplot as pyplot\n'), ((76901, 76947), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Cost (percentage of original)"""'], {}), "('Cost (percentage of original)')\n", (76914, 76947), True, 'import matplotlib.pyplot as pyplot\n'), ((76966, 76987), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Cost"""'], {}), "('Cost')\n", (76979, 76987), True, 'import matplotlib.pyplot as pyplot\n'), ((80731, 80771), 'copy.deepcopy', 'copy.deepcopy', (['init_function_or_matrices'], {}), '(init_function_or_matrices)\n', (80744, 80771), False, 'import copy\n'), ((82444, 82469), 'numpy.round', 'numpy.round', (['target_class'], {}), '(target_class)\n', (82455, 82469), False, 'import numpy\n'), ((82496, 82523), 'numpy.round', 'numpy.round', (['num_iterations'], {}), '(num_iterations)\n', (82507, 82523), False, 'import numpy\n'), ((83172, 83244), 'keras.backend.mean', 'K.mean', (['((cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2)'], {}), '((cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2)\n', (83178, 83244), True, 'from keras import backend as K\n'), ((95491, 95516), 'numpy.round', 'numpy.round', (['target_class'], {}), '(target_class)\n', (95502, 95516), False, 'import numpy\n'), ((96073, 96145), 'keras.backend.mean', 'K.mean', (['((cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2)'], {}), '((cnn_model_object.layers[-1].output[..., target_class] - 1) ** 2)\n', (96079, 96145), True, 'from keras import backend as K\n'), ((98063, 98125), 'numpy.round', 'numpy.round', (['(1 + max_absolute_contour_level / contour_interval)'], {}), '(1 + max_absolute_contour_level / contour_interval)\n', (98074, 98125), False, 'import numpy\n'), ((99848, 99907), 'numpy.unravel_index', 'numpy.unravel_index', (['m', '(num_panel_rows, num_panel_columns)'], {}), '(m, (num_panel_rows, num_panel_columns))\n', (99867, 99907), False, 'import numpy\n'), ((101751, 101802), 'numpy.absolute', 'numpy.absolute', (['predictor_matrix[..., wind_indices]'], {}), '(predictor_matrix[..., wind_indices])\n', (101765, 101802), False, 'import numpy\n'), ((102187, 102218), 'numpy.absolute', 'numpy.absolute', (['saliency_matrix'], {}), '(saliency_matrix)\n', (102201, 102218), False, 'import numpy\n'), ((104054, 104105), 'numpy.absolute', 'numpy.absolute', (['predictor_matrix[..., wind_indices]'], {}), '(predictor_matrix[..., wind_indices])\n', (104068, 104105), False, 'import numpy\n'), ((104490, 104521), 'numpy.absolute', 'numpy.absolute', (['saliency_matrix'], {}), '(saliency_matrix)\n', (104504, 104521), False, 'import numpy\n'), ((106560, 106611), 'numpy.absolute', 'numpy.absolute', (['predictor_matrix[..., wind_indices]'], {}), '(predictor_matrix[..., wind_indices])\n', (106574, 106611), False, 'import numpy\n'), ((106996, 107027), 'numpy.absolute', 'numpy.absolute', (['saliency_matrix'], {}), '(saliency_matrix)\n', 
(107010, 107027), False, 'import numpy\n'), ((109066, 109117), 'numpy.absolute', 'numpy.absolute', (['predictor_matrix[..., wind_indices]'], {}), '(predictor_matrix[..., wind_indices])\n', (109080, 109117), False, 'import numpy\n'), ((109502, 109533), 'numpy.absolute', 'numpy.absolute', (['saliency_matrix'], {}), '(saliency_matrix)\n', (109516, 109533), False, 'import numpy\n'), ((111443, 111473), 'numpy.sum', 'numpy.sum', (['small_weight_matrix'], {}), '(small_weight_matrix)\n', (111452, 111473), False, 'import numpy\n'), ((113711, 113781), 'numpy.round', 'numpy.round', (['(num_input_features / (first_num_rows * first_num_columns))'], {}), '(num_input_features / (first_num_rows * first_num_columns))\n', (113722, 113781), False, 'import numpy\n'), ((113817, 113912), 'keras.layers.Reshape', 'keras.layers.Reshape', ([], {'target_shape': '(first_num_rows, first_num_columns, current_num_filters)'}), '(target_shape=(first_num_rows, first_num_columns,\n current_num_filters))\n', (113837, 113912), False, 'import keras\n'), ((118553, 118582), 'numpy.where', 'numpy.where', (['flattening_flags'], {}), '(flattening_flags)\n', (118564, 118582), False, 'import numpy\n'), ((119531, 119587), 'numpy.array', 'numpy.array', (['cnn_model_object.input.shape[1:]'], {'dtype': 'int'}), '(cnn_model_object.input.shape[1:], dtype=int)\n', (119542, 119587), False, 'import numpy\n'), ((122652, 122740), 'numpy.linspace', 'numpy.linspace', (['(0)', '(num_examples_in_memory - 1)'], {'num': 'num_examples_in_memory', 'dtype': 'int'}), '(0, num_examples_in_memory - 1, num=num_examples_in_memory,\n dtype=int)\n', (122666, 122740), False, 'import numpy\n'), ((122786, 122864), 'numpy.random.choice', 'numpy.random.choice', (['batch_indices'], {'size': 'num_examples_per_batch', 'replace': '(False)'}), '(batch_indices, size=num_examples_per_batch, replace=False)\n', (122805, 122864), False, 'import numpy\n'), ((125771, 125941), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'output_model_file_name', 'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(False)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(filepath=output_model_file_name, monitor=\n 'loss', verbose=1, save_best_only=False, save_weights_only=False, mode=\n 'min', period=1)\n", (125802, 125941), False, 'import keras\n'), ((126007, 126179), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'output_model_file_name', 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(filepath=output_model_file_name, monitor=\n 'val_loss', verbose=1, save_best_only=True, save_weights_only=False,\n mode='min', period=1)\n", (126038, 126179), False, 'import keras\n'), ((135748, 135782), 'numpy.mean', 'numpy.mean', (['feature_matrix'], {'axis': '(0)'}), '(feature_matrix, axis=0)\n', (135758, 135782), False, 'import numpy\n'), ((135821, 135862), 'numpy.std', 'numpy.std', (['feature_matrix'], {'axis': '(0)', 'ddof': '(1)'}), '(feature_matrix, axis=0, ddof=1)\n', (135830, 135862), False, 'import numpy\n'), ((138386, 138427), 'numpy.linalg.svd', 'numpy.linalg.svd', (['baseline_feature_matrix'], {}), '(baseline_feature_matrix)\n', (138402, 138427), False, 'import numpy\n'), ((138508, 138530), 'numpy.sum', 'numpy.sum', (['eigenvalues'], {}), '(eigenvalues)\n', (138517, 138530), False, 'import numpy\n'), ((139625, 139672), 'numpy.transpose', 'numpy.transpose', 
(['svd_dictionary[EOF_MATRIX_KEY]'], {}), '(svd_dictionary[EOF_MATRIX_KEY])\n', (139640, 139672), False, 'import numpy\n'), ((144443, 144483), 'numpy.full', 'numpy.full', (['num_test_examples', 'numpy.nan'], {}), '(num_test_examples, numpy.nan)\n', (144453, 144483), False, 'import numpy\n'), ((144518, 144566), 'numpy.full', 'numpy.full', (['test_feature_matrix.shape', 'numpy.nan'], {}), '(test_feature_matrix.shape, numpy.nan)\n', (144528, 144566), False, 'import numpy\n'), ((145038, 145065), 'numpy.nanargmax', 'numpy.nanargmax', (['svd_errors'], {}), '(svd_errors)\n', (145053, 145065), False, 'import numpy\n'), ((151414, 151468), 'numpy.absolute', 'numpy.absolute', (['novelty_matrix[..., temperature_index]'], {}), '(novelty_matrix[..., temperature_index])\n', (151428, 151468), False, 'import numpy\n'), ((151528, 151583), 'numpy.absolute', 'numpy.absolute', (['novelty_matrix[..., reflectivity_index]'], {}), '(novelty_matrix[..., reflectivity_index])\n', (151542, 151583), False, 'import numpy\n'), ((153068, 153113), 'numpy.argsort', 'numpy.argsort', (['(-1 * max_target_by_example_s01)'], {}), '(-1 * max_target_by_example_s01)\n', (153081, 153113), False, 'import numpy\n'), ((9058, 9167), 'numpy.logical_and', 'numpy.logical_and', (['(file_times_unix_sec >= first_time_unix_sec)', '(file_times_unix_sec <= last_time_unix_sec)'], {}), '(file_times_unix_sec >= first_time_unix_sec, \n file_times_unix_sec <= last_time_unix_sec)\n', (9075, 9167), False, 'import numpy\n'), ((10806, 10875), 'numpy.concatenate', 'numpy.concatenate', (['(predictor_matrix, this_predictor_matrix)'], {'axis': '(-1)'}), '((predictor_matrix, this_predictor_matrix), axis=-1)\n', (10823, 10875), False, 'import numpy\n'), ((11861, 11891), 'copy.deepcopy', 'copy.deepcopy', (['this_image_dict'], {}), '(this_image_dict)\n', (11874, 11891), False, 'import copy\n'), ((11989, 12065), 'numpy.concatenate', 'numpy.concatenate', (['(image_dict[this_key], this_image_dict[this_key])'], {'axis': '(0)'}), '((image_dict[this_key], this_image_dict[this_key]), axis=0)\n', (12006, 12065), False, 'import numpy\n'), ((19725, 19756), 'numpy.diff', 'numpy.diff', (['x_coords_unique[:2]'], {}), '(x_coords_unique[:2])\n', (19735, 19756), False, 'import numpy\n'), ((19945, 19976), 'numpy.diff', 'numpy.diff', (['y_coords_unique[:2]'], {}), '(y_coords_unique[:2])\n', (19955, 19976), False, 'import numpy\n'), ((24245, 24271), 'numpy.sqrt', 'numpy.sqrt', (['num_predictors'], {}), '(num_predictors)\n', (24255, 24271), False, 'import numpy\n'), ((26757, 26804), 'numpy.percentile', 'numpy.percentile', (['temperature_matrix_kelvins', '(1)'], {}), '(temperature_matrix_kelvins, 1)\n', (26773, 26804), False, 'import numpy\n'), ((26838, 26886), 'numpy.percentile', 'numpy.percentile', (['temperature_matrix_kelvins', '(99)'], {}), '(temperature_matrix_kelvins, 99)\n', (26854, 26886), False, 'import numpy\n'), ((27244, 27275), 'numpy.argmax', 'numpy.argmax', (['target_matrix_s01'], {}), '(target_matrix_s01)\n', (27256, 27275), False, 'import numpy\n'), ((27731, 27778), 'numpy.percentile', 'numpy.percentile', (['temperature_matrix_kelvins', '(1)'], {}), '(temperature_matrix_kelvins, 1)\n', (27747, 27778), False, 'import numpy\n'), ((27812, 27860), 'numpy.percentile', 'numpy.percentile', (['temperature_matrix_kelvins', '(99)'], {}), '(temperature_matrix_kelvins, 99)\n', (27828, 27860), False, 'import numpy\n'), ((28916, 28938), 'numpy.mean', 'numpy.mean', (['new_values'], {}), '(new_values)\n', (28926, 28938), False, 'import numpy\n'), ((29277, 29304), 'numpy.mean', 
'numpy.mean', (['(new_values ** 2)'], {}), '(new_values ** 2)\n', (29287, 29304), False, 'import numpy\n'), ((33045, 33081), 'numpy.mean', 'numpy.mean', (['predictor_matrix[..., m]'], {}), '(predictor_matrix[..., m])\n', (33055, 33081), False, 'import numpy\n'), ((33107, 33150), 'numpy.std', 'numpy.std', (['predictor_matrix[..., m]'], {'ddof': '(1)'}), '(predictor_matrix[..., m], ddof=1)\n', (33116, 33150), False, 'import numpy\n'), ((33205, 33241), 'numpy.array', 'numpy.array', (['[this_mean, this_stdev]'], {}), '([this_mean, this_stdev])\n', (33216, 33241), False, 'import numpy\n'), ((36755, 36792), 'numpy.max', 'numpy.max', (['this_target_matrix[i, ...]'], {}), '(this_target_matrix[i, ...])\n', (36764, 36792), False, 'import numpy\n'), ((38281, 38313), 'numpy.max', 'numpy.max', (['target_matrix[i, ...]'], {}), '(target_matrix[i, ...])\n', (38290, 38313), False, 'import numpy\n'), ((38725, 38773), 'numpy.max', 'numpy.max', (['image_dict[TARGET_MATRIX_KEY][i, ...]'], {}), '(image_dict[TARGET_MATRIX_KEY][i, ...])\n', (38734, 38773), False, 'import numpy\n'), ((40413, 40445), 'numpy.round', 'numpy.round', (['num_inputs_by_layer'], {}), '(num_inputs_by_layer)\n', (40424, 40445), False, 'import numpy\n'), ((40547, 40589), 'numpy.array', 'numpy.array', (['[num_output_units]'], {'dtype': 'int'}), '([num_output_units], dtype=int)\n', (40558, 40589), False, 'import numpy\n'), ((42846, 43025), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS)', 'strides': '(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS)', 'padding': '"""valid"""', 'data_format': '"""channels_last"""'}), "(pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS), padding='valid',\n data_format='channels_last')\n", (42871, 43025), False, 'import keras\n'), ((43584, 43776), 'keras.layers.Dense', 'keras.layers.Dense', (['num_outputs_by_dense_layer[k]'], {'activation': 'None', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'bias_initializer': '"""zeros"""', 'kernel_regularizer': 'regularizer_object'}), "(num_outputs_by_dense_layer[k], activation=None, use_bias\n =True, kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object)\n", (43602, 43776), False, 'import keras\n'), ((43868, 43912), 'keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': 'SLOPE_FOR_RELU'}), '(alpha=SLOPE_FOR_RELU)\n', (43890, 43912), False, 'import keras\n'), ((44823, 44878), 'keras.layers.Dropout', 'keras.layers.Dropout', ([], {'rate': 'DENSE_LAYER_DROPOUT_FRACTION'}), '(rate=DENSE_LAYER_DROPOUT_FRACTION)\n', (44843, 44878), False, 'import keras\n'), ((45175, 45198), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (45196, 45198), False, 'import keras\n'), ((57255, 57292), 'numpy.array', 'numpy.array', (['this_norm_dict[this_key]'], {}), '(this_norm_dict[this_key])\n', (57266, 57292), False, 'import numpy\n'), ((61736, 61796), 'numpy.concatenate', 'numpy.concatenate', (['(output_array, this_output_array)'], {'axis': '(0)'}), '((output_array, this_output_array), axis=0)\n', (61753, 61796), False, 'import numpy\n'), ((74754, 74799), 'numpy.array', 'numpy.array', (['[result_dict[ORIGINAL_COST_KEY]]'], {}), '([result_dict[ORIGINAL_COST_KEY]])\n', (74765, 74799), False, 'import numpy\n'), ((76178, 76223), 'numpy.array', 'numpy.array', (['[result_dict[ORIGINAL_COST_KEY]]'], {}), '([result_dict[ORIGINAL_COST_KEY]])\n', (76189, 76223), False, 'import numpy\n'), ((80454, 
80465), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (80463, 80465), True, 'from keras import backend as K\n'), ((81321, 81338), 'numpy.mod', 'numpy.mod', (['j', '(100)'], {}), '(j, 100)\n', (81330, 81338), False, 'import numpy\n'), ((82872, 82933), 'keras.backend.mean', 'K.mean', (['((cnn_model_object.layers[-1].output[..., 0] - 1) ** 2)'], {}), '((cnn_model_object.layers[-1].output[..., 0] - 1) ** 2)\n', (82878, 82933), True, 'from keras import backend as K\n'), ((83004, 83059), 'keras.backend.mean', 'K.mean', (['(cnn_model_object.layers[-1].output[..., 0] ** 2)'], {}), '(cnn_model_object.layers[-1].output[..., 0] ** 2)\n', (83010, 83059), True, 'from keras import backend as K\n'), ((88685, 88716), 'numpy.argmax', 'numpy.argmax', (['target_matrix_s01'], {}), '(target_matrix_s01)\n', (88697, 88716), False, 'import numpy\n'), ((91219, 91250), 'numpy.argmax', 'numpy.argmax', (['target_matrix_s01'], {}), '(target_matrix_s01)\n', (91231, 91250), False, 'import numpy\n'), ((94596, 94630), 'keras.backend.std', 'K.std', (['list_of_gradient_tensors[i]'], {}), '(list_of_gradient_tensors[i])\n', (94601, 94630), True, 'from keras import backend as K\n'), ((94632, 94643), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (94641, 94643), True, 'from keras import backend as K\n'), ((95773, 95834), 'keras.backend.mean', 'K.mean', (['((cnn_model_object.layers[-1].output[..., 0] - 1) ** 2)'], {}), '((cnn_model_object.layers[-1].output[..., 0] - 1) ** 2)\n', (95779, 95834), True, 'from keras import backend as K\n'), ((95905, 95960), 'keras.backend.mean', 'K.mean', (['(cnn_model_object.layers[-1].output[..., 0] ** 2)'], {}), '(cnn_model_object.layers[-1].output[..., 0] ** 2)\n', (95911, 95960), True, 'from keras import backend as K\n'), ((97639, 97670), 'numpy.diff', 'numpy.diff', (['x_coords_unique[:2]'], {}), '(x_coords_unique[:2])\n', (97649, 97670), False, 'import numpy\n'), ((97859, 97890), 'numpy.diff', 'numpy.diff', (['y_coords_unique[:2]'], {}), '(y_coords_unique[:2])\n', (97869, 97890), False, 'import numpy\n'), ((98426, 98457), 'numpy.min', 'numpy.min', (['these_contour_levels'], {}), '(these_contour_levels)\n', (98435, 98457), False, 'import numpy\n'), ((98472, 98503), 'numpy.max', 'numpy.max', (['these_contour_levels'], {}), '(these_contour_levels)\n', (98481, 98503), False, 'import numpy\n'), ((98800, 98831), 'numpy.min', 'numpy.min', (['these_contour_levels'], {}), '(these_contour_levels)\n', (98809, 98831), False, 'import numpy\n'), ((98846, 98877), 'numpy.max', 'numpy.max', (['these_contour_levels'], {}), '(these_contour_levels)\n', (98855, 98877), False, 'import numpy\n'), ((105443, 105474), 'numpy.argmax', 'numpy.argmax', (['target_matrix_s01'], {}), '(target_matrix_s01)\n', (105455, 105474), False, 'import numpy\n'), ((107949, 107980), 'numpy.argmax', 'numpy.argmax', (['target_matrix_s01'], {}), '(target_matrix_s01)\n', (107961, 107980), False, 'import numpy\n'), ((113340, 113388), 'numpy.round', 'numpy.round', (['((NUM_SMOOTHING_FILTER_ROWS - 1) / 2)'], {}), '((NUM_SMOOTHING_FILTER_ROWS - 1) / 2)\n', (113351, 113388), False, 'import numpy\n'), ((113453, 113504), 'numpy.round', 'numpy.round', (['((NUM_SMOOTHING_FILTER_COLUMNS - 1) / 2)'], {}), '((NUM_SMOOTHING_FILTER_COLUMNS - 1) / 2)\n', (113464, 113504), False, 'import numpy\n'), ((116670, 116702), 'numpy.zeros', 'numpy.zeros', (['current_num_filters'], {}), '(current_num_filters)\n', (116681, 116702), False, 'import numpy\n'), ((117872, 117895), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', 
(117893, 117895), False, 'import keras\n'), ((132889, 132920), 'numpy.argmax', 'numpy.argmax', (['target_matrix_s01'], {}), '(target_matrix_s01)\n', (132901, 132920), False, 'import numpy\n'), ((138986, 139013), 'numpy.transpose', 'numpy.transpose', (['eof_matrix'], {}), '(eof_matrix)\n', (139001, 139013), False, 'import numpy\n'), ((143839, 143876), 'numpy.array', 'numpy.array', (['novel_indices'], {'dtype': 'int'}), '(novel_indices, dtype=int)\n', (143850, 143876), False, 'import numpy\n'), ((143920, 144024), 'numpy.concatenate', 'numpy.concatenate', (['(baseline_feature_matrix, test_feature_matrix[novel_indices_numpy, ...])'], {'axis': '(0)'}), '((baseline_feature_matrix, test_feature_matrix[\n novel_indices_numpy, ...]), axis=0)\n', (143937, 144024), False, 'import numpy\n'), ((144110, 144176), 'numpy.delete', 'numpy.delete', (['test_feature_matrix'], {'obj': 'novel_indices_numpy', 'axis': '(0)'}), '(test_feature_matrix, obj=novel_indices_numpy, axis=0)\n', (144122, 144176), False, 'import numpy\n'), ((144900, 144985), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(test_feature_matrix_svd[i, ...] - test_feature_matrix[i, ...])'], {}), '(test_feature_matrix_svd[i, ...] - test_feature_matrix[i, ...]\n )\n', (144917, 144985), False, 'import numpy\n'), ((145631, 145710), 'numpy.concatenate', 'numpy.concatenate', (['(novel_image_matrix_upconv, new_image_matrix_upconv)'], {'axis': '(0)'}), '((novel_image_matrix_upconv, new_image_matrix_upconv), axis=0)\n', (145648, 145710), False, 'import numpy\n'), ((145772, 145863), 'numpy.concatenate', 'numpy.concatenate', (['(novel_image_matrix_upconv_svd, new_image_matrix_upconv_svd)'], {'axis': '(0)'}), '((novel_image_matrix_upconv_svd,\n new_image_matrix_upconv_svd), axis=0)\n', (145789, 145863), False, 'import numpy\n'), ((152975, 153011), 'numpy.max', 'numpy.max', (['target_matrix_s01[i, ...]'], {}), '(target_matrix_s01[i, ...])\n', (152984, 153011), False, 'import numpy\n'), ((41801, 42147), 'keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': 'current_num_filters', 'kernel_size': '(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS)', 'strides': '(1, 1)', 'padding': '"""valid"""', 'data_format': '"""channels_last"""', 'dilation_rate': '(1, 1)', 'activation': 'None', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'bias_initializer': '"""zeros"""', 'kernel_regularizer': 'regularizer_object'}), "(filters=current_num_filters, kernel_size=(\n NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS), strides=(1, 1), padding\n ='valid', data_format='channels_last', dilation_rate=(1, 1), activation\n =None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=regularizer_object)\n", (41820, 42147), False, 'import keras\n'), ((42300, 42344), 'keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': 'SLOPE_FOR_RELU'}), '(alpha=SLOPE_FOR_RELU)\n', (42322, 42344), False, 'import keras\n'), ((44046, 44101), 'keras.layers.Dropout', 'keras.layers.Dropout', ([], {'rate': 'DENSE_LAYER_DROPOUT_FRACTION'}), '(rate=DENSE_LAYER_DROPOUT_FRACTION)\n', (44066, 44101), False, 'import keras\n'), ((44226, 44291), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': '(-1)', 'center': '(True)', 'scale': '(True)'}), '(axis=-1, center=True, scale=True)\n', (44257, 44291), False, 'import keras\n'), ((47941, 48035), 'numpy.concatenate', 'numpy.concatenate', (['(full_predictor_matrix, this_image_dict[PREDICTOR_MATRIX_KEY])'], {'axis': '(0)'}), '((full_predictor_matrix, 
this_image_dict[\n PREDICTOR_MATRIX_KEY]), axis=0)\n', (47958, 48035), False, 'import numpy\n'), ((48131, 48218), 'numpy.concatenate', 'numpy.concatenate', (['(full_target_matrix, this_image_dict[TARGET_MATRIX_KEY])'], {'axis': '(0)'}), '((full_target_matrix, this_image_dict[TARGET_MATRIX_KEY]),\n axis=0)\n', (48148, 48218), False, 'import numpy\n'), ((49111, 49136), 'numpy.mean', 'numpy.mean', (['target_values'], {}), '(target_values)\n', (49121, 49136), False, 'import numpy\n'), ((70499, 70576), 'numpy.random.permutation', 'numpy.random.permutation', (['this_predictor_matrix[i, ..., this_predictor_index]'], {}), '(this_predictor_matrix[i, ..., this_predictor_index])\n', (70523, 70576), False, 'import numpy\n'), ((80399, 80439), 'keras.backend.mean', 'K.mean', (['(list_of_gradient_tensors[i] ** 2)'], {}), '(list_of_gradient_tensors[i] ** 2)\n', (80405, 80439), True, 'from keras import backend as K\n'), ((80557, 80575), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (80573, 80575), True, 'from keras import backend as K\n'), ((94735, 94753), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (94751, 94753), True, 'from keras import backend as K\n'), ((114504, 114914), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', ([], {'filters': 'current_num_filters', 'kernel_size': '(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS)', 'strides': '(this_upsampling_factor, this_upsampling_factor)', 'padding': 'this_padding_arg', 'data_format': '"""channels_last"""', 'dilation_rate': '(1, 1)', 'activation': 'None', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'bias_initializer': '"""zeros"""', 'kernel_regularizer': 'regularizer_object'}), "(filters=current_num_filters, kernel_size=(\n NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS), strides=(\n this_upsampling_factor, this_upsampling_factor), padding=\n this_padding_arg, data_format='channels_last', dilation_rate=(1, 1),\n activation=None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=regularizer_object)\n", (114532, 114914), False, 'import keras\n'), ((115646, 115991), 'keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': 'current_num_filters', 'kernel_size': '(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS)', 'strides': '(1, 1)', 'padding': '"""same"""', 'data_format': '"""channels_last"""', 'dilation_rate': '(1, 1)', 'activation': 'None', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'bias_initializer': '"""zeros"""', 'kernel_regularizer': 'regularizer_object'}), "(filters=current_num_filters, kernel_size=(\n NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS), strides=(1, 1), padding\n ='same', data_format='channels_last', dilation_rate=(1, 1), activation=\n None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=regularizer_object)\n", (115665, 115991), False, 'import keras\n'), ((116731, 117154), 'keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': 'current_num_filters', 'kernel_size': '(NUM_SMOOTHING_FILTER_ROWS, NUM_SMOOTHING_FILTER_COLUMNS)', 'strides': '(1, 1)', 'padding': '"""same"""', 'data_format': '"""channels_last"""', 'dilation_rate': '(1, 1)', 'activation': 'None', 'use_bias': '(True)', 'kernel_initializer': '"""glorot_uniform"""', 'bias_initializer': '"""zeros"""', 'kernel_regularizer': 'regularizer_object', 'trainable': '(False)', 'weights': '[this_weight_matrix, this_bias_vector]'}), "(filters=current_num_filters, 
kernel_size=(\n NUM_SMOOTHING_FILTER_ROWS, NUM_SMOOTHING_FILTER_COLUMNS), strides=(1, 1\n ), padding='same', data_format='channels_last', dilation_rate=(1, 1),\n activation=None, use_bias=True, kernel_initializer='glorot_uniform',\n bias_initializer='zeros', kernel_regularizer=regularizer_object,\n trainable=False, weights=[this_weight_matrix, this_bias_vector])\n", (116750, 117154), False, 'import keras\n'), ((117398, 117442), 'keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {'alpha': 'SLOPE_FOR_RELU'}), '(alpha=SLOPE_FOR_RELU)\n', (117420, 117442), False, 'import keras\n'), ((117575, 117640), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': '(-1)', 'center': '(True)', 'scale': '(True)'}), '(axis=-1, center=True, scale=True)\n', (117606, 117640), False, 'import keras\n'), ((122433, 122524), 'numpy.concatenate', 'numpy.concatenate', (['(full_target_matrix, this_image_dict[PREDICTOR_MATRIX_KEY])'], {'axis': '(0)'}), '((full_target_matrix, this_image_dict[PREDICTOR_MATRIX_KEY\n ]), axis=0)\n', (122450, 122524), False, 'import numpy\n'), ((138698, 138773), 'numpy.where', 'numpy.where', (['(cumulative_explained_variances >= fraction_of_variance_to_keep)'], {}), '(cumulative_explained_variances >= fraction_of_variance_to_keep)\n', (138709, 138773), False, 'import numpy\n'), ((42493, 42547), 'keras.layers.Dropout', 'keras.layers.Dropout', ([], {'rate': 'CONV_LAYER_DROPOUT_FRACTION'}), '(rate=CONV_LAYER_DROPOUT_FRACTION)\n', (42513, 42547), False, 'import keras\n'), ((42688, 42753), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': '(-1)', 'center': '(True)', 'scale': '(True)'}), '(axis=-1, center=True, scale=True)\n', (42719, 42753), False, 'import keras\n'), ((66260, 66294), 'numpy.log2', 'numpy.log2', (['forecast_probabilities'], {}), '(forecast_probabilities)\n', (66270, 66294), False, 'import numpy\n'), ((66327, 66365), 'numpy.log2', 'numpy.log2', (['(1 - forecast_probabilities)'], {}), '(1 - forecast_probabilities)\n', (66337, 66365), False, 'import numpy\n'), ((114259, 114295), 'numpy.round', 'numpy.round', (['(current_num_filters / 2)'], {}), '(current_num_filters / 2)\n', (114270, 114295), False, 'import numpy\n'), ((116173, 116244), 'keras.layers.ZeroPadding2D', 'keras.layers.ZeroPadding2D', ([], {'padding': '(1, 1)', 'data_format': '"""channels_last"""'}), "(padding=(1, 1), data_format='channels_last')\n", (116199, 116244), False, 'import keras\n'), ((115146, 115289), 'keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(this_upsampling_factor, this_upsampling_factor)', 'data_format': '"""channels_last"""', 'interpolation': '"""nearest"""'}), "(size=(this_upsampling_factor,\n this_upsampling_factor), data_format='channels_last', interpolation=\n 'nearest')\n", (115171, 115289), False, 'import keras\n'), ((115424, 115537), 'keras.layers.UpSampling2D', 'keras.layers.UpSampling2D', ([], {'size': '(this_upsampling_factor, this_upsampling_factor)', 'data_format': '"""channels_last"""'}), "(size=(this_upsampling_factor,\n this_upsampling_factor), data_format='channels_last')\n", (115449, 115537), False, 'import keras\n')]
|
# libraries
import numpy as np
from bio_embeddings.embed import ProtTransT5BFDEmbedder
import pandas as pd
embedder = ProtTransT5BFDEmbedder()
ds = pd.read_csv('Sequences_Predict.csv')
sequences_Example = list(ds["Sequence"])
num_seq = len(sequences_Example)
i = 0
length = 1000
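# embed the sequences in chunks of `length` sequences, writing each chunk to its own compressed .npz file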
while i < num_seq:
print("Doing", i, num_seq)
start = i
end = i + length
sequences = sequences_Example[start:end]
embeddings = []
for seq in sequences:
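        # mean-pool the per-residue embeddings so each sequence is represented by a single fixed-size vector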
embeddings.append(np.mean(np.asarray(embedder.embed(seq)), axis=0))
s_no = start / length
filename = 'Embeddings/' + 'T5_' + str(s_no) + '.npz'
embeddings = np.asarray(embeddings)
# print(embeddings.shape)
np.savez_compressed(filename, embeddings)
i += length
|
[
"pandas.read_csv",
"numpy.asarray",
"numpy.savez_compressed",
"bio_embeddings.embed.ProtTransT5BFDEmbedder"
] |
[((120, 144), 'bio_embeddings.embed.ProtTransT5BFDEmbedder', 'ProtTransT5BFDEmbedder', ([], {}), '()\n', (142, 144), False, 'from bio_embeddings.embed import ProtTransT5BFDEmbedder\n'), ((151, 187), 'pandas.read_csv', 'pd.read_csv', (['"""Sequences_Predict.csv"""'], {}), "('Sequences_Predict.csv')\n", (162, 187), True, 'import pandas as pd\n'), ((608, 630), 'numpy.asarray', 'np.asarray', (['embeddings'], {}), '(embeddings)\n', (618, 630), True, 'import numpy as np\n'), ((659, 700), 'numpy.savez_compressed', 'np.savez_compressed', (['filename', 'embeddings'], {}), '(filename, embeddings)\n', (678, 700), True, 'import numpy as np\n')]
|
from math import isclose
import numpy as np
from scipy.spatial.transform.rotation import Rotation
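# note: `scipy.spatial.transform.rotation` is a private submodule; on newer SciPy releases the
# public import path `from scipy.spatial.transform import Rotation` may be required instead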
from scipy.spatial.distance import cityblock
MIN_OVERLAPPING_BEACONS = 12
scanner_positions = []
class Beacon:
def __init__(self, x: int, y: int, z: int):
self.pos = np.array([x, y, z])
class Scanner:
def __init__(self, index: int):
self.index = index
self.beacons = None
self.differences = None
def set_beacons(self, beacons: list[Beacon]):
self.beacons = beacons
self.differences: list[list[float]] = []
for i in range(len(beacons)):
row = []
for j in range(i):
difference = np.linalg.norm(np.array(beacons[i].pos) - np.array(beacons[j].pos))
row.append(difference)
self.differences.append(row)
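# Pairwise distances between beacons are preserved by rotation and translation, so matching
# distances between two scanners' beacon sets reveal candidate overlapping beacons.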
def get_max_overlaps(scanner1: Scanner, scanner2: Scanner):
num_matching_differences = 0
indices_1 = set()
indices_2 = set()
for i1 in range(len(scanner1.beacons)): # check if there are at least 12 euclidean distance matches
for j1 in range(i1):
for i2 in range(len(scanner2.beacons)):
for j2 in range(i2):
if isclose(scanner1.differences[i1][j1], scanner2.differences[i2][j2]):
indices_1.add(i1)
indices_1.add(j1)
indices_2.add(i2)
indices_2.add(j2)
num_matching_differences += 1
return num_matching_differences, indices_1, indices_2
def is_close(one: list[float], other: list[float]):
value = True
for i in range(len(one)):
if not isclose(one[i], other[i]):
value = False
return value
def get_possible_beacon_rotations(beacons: list[Beacon]):
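    # build 4*4*4 = 64 rotated copies of the beacon list (90-degree steps about each axis;
    # the 24 distinct orientations appear with duplicates)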
rotations = []
for x_degrees in range(0, 360, 90):
for y_degrees in range(0, 360, 90):
for z_degrees in range(0, 360, 90):
rotation = []
for beacon in beacons:
rotation.append(rotate_point(beacon.pos, x_degrees, y_degrees, z_degrees))
rotations.append(rotation)
return rotations
def rotate_point(point: np.array, x_degrees: int, y_degrees: int, z_degrees: int):
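    # rotate a single point about the x, then y, then z axis by the given angles in degrees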
x_radians = np.radians(x_degrees)
rotation_axis = np.array([1, 0, 0])
rotation_vector = x_radians * rotation_axis
rotation = Rotation.from_rotvec(rotation_vector)
rotated_point = rotation.apply(point)
y_radians = np.radians(y_degrees)
rotation_axis = np.array([0, 1, 0])
rotation_vector = y_radians * rotation_axis
rotation = Rotation.from_rotvec(rotation_vector)
rotated_point = rotation.apply(rotated_point)
z_radians = np.radians(z_degrees)
rotation_axis = np.array([0, 0, 1])
rotation_vector = z_radians * rotation_axis
rotation = Rotation.from_rotvec(rotation_vector)
rotated_point = rotation.apply(rotated_point)
return np.array(rotated_point)
def get_position_pair(positions_1, positions_2, diff):
for position_1 in positions_1:
for position_2 in positions_2:
if isclose(np.linalg.norm(position_1 - position_2), diff):
return position_1, position_2
def get_new_beacon_positions(unified_scanner, indices_1, new_scanner):
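    # for each candidate rotation, count unified/rotated beacon pairs separated by (nearly) the same
    # distance; if at least 12 agree, one such pair yields the translation vector (recorded as the
    # scanner position) and the translated beacons not already in the unified map are returned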
global scanner_positions
relevant_unified_beacons = \
[beacon for idx, beacon in enumerate(unified_scanner.beacons) if idx in indices_1]
possible_rotations = get_possible_beacon_rotations(new_scanner.beacons)
for possible_rotation in possible_rotations:
diffs = {}
for relevant_unified_beacon in relevant_unified_beacons:
for beacon_pos in possible_rotation:
diff = np.linalg.norm(relevant_unified_beacon.pos - beacon_pos)
key_found = False
for key in diffs.keys():
if isclose(diff, key):
diffs[key] += 1
key_found = True
if not key_found:
diffs[diff] = 1
for diff in diffs.keys():
matching_diff_found = False
if not matching_diff_found and diffs[diff] >= 12:
matching_diff_found = True
unified_positions = [beacon.pos for beacon in relevant_unified_beacons]
position_pair = get_position_pair(unified_positions, possible_rotation, diff)
translation_vector = position_pair[0] - position_pair[1]
scanner_positions.append(np.array([translation_vector]))
for i in range(len(possible_rotation)):
possible_rotation[i] = possible_rotation[i] + translation_vector
new_beacon_positions = []
for beacon_pos in possible_rotation:
already_exists = False
for relevant_unified_beacon in relevant_unified_beacons:
if is_close(beacon_pos, relevant_unified_beacon.pos):
already_exists = True
if not already_exists:
new_beacon_positions.append(beacon_pos)
return new_beacon_positions
def main():
global scanner_positions
lines = open('input.txt', 'r').readlines()
scanners = []
scanner = None
beacons = []
same_distance_threshold = (MIN_OVERLAPPING_BEACONS * (MIN_OVERLAPPING_BEACONS - 1)) / 2
for line in lines:
if 'scanner' in line:
index = int(line.strip()[len('--- scanner '):-len(' ---')])
if scanner is not None:
scanner.set_beacons(beacons)
scanners.append(scanner)
scanner = Scanner(index)
beacons = []
else:
comma_split = line.strip().split(',')
if len(comma_split) == 3:
x = int(comma_split[0])
y = int(comma_split[1])
z = int(comma_split[2])
beacon = Beacon(x, y, z)
beacons.append(beacon)
scanner.set_beacons(beacons)
scanners.append(scanner)
unified_scanner = scanners[0]
scanner_positions.append(np.array([0, 0, 0]))
scanners.remove(scanners[0])
while len(scanners) > 0:
print(f'{len(scanners)} scanners yet to be unified')
step_completed = False
for j in range(len(scanners)):
if j < len(scanners):
if not step_completed:
num_overlapping_beacons, indices_1, indices_2 = get_max_overlaps(unified_scanner, scanners[j])
if num_overlapping_beacons >= same_distance_threshold / 2:
new_beacon_positions = get_new_beacon_positions(unified_scanner, indices_1, scanners[j])
if new_beacon_positions is not None:
unified_beacons = unified_scanner.beacons
for pos in new_beacon_positions:
unified_beacons.append(Beacon(pos[0], pos[1], pos[2]))
unified_scanner.set_beacons(unified_beacons)
scanners.remove(scanners[j])
step_completed = True
print(f'Unified scanner and scanner at index {j} overlap - added to unified map')
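    # part 2: report the largest Manhattan (cityblock) distance between any two recovered scanner positions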
max_distance = 0
for i in range(len(scanner_positions)):
for j in range(len(scanner_positions)):
distance = cityblock(scanner_positions[i], scanner_positions[j])
if distance > max_distance:
max_distance = distance
print('Max distance between scanners:', round(max_distance))
if __name__ == '__main__':
main()
|
[
"numpy.radians",
"scipy.spatial.distance.cityblock",
"numpy.array",
"math.isclose",
"numpy.linalg.norm",
"scipy.spatial.transform.rotation.Rotation.from_rotvec"
] |
[((2299, 2320), 'numpy.radians', 'np.radians', (['x_degrees'], {}), '(x_degrees)\n', (2309, 2320), True, 'import numpy as np\n'), ((2341, 2360), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2349, 2360), True, 'import numpy as np\n'), ((2424, 2461), 'scipy.spatial.transform.rotation.Rotation.from_rotvec', 'Rotation.from_rotvec', (['rotation_vector'], {}), '(rotation_vector)\n', (2444, 2461), False, 'from scipy.spatial.transform.rotation import Rotation\n'), ((2521, 2542), 'numpy.radians', 'np.radians', (['y_degrees'], {}), '(y_degrees)\n', (2531, 2542), True, 'import numpy as np\n'), ((2563, 2582), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (2571, 2582), True, 'import numpy as np\n'), ((2646, 2683), 'scipy.spatial.transform.rotation.Rotation.from_rotvec', 'Rotation.from_rotvec', (['rotation_vector'], {}), '(rotation_vector)\n', (2666, 2683), False, 'from scipy.spatial.transform.rotation import Rotation\n'), ((2751, 2772), 'numpy.radians', 'np.radians', (['z_degrees'], {}), '(z_degrees)\n', (2761, 2772), True, 'import numpy as np\n'), ((2793, 2812), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2801, 2812), True, 'import numpy as np\n'), ((2876, 2913), 'scipy.spatial.transform.rotation.Rotation.from_rotvec', 'Rotation.from_rotvec', (['rotation_vector'], {}), '(rotation_vector)\n', (2896, 2913), False, 'from scipy.spatial.transform.rotation import Rotation\n'), ((2976, 2999), 'numpy.array', 'np.array', (['rotated_point'], {}), '(rotated_point)\n', (2984, 2999), True, 'import numpy as np\n'), ((281, 300), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (289, 300), True, 'import numpy as np\n'), ((6195, 6214), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6203, 6214), True, 'import numpy as np\n'), ((1689, 1714), 'math.isclose', 'isclose', (['one[i]', 'other[i]'], {}), '(one[i], other[i])\n', (1696, 1714), False, 'from math import isclose\n'), ((7494, 7547), 'scipy.spatial.distance.cityblock', 'cityblock', (['scanner_positions[i]', 'scanner_positions[j]'], {}), '(scanner_positions[i], scanner_positions[j])\n', (7503, 7547), False, 'from scipy.spatial.distance import cityblock\n'), ((3154, 3193), 'numpy.linalg.norm', 'np.linalg.norm', (['(position_1 - position_2)'], {}), '(position_1 - position_2)\n', (3168, 3193), True, 'import numpy as np\n'), ((3755, 3811), 'numpy.linalg.norm', 'np.linalg.norm', (['(relevant_unified_beacon.pos - beacon_pos)'], {}), '(relevant_unified_beacon.pos - beacon_pos)\n', (3769, 3811), True, 'import numpy as np\n'), ((1224, 1291), 'math.isclose', 'isclose', (['scanner1.differences[i1][j1]', 'scanner2.differences[i2][j2]'], {}), '(scanner1.differences[i1][j1], scanner2.differences[i2][j2])\n', (1231, 1291), False, 'from math import isclose\n'), ((3910, 3928), 'math.isclose', 'isclose', (['diff', 'key'], {}), '(diff, key)\n', (3917, 3928), False, 'from math import isclose\n'), ((4556, 4586), 'numpy.array', 'np.array', (['[translation_vector]'], {}), '([translation_vector])\n', (4564, 4586), True, 'import numpy as np\n'), ((706, 730), 'numpy.array', 'np.array', (['beacons[i].pos'], {}), '(beacons[i].pos)\n', (714, 730), True, 'import numpy as np\n'), ((733, 757), 'numpy.array', 'np.array', (['beacons[j].pos'], {}), '(beacons[j].pos)\n', (741, 757), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
__author__ = 'Jinkey'
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils import *
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
np.random.seed(1)
class DeepClassifier:
def __init__(self):
self.train_x, self.train_y, self.test_x, self.test_y, self.classes = load_data()
self.sample_amount = self.train_x.shape[0]
self.test_amount = self.test_x.shape[0]
self.model = Sequential()
self.parameters = {}
self.is_trained = False
def show_data_info(self):
print ("Number of training examples: " + str(self.sample_amount))
print ("Number of testing examples: " + str(self.test_amount))
print ("Each image is of size: (" + str(self.train_x.shape[1]) + ", " + str(self.train_x.shape[1]) + ", 3)")
print ("train_x_orig shape: " + str(self.train_x.shape))
print ("train_y shape: " + str(self.train_y.shape))
print ("test_x_orig shape: " + str(self.test_x.shape))
print ("test_y shape: " + str(self.test_y.shape))
return self
def flattern_x(self):
self.train_x = self.train_x.reshape(self.sample_amount, -1).T
self.test_x = self.test_x.reshape(self.test_amount, -1).T
assert self.train_x.shape == (12288, self.sample_amount)
assert self.test_x.shape == (12288, self.test_amount)
return self
def standardize_x(self):
self.train_x = self.train_x / 255.0
self.test_x = self.test_x / 255.0
return self
def L_layer_model(self, learning_rate=0.0075, num_iterations=3000): # lr was 0.009
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization.
### START CODE HERE ###
layers_dims = [12288, 20, 7, 5, 1]
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(self.train_x, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, self.train_y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, self.train_y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
            parameters = update_parameters(parameters, grads, learning_rate)
            self.parameters = parameters
### END CODE HERE ###
# Print the cost every 100 training example
if i % 100 == 0:
costs.append(cost)
print ("Cost after iteration %i: %f" % (i, cost))
self.is_trained = True
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return self
def save_model(self):
f = h5py.File("iscat-deep.h5", "w")
f.create_dataset("layers", data=5)
for key, value in self.parameters.items():
f.create_dataset(key, data=value)
def load_model(self):
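        # restore the weight matrices (W1..W4) and bias vectors (b1..b4) written by save_model()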
f = h5py.File("iscat-deep.h5", "r")
number_of_layers = np.squeeze(f["layers"])
for i in range(1, number_of_layers):
self.parameters["W"+str(i)] = np.array(f["W"+str(i)])
self.parameters["b"+str(i)] = np.array(f["b"+str(i)])
self.is_trained = True
return self
    # The course assignment only walks through forward and backward propagation for a multi-layer network
    # (the feed-forward and back-propagation helpers are wrapped in dnn_app_utils), so here I use Keras
    # directly to build the 5-layer network the assignment asks for
def L_layer_model_with_keras(self):
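        # the same 12288-20-7-5-1 architecture, built with the older Keras 1 Sequential API
        # (hence the output_dim / nb_epoch arguments)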
model = Sequential()
model.add(Dense(output_dim=20, activation="relu", input_dim=12288))
model.add(Dense(output_dim=7, activation="relu", input_dim=13))
model.add(Dense(output_dim=5, activation="relu", input_dim=7))
model.add(Dense(output_dim=1, activation="sigmoid", input_dim=5))
model.compile(loss="binary_crossentropy", optimizer='sgd', metrics=["accuracy"])
model.fit(self.train_x.T, self.train_y.T, nb_epoch=5000)
model.save("iscat-keras.h5")
score = model.evaluate(self.test_x.T, self.test_y.T)
print(score)
return self
def load_keras_model(self):
self.model = load_model('iscat-keras.h5')
return self
def predict_with_keras(self, image_path):
image = np.array(ndimage.imread(image_path, flatten=False))
image_flatten = scipy.misc.imresize(image, size=(64, 64)).reshape((64*64*3, 1))
result = np.squeeze(self.model.predict(image_flatten.T))
print("这%s一只猫" % "是" if result==1 else "不是")
def predict_standard(self, image_path):
print("==============在测试集的准确率=================")
predict(self.test_x, self.test_y, self.parameters)
print("==============预测一张图片=================")
image = np.array(ndimage.imread(image_path, flatten=False))
my_image = scipy.misc.imresize(image, size=(64, 64)).reshape((64 * 64 * 3, 1))
my_predicted_image = predict(X=my_image, y=[1], parameters=self.parameters)
print("这%s一只猫" % "是" if my_predicted_image == 1 else "不是")
plt.imshow(image)
if __name__ == '__main__':
    # train the model using the course-assignment implementation
    # DeepClassifier().flattern_x().standardize_x().L_layer_model(learning_rate=0.0075, num_iterations=3000).save_model()
    # predict with the assignment-style model
    DeepClassifier().load_model().flattern_x().standardize_x().predict_standard("images/cat.jpg")
    # train the model with Keras
    # DeepClassifier().flattern_x().standardize_x().L_layer_model_with_keras()
    # predict with the Keras model
    # DeepClassifier().load_model().predict("images/cat.jpg")
|
[
"keras.models.load_model",
"h5py.File",
"numpy.random.seed",
"matplotlib.pyplot.show",
"keras.models.Sequential",
"matplotlib.pyplot.imshow",
"keras.layers.Dense",
"scipy.misc.imresize",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.ndimage.imread"
] |
[((312, 329), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (326, 329), True, 'import numpy as np\n'), ((587, 599), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (597, 599), False, 'from keras.models import Sequential, load_model\n'), ((1764, 1781), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1778, 1781), True, 'import numpy as np\n'), ((3192, 3210), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (3202, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3219, 3254), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations (per tens)"""'], {}), "('iterations (per tens)')\n", (3229, 3254), True, 'import matplotlib.pyplot as plt\n'), ((3321, 3331), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3329, 3331), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3423), 'h5py.File', 'h5py.File', (['"""iscat-deep.h5"""', '"""w"""'], {}), "('iscat-deep.h5', 'w')\n", (3401, 3423), False, 'import h5py\n'), ((3603, 3634), 'h5py.File', 'h5py.File', (['"""iscat-deep.h5"""', '"""r"""'], {}), "('iscat-deep.h5', 'r')\n", (3612, 3634), False, 'import h5py\n'), ((3662, 3685), 'numpy.squeeze', 'np.squeeze', (["f['layers']"], {}), "(f['layers'])\n", (3672, 3685), True, 'import numpy as np\n'), ((4062, 4074), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4072, 4074), False, 'from keras.models import Sequential, load_model\n'), ((4717, 4745), 'keras.models.load_model', 'load_model', (['"""iscat-keras.h5"""'], {}), "('iscat-keras.h5')\n", (4727, 4745), False, 'from keras.models import Sequential, load_model\n'), ((5617, 5634), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (5627, 5634), True, 'import matplotlib.pyplot as plt\n'), ((3165, 3182), 'numpy.squeeze', 'np.squeeze', (['costs'], {}), '(costs)\n', (3175, 3182), True, 'import numpy as np\n'), ((4093, 4149), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(20)', 'activation': '"""relu"""', 'input_dim': '(12288)'}), "(output_dim=20, activation='relu', input_dim=12288)\n", (4098, 4149), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4169, 4221), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(7)', 'activation': '"""relu"""', 'input_dim': '(13)'}), "(output_dim=7, activation='relu', input_dim=13)\n", (4174, 4221), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4241, 4292), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(5)', 'activation': '"""relu"""', 'input_dim': '(7)'}), "(output_dim=5, activation='relu', input_dim=7)\n", (4246, 4292), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4312, 4366), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(1)', 'activation': '"""sigmoid"""', 'input_dim': '(5)'}), "(output_dim=1, activation='sigmoid', input_dim=5)\n", (4317, 4366), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4838, 4879), 'scipy.ndimage.imread', 'ndimage.imread', (['image_path'], {'flatten': '(False)'}), '(image_path, flatten=False)\n', (4852, 4879), False, 'from scipy import ndimage\n'), ((5328, 5369), 'scipy.ndimage.imread', 'ndimage.imread', (['image_path'], {'flatten': '(False)'}), '(image_path, flatten=False)\n', (5342, 5369), False, 'from scipy import ndimage\n'), ((4905, 4946), 'scipy.misc.imresize', 'scipy.misc.imresize', (['image'], {'size': '(64, 64)'}), '(image, size=(64, 64))\n', (4924, 4946), False, 'import scipy\n'), ((5390, 5431), 'scipy.misc.imresize', 'scipy.misc.imresize', (['image'], {'size': '(64, 64)'}), 
'(image, size=(64, 64))\n', (5409, 5431), False, 'import scipy\n')]
|
"""
********************************************************************************
make figures
********************************************************************************
"""
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
def plt_sol0(XY, u, width, height, cmap):
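    # interpolate the scattered solution samples onto a regular 200x200 grid and draw a pseudocolor map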
lb = XY.min(0)
ub = XY.max(0)
nx = 200
x = np.linspace(lb[0], ub[0], nx)
y = np.linspace(lb[1], ub[1], nx)
X, Y = np.meshgrid(x, y)
U = griddata(XY, u.flatten(), (X, Y), method = "linear")
plt.figure(figsize = (width, height))
plt.pcolor(X, Y, U, cmap = cmap, shading = "auto")
plt.colorbar()
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"$ \phi $")
plt.show()
def plt_sol1(XY, u, width, height, cmap, v0, v1, vt):
lb = XY.min(0)
ub = XY.max(0)
nx = 200
x = np.linspace(lb[0], ub[0], nx)
y = np.linspace(lb[1], ub[1], nx)
X, Y = np.meshgrid(x, y)
U = griddata(XY, u.flatten(), (X, Y), method = "cubic")
plt.figure(figsize = (width, height))
plt.pcolor(X, Y, U, cmap = cmap, shading = "auto", vmin = v0, vmax = v1)
plt.colorbar(ticks = np.arange(v0, v1 + .001, vt))
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"$ \phi $")
plt.show()
def plt_diff(x, u1, u2, height, width, cmap, v0, v1, vt):
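    # plot the two solution fields side by side together with their pointwise difference phi_1 - phi_2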
lb = x.min(0)
ub = x.max(0)
nx = 200
x = np.linspace(lb[0], ub[0], nx)
y = np.linspace(lb[1], ub[1], nx)
X, Y = np.meshgrid(x, y)
U1 = griddata(x, u1.flatten(), (X, Y), method = "cubic")
U2 = griddata(x, u2.flatten(), (X, Y), method = "cubic")
U3 = griddata(x, (u1 - u2).flatten(), (X, Y), method = "cubic")
plt.figure(figsize = (height, width))
plt.subplot(1, 3, 1)
plt.pcolor(X, Y, U1, cmap = cmap, shading = "auto", vmin = v0, vmax = v1)
plt.colorbar(ticks = np.arange(v0, v1 + .001, vt))
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"$ \phi_1 $")
plt.subplot(1, 3, 2)
plt.pcolor(X, Y, U2, cmap = cmap, shading = "auto", vmin = v0, vmax = v1)
plt.colorbar(ticks = np.arange(v0, v1 + .001, vt))
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"$ \phi_2 $")
plt.subplot(1, 3, 3)
plt.pcolor(X, Y, U3, cmap = cmap, shading = "auto", vmin = v0, vmax = v1)
plt.colorbar(ticks = np.arange(v0 / 10, (v1 + .001) / 10, vt / 10))
plt.xlabel("x")
plt.ylabel("y")
plt.title(r"$ \phi_1 - \phi_2 $")
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.pcolor",
"matplotlib.pyplot.subplot",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((376, 405), 'numpy.linspace', 'np.linspace', (['lb[0]', 'ub[0]', 'nx'], {}), '(lb[0], ub[0], nx)\n', (387, 405), True, 'import numpy as np\n'), ((414, 443), 'numpy.linspace', 'np.linspace', (['lb[1]', 'ub[1]', 'nx'], {}), '(lb[1], ub[1], nx)\n', (425, 443), True, 'import numpy as np\n'), ((455, 472), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (466, 472), True, 'import numpy as np\n'), ((538, 573), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (548, 573), True, 'import matplotlib.pyplot as plt\n'), ((580, 626), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['X', 'Y', 'U'], {'cmap': 'cmap', 'shading': '"""auto"""'}), "(X, Y, U, cmap=cmap, shading='auto')\n", (590, 626), True, 'import matplotlib.pyplot as plt\n'), ((635, 649), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (647, 649), True, 'import matplotlib.pyplot as plt\n'), ((654, 669), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (664, 669), True, 'import matplotlib.pyplot as plt\n'), ((674, 689), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (684, 689), True, 'import matplotlib.pyplot as plt\n'), ((694, 716), 'matplotlib.pyplot.title', 'plt.title', (['"""$ \\\\phi $"""'], {}), "('$ \\\\phi $')\n", (703, 716), True, 'import matplotlib.pyplot as plt\n'), ((721, 731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (729, 731), True, 'import matplotlib.pyplot as plt\n'), ((846, 875), 'numpy.linspace', 'np.linspace', (['lb[0]', 'ub[0]', 'nx'], {}), '(lb[0], ub[0], nx)\n', (857, 875), True, 'import numpy as np\n'), ((884, 913), 'numpy.linspace', 'np.linspace', (['lb[1]', 'ub[1]', 'nx'], {}), '(lb[1], ub[1], nx)\n', (895, 913), True, 'import numpy as np\n'), ((925, 942), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (936, 942), True, 'import numpy as np\n'), ((1007, 1042), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (1017, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1113), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['X', 'Y', 'U'], {'cmap': 'cmap', 'shading': '"""auto"""', 'vmin': 'v0', 'vmax': 'v1'}), "(X, Y, U, cmap=cmap, shading='auto', vmin=v0, vmax=v1)\n", (1059, 1113), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1196), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1191, 1196), True, 'import matplotlib.pyplot as plt\n'), ((1201, 1216), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1211, 1216), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1243), 'matplotlib.pyplot.title', 'plt.title', (['"""$ \\\\phi $"""'], {}), "('$ \\\\phi $')\n", (1230, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1248, 1258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1256, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1404), 'numpy.linspace', 'np.linspace', (['lb[0]', 'ub[0]', 'nx'], {}), '(lb[0], ub[0], nx)\n', (1386, 1404), True, 'import numpy as np\n'), ((1413, 1442), 'numpy.linspace', 'np.linspace', (['lb[1]', 'ub[1]', 'nx'], {}), '(lb[1], ub[1], nx)\n', (1424, 1442), True, 'import numpy as np\n'), ((1454, 1471), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1465, 1471), True, 'import numpy as np\n'), ((1666, 1701), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(height, width)'}), '(figsize=(height, width))\n', (1676, 1701), True, 'import matplotlib.pyplot as plt\n'), 
((1708, 1728), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1719, 1728), True, 'import matplotlib.pyplot as plt\n'), ((1733, 1798), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['X', 'Y', 'U1'], {'cmap': 'cmap', 'shading': '"""auto"""', 'vmin': 'v0', 'vmax': 'v1'}), "(X, Y, U1, cmap=cmap, shading='auto', vmin=v0, vmax=v1)\n", (1743, 1798), True, 'import matplotlib.pyplot as plt\n'), ((1866, 1881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1876, 1881), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1896, 1901), True, 'import matplotlib.pyplot as plt\n'), ((1906, 1930), 'matplotlib.pyplot.title', 'plt.title', (['"""$ \\\\phi_1 $"""'], {}), "('$ \\\\phi_1 $')\n", (1915, 1930), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1955), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1946, 1955), True, 'import matplotlib.pyplot as plt\n'), ((1960, 2025), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['X', 'Y', 'U2'], {'cmap': 'cmap', 'shading': '"""auto"""', 'vmin': 'v0', 'vmax': 'v1'}), "(X, Y, U2, cmap=cmap, shading='auto', vmin=v0, vmax=v1)\n", (1970, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2108), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2103, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2113, 2128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2123, 2128), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2157), 'matplotlib.pyplot.title', 'plt.title', (['"""$ \\\\phi_2 $"""'], {}), "('$ \\\\phi_2 $')\n", (2142, 2157), True, 'import matplotlib.pyplot as plt\n'), ((2162, 2182), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2173, 2182), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2252), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['X', 'Y', 'U3'], {'cmap': 'cmap', 'shading': '"""auto"""', 'vmin': 'v0', 'vmax': 'v1'}), "(X, Y, U3, cmap=cmap, shading='auto', vmin=v0, vmax=v1)\n", (2197, 2252), True, 'import matplotlib.pyplot as plt\n'), ((2337, 2352), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2347, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2357, 2372), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2367, 2372), True, 'import matplotlib.pyplot as plt\n'), ((2377, 2411), 'matplotlib.pyplot.title', 'plt.title', (['"""$ \\\\phi_1 - \\\\phi_2 $"""'], {}), "('$ \\\\phi_1 - \\\\phi_2 $')\n", (2386, 2411), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1176), 'numpy.arange', 'np.arange', (['v0', '(v1 + 0.001)', 'vt'], {}), '(v0, v1 + 0.001, vt)\n', (1156, 1176), True, 'import numpy as np\n'), ((1832, 1861), 'numpy.arange', 'np.arange', (['v0', '(v1 + 0.001)', 'vt'], {}), '(v0, v1 + 0.001, vt)\n', (1841, 1861), True, 'import numpy as np\n'), ((2059, 2088), 'numpy.arange', 'np.arange', (['v0', '(v1 + 0.001)', 'vt'], {}), '(v0, v1 + 0.001, vt)\n', (2068, 2088), True, 'import numpy as np\n'), ((2286, 2332), 'numpy.arange', 'np.arange', (['(v0 / 10)', '((v1 + 0.001) / 10)', '(vt / 10)'], {}), '(v0 / 10, (v1 + 0.001) / 10, vt / 10)\n', (2295, 2332), True, 'import numpy as np\n')]
|
#### feed in the observation sequences line by line
#### quantities become all ones
#### feed with all observation sequences
import numpy as np
import pandas as pd
import random
from sklearn.model_selection import KFold
from hmm_class import hmm
from sklearn.preprocessing import normalize
import time
import matplotlib.pyplot as plt
# load the observations and split them into k folds
def split_load_data(filename, k_splits):
    obs_seq = np.loadtxt(filename, dtype = int)
    kf = KFold(n_splits = k_splits, shuffle = False)
    for train_index, test_index in kf.split(obs_seq):
        obs_train, obs_test = obs_seq[train_index], obs_seq[test_index]
    # note: only the train/test split from the last fold is kept and returned
    return obs_train, obs_test
def load_data(filename):
obs_seq = np.loadtxt(filename, dtype = int)
return obs_seq
# generate random states matching the length of the data
def sts_seq_generate(N, size_data, len_obs): # N states
sts_seq = np.zeros((size_data, len_obs), dtype = int)
for i in range(size_data):
for j in range(len_obs):
sts_seq[i][j] = random.randint(0,N-1)
return sts_seq
# generate the emission probabilities randomly
# return as a matrix
def em_prob_generate(n, m): # n:# states, m: # obs
em_prob = np.zeros((n,m))
for i in range(n):
for j in range(m):
em_prob[i][j] = np.random.uniform(0,1)
em_prob = normalize(em_prob, axis = 1, norm = 'l1')
return np.asmatrix(em_prob)
def trans_prob_generate(n): # n:# states
trans_prob = np.zeros((n,n))
for i in range(n):
for j in range(n):
trans_prob[i][j] = np.random.uniform(0,1)
trans_prob = normalize(trans_prob, axis = 1, norm = 'l1')
return np.asmatrix(trans_prob)
def pi_generate(n):
pi = np.zeros(n)
for i in range(n):
pi[i] = np.random.uniform(0,1)
pi = normalize([pi], axis = 1, norm = 'l1')
return np.asmatrix(pi)
# useful parameters for later use
def param_generate(n, obs_seq):
    size_data = len(obs_seq) # number of observation sequences (1000 lines)
    len_obs = len(obs_seq[0]) # length of each sequence; assumes all sequences have the same length
sts_seq = sts_seq_generate(n, size_data, len_obs)
return size_data, len_obs, sts_seq
# write all of the output files for this project
def outfile(filename,N = None, ep = None, tp = None, hidden_sts = None, distribution = None):
f = open(filename, "w+")
if N:
f.write(str(N))
f.write("\n")
if np.any(ep):
[n, m] = np.shape(ep)
for i in range(n*m):
f.write(str(ep.item(i)))
if i % m == m-1:
f.write("\n")
else:
f.write(",")
for j in range(n*n):
f.write(str(tp.item(j)))
if j % n == n-1:
f.write("\n")
else:
f.write(",")
if hidden_sts:
size_data = len(hidden_sts)
len_seq = len(hidden_sts[0])
for i in range(size_data):
for j in range(len_seq):
f.write(str(hidden_sts[i][j]))
if j % len_seq == len_seq-1:
f.write("\n")
else:
f.write(",")
if distribution:
size_data = len(distribution)
len_seq = len(distribution[0])
for i in range(size_data):
for j in range(len_seq):
f.write(str(distribution[i][j]))
if j % len_seq == len_seq-1:
f.write("\n")
else:
f.write(",")
f.close()
# compute the predicted output probability distribution
# input: the list of hidden state sequences and the list of unique states
# returns the distribution list
def predic_prob(hidden_sts, uniq_sts):
distribution = []
each_prob = []
size_data = len(hidden_sts)
len_seq = len(hidden_sts[0])
dis_dict = dict.fromkeys(uniq_sts, 0)
for i in range(size_data):
for j in range(len_seq):
dis_dict[hidden_sts[i][j]] += 1
if j % len_seq == len_seq-1: # change line
each_prob = prob_cal(dis_dict)
distribution.append(each_prob)
dis_dict.clear()
dis_dict = dict.fromkeys(uniq_sts, 0)
return distribution
# probability computation from the dictionary
def prob_cal(dictionary):
prob_lst = []
length = len(dictionary)
total = []
for i in range(length):
total.append(dictionary[i])
total = sum(total)
for i in range(length):
prob_lst.append(dictionary[i]/total)
return prob_lst
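# Worked example (added for illustration): for counts {0: 2, 1: 1, 2: 1} the
# helper above returns [0.5, 0.25, 0.25] -- it assumes the keys are exactly
# the integers 0 .. len(dictionary)-1, which is how it is used in predic_prob.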
# Given a seq of output, predict the next output and the next state, and test it on the data.
def predict_next_sts(hidden_sts, tp):
size_data = len(hidden_sts)
len_seq = len(hidden_sts[0])
next_sts = []
for i in range(size_data):
next_sts.append(np.argmax(tp[hidden_sts[i][len_seq-1],:]))
for i in range(size_data):
hidden_sts[i].append(next_sts[i])
return hidden_sts
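# Hedged sketch (added for illustration, never called by the script): with a
# 2-state transition matrix, predict_next_sts appends to each sequence the
# state that its last decoded state is most likely to transition into.
def _demo_predict_next_sts():
    tp_demo = np.asmatrix([[0.9, 0.1],
                         [0.3, 0.7]])
    hidden_demo = [[0, 1, 1], [1, 0, 0]]
    # last state 1 -> argmax of row 1 is 1; last state 0 -> argmax of row 0 is 0
    return predict_next_sts(hidden_demo, tp_demo)  # [[0, 1, 1, 1], [1, 0, 0, 0]]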
if __name__ == '__main__':
n = 5 # number of states
m = 4 # number of observation
k = 5 # k fold
    num_iter = 1000 # number of iterations
tolerance = 10**(-5)
obs_seq = load_data('train534.dat')
size_data, len_obs, sts_seq = param_generate(n, obs_seq)
    uniq_sts = list(np.unique(sts_seq)) # the hmm class needs to be fed a list of unique states
uniq_obs = list(np.unique(obs_seq))
pi = pi_generate(n) # start prob
em_prob = em_prob_generate(n, m) # generate uniform distribution em prob
trans_prob = trans_prob_generate(n) # generate uniform distribution trans prob.
model = hmm(uniq_sts, uniq_obs, pi, trans_prob, em_prob) # init the model
    # Number of times each corresponding item in 'observation_list' occurs
quantities = np.ones(size_data)
prob = model.log_prob(obs_seq, quantities)
print("prob of seq with original param %f" %(prob))
# run EM/ Baum_welch to train the data.
# use Baum_welch to maximize the likelihood
# get the transition matrix A and emission probability matrix B
ep, tp, sp, prob_lst, iter_count, loss_lst = model.train_hmm(obs_seq, num_iter, quantities, tolerance)
print("emission_prob\n", ep)
# pd.DataFrame(model.em_prob, index = uniq_sts, columns = uniq_obs)
print("pi\n", sp)
print("transition\n", tp)
prob = model.log_prob(obs_seq, quantities)
print("prob of seq after %d iterations: %f" %(num_iter, prob))
# use viterbi to compute the most likely sequence. Report the time it took.
tr_start_t = time.perf_counter()
hidden_states = []
for i in range(size_data):
hidden_states.append(model.viterbi(obs_seq[i]))
tr_end_t = time.perf_counter()
print("time for get all the hidden states from training data:", tr_end_t - tr_start_t)
# print('hidden states:\n', hidden_states)
###### calculate the log likelihood of test set
###### predict the output from test seq
test_obs = load_data('test1_534.dat')
size_data_test, len_obs_test, test_sts_seq = param_generate(n, test_obs)
test_quant = np.ones(size_data_test)
test_prob = model.log_prob(test_obs, test_quant)
print("The log likelihood of test set: %f" %(test_prob))
##### output the hidden states of test set
te_start_t = time.perf_counter()
test_hidden_sts = []
for i in range(size_data_test):
test_hidden_sts.append(model.viterbi(test_obs[i]))
te_end_t = time.perf_counter()
print("time for get all the hidden states from test data:", te_end_t - te_start_t)
test_hidden_sts = [list(map(int, lst)) for lst in test_hidden_sts] # cast the data to int
# comput the next state for every sequnece. T=40 to 41
test_hidden_sts = predict_next_sts(test_hidden_sts, tp)
# print("test set hidden states:\n", test_hidden_sts)
distribution = predic_prob(test_hidden_sts, uniq_sts)
####### output file ##########
outfile("modelpars.dat", N = n, ep = ep, tp = tp)
outfile("loglik.dat", N = test_prob)
outfile("viterbi.dat", hidden_sts = test_hidden_sts)
outfile("predict.dat", distribution = distribution)
####### plot ###########
x = np.arange(0, iter_count)
plt.figure()
plt.plot(x, prob_lst, color = 'r')
    plt.xlabel('iteration')
plt.ylabel('log likelihood')
plt.title('Learning curve')
plt.show()
plt.figure()
plt.plot(x, loss_lst, color = 'b')
    plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('Loss from each iteration')
plt.show()
# ep_test, tp_test, sp_test = model.train(obs_test, 2, quantities_test)
# run the baum-welch algo to obtain the A, B matrix and start prob.
# def em_prob_generate():
# return np.matrix('0.1 0.2 0.3 0.4; 0.2 0.3 0.1 0.4; 0.4 0.3 0.2 0.1; 0.2 0.1 0.4 0.3; 0.3 0.1 0.2 0.4')
# def trans_prob_generate():
# return np.matrix('0.2 0.1 0.3 0.2 0.2; 0.1 0.2 0.2 0.1 0.4; 0.3 0.1 0.1 0.2 0.3; 0.2 0.1 0.1 0.2 0.4; 0.3 0.3 0.2 0.1 0.1')
# def pi_generate():
# return np.matrix('0.1 0.2 0.3 0.1 0.3')
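# Hedged sketch (added; not executed anywhere): split_load_data and k are
# defined above but never used in __main__.  A simple hold-out check could
# reuse them roughly like this:
#
#     obs_train, obs_test = split_load_data('train534.dat', k)
#     model.train_hmm(obs_train, num_iter, np.ones(len(obs_train)), tolerance)
#     print(model.log_prob(obs_test, np.ones(len(obs_test))))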
|
[
"matplotlib.pyplot.title",
"numpy.argmax",
"hmm_class.hmm",
"numpy.ones",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.unique",
"random.randint",
"numpy.loadtxt",
"matplotlib.pyplot.show",
"time.perf_counter",
"sklearn.preprocessing.normalize",
"matplotlib.pyplot.ylabel",
"numpy.random.uniform",
"matplotlib.pyplot.plot",
"numpy.zeros",
"sklearn.model_selection.KFold",
"numpy.any",
"numpy.asmatrix",
"matplotlib.pyplot.xlabel"
] |
[((411, 442), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'dtype': 'int'}), '(filename, dtype=int)\n', (421, 442), True, 'import numpy as np\n'), ((452, 491), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'k_splits', 'shuffle': '(False)'}), '(n_splits=k_splits, shuffle=False)\n', (457, 491), False, 'from sklearn.model_selection import KFold\n'), ((686, 717), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'dtype': 'int'}), '(filename, dtype=int)\n', (696, 717), True, 'import numpy as np\n'), ((854, 895), 'numpy.zeros', 'np.zeros', (['(size_data, len_obs)'], {'dtype': 'int'}), '((size_data, len_obs), dtype=int)\n', (862, 895), True, 'import numpy as np\n'), ((1144, 1160), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (1152, 1160), True, 'import numpy as np\n'), ((1258, 1295), 'sklearn.preprocessing.normalize', 'normalize', (['em_prob'], {'axis': '(1)', 'norm': '"""l1"""'}), "(em_prob, axis=1, norm='l1')\n", (1267, 1295), False, 'from sklearn.preprocessing import normalize\n'), ((1309, 1329), 'numpy.asmatrix', 'np.asmatrix', (['em_prob'], {}), '(em_prob)\n', (1320, 1329), True, 'import numpy as np\n'), ((1391, 1407), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1399, 1407), True, 'import numpy as np\n'), ((1511, 1551), 'sklearn.preprocessing.normalize', 'normalize', (['trans_prob'], {'axis': '(1)', 'norm': '"""l1"""'}), "(trans_prob, axis=1, norm='l1')\n", (1520, 1551), False, 'from sklearn.preprocessing import normalize\n'), ((1565, 1588), 'numpy.asmatrix', 'np.asmatrix', (['trans_prob'], {}), '(trans_prob)\n', (1576, 1588), True, 'import numpy as np\n'), ((1619, 1630), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1627, 1630), True, 'import numpy as np\n'), ((1693, 1727), 'sklearn.preprocessing.normalize', 'normalize', (['[pi]'], {'axis': '(1)', 'norm': '"""l1"""'}), "([pi], axis=1, norm='l1')\n", (1702, 1727), False, 'from sklearn.preprocessing import normalize\n'), ((1741, 1756), 'numpy.asmatrix', 'np.asmatrix', (['pi'], {}), '(pi)\n', (1752, 1756), True, 'import numpy as np\n'), ((2274, 2284), 'numpy.any', 'np.any', (['ep'], {}), '(ep)\n', (2280, 2284), True, 'import numpy as np\n'), ((5002, 5050), 'hmm_class.hmm', 'hmm', (['uniq_sts', 'uniq_obs', 'pi', 'trans_prob', 'em_prob'], {}), '(uniq_sts, uniq_obs, pi, trans_prob, em_prob)\n', (5005, 5050), False, 'from hmm_class import hmm\n'), ((5160, 5178), 'numpy.ones', 'np.ones', (['size_data'], {}), '(size_data)\n', (5167, 5178), True, 'import numpy as np\n'), ((5900, 5919), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5917, 5919), False, 'import time\n'), ((6034, 6053), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6051, 6053), False, 'import time\n'), ((6417, 6440), 'numpy.ones', 'np.ones', (['size_data_test'], {}), '(size_data_test)\n', (6424, 6440), True, 'import numpy as np\n'), ((6613, 6632), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6630, 6632), False, 'import time\n'), ((6757, 6776), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6774, 6776), False, 'import time\n'), ((7453, 7477), 'numpy.arange', 'np.arange', (['(0)', 'iter_count'], {}), '(0, iter_count)\n', (7462, 7477), True, 'import numpy as np\n'), ((7480, 7492), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7490, 7492), True, 'import matplotlib.pyplot as plt\n'), ((7495, 7527), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'prob_lst'], {'color': '"""r"""'}), "(x, prob_lst, color='r')\n", (7503, 7527), True, 'import matplotlib.pyplot as plt\n'), ((7532, 
7561), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration times"""'], {}), "('iteration times')\n", (7542, 7561), True, 'import matplotlib.pyplot as plt\n'), ((7564, 7592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log likelihood"""'], {}), "('log likelihood')\n", (7574, 7592), True, 'import matplotlib.pyplot as plt\n'), ((7595, 7622), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning curve"""'], {}), "('Learning curve')\n", (7604, 7622), True, 'import matplotlib.pyplot as plt\n'), ((7625, 7635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7633, 7635), True, 'import matplotlib.pyplot as plt\n'), ((7640, 7652), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7650, 7652), True, 'import matplotlib.pyplot as plt\n'), ((7655, 7687), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'loss_lst'], {'color': '"""b"""'}), "(x, loss_lst, color='b')\n", (7663, 7687), True, 'import matplotlib.pyplot as plt\n'), ((7692, 7721), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration times"""'], {}), "('iteration times')\n", (7702, 7721), True, 'import matplotlib.pyplot as plt\n'), ((7724, 7742), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (7734, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7745, 7782), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss from each iteration"""'], {}), "('Loss from each iteration')\n", (7754, 7782), True, 'import matplotlib.pyplot as plt\n'), ((7785, 7795), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7793, 7795), True, 'import matplotlib.pyplot as plt\n'), ((1663, 1686), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1680, 1686), True, 'import numpy as np\n'), ((2298, 2310), 'numpy.shape', 'np.shape', (['ep'], {}), '(ep)\n', (2306, 2310), True, 'import numpy as np\n'), ((4687, 4705), 'numpy.unique', 'np.unique', (['sts_seq'], {}), '(sts_seq)\n', (4696, 4705), True, 'import numpy as np\n'), ((4778, 4796), 'numpy.unique', 'np.unique', (['obs_seq'], {}), '(obs_seq)\n', (4787, 4796), True, 'import numpy as np\n'), ((975, 999), 'random.randint', 'random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (989, 999), False, 'import random\n'), ((1223, 1246), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1240, 1246), True, 'import numpy as np\n'), ((1473, 1496), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1490, 1496), True, 'import numpy as np\n'), ((4268, 4312), 'numpy.argmax', 'np.argmax', (['tp[hidden_sts[i][len_seq - 1], :]'], {}), '(tp[hidden_sts[i][len_seq - 1], :])\n', (4277, 4312), True, 'import numpy as np\n')]
|
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
from mne.utils import _TempDir
from pactools.dar_model import AR, DAR, HAR, StableDAR
from pactools.utils.testing import assert_equal, assert_greater
from pactools.utils.testing import assert_raises, assert_array_equal
from pactools.utils.testing import assert_true, assert_array_almost_equal
from pactools.comodulogram import Comodulogram, read_comodulogram
from pactools.comodulogram import ALL_PAC_METRICS, BICOHERENCE_PAC_METRICS
from pactools.simulate_pac import simulate_pac
# Parameters used for the simulated signal in the test
low_fq_range = [1., 3., 5., 7.]
high_fq_range = [25., 50., 75.]
n_low = len(low_fq_range)
n_high = len(high_fq_range)
high_fq = high_fq_range[1]
low_fq = low_fq_range[1]
n_points = 1024
fs = 200.
signal = simulate_pac(n_points=n_points, fs=fs, high_fq=high_fq, low_fq=low_fq,
low_fq_width=1., noise_level=0.1, random_state=0)
signal_copy = signal.copy()
class ComodTest(Comodulogram):
# A comodulogram call with default params used for testing
def __init__(self, fs=fs, low_fq_range=low_fq_range, low_fq_width=1.,
high_fq_range=high_fq_range, high_fq_width='auto',
method='tort', n_surrogates=0, vmin=None, vmax=None,
progress_bar=False, ax_special=None, minimum_shift=1.0,
random_state=0, coherence_params=dict(), low_fq_width_2=4.0):
super(ComodTest, self).__init__(
fs=fs, low_fq_range=low_fq_range, low_fq_width=low_fq_width,
high_fq_range=high_fq_range, high_fq_width=high_fq_width,
method=method, n_surrogates=n_surrogates, vmin=vmin, vmax=vmax,
progress_bar=progress_bar, ax_special=ax_special,
minimum_shift=minimum_shift, random_state=random_state,
coherence_params=coherence_params, low_fq_width_2=low_fq_width_2)
def fast_comod(low_sig=signal, high_sig=None, mask=None, *args, **kwargs):
return ComodTest(*args, **kwargs).fit(low_sig=low_sig, high_sig=high_sig,
mask=mask).comod_
def test_input_checking():
# test that we have a ValueError for bad parameters
func = partial(fast_comod, method='wrong')
assert_raises(ValueError, func)
func = partial(fast_comod, fs='wrong')
assert_raises(ValueError, func)
func = partial(fast_comod, low_sig='wrong')
assert_raises(ValueError, func)
func = partial(fast_comod, high_sig='wrong')
assert_raises(ValueError, func)
def test_different_dimension_in_input():
# Test that 1D or 2D signals are accepted, but not 3D
for dim in [
(4, -1),
(-1, ),
(1, -1),
]:
fast_comod(signal.reshape(*dim))
dim = (2, 2, -1)
assert_raises(ValueError, fast_comod, signal.reshape(*dim))
def test_high_sig_identical():
# Test that we have the same result with high_sig=low_sig and high_sig=None
for method in ALL_PAC_METRICS:
if method in BICOHERENCE_PAC_METRICS:
continue
comod_0 = fast_comod(method=method)
comod_1 = fast_comod(high_sig=signal, method=method)
assert_array_equal(comod_0, comod_1)
def test_comod_correct_maximum():
# Test that the PAC is maximum at the correct location in the comodulogram
for method in ALL_PAC_METRICS:
est = ComodTest(method=method, progress_bar=True).fit(signal)
comod = est.comod_
# test the shape of the comodulogram
assert_array_equal(comod.shape, (n_low, n_high))
# the bicoherence metrics fail this test with current parameters
if method in BICOHERENCE_PAC_METRICS or method == 'jiang':
continue
low_fq_0, high_fq_0, max_pac = est.get_maximum_pac()
assert_equal(low_fq_0, low_fq)
assert_equal(high_fq_0, high_fq)
assert_equal(max_pac, comod.max())
assert_true(np.all(comod > 0))
def test_empty_mask():
# Test that using an empty mask does not change the results
mask = np.zeros(n_points, dtype=bool)
for method in ALL_PAC_METRICS:
comod_0 = fast_comod(mask=mask, method=method)
comod_1 = fast_comod(low_sig=signal[~mask], method=method)
assert_array_almost_equal(comod_0, comod_1, decimal=7)
def test_surrogates():
# Test the surrogates comodulogram
for method in ALL_PAC_METRICS:
msg = 'with method=%s' % method
if method in BICOHERENCE_PAC_METRICS or method == 'jiang':
continue
n_surrogates = 10
est = ComodTest(method=method, n_surrogates=n_surrogates).fit(signal)
assert_array_equal(est.comod_.shape, (n_low, n_high), err_msg=msg)
assert_array_equal(est.surrogates_.shape, (n_surrogates, n_low,
n_high), err_msg=msg)
# z-score
z_score = est.comod_z_score_
assert_array_equal(z_score.shape, (n_low, n_high), err_msg=msg)
if method != 'jiang': # 'jiang' method does not estimate CFC but CFD
assert_greater(z_score[1, 1], z_score[-1, -1], msg=msg)
# surrogate_max
surrogate_max = est.surrogate_max_
assert_array_equal(surrogate_max.shape, (n_surrogates, ))
assert_greater(est.comod_[1, 1], surrogate_max.max(), msg=msg)
assert_greater(surrogate_max.max(), est.comod_[-1, -1], msg=msg)
# Smoke test with contours in the plotting function
est.plot(contour_level=0.01, contour_method='comod_max')
est.plot(contour_level=3, contour_method='z_score')
plt.close('all')
def test_no_surrogate():
# Test the errors when n_surrogates == 0
for method in ALL_PAC_METRICS:
est = ComodTest(method=method, n_surrogates=0).fit(signal)
with assert_raises(ValueError):
est.comod_z_score_
with assert_raises(ValueError):
est.surrogate_max_
with assert_raises(ValueError):
est.plot(contour_level=0.01)
plt.close('all')
def test_comodulogram_dar_models():
# Smoke test with DAR models
for klass in (AR, DAR, HAR, StableDAR):
if klass is StableDAR:
model = klass(ordar=10, ordriv=2, iter_newton=10)
else:
model = klass(ordar=10, ordriv=2)
comod = fast_comod(method=model)
assert_true(~np.any(np.isnan(comod)))
def test_plot_comodulogram():
# Smoke test with the standard plotting function
est = ComodTest().fit(signal)
est.plot()
# Smoke test with the special plotting functions
ax = plt.figure().gca()
for method in ALL_PAC_METRICS:
est = ComodTest(low_fq_range=[low_fq], method=method,
ax_special=ax).fit(signal)
# Test that it raises an error if ax_special is not None and low_fq_range
# has more than one element
func = partial(fast_comod, ax_special=ax)
assert_raises(ValueError, func)
plt.close('all')
def test_signal_unchanged():
# Test that signal has not been changed during the test
assert_array_equal(signal_copy, signal)
def _compare_values(v, v2):
if isinstance(v, np.ndarray):
assert_array_equal(v, v2)
elif isinstance(v, dict):
for key, value in v.items():
_compare_values(v[key], v2[key])
elif isinstance(v, np.random.RandomState):
for s, s2 in zip(v.get_state(), v2.get_state()):
_compare_values(s, s2)
else:
assert_equal(v, v2)
def _compare_instance(inst1, inst2):
for k, v in vars(inst1).items():
v2 = getattr(inst2, k)
_compare_values(v, v2)
def test_save():
# Test File IO
tmp = _TempDir()
est = ComodTest()
fname = tmp + '/test.hdf5'
est.save(fname)
est2 = read_comodulogram(fname)
_compare_instance(est, est2)
# Now fit and save
est.fit(signal)
est.save(fname, overwrite=True)
est3 = read_comodulogram(fname)
_compare_instance(est, est3)
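# Note added for context (not part of the original file): the module follows the
# pytest/nose convention of flat `test_*` functions, so the whole suite can
# normally be collected and run with a plain `pytest` invocation on this file.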
|
[
"functools.partial",
"pactools.simulate_pac.simulate_pac",
"pactools.utils.testing.assert_raises",
"mne.utils._TempDir",
"matplotlib.pyplot.close",
"pactools.utils.testing.assert_array_equal",
"pactools.comodulogram.read_comodulogram",
"numpy.zeros",
"pactools.utils.testing.assert_equal",
"pactools.utils.testing.assert_array_almost_equal",
"pactools.utils.testing.assert_greater",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.all"
] |
[((827, 952), 'pactools.simulate_pac.simulate_pac', 'simulate_pac', ([], {'n_points': 'n_points', 'fs': 'fs', 'high_fq': 'high_fq', 'low_fq': 'low_fq', 'low_fq_width': '(1.0)', 'noise_level': '(0.1)', 'random_state': '(0)'}), '(n_points=n_points, fs=fs, high_fq=high_fq, low_fq=low_fq,\n low_fq_width=1.0, noise_level=0.1, random_state=0)\n', (839, 952), False, 'from pactools.simulate_pac import simulate_pac\n'), ((2237, 2272), 'functools.partial', 'partial', (['fast_comod'], {'method': '"""wrong"""'}), "(fast_comod, method='wrong')\n", (2244, 2272), False, 'from functools import partial\n'), ((2277, 2308), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'func'], {}), '(ValueError, func)\n', (2290, 2308), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((2320, 2351), 'functools.partial', 'partial', (['fast_comod'], {'fs': '"""wrong"""'}), "(fast_comod, fs='wrong')\n", (2327, 2351), False, 'from functools import partial\n'), ((2356, 2387), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'func'], {}), '(ValueError, func)\n', (2369, 2387), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((2399, 2435), 'functools.partial', 'partial', (['fast_comod'], {'low_sig': '"""wrong"""'}), "(fast_comod, low_sig='wrong')\n", (2406, 2435), False, 'from functools import partial\n'), ((2440, 2471), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'func'], {}), '(ValueError, func)\n', (2453, 2471), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((2483, 2520), 'functools.partial', 'partial', (['fast_comod'], {'high_sig': '"""wrong"""'}), "(fast_comod, high_sig='wrong')\n", (2490, 2520), False, 'from functools import partial\n'), ((2525, 2556), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'func'], {}), '(ValueError, func)\n', (2538, 2556), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((4059, 4089), 'numpy.zeros', 'np.zeros', (['n_points'], {'dtype': 'bool'}), '(n_points, dtype=bool)\n', (4067, 4089), True, 'import numpy as np\n'), ((5593, 5609), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5602, 5609), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6021), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6014, 6021), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6897), 'functools.partial', 'partial', (['fast_comod'], {'ax_special': 'ax'}), '(fast_comod, ax_special=ax)\n', (6870, 6897), False, 'from functools import partial\n'), ((6902, 6933), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'func'], {}), '(ValueError, func)\n', (6915, 6933), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((6938, 6954), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6947, 6954), True, 'import matplotlib.pyplot as plt\n'), ((7050, 7089), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['signal_copy', 'signal'], {}), '(signal_copy, signal)\n', (7068, 7089), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((7663, 7673), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (7671, 7673), False, 'from mne.utils import _TempDir\n'), ((7758, 7782), 'pactools.comodulogram.read_comodulogram', 'read_comodulogram', (['fname'], {}), '(fname)\n', (7775, 7782), False, 'from pactools.comodulogram import 
Comodulogram, read_comodulogram\n'), ((7907, 7931), 'pactools.comodulogram.read_comodulogram', 'read_comodulogram', (['fname'], {}), '(fname)\n', (7924, 7931), False, 'from pactools.comodulogram import Comodulogram, read_comodulogram\n'), ((3187, 3223), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['comod_0', 'comod_1'], {}), '(comod_0, comod_1)\n', (3205, 3223), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((3524, 3572), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['comod.shape', '(n_low, n_high)'], {}), '(comod.shape, (n_low, n_high))\n', (3542, 3572), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((3805, 3835), 'pactools.utils.testing.assert_equal', 'assert_equal', (['low_fq_0', 'low_fq'], {}), '(low_fq_0, low_fq)\n', (3817, 3835), False, 'from pactools.utils.testing import assert_equal, assert_greater\n'), ((3844, 3876), 'pactools.utils.testing.assert_equal', 'assert_equal', (['high_fq_0', 'high_fq'], {}), '(high_fq_0, high_fq)\n', (3856, 3876), False, 'from pactools.utils.testing import assert_equal, assert_greater\n'), ((4256, 4310), 'pactools.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['comod_0', 'comod_1'], {'decimal': '(7)'}), '(comod_0, comod_1, decimal=7)\n', (4281, 4310), False, 'from pactools.utils.testing import assert_true, assert_array_almost_equal\n'), ((4651, 4717), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['est.comod_.shape', '(n_low, n_high)'], {'err_msg': 'msg'}), '(est.comod_.shape, (n_low, n_high), err_msg=msg)\n', (4669, 4717), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((4726, 4815), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['est.surrogates_.shape', '(n_surrogates, n_low, n_high)'], {'err_msg': 'msg'}), '(est.surrogates_.shape, (n_surrogates, n_low, n_high),\n err_msg=msg)\n', (4744, 4815), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((4927, 4990), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['z_score.shape', '(n_low, n_high)'], {'err_msg': 'msg'}), '(z_score.shape, (n_low, n_high), err_msg=msg)\n', (4945, 4990), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((5213, 5269), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['surrogate_max.shape', '(n_surrogates,)'], {}), '(surrogate_max.shape, (n_surrogates,))\n', (5231, 5269), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((5937, 5962), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (5950, 5962), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((7162, 7187), 'pactools.utils.testing.assert_array_equal', 'assert_array_equal', (['v', 'v2'], {}), '(v, v2)\n', (7180, 7187), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((3940, 3957), 'numpy.all', 'np.all', (['(comod > 0)'], {}), '(comod > 0)\n', (3946, 3957), True, 'import numpy as np\n'), ((5081, 5136), 'pactools.utils.testing.assert_greater', 'assert_greater', (['z_score[1, 1]', 'z_score[-1, -1]'], {'msg': 'msg'}), '(z_score[1, 1], z_score[-1, -1], msg=msg)\n', (5095, 5136), False, 'from pactools.utils.testing import assert_equal, assert_greater\n'), ((5798, 5823), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', 
(5811, 5823), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((5869, 5894), 'pactools.utils.testing.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (5882, 5894), False, 'from pactools.utils.testing import assert_raises, assert_array_equal\n'), ((6574, 6586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6584, 6586), True, 'import matplotlib.pyplot as plt\n'), ((7457, 7476), 'pactools.utils.testing.assert_equal', 'assert_equal', (['v', 'v2'], {}), '(v, v2)\n', (7469, 7476), False, 'from pactools.utils.testing import assert_equal, assert_greater\n'), ((6359, 6374), 'numpy.isnan', 'np.isnan', (['comod'], {}), '(comod)\n', (6367, 6374), True, 'import numpy as np\n')]
|
import numpy as np
from ..base import NCMBase
from sklearn.neighbors import NearestNeighbors
class KNeighborsMean(NCMBase):
    def __init__(self, **sklearn):
        # Ask sklearn for one extra neighbor: when the scored points were also
        # used for fitting, the query point can presumably come back as its
        # own nearest neighbor (default of 5 user-visible neighbors otherwise).
        if "n_neighbors" in sklearn:
            sklearn["n_neighbors"] += 1
        else:
            sklearn["n_neighbors"] = 6
        self.clf = NearestNeighbors(**sklearn)
self.y = []
def fit(self, X, y):
self.clf.fit(X)
self.y = y
    def scores(self, X, y, X_eq_fit):
        if X_eq_fit:
            # The scoring set is the fit set itself.  The original branch was
            # left as `pass` (implicitly returning None); querying without X
            # is a presumed completion -- sklearn then excludes each point
            # from its own neighborhood.
            ind = self.clf.kneighbors(return_distance=False)
        else:
            ind = self.clf.kneighbors(X, return_distance=False)
        y_pred = self.y[ind]
        res = []
        for label, row in zip(y, y_pred):
            # fraction of the neighbors whose label differs from the true label
            count_label = [count for label_, count in
                           zip(*np.unique(row, return_counts=True))
                           if label_ == label]
            if len(count_label) == 1:
                res.append(
                    (len(row) - count_label[0]) / len(row)
                )
            else:
                # none of the neighbors share the label -> maximal score
                res.append(1.0)
        return np.array(res)
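# Hedged usage sketch (illustration only; NCMBase and the calling conventions
# come from the surrounding conformal-prediction package):
#
#     ncm = KNeighborsMean(n_neighbors=5)
#     ncm.fit(X_train, y_train)            # y_train as a numpy array
#     alphas = ncm.scores(X_cal, y_cal, X_eq_fit=False)
#     # each score is the fraction of nearest neighbors whose label differs
#     # from the true label of the corresponding calibration point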
|
[
"numpy.array",
"sklearn.neighbors.NearestNeighbors",
"numpy.unique"
] |
[((312, 339), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {}), '(**sklearn)\n', (328, 339), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1084, 1097), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (1092, 1097), True, 'import numpy as np\n'), ((768, 802), 'numpy.unique', 'np.unique', (['row'], {'return_counts': '(True)'}), '(row, return_counts=True)\n', (777, 802), True, 'import numpy as np\n')]
|
"""
.. _pyvista_demo_ref:
3D Visualization with PyVista
=============================
The example demonstrates the how to use the VTK interface via the
`pyvista library <http://docs.pyvista.org>`__ .
To run this example, you will need to `install pyvista <http://docs.pyvista.org/getting-started/installation.html>`__ .
- contributed by `@banesullivan <https://github.com/banesullivan>`_
Using the inversion result from the example notebook
`plot_laguna_del_maule_inversion.ipynb <http://docs.simpeg.xyz/content/examples/20-published/plot_laguna_del_maule_inversion.html>`_
"""
# sphinx_gallery_thumbnail_number = 2
import os
import shutil
import tarfile
import shelve
import discretize
import pyvista as pv
import numpy as np
# Set a documentation friendly plotting theme
pv.set_plot_theme('document')
print('PyVista Version: {}'.format(pv.__version__))
###############################################################################
# Download and load data
# ----------------------
#
# In the following we load the :code:`mesh` and :code:`Lpout` that you would
# get from running the laguna-del-maule inversion notebook as well as some of
# the raw data for the topography surface and gravity observations.
# Download Topography and Observed gravity data
url = "https://storage.googleapis.com/simpeg/Chile_GRAV_4_Miller/Chile_GRAV_4_Miller.tar.gz"
downloads = discretize.utils.download(url, overwrite=True)
basePath = downloads.split(".")[0]
# unzip the tarfile
tar = tarfile.open(downloads, "r")
tar.extractall()
tar.close()
# Download the inverted model
f = discretize.utils.download(
"https://storage.googleapis.com/simpeg/laguna_del_maule_slicer.tar.gz"
)
tar = tarfile.open(f, "r")
tar.extractall()
tar.close()
with shelve.open('./laguna_del_maule_slicer/laguna_del_maule-result') as db:
mesh = db['mesh']
Lpout = db['Lpout']
# Load the mesh/data
mesh = discretize.TensorMesh.copy(mesh)
models = {'Lpout':Lpout}
###############################################################################
# Create PyVista data objects
# ---------------------------
#
# Here we start making PyVista data objects of all the spatially referenced
# data.
# Get the PyVista dataset of the inverted model
dataset = mesh.to_vtk(models)
###############################################################################
# Load topography points from text file as XYZ numpy array
topo_pts = np.loadtxt('Chile_GRAV_4_Miller/LdM_topo.topo', skiprows=1)
# Create the topography points and apply an elevation filter
topo = pv.PolyData(topo_pts).delaunay_2d().elevation()
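# (Added note) delaunay_2d() triangulates the scattered XYZ points into a
# surface in the XY plane, and elevation() attaches the z-coordinate as a
# scalar array so the topography can be color-mapped when plotted.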
###############################################################################
# Load the gravity data from text file as XYZ+attributes numpy array
grav_data = np.loadtxt('Chile_GRAV_4_Miller/LdM_grav_obs.grv', skiprows=1)
print('gravity file shape: ', grav_data.shape)
# Use the points to create PolyData
grav = pv.PolyData(grav_data[:,0:3])
# Add the data arrays
grav.point_arrays['comp-1'] = grav_data[:,3]
grav.point_arrays['comp-2'] = grav_data[:,4]
###############################################################################
# Plot the topographic surface and the gravity data
p = pv.Plotter()
p.add_mesh(topo, color='grey')
p.add_mesh(grav, stitle='Observed Gravity Data', point_size=15,
render_points_as_spheres=True)
# Use a non-phot-realistic shading technique to show topographic relief
p.enable_eye_dome_lighting()
p.show(window_size=[1024, 768])
###############################################################################
# Visualize Using PyVista
# -----------------------
#
# Here we visualize all the data in 3D!
# Create display parameters for inverted model
dparams = dict(
show_edges=False,
cmap='bwr',
clim=[-0.6, 0.6],
)
# Apply a threshold filter to remove topography
# no arguments will remove the NaN values
dataset_t = dataset.threshold()
# Extract volumetric threshold
threshed = dataset_t.threshold(-0.2, invert=True)
# Create the rendering scene
p = pv.Plotter()
# add a grid axes
p.show_grid()
# Add spatially referenced data to the scene
p.add_mesh(dataset_t.slice('x'), **dparams)
p.add_mesh(dataset_t.slice('y'), **dparams)
p.add_mesh(threshed, **dparams)
p.add_mesh(topo, opacity=0.75, color='grey',
#cmap='gist_earth', clim=[1.7e+03, 3.104e+03],
)
p.add_mesh(grav, cmap='viridis', point_size=15,
render_points_as_spheres=True)
# Here is a nice camera position we manually found:
cpos = [(395020.7332989303, 6039949.0452080015, 20387.583125699253),
(364528.3152860675, 6008839.363092581, -3776.318305935185),
(-0.3423732500124074, -0.34364514928896667, 0.8744647328772646)]
p.camera_position = cpos
# Render the scene!
p.show(window_size=[1024, 768])
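###############################################################################
# Possible follow-up (sketch, not part of the original example): the scene can
# be captured to an image and the thresholded model written out for reuse,
# e.g. by passing ``screenshot='laguna.png'`` to ``p.show()`` above, or with
#
#   threshed.save('laguna_del_maule_threshed.vtk')
#
# (file names here are illustrative).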
|
[
"pyvista.set_plot_theme",
"discretize.utils.download",
"shelve.open",
"pyvista.Plotter",
"numpy.loadtxt",
"tarfile.open",
"discretize.TensorMesh.copy",
"pyvista.PolyData"
] |
[((794, 823), 'pyvista.set_plot_theme', 'pv.set_plot_theme', (['"""document"""'], {}), "('document')\n", (811, 823), True, 'import pyvista as pv\n'), ((1387, 1433), 'discretize.utils.download', 'discretize.utils.download', (['url'], {'overwrite': '(True)'}), '(url, overwrite=True)\n', (1412, 1433), False, 'import discretize\n'), ((1496, 1524), 'tarfile.open', 'tarfile.open', (['downloads', '"""r"""'], {}), "(downloads, 'r')\n", (1508, 1524), False, 'import tarfile\n'), ((1589, 1691), 'discretize.utils.download', 'discretize.utils.download', (['"""https://storage.googleapis.com/simpeg/laguna_del_maule_slicer.tar.gz"""'], {}), "(\n 'https://storage.googleapis.com/simpeg/laguna_del_maule_slicer.tar.gz')\n", (1614, 1691), False, 'import discretize\n'), ((1699, 1719), 'tarfile.open', 'tarfile.open', (['f', '"""r"""'], {}), "(f, 'r')\n", (1711, 1719), False, 'import tarfile\n'), ((1902, 1934), 'discretize.TensorMesh.copy', 'discretize.TensorMesh.copy', (['mesh'], {}), '(mesh)\n', (1928, 1934), False, 'import discretize\n'), ((2419, 2478), 'numpy.loadtxt', 'np.loadtxt', (['"""Chile_GRAV_4_Miller/LdM_topo.topo"""'], {'skiprows': '(1)'}), "('Chile_GRAV_4_Miller/LdM_topo.topo', skiprows=1)\n", (2429, 2478), True, 'import numpy as np\n'), ((2758, 2820), 'numpy.loadtxt', 'np.loadtxt', (['"""Chile_GRAV_4_Miller/LdM_grav_obs.grv"""'], {'skiprows': '(1)'}), "('Chile_GRAV_4_Miller/LdM_grav_obs.grv', skiprows=1)\n", (2768, 2820), True, 'import numpy as np\n'), ((2911, 2941), 'pyvista.PolyData', 'pv.PolyData', (['grav_data[:, 0:3]'], {}), '(grav_data[:, 0:3])\n', (2922, 2941), True, 'import pyvista as pv\n'), ((3191, 3203), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (3201, 3203), True, 'import pyvista as pv\n'), ((4016, 4028), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (4026, 4028), True, 'import pyvista as pv\n'), ((1755, 1819), 'shelve.open', 'shelve.open', (['"""./laguna_del_maule_slicer/laguna_del_maule-result"""'], {}), "('./laguna_del_maule_slicer/laguna_del_maule-result')\n", (1766, 1819), False, 'import shelve\n'), ((2547, 2568), 'pyvista.PolyData', 'pv.PolyData', (['topo_pts'], {}), '(topo_pts)\n', (2558, 2568), True, 'import pyvista as pv\n')]
|
import numpy as np
# time occ1 occ2
mctdh_data = np.array(
[[5.0000000e-01, 1.2083970e-02, 9.8791603e-01],
[1.0000000e+00, 4.3008830e-02, 9.5699117e-01],
[1.5000000e+00, 7.9675930e-02, 9.2032407e-01],
[2.0000000e+00, 1.0804013e-01, 8.9195987e-01],
[2.5000000e+00, 1.1972252e-01, 8.8027748e-01],
[3.0000000e+00, 1.1480491e-01, 8.8519509e-01],
[3.5000000e+00, 9.9963810e-02, 9.0003619e-01],
[4.0000000e+00, 8.3915230e-02, 9.1608477e-01],
[4.5000000e+00, 7.3173540e-02, 9.2682646e-01],
[5.0000000e+00, 7.0139510e-02, 9.2986049e-01],
[5.5000000e+00, 7.3678880e-02, 9.2632112e-01],
[6.0000000e+00, 8.1009600e-02, 9.1899040e-01],
[6.5000000e+00, 8.9498230e-02, 9.1050177e-01],
[7.0000000e+00, 9.7581630e-02, 9.0241837e-01],
[7.5000000e+00, 1.0480220e-01, 8.9519780e-01],
[8.0000000e+00, 1.1138269e-01, 8.8861731e-01],
[8.5000000e+00, 1.1778152e-01, 8.8221848e-01],
[9.0000000e+00, 1.2444794e-01, 8.7555206e-01],
[9.5000000e+00, 1.3177114e-01, 8.6822886e-01],
[1.0000000e+01, 1.4010714e-01, 8.5989286e-01],
[1.0500000e+01, 1.4975263e-01, 8.5024737e-01],
[1.1000000e+01, 1.6089050e-01, 8.3910950e-01],
[1.1500000e+01, 1.7348692e-01, 8.2651308e-01],
[1.2000000e+01, 1.8720687e-01, 8.1279313e-01],
[1.2500000e+01, 2.0153796e-01, 7.9846204e-01],
[1.3000000e+01, 2.1602325e-01, 7.8397675e-01],
[1.3500000e+01, 2.3037238e-01, 7.6962762e-01],
[1.4000000e+01, 2.4446829e-01, 7.5553171e-01],
[1.4500000e+01, 2.5835149e-01, 7.4164851e-01],
[1.5000000e+01, 2.7219119e-01, 7.2780881e-01],
[1.5500000e+01, 2.8613236e-01, 7.1386764e-01],
[1.6000000e+01, 3.0009972e-01, 6.9990028e-01],
[1.6500000e+01, 3.1382441e-01, 6.8617559e-01],
[1.7000000e+01, 3.2698213e-01, 6.7301787e-01],
[1.7500000e+01, 3.3937635e-01, 6.6062365e-01],
[1.8000000e+01, 3.5102877e-01, 6.4897123e-01],
[1.8500000e+01, 3.6206939e-01, 6.3793061e-01],
[1.9000000e+01, 3.7267412e-01, 6.2732588e-01],
[1.9500000e+01, 3.8298746e-01, 6.1701254e-01],
[2.0000000e+01, 3.9308095e-01, 6.0691905e-01],
[2.0500000e+01, 4.0301861e-01, 5.9698139e-01],
[2.1000000e+01, 4.1282250e-01, 5.8717750e-01],
[2.1500000e+01, 4.2245660e-01, 5.7754340e-01],
[2.2000000e+01, 4.3190030e-01, 5.6809970e-01],
[2.2500000e+01, 4.4112886e-01, 5.5887114e-01],
[2.3000000e+01, 4.5010529e-01, 5.4989471e-01],
[2.3500000e+01, 4.5883535e-01, 5.4116465e-01],
[2.4000000e+01, 4.6722627e-01, 5.3277373e-01],
[2.4500000e+01, 4.7498362e-01, 5.2501638e-01],
[2.5000000e+01, 4.8179766e-01, 5.1820234e-01],
[2.5500000e+01, 4.8751389e-01, 5.1248611e-01],
[2.6000000e+01, 4.9209686e-01, 5.0790314e-01],
[2.6500000e+01, 4.9574730e-01, 5.0425270e-01],
[2.7000000e+01, 4.9904364e-01, 5.0095636e-01],
[2.7500000e+01, 5.0257806e-01, 4.9742194e-01],
[2.8000000e+01, 5.0676072e-01, 4.9323928e-01],
[2.8500000e+01, 5.1170749e-01, 4.8829251e-01],
[2.9000000e+01, 5.1722842e-01, 4.8277158e-01],
[2.9500000e+01, 5.2295475e-01, 4.7704525e-01],
[3.0000000e+01, 5.2852166e-01, 4.7147834e-01],
[3.0500000e+01, 5.3372943e-01, 4.6627057e-01],
[3.1000000e+01, 5.3829162e-01, 4.6170838e-01],
[3.1500000e+01, 5.4233712e-01, 4.5766288e-01],
[3.2000000e+01, 5.4618162e-01, 4.5381838e-01],
[3.2500000e+01, 5.5001448e-01, 4.4998552e-01],
[3.3000000e+01, 5.5421102e-01, 4.4578898e-01],
[3.3500000e+01, 5.5877238e-01, 4.4122762e-01],
[3.4000000e+01, 5.6366982e-01, 4.3633018e-01],
[3.4500000e+01, 5.6899077e-01, 4.3100923e-01],
[3.5000000e+01, 5.7434030e-01, 4.2565970e-01],
[3.5500000e+01, 5.7964648e-01, 4.2035352e-01],
[3.6000000e+01, 5.8496463e-01, 4.1503537e-01],
[3.6500000e+01, 5.9025535e-01, 4.0974465e-01],
[3.7000000e+01, 5.9588610e-01, 4.0411390e-01],
[3.7500000e+01, 6.0174132e-01, 3.9825868e-01],
[3.8000000e+01, 6.0794735e-01, 3.9205265e-01],
[3.8500000e+01, 6.1445039e-01, 3.8554961e-01],
[3.9000000e+01, 6.2091842e-01, 3.7908158e-01],
[3.9500000e+01, 6.2772288e-01, 3.7227712e-01],
[4.0000000e+01, 6.3477112e-01, 3.6522888e-01],
[4.0500000e+01, 6.4184368e-01, 3.5815632e-01],
[4.1000000e+01, 6.4928782e-01, 3.5071218e-01],
[4.1500000e+01, 6.5735624e-01, 3.4264376e-01],
[4.2000000e+01, 6.6578903e-01, 3.3421097e-01],
[4.2500000e+01, 6.7432732e-01, 3.2567268e-01],
[4.3000000e+01, 6.8295713e-01, 3.1704287e-01],
[4.3500000e+01, 6.9150497e-01, 3.0849503e-01],
[4.4000000e+01, 6.9983774e-01, 3.0016226e-01],
[4.4500000e+01, 7.0817274e-01, 2.9182726e-01],
[4.5000000e+01, 7.1672047e-01, 2.8327953e-01],
[4.5500000e+01, 7.2537341e-01, 2.7462659e-01],
[4.6000000e+01, 7.3393667e-01, 2.6606333e-01],
[4.6500000e+01, 7.4239650e-01, 2.5760350e-01],
[4.7000000e+01, 7.5084866e-01, 2.4915134e-01],
[4.7500000e+01, 7.5929325e-01, 2.4070675e-01],
[4.8000000e+01, 7.6748428e-01, 2.3251572e-01],
[4.8500000e+01, 7.7499907e-01, 2.2500093e-01],
[4.9000000e+01, 7.8147300e-01, 2.1852700e-01],
[4.9500000e+01, 7.8661012e-01, 2.1338988e-01],
[5.0000000e+01, 7.9006035e-01, 2.0993965e-01],
[5.0500000e+01, 7.9159180e-01, 2.0840820e-01],
[5.1000000e+01, 7.9141817e-01, 2.0858183e-01],
[5.1500000e+01, 7.9027855e-01, 2.0972145e-01],
[5.2000000e+01, 7.8918090e-01, 2.1081910e-01],
[5.2500000e+01, 7.8902032e-01, 2.1097968e-01],
[5.3000000e+01, 7.9041607e-01, 2.0958393e-01],
[5.3500000e+01, 7.9373225e-01, 2.0626775e-01],
[5.4000000e+01, 7.9883965e-01, 2.0116035e-01],
[5.4500000e+01, 8.0470534e-01, 1.9529466e-01],
[5.5000000e+01, 8.0938464e-01, 1.9061536e-01],
[5.5500000e+01, 8.1079180e-01, 1.8920820e-01],
[5.6000000e+01, 8.0799382e-01, 1.9200618e-01],
[5.6500000e+01, 8.0187198e-01, 1.9812802e-01],
[5.7000000e+01, 7.9445757e-01, 2.0554243e-01],
[5.7500000e+01, 7.8781232e-01, 2.1218768e-01],
[5.8000000e+01, 7.8320459e-01, 2.1679541e-01],
[5.8500000e+01, 7.8067931e-01, 2.1932069e-01],
[5.9000000e+01, 7.7953602e-01, 2.2046398e-01],
[5.9500000e+01, 7.7929186e-01, 2.2070814e-01],
[6.0000000e+01, 7.7972442e-01, 2.2027558e-01],
[6.0500000e+01, 7.8047304e-01, 2.1952696e-01],
[6.1000000e+01, 7.8136710e-01, 2.1863290e-01],
[6.1500000e+01, 7.8251678e-01, 2.1748322e-01],
[6.2000000e+01, 7.8353249e-01, 2.1646751e-01],
[6.2500000e+01, 7.8336467e-01, 2.1663533e-01],
[6.3000000e+01, 7.8158438e-01, 2.1841562e-01],
[6.3500000e+01, 7.7916625e-01, 2.2083375e-01],
[6.4000000e+01, 7.7744298e-01, 2.2255702e-01],
[6.4500000e+01, 7.7732909e-01, 2.2267091e-01],
[6.5000000e+01, 7.7936981e-01, 2.2063019e-01],
[6.5500000e+01, 7.8319636e-01, 2.1680364e-01],
[6.6000000e+01, 7.8739037e-01, 2.1260963e-01],
[6.6500000e+01, 7.9077882e-01, 2.0922118e-01],
[6.7000000e+01, 7.9332419e-01, 2.0667581e-01],
[6.7500000e+01, 7.9557840e-01, 2.0442160e-01],
[6.8000000e+01, 7.9825531e-01, 2.0174469e-01],
[6.8500000e+01, 8.0211292e-01, 1.9788708e-01],
[6.9000000e+01, 8.0710085e-01, 1.9289915e-01],
[6.9500000e+01, 8.1213965e-01, 1.8786035e-01],
[7.0000000e+01, 8.1624836e-01, 1.8375164e-01],
[7.0500000e+01, 8.1922441e-01, 1.8077559e-01],
[7.1000000e+01, 8.2136937e-01, 1.7863063e-01],
[7.1500000e+01, 8.2308043e-01, 1.7691957e-01],
[7.2000000e+01, 8.2440015e-01, 1.7559985e-01],
[7.2500000e+01, 8.2492880e-01, 1.7507120e-01],
[7.3000000e+01, 8.2421052e-01, 1.7578948e-01],
[7.3500000e+01, 8.2197299e-01, 1.7802701e-01],
[7.4000000e+01, 8.1841193e-01, 1.8158807e-01],
[7.4500000e+01, 8.1409910e-01, 1.8590090e-01],
[7.5000000e+01, 8.0929874e-01, 1.9070126e-01],
[7.5500000e+01, 8.0380650e-01, 1.9619350e-01],
[7.6000000e+01, 7.9742293e-01, 2.0257707e-01],
[7.6500000e+01, 7.9001137e-01, 2.0998863e-01],
[7.7000000e+01, 7.8124159e-01, 2.1875841e-01],
[7.7500000e+01, 7.7098038e-01, 2.2901962e-01],
[7.8000000e+01, 7.5977139e-01, 2.4022861e-01],
[7.8500000e+01, 7.4845065e-01, 2.5154935e-01],
[7.9000000e+01, 7.3754115e-01, 2.6245885e-01],
[7.9500000e+01, 7.2720729e-01, 2.7279271e-01],
[8.0000000e+01, 7.1757244e-01, 2.8242756e-01],
[8.0500000e+01, 7.0883123e-01, 2.9116877e-01],
[8.1000000e+01, 7.0085659e-01, 2.9914341e-01],
[8.1500000e+01, 6.9303527e-01, 3.0696473e-01],
[8.2000000e+01, 6.8486829e-01, 3.1513171e-01],
[8.2500000e+01, 6.7646527e-01, 3.2353473e-01],
[8.3000000e+01, 6.6835282e-01, 3.3164718e-01],
[8.3500000e+01, 6.6088327e-01, 3.3911673e-01],
[8.4000000e+01, 6.5393189e-01, 3.4606811e-01],
[8.4500000e+01, 6.4749414e-01, 3.5250586e-01],
[8.5000000e+01, 6.4213779e-01, 3.5786221e-01],
[8.5500000e+01, 6.3822323e-01, 3.6177677e-01],
[8.6000000e+01, 6.3508024e-01, 3.6491976e-01],
[8.6500000e+01, 6.3187553e-01, 3.6812447e-01],
[8.7000000e+01, 6.2866410e-01, 3.7133590e-01],
[8.7500000e+01, 6.2567578e-01, 3.7432422e-01],
[8.8000000e+01, 6.2327737e-01, 3.7672263e-01],
[8.8500000e+01, 6.2202450e-01, 3.7797550e-01],
[8.9000000e+01, 6.2142673e-01, 3.7857327e-01],
[8.9500000e+01, 6.2085314e-01, 3.7914686e-01],
[9.0000000e+01, 6.2039036e-01, 3.7960964e-01],
[9.0500000e+01, 6.1980805e-01, 3.8019195e-01],
[9.1000000e+01, 6.1892296e-01, 3.8107704e-01],
[9.1500000e+01, 6.1828657e-01, 3.8171343e-01],
[9.2000000e+01, 6.1839927e-01, 3.8160073e-01],
[9.2500000e+01, 6.1910003e-01, 3.8089997e-01],
[9.3000000e+01, 6.2054513e-01, 3.7945487e-01],
[9.3500000e+01, 6.2310521e-01, 3.7689479e-01],
[9.4000000e+01, 6.2607344e-01, 3.7392656e-01],
[9.4500000e+01, 6.2886337e-01, 3.7113663e-01],
[9.5000000e+01, 6.3217429e-01, 3.6782571e-01],
[9.5500000e+01, 6.3663281e-01, 3.6336719e-01],
[9.6000000e+01, 6.4211005e-01, 3.5788995e-01],
[9.6500000e+01, 6.4845972e-01, 3.5154028e-01],
[9.7000000e+01, 6.5566634e-01, 3.4433366e-01],
[9.7500000e+01, 6.6344440e-01, 3.3655560e-01],
[9.8000000e+01, 6.7139138e-01, 3.2860862e-01],
[9.8500000e+01, 6.7944561e-01, 3.2055439e-01],
[9.9000000e+01, 6.8767516e-01, 3.1232484e-01],
[9.9500000e+01, 6.9606729e-01, 3.0393271e-01],
[1.0000000e+02, 7.0464076e-01, 2.9535924e-01],
[1.0050000e+02, 7.1352864e-01, 2.8647136e-01],
[1.0100000e+02, 7.2275077e-01, 2.7724923e-01],
[1.0150000e+02, 7.3202915e-01, 2.6797085e-01],
[1.0200000e+02, 7.4071962e-01, 2.5928038e-01],
[1.0250000e+02, 7.4834850e-01, 2.5165150e-01],
[1.0300000e+02, 7.5504078e-01, 2.4495922e-01],
[1.0350000e+02, 7.6135113e-01, 2.3864887e-01],
[1.0400000e+02, 7.6773182e-01, 2.3226818e-01],
[1.0450000e+02, 7.7409993e-01, 2.2590007e-01],
[1.0500000e+02, 7.8042367e-01, 2.1957633e-01],
[1.0550000e+02, 7.8673749e-01, 2.1326251e-01],
[1.0600000e+02, 7.9254266e-01, 2.0745734e-01],
[1.0650000e+02, 7.9707885e-01, 2.0292115e-01],
[1.0700000e+02, 8.0012517e-01, 1.9987483e-01],
[1.0750000e+02, 8.0232982e-01, 1.9767018e-01],
[1.0800000e+02, 8.0447387e-01, 1.9552613e-01],
[1.0850000e+02, 8.0691381e-01, 1.9308619e-01],
[1.0900000e+02, 8.0991568e-01, 1.9008432e-01],
[1.0950000e+02, 8.1386778e-01, 1.8613222e-01],
[1.1000000e+02, 8.1882018e-01, 1.8117982e-01],
[1.1050000e+02, 8.2400922e-01, 1.7599078e-01],
[1.1100000e+02, 8.2818942e-01, 1.7181058e-01],
[1.1150000e+02, 8.3038865e-01, 1.6961135e-01],
[1.1200000e+02, 8.3033898e-01, 1.6966102e-01],
[1.1250000e+02, 8.2863678e-01, 1.7136322e-01],
[1.1300000e+02, 8.2653080e-01, 1.7346920e-01],
[1.1350000e+02, 8.2536814e-01, 1.7463186e-01],
[1.1400000e+02, 8.2604248e-01, 1.7395752e-01],
[1.1450000e+02, 8.2854491e-01, 1.7145509e-01],
[1.1500000e+02, 8.3199930e-01, 1.6800070e-01],
[1.1550000e+02, 8.3538285e-01, 1.6461715e-01],
[1.1600000e+02, 8.3816342e-01, 1.6183658e-01],
[1.1650000e+02, 8.4022613e-01, 1.5977387e-01],
[1.1700000e+02, 8.4147193e-01, 1.5852807e-01],
[1.1750000e+02, 8.4185527e-01, 1.5814473e-01],
[1.1800000e+02, 8.4185298e-01, 1.5814702e-01],
[1.1850000e+02, 8.4241659e-01, 1.5758341e-01],
[1.1900000e+02, 8.4415570e-01, 1.5584430e-01],
[1.1950000e+02, 8.4685108e-01, 1.5314892e-01],
[1.2000000e+02, 8.4990233e-01, 1.5009767e-01]]
)
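# Illustrative plotting sketch (added; not part of the original data module).
# The columns are time, occ1 and occ2, as noted above, so the two occupation
# traces can be visualised directly:
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    plt.plot(mctdh_data[:, 0], mctdh_data[:, 1], label="occ1")
    plt.plot(mctdh_data[:, 0], mctdh_data[:, 2], label="occ2")
    plt.xlabel("time")
    plt.ylabel("occupation")
    plt.legend()
    plt.show()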
|
[
"numpy.array"
] |
[((76, 8320), 'numpy.array', 'np.array', (['[[0.5, 0.01208397, 0.98791603], [1.0, 0.04300883, 0.95699117], [1.5, \n 0.07967593, 0.92032407], [2.0, 0.10804013, 0.89195987], [2.5, \n 0.11972252, 0.88027748], [3.0, 0.11480491, 0.88519509], [3.5, \n 0.09996381, 0.90003619], [4.0, 0.08391523, 0.91608477], [4.5, \n 0.07317354, 0.92682646], [5.0, 0.07013951, 0.92986049], [5.5, \n 0.07367888, 0.92632112], [6.0, 0.0810096, 0.9189904], [6.5, 0.08949823,\n 0.91050177], [7.0, 0.09758163, 0.90241837], [7.5, 0.1048022, 0.8951978],\n [8.0, 0.11138269, 0.88861731], [8.5, 0.11778152, 0.88221848], [9.0, \n 0.12444794, 0.87555206], [9.5, 0.13177114, 0.86822886], [10.0, \n 0.14010714, 0.85989286], [10.5, 0.14975263, 0.85024737], [11.0, \n 0.1608905, 0.8391095], [11.5, 0.17348692, 0.82651308], [12.0, \n 0.18720687, 0.81279313], [12.5, 0.20153796, 0.79846204], [13.0, \n 0.21602325, 0.78397675], [13.5, 0.23037238, 0.76962762], [14.0, \n 0.24446829, 0.75553171], [14.5, 0.25835149, 0.74164851], [15.0, \n 0.27219119, 0.72780881], [15.5, 0.28613236, 0.71386764], [16.0, \n 0.30009972, 0.69990028], [16.5, 0.31382441, 0.68617559], [17.0, \n 0.32698213, 0.67301787], [17.5, 0.33937635, 0.66062365], [18.0, \n 0.35102877, 0.64897123], [18.5, 0.36206939, 0.63793061], [19.0, \n 0.37267412, 0.62732588], [19.5, 0.38298746, 0.61701254], [20.0, \n 0.39308095, 0.60691905], [20.5, 0.40301861, 0.59698139], [21.0, \n 0.4128225, 0.5871775], [21.5, 0.4224566, 0.5775434], [22.0, 0.4319003, \n 0.5680997], [22.5, 0.44112886, 0.55887114], [23.0, 0.45010529, \n 0.54989471], [23.5, 0.45883535, 0.54116465], [24.0, 0.46722627, \n 0.53277373], [24.5, 0.47498362, 0.52501638], [25.0, 0.48179766, \n 0.51820234], [25.5, 0.48751389, 0.51248611], [26.0, 0.49209686, \n 0.50790314], [26.5, 0.4957473, 0.5042527], [27.0, 0.49904364, \n 0.50095636], [27.5, 0.50257806, 0.49742194], [28.0, 0.50676072, \n 0.49323928], [28.5, 0.51170749, 0.48829251], [29.0, 0.51722842, \n 0.48277158], [29.5, 0.52295475, 0.47704525], [30.0, 0.52852166, \n 0.47147834], [30.5, 0.53372943, 0.46627057], [31.0, 0.53829162, \n 0.46170838], [31.5, 0.54233712, 0.45766288], [32.0, 0.54618162, \n 0.45381838], [32.5, 0.55001448, 0.44998552], [33.0, 0.55421102, \n 0.44578898], [33.5, 0.55877238, 0.44122762], [34.0, 0.56366982, \n 0.43633018], [34.5, 0.56899077, 0.43100923], [35.0, 0.5743403, \n 0.4256597], [35.5, 0.57964648, 0.42035352], [36.0, 0.58496463, \n 0.41503537], [36.5, 0.59025535, 0.40974465], [37.0, 0.5958861, \n 0.4041139], [37.5, 0.60174132, 0.39825868], [38.0, 0.60794735, \n 0.39205265], [38.5, 0.61445039, 0.38554961], [39.0, 0.62091842, \n 0.37908158], [39.5, 0.62772288, 0.37227712], [40.0, 0.63477112, \n 0.36522888], [40.5, 0.64184368, 0.35815632], [41.0, 0.64928782, \n 0.35071218], [41.5, 0.65735624, 0.34264376], [42.0, 0.66578903, \n 0.33421097], [42.5, 0.67432732, 0.32567268], [43.0, 0.68295713, \n 0.31704287], [43.5, 0.69150497, 0.30849503], [44.0, 0.69983774, \n 0.30016226], [44.5, 0.70817274, 0.29182726], [45.0, 0.71672047, \n 0.28327953], [45.5, 0.72537341, 0.27462659], [46.0, 0.73393667, \n 0.26606333], [46.5, 0.7423965, 0.2576035], [47.0, 0.75084866, \n 0.24915134], [47.5, 0.75929325, 0.24070675], [48.0, 0.76748428, \n 0.23251572], [48.5, 0.77499907, 0.22500093], [49.0, 0.781473, 0.218527],\n [49.5, 0.78661012, 0.21338988], [50.0, 0.79006035, 0.20993965], [50.5, \n 0.7915918, 0.2084082], [51.0, 0.79141817, 0.20858183], [51.5, \n 0.79027855, 0.20972145], [52.0, 0.7891809, 0.2108191], [52.5, \n 0.78902032, 0.21097968], [53.0, 0.79041607, 0.20958393], [53.5, \n 
0.79373225, 0.20626775], [54.0, 0.79883965, 0.20116035], [54.5, \n 0.80470534, 0.19529466], [55.0, 0.80938464, 0.19061536], [55.5, \n 0.8107918, 0.1892082], [56.0, 0.80799382, 0.19200618], [56.5, \n 0.80187198, 0.19812802], [57.0, 0.79445757, 0.20554243], [57.5, \n 0.78781232, 0.21218768], [58.0, 0.78320459, 0.21679541], [58.5, \n 0.78067931, 0.21932069], [59.0, 0.77953602, 0.22046398], [59.5, \n 0.77929186, 0.22070814], [60.0, 0.77972442, 0.22027558], [60.5, \n 0.78047304, 0.21952696], [61.0, 0.7813671, 0.2186329], [61.5, \n 0.78251678, 0.21748322], [62.0, 0.78353249, 0.21646751], [62.5, \n 0.78336467, 0.21663533], [63.0, 0.78158438, 0.21841562], [63.5, \n 0.77916625, 0.22083375], [64.0, 0.77744298, 0.22255702], [64.5, \n 0.77732909, 0.22267091], [65.0, 0.77936981, 0.22063019], [65.5, \n 0.78319636, 0.21680364], [66.0, 0.78739037, 0.21260963], [66.5, \n 0.79077882, 0.20922118], [67.0, 0.79332419, 0.20667581], [67.5, \n 0.7955784, 0.2044216], [68.0, 0.79825531, 0.20174469], [68.5, \n 0.80211292, 0.19788708], [69.0, 0.80710085, 0.19289915], [69.5, \n 0.81213965, 0.18786035], [70.0, 0.81624836, 0.18375164], [70.5, \n 0.81922441, 0.18077559], [71.0, 0.82136937, 0.17863063], [71.5, \n 0.82308043, 0.17691957], [72.0, 0.82440015, 0.17559985], [72.5, \n 0.8249288, 0.1750712], [73.0, 0.82421052, 0.17578948], [73.5, \n 0.82197299, 0.17802701], [74.0, 0.81841193, 0.18158807], [74.5, \n 0.8140991, 0.1859009], [75.0, 0.80929874, 0.19070126], [75.5, 0.8038065,\n 0.1961935], [76.0, 0.79742293, 0.20257707], [76.5, 0.79001137, \n 0.20998863], [77.0, 0.78124159, 0.21875841], [77.5, 0.77098038, \n 0.22901962], [78.0, 0.75977139, 0.24022861], [78.5, 0.74845065, \n 0.25154935], [79.0, 0.73754115, 0.26245885], [79.5, 0.72720729, \n 0.27279271], [80.0, 0.71757244, 0.28242756], [80.5, 0.70883123, \n 0.29116877], [81.0, 0.70085659, 0.29914341], [81.5, 0.69303527, \n 0.30696473], [82.0, 0.68486829, 0.31513171], [82.5, 0.67646527, \n 0.32353473], [83.0, 0.66835282, 0.33164718], [83.5, 0.66088327, \n 0.33911673], [84.0, 0.65393189, 0.34606811], [84.5, 0.64749414, \n 0.35250586], [85.0, 0.64213779, 0.35786221], [85.5, 0.63822323, \n 0.36177677], [86.0, 0.63508024, 0.36491976], [86.5, 0.63187553, \n 0.36812447], [87.0, 0.6286641, 0.3713359], [87.5, 0.62567578, \n 0.37432422], [88.0, 0.62327737, 0.37672263], [88.5, 0.6220245, \n 0.3779755], [89.0, 0.62142673, 0.37857327], [89.5, 0.62085314, \n 0.37914686], [90.0, 0.62039036, 0.37960964], [90.5, 0.61980805, \n 0.38019195], [91.0, 0.61892296, 0.38107704], [91.5, 0.61828657, \n 0.38171343], [92.0, 0.61839927, 0.38160073], [92.5, 0.61910003, \n 0.38089997], [93.0, 0.62054513, 0.37945487], [93.5, 0.62310521, \n 0.37689479], [94.0, 0.62607344, 0.37392656], [94.5, 0.62886337, \n 0.37113663], [95.0, 0.63217429, 0.36782571], [95.5, 0.63663281, \n 0.36336719], [96.0, 0.64211005, 0.35788995], [96.5, 0.64845972, \n 0.35154028], [97.0, 0.65566634, 0.34433366], [97.5, 0.6634444, \n 0.3365556], [98.0, 0.67139138, 0.32860862], [98.5, 0.67944561, \n 0.32055439], [99.0, 0.68767516, 0.31232484], [99.5, 0.69606729, \n 0.30393271], [100.0, 0.70464076, 0.29535924], [100.5, 0.71352864, \n 0.28647136], [101.0, 0.72275077, 0.27724923], [101.5, 0.73202915, \n 0.26797085], [102.0, 0.74071962, 0.25928038], [102.5, 0.7483485, \n 0.2516515], [103.0, 0.75504078, 0.24495922], [103.5, 0.76135113, \n 0.23864887], [104.0, 0.76773182, 0.23226818], [104.5, 0.77409993, \n 0.22590007], [105.0, 0.78042367, 0.21957633], [105.5, 0.78673749, \n 0.21326251], [106.0, 0.79254266, 0.20745734], [106.5, 0.79707885, 
\n 0.20292115], [107.0, 0.80012517, 0.19987483], [107.5, 0.80232982, \n 0.19767018], [108.0, 0.80447387, 0.19552613], [108.5, 0.80691381, \n 0.19308619], [109.0, 0.80991568, 0.19008432], [109.5, 0.81386778, \n 0.18613222], [110.0, 0.81882018, 0.18117982], [110.5, 0.82400922, \n 0.17599078], [111.0, 0.82818942, 0.17181058], [111.5, 0.83038865, \n 0.16961135], [112.0, 0.83033898, 0.16966102], [112.5, 0.82863678, \n 0.17136322], [113.0, 0.8265308, 0.1734692], [113.5, 0.82536814, \n 0.17463186], [114.0, 0.82604248, 0.17395752], [114.5, 0.82854491, \n 0.17145509], [115.0, 0.8319993, 0.1680007], [115.5, 0.83538285, \n 0.16461715], [116.0, 0.83816342, 0.16183658], [116.5, 0.84022613, \n 0.15977387], [117.0, 0.84147193, 0.15852807], [117.5, 0.84185527, \n 0.15814473], [118.0, 0.84185298, 0.15814702], [118.5, 0.84241659, \n 0.15758341], [119.0, 0.8441557, 0.1558443], [119.5, 0.84685108, \n 0.15314892], [120.0, 0.84990233, 0.15009767]]'], {}), '([[0.5, 0.01208397, 0.98791603], [1.0, 0.04300883, 0.95699117], [\n 1.5, 0.07967593, 0.92032407], [2.0, 0.10804013, 0.89195987], [2.5, \n 0.11972252, 0.88027748], [3.0, 0.11480491, 0.88519509], [3.5, \n 0.09996381, 0.90003619], [4.0, 0.08391523, 0.91608477], [4.5, \n 0.07317354, 0.92682646], [5.0, 0.07013951, 0.92986049], [5.5, \n 0.07367888, 0.92632112], [6.0, 0.0810096, 0.9189904], [6.5, 0.08949823,\n 0.91050177], [7.0, 0.09758163, 0.90241837], [7.5, 0.1048022, 0.8951978],\n [8.0, 0.11138269, 0.88861731], [8.5, 0.11778152, 0.88221848], [9.0, \n 0.12444794, 0.87555206], [9.5, 0.13177114, 0.86822886], [10.0, \n 0.14010714, 0.85989286], [10.5, 0.14975263, 0.85024737], [11.0, \n 0.1608905, 0.8391095], [11.5, 0.17348692, 0.82651308], [12.0, \n 0.18720687, 0.81279313], [12.5, 0.20153796, 0.79846204], [13.0, \n 0.21602325, 0.78397675], [13.5, 0.23037238, 0.76962762], [14.0, \n 0.24446829, 0.75553171], [14.5, 0.25835149, 0.74164851], [15.0, \n 0.27219119, 0.72780881], [15.5, 0.28613236, 0.71386764], [16.0, \n 0.30009972, 0.69990028], [16.5, 0.31382441, 0.68617559], [17.0, \n 0.32698213, 0.67301787], [17.5, 0.33937635, 0.66062365], [18.0, \n 0.35102877, 0.64897123], [18.5, 0.36206939, 0.63793061], [19.0, \n 0.37267412, 0.62732588], [19.5, 0.38298746, 0.61701254], [20.0, \n 0.39308095, 0.60691905], [20.5, 0.40301861, 0.59698139], [21.0, \n 0.4128225, 0.5871775], [21.5, 0.4224566, 0.5775434], [22.0, 0.4319003, \n 0.5680997], [22.5, 0.44112886, 0.55887114], [23.0, 0.45010529, \n 0.54989471], [23.5, 0.45883535, 0.54116465], [24.0, 0.46722627, \n 0.53277373], [24.5, 0.47498362, 0.52501638], [25.0, 0.48179766, \n 0.51820234], [25.5, 0.48751389, 0.51248611], [26.0, 0.49209686, \n 0.50790314], [26.5, 0.4957473, 0.5042527], [27.0, 0.49904364, \n 0.50095636], [27.5, 0.50257806, 0.49742194], [28.0, 0.50676072, \n 0.49323928], [28.5, 0.51170749, 0.48829251], [29.0, 0.51722842, \n 0.48277158], [29.5, 0.52295475, 0.47704525], [30.0, 0.52852166, \n 0.47147834], [30.5, 0.53372943, 0.46627057], [31.0, 0.53829162, \n 0.46170838], [31.5, 0.54233712, 0.45766288], [32.0, 0.54618162, \n 0.45381838], [32.5, 0.55001448, 0.44998552], [33.0, 0.55421102, \n 0.44578898], [33.5, 0.55877238, 0.44122762], [34.0, 0.56366982, \n 0.43633018], [34.5, 0.56899077, 0.43100923], [35.0, 0.5743403, \n 0.4256597], [35.5, 0.57964648, 0.42035352], [36.0, 0.58496463, \n 0.41503537], [36.5, 0.59025535, 0.40974465], [37.0, 0.5958861, \n 0.4041139], [37.5, 0.60174132, 0.39825868], [38.0, 0.60794735, \n 0.39205265], [38.5, 0.61445039, 0.38554961], [39.0, 0.62091842, \n 0.37908158], [39.5, 0.62772288, 0.37227712], 
[40.0, 0.63477112, \n 0.36522888], [40.5, 0.64184368, 0.35815632], [41.0, 0.64928782, \n 0.35071218], [41.5, 0.65735624, 0.34264376], [42.0, 0.66578903, \n 0.33421097], [42.5, 0.67432732, 0.32567268], [43.0, 0.68295713, \n 0.31704287], [43.5, 0.69150497, 0.30849503], [44.0, 0.69983774, \n 0.30016226], [44.5, 0.70817274, 0.29182726], [45.0, 0.71672047, \n 0.28327953], [45.5, 0.72537341, 0.27462659], [46.0, 0.73393667, \n 0.26606333], [46.5, 0.7423965, 0.2576035], [47.0, 0.75084866, \n 0.24915134], [47.5, 0.75929325, 0.24070675], [48.0, 0.76748428, \n 0.23251572], [48.5, 0.77499907, 0.22500093], [49.0, 0.781473, 0.218527],\n [49.5, 0.78661012, 0.21338988], [50.0, 0.79006035, 0.20993965], [50.5, \n 0.7915918, 0.2084082], [51.0, 0.79141817, 0.20858183], [51.5, \n 0.79027855, 0.20972145], [52.0, 0.7891809, 0.2108191], [52.5, \n 0.78902032, 0.21097968], [53.0, 0.79041607, 0.20958393], [53.5, \n 0.79373225, 0.20626775], [54.0, 0.79883965, 0.20116035], [54.5, \n 0.80470534, 0.19529466], [55.0, 0.80938464, 0.19061536], [55.5, \n 0.8107918, 0.1892082], [56.0, 0.80799382, 0.19200618], [56.5, \n 0.80187198, 0.19812802], [57.0, 0.79445757, 0.20554243], [57.5, \n 0.78781232, 0.21218768], [58.0, 0.78320459, 0.21679541], [58.5, \n 0.78067931, 0.21932069], [59.0, 0.77953602, 0.22046398], [59.5, \n 0.77929186, 0.22070814], [60.0, 0.77972442, 0.22027558], [60.5, \n 0.78047304, 0.21952696], [61.0, 0.7813671, 0.2186329], [61.5, \n 0.78251678, 0.21748322], [62.0, 0.78353249, 0.21646751], [62.5, \n 0.78336467, 0.21663533], [63.0, 0.78158438, 0.21841562], [63.5, \n 0.77916625, 0.22083375], [64.0, 0.77744298, 0.22255702], [64.5, \n 0.77732909, 0.22267091], [65.0, 0.77936981, 0.22063019], [65.5, \n 0.78319636, 0.21680364], [66.0, 0.78739037, 0.21260963], [66.5, \n 0.79077882, 0.20922118], [67.0, 0.79332419, 0.20667581], [67.5, \n 0.7955784, 0.2044216], [68.0, 0.79825531, 0.20174469], [68.5, \n 0.80211292, 0.19788708], [69.0, 0.80710085, 0.19289915], [69.5, \n 0.81213965, 0.18786035], [70.0, 0.81624836, 0.18375164], [70.5, \n 0.81922441, 0.18077559], [71.0, 0.82136937, 0.17863063], [71.5, \n 0.82308043, 0.17691957], [72.0, 0.82440015, 0.17559985], [72.5, \n 0.8249288, 0.1750712], [73.0, 0.82421052, 0.17578948], [73.5, \n 0.82197299, 0.17802701], [74.0, 0.81841193, 0.18158807], [74.5, \n 0.8140991, 0.1859009], [75.0, 0.80929874, 0.19070126], [75.5, 0.8038065,\n 0.1961935], [76.0, 0.79742293, 0.20257707], [76.5, 0.79001137, \n 0.20998863], [77.0, 0.78124159, 0.21875841], [77.5, 0.77098038, \n 0.22901962], [78.0, 0.75977139, 0.24022861], [78.5, 0.74845065, \n 0.25154935], [79.0, 0.73754115, 0.26245885], [79.5, 0.72720729, \n 0.27279271], [80.0, 0.71757244, 0.28242756], [80.5, 0.70883123, \n 0.29116877], [81.0, 0.70085659, 0.29914341], [81.5, 0.69303527, \n 0.30696473], [82.0, 0.68486829, 0.31513171], [82.5, 0.67646527, \n 0.32353473], [83.0, 0.66835282, 0.33164718], [83.5, 0.66088327, \n 0.33911673], [84.0, 0.65393189, 0.34606811], [84.5, 0.64749414, \n 0.35250586], [85.0, 0.64213779, 0.35786221], [85.5, 0.63822323, \n 0.36177677], [86.0, 0.63508024, 0.36491976], [86.5, 0.63187553, \n 0.36812447], [87.0, 0.6286641, 0.3713359], [87.5, 0.62567578, \n 0.37432422], [88.0, 0.62327737, 0.37672263], [88.5, 0.6220245, \n 0.3779755], [89.0, 0.62142673, 0.37857327], [89.5, 0.62085314, \n 0.37914686], [90.0, 0.62039036, 0.37960964], [90.5, 0.61980805, \n 0.38019195], [91.0, 0.61892296, 0.38107704], [91.5, 0.61828657, \n 0.38171343], [92.0, 0.61839927, 0.38160073], [92.5, 0.61910003, \n 0.38089997], [93.0, 0.62054513, 0.37945487], 
[93.5, 0.62310521, \n 0.37689479], [94.0, 0.62607344, 0.37392656], [94.5, 0.62886337, \n 0.37113663], [95.0, 0.63217429, 0.36782571], [95.5, 0.63663281, \n 0.36336719], [96.0, 0.64211005, 0.35788995], [96.5, 0.64845972, \n 0.35154028], [97.0, 0.65566634, 0.34433366], [97.5, 0.6634444, \n 0.3365556], [98.0, 0.67139138, 0.32860862], [98.5, 0.67944561, \n 0.32055439], [99.0, 0.68767516, 0.31232484], [99.5, 0.69606729, \n 0.30393271], [100.0, 0.70464076, 0.29535924], [100.5, 0.71352864, \n 0.28647136], [101.0, 0.72275077, 0.27724923], [101.5, 0.73202915, \n 0.26797085], [102.0, 0.74071962, 0.25928038], [102.5, 0.7483485, \n 0.2516515], [103.0, 0.75504078, 0.24495922], [103.5, 0.76135113, \n 0.23864887], [104.0, 0.76773182, 0.23226818], [104.5, 0.77409993, \n 0.22590007], [105.0, 0.78042367, 0.21957633], [105.5, 0.78673749, \n 0.21326251], [106.0, 0.79254266, 0.20745734], [106.5, 0.79707885, \n 0.20292115], [107.0, 0.80012517, 0.19987483], [107.5, 0.80232982, \n 0.19767018], [108.0, 0.80447387, 0.19552613], [108.5, 0.80691381, \n 0.19308619], [109.0, 0.80991568, 0.19008432], [109.5, 0.81386778, \n 0.18613222], [110.0, 0.81882018, 0.18117982], [110.5, 0.82400922, \n 0.17599078], [111.0, 0.82818942, 0.17181058], [111.5, 0.83038865, \n 0.16961135], [112.0, 0.83033898, 0.16966102], [112.5, 0.82863678, \n 0.17136322], [113.0, 0.8265308, 0.1734692], [113.5, 0.82536814, \n 0.17463186], [114.0, 0.82604248, 0.17395752], [114.5, 0.82854491, \n 0.17145509], [115.0, 0.8319993, 0.1680007], [115.5, 0.83538285, \n 0.16461715], [116.0, 0.83816342, 0.16183658], [116.5, 0.84022613, \n 0.15977387], [117.0, 0.84147193, 0.15852807], [117.5, 0.84185527, \n 0.15814473], [118.0, 0.84185298, 0.15814702], [118.5, 0.84241659, \n 0.15758341], [119.0, 0.8441557, 0.1558443], [119.5, 0.84685108, \n 0.15314892], [120.0, 0.84990233, 0.15009767]])\n', (84, 8320), True, 'import numpy as np\n')]
|