code (stringlengths 31–1.05M) | apis (list) | extract_api (stringlengths 97–1.91M) |
---|---|---|
# -*- encoding: utf-8
"""
Copyright (c) 2014, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Stanford University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY <NAME> ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .gop import *
import numpy as np
from .util import *
LATEX_OUTPUT=True
for bnd in ['st','sf','mssf','ds']:
# Load the dataset
over_segs,segmentations,boxes = loadVOCAndOverSeg( "test", detector=bnd, year="2012" )
has_box = [len(b)>0 for b in boxes]
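# Stack each image's ground-truth boxes into a single int32 array; images without boxes get an empty (0, 4) placeholder.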
boxes = [np.vstack(b).astype(np.int32) if len(b)>0 else np.zeros((0,4),dtype=np.int32) for b in boxes]
# Generate the proposals
s = []
s.append( (50,5,0.7) ) # ~250 props
s.append( (100,5,0.75) ) # ~450 props
s.append( (180,5,0.8) ) # ~650 props
s.append( (200,7,0.85) ) # ~1100 props
s.append( (250,10,0.9) ) # ~2200 props
s.append( (290,20,0.9) ) # ~4400 props
for N_S,N_T,iou in s:
prop_settings = setupBaseline( N_S, N_T, iou )
bo,b_bo,pool_s,box_pool_s = dataset.proposeAndEvaluate( over_segs, segmentations, boxes, proposals.Proposal( prop_settings ) )
if LATEX_OUTPUT:
print(( "Baseline %s ($%d$,$%d$) & %d & %0.3f & %0.3f & %0.3f & %0.3f & \\\\"%(bnd, N_S,N_T,np.mean(pool_s),np.mean(bo[:,0]),np.sum(bo[:,0]*bo[:,1])/np.sum(bo[:,1]), np.mean(bo[:,0]>=0.5), np.mean(bo[:,0]>=0.7) ) ))
else:
print(( "ABO ", np.mean(bo[:,0]) ))
print(( "cover ", np.sum(bo[:,0]*bo[:,1])/np.sum(bo[:,1]) ))
print(( "recall ", np.mean(bo[:,0]>=0.5), "\t", np.mean(bo[:,0]>=0.6), "\t", np.mean(bo[:,0]>=0.7), "\t", np.mean(bo[:,0]>=0.8), "\t", np.mean(bo[:,0]>=0.9), "\t", np.mean(bo[:,0]>=1) ))
print(( "# props ", np.mean(pool_s) ))
print(( "box ABO ", np.mean(b_bo) ))
print(( "box recall ", np.mean(b_bo>=0.5), "\t", np.mean(b_bo>=0.6), "\t", np.mean(b_bo>=0.7), "\t", np.mean(b_bo>=0.8), "\t", np.mean(b_bo>=0.9), "\t", np.mean(b_bo>=1) ))
print(( "# box ", np.mean(box_pool_s[~np.isnan(box_pool_s)]) ))
|
[
"numpy.mean",
"numpy.sum",
"numpy.zeros",
"numpy.isnan",
"numpy.vstack"
] |
[((1876, 1908), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.int32'}), '((0, 4), dtype=np.int32)\n', (1884, 1908), True, 'import numpy as np\n'), ((1829, 1841), 'numpy.vstack', 'np.vstack', (['b'], {}), '(b)\n', (1838, 1841), True, 'import numpy as np\n'), ((2666, 2683), 'numpy.mean', 'np.mean', (['bo[:, 0]'], {}), '(bo[:, 0])\n', (2673, 2683), True, 'import numpy as np\n'), ((2781, 2805), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.5)'], {}), '(bo[:, 0] >= 0.5)\n', (2788, 2805), True, 'import numpy as np\n'), ((2810, 2834), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.6)'], {}), '(bo[:, 0] >= 0.6)\n', (2817, 2834), True, 'import numpy as np\n'), ((2839, 2863), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.7)'], {}), '(bo[:, 0] >= 0.7)\n', (2846, 2863), True, 'import numpy as np\n'), ((2868, 2892), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.8)'], {}), '(bo[:, 0] >= 0.8)\n', (2875, 2892), True, 'import numpy as np\n'), ((2897, 2921), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.9)'], {}), '(bo[:, 0] >= 0.9)\n', (2904, 2921), True, 'import numpy as np\n'), ((2926, 2948), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 1)'], {}), '(bo[:, 0] >= 1)\n', (2933, 2948), True, 'import numpy as np\n'), ((2975, 2990), 'numpy.mean', 'np.mean', (['pool_s'], {}), '(pool_s)\n', (2982, 2990), True, 'import numpy as np\n'), ((3021, 3034), 'numpy.mean', 'np.mean', (['b_bo'], {}), '(b_bo)\n', (3028, 3034), True, 'import numpy as np\n'), ((3064, 3084), 'numpy.mean', 'np.mean', (['(b_bo >= 0.5)'], {}), '(b_bo >= 0.5)\n', (3071, 3084), True, 'import numpy as np\n'), ((3090, 3110), 'numpy.mean', 'np.mean', (['(b_bo >= 0.6)'], {}), '(b_bo >= 0.6)\n', (3097, 3110), True, 'import numpy as np\n'), ((3116, 3136), 'numpy.mean', 'np.mean', (['(b_bo >= 0.7)'], {}), '(b_bo >= 0.7)\n', (3123, 3136), True, 'import numpy as np\n'), ((3142, 3162), 'numpy.mean', 'np.mean', (['(b_bo >= 0.8)'], {}), '(b_bo >= 0.8)\n', (3149, 3162), True, 'import numpy as np\n'), ((3168, 3188), 'numpy.mean', 'np.mean', (['(b_bo >= 0.9)'], {}), '(b_bo >= 0.9)\n', (3175, 3188), True, 'import numpy as np\n'), ((3194, 3212), 'numpy.mean', 'np.mean', (['(b_bo >= 1)'], {}), '(b_bo >= 1)\n', (3201, 3212), True, 'import numpy as np\n'), ((2508, 2523), 'numpy.mean', 'np.mean', (['pool_s'], {}), '(pool_s)\n', (2515, 2523), True, 'import numpy as np\n'), ((2524, 2541), 'numpy.mean', 'np.mean', (['bo[:, 0]'], {}), '(bo[:, 0])\n', (2531, 2541), True, 'import numpy as np\n'), ((2582, 2606), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.5)'], {}), '(bo[:, 0] >= 0.5)\n', (2589, 2606), True, 'import numpy as np\n'), ((2605, 2629), 'numpy.mean', 'np.mean', (['(bo[:, 0] >= 0.7)'], {}), '(bo[:, 0] >= 0.7)\n', (2612, 2629), True, 'import numpy as np\n'), ((2712, 2739), 'numpy.sum', 'np.sum', (['(bo[:, 0] * bo[:, 1])'], {}), '(bo[:, 0] * bo[:, 1])\n', (2718, 2739), True, 'import numpy as np\n'), ((2736, 2752), 'numpy.sum', 'np.sum', (['bo[:, 1]'], {}), '(bo[:, 1])\n', (2742, 2752), True, 'import numpy as np\n'), ((2541, 2568), 'numpy.sum', 'np.sum', (['(bo[:, 0] * bo[:, 1])'], {}), '(bo[:, 0] * bo[:, 1])\n', (2547, 2568), True, 'import numpy as np\n'), ((2565, 2581), 'numpy.sum', 'np.sum', (['bo[:, 1]'], {}), '(bo[:, 1])\n', (2571, 2581), True, 'import numpy as np\n'), ((3260, 3280), 'numpy.isnan', 'np.isnan', (['box_pool_s'], {}), '(box_pool_s)\n', (3268, 3280), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture
from sklearn.cluster import KMeans
from matplotlib.patches import Ellipse
# For reproducibility
np.random.seed(1000)
nb_samples = 300
nb_centers = 2
if __name__ == '__main__':
# Create the dataset
X, Y = make_blobs(n_samples=nb_samples, n_features=2, center_box=[-1, 1], centers=nb_centers,
cluster_std=[1.0, 0.6], random_state=1000)
# Show the dataset
sns.set()
fig, ax = plt.subplots(figsize=(15, 9))
ax.scatter(X[:, 0], X[:, 1], s=120)
ax.set_xlabel(r'$x_0$', fontsize=14)
ax.set_ylabel(r'$x_1$', fontsize=14)
plt.show()
# Train the model
gm = GaussianMixture(n_components=2, random_state=1000)
gm.fit(X)
Y_pred = gm.fit_predict(X)
print('Means: \n{}'.format(gm.means_))
print('Covariance matrices: \n{}'.format(gm.covariances_))
print('Weights: \n{}'.format(gm.weights_))
m1 = gm.means_[0]
m2 = gm.means_[1]
c1 = gm.covariances_[0]
c2 = gm.covariances_[1]
we1 = 1 + gm.weights_[0]
we2 = 1 + gm.weights_[1]
# Eigendecompose the covariances
w1, v1 = np.linalg.eigh(c1)
w2, v2 = np.linalg.eigh(c2)
nv1 = v1 / np.linalg.norm(v1)
nv2 = v2 / np.linalg.norm(v2)
print('Eigenvalues 1: \n{}'.format(w1))
print('Eigenvectors 1: \n{}'.format(nv1))
print('Eigenvalues 2: \n{}'.format(w2))
print('Eigenvectors 2: \n{}'.format(nv2))
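# Angle (in degrees) between each covariance's principal eigenvector (largest eigenvalue) and the x-axis, used to orient the ellipses drawn below.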
a1 = np.arccos(np.dot(nv1[:, 1], [1.0, 0.0]) / np.linalg.norm(nv1[:, 1])) * 180.0 / np.pi
a2 = np.arccos(np.dot(nv2[:, 1], [1.0, 0.0]) / np.linalg.norm(nv2[:, 1])) * 180.0 / np.pi
# Perform K-Means clustering
km = KMeans(n_clusters=2, random_state=1000)
km.fit(X)
Y_pred_km = km.predict(X)
# Show the comparison of the results
fig, ax = plt.subplots(1, 2, figsize=(22, 9), sharey=True)
ax[0].scatter(X[Y_pred == 0, 0], X[Y_pred == 0, 1], s=80, marker='o', label='Gaussian 1')
ax[0].scatter(X[Y_pred == 1, 0], X[Y_pred == 1, 1], s=80, marker='d', label='Gaussian 2')
g1 = Ellipse(xy=m1, width=w1[1] * 3, height=w1[0] * 3, fill=False, linestyle='dashed', angle=a1, color='black',
linewidth=1)
g1_1 = Ellipse(xy=m1, width=w1[1] * 2, height=w1[0] * 2, fill=False, linestyle='dashed', angle=a1, color='black',
linewidth=2)
g1_2 = Ellipse(xy=m1, width=w1[1] * 1.4, height=w1[0] * 1.4, fill=False, linestyle='dashed', angle=a1,
color='black', linewidth=3)
g2 = Ellipse(xy=m2, width=w2[1] * 3, height=w2[0] * 3, fill=False, linestyle='dashed', angle=a2, color='black',
linewidth=1)
g2_1 = Ellipse(xy=m2, width=w2[1] * 2, height=w2[0] * 2, fill=False, linestyle='dashed', angle=a2, color='black',
linewidth=2)
g2_2 = Ellipse(xy=m2, width=w2[1] * 1.4, height=w2[0] * 1.4, fill=False, linestyle='dashed', angle=a2,
color='black', linewidth=3)
ax[0].set_xlabel(r'$x_0$', fontsize=16)
ax[0].set_ylabel(r'$x_1$', fontsize=16)
ax[0].add_artist(g1)
ax[0].add_artist(g1_1)
ax[0].add_artist(g1_2)
ax[0].add_artist(g2)
ax[0].add_artist(g2_1)
ax[0].add_artist(g2_2)
ax[0].set_title('Gaussian Mixture', fontsize=16)
ax[0].legend(fontsize=16)
ax[1].scatter(X[Y_pred_km == 0, 0], X[Y_pred_km == 0, 1], s=80, marker='o', label='Cluster 1')
ax[1].scatter(X[Y_pred_km == 1, 0], X[Y_pred_km == 1, 1], s=80, marker='d', label='Cluster 2')
ax[1].set_xlabel(r'$x_0$', fontsize=16)
ax[1].set_title('K-Means', fontsize=16)
ax[1].legend(fontsize=16)
# Predict the probability of some sample points
print('P([0, -2]=G1) = {:.3f} and P([0, -2]=G2) = {:.3f}'.format(*list(gm.predict_proba([[0.0, -2.0]]).squeeze())))
print('P([1, -1]=G1) = {:.3f} and P([1, -1]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, -1.0]]).squeeze())))
print('P([1, 0]=G1) = {:.3f} and P([1, 0]=G2) = {:.3f}'.format(*list(gm.predict_proba([[1.0, 0.0]]).squeeze())))
plt.show()
# Compute AICs, BICs, and log-likelihood
n_max_components = 20
aics = []
bics = []
log_likelihoods = []
for n in range(1, n_max_components + 1):
gm = GaussianMixture(n_components=n, random_state=1000)
gm.fit(X)
aics.append(gm.aic(X))
bics.append(gm.bic(X))
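# gm.score(X) returns the average per-sample log-likelihood, so multiplying by nb_samples gives the total log-likelihood of the dataset.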
log_likelihoods.append(gm.score(X) * nb_samples)
# Show the results
fig, ax = plt.subplots(1, 3, figsize=(20, 6))
ax[0].plot(range(1, n_max_components + 1), aics)
ax[0].set_xticks(range(1, n_max_components + 1))
ax[0].set_xlabel('Number of Gaussians', fontsize=14)
ax[0].set_title('AIC', fontsize=14)
ax[1].plot(range(1, n_max_components + 1), bics)
ax[1].set_xticks(range(1, n_max_components + 1))
ax[1].set_xlabel('Number of Gaussians', fontsize=14)
ax[1].set_title('BIC', fontsize=14)
ax[2].plot(range(1, n_max_components + 1), log_likelihoods)
ax[2].set_xticks(range(1, n_max_components + 1))
ax[2].set_xlabel('Number of Gaussians', fontsize=14)
ax[2].set_title('Log-likelihood', fontsize=14)
plt.show()
|
[
"sklearn.cluster.KMeans",
"seaborn.set",
"sklearn.mixture.GaussianMixture",
"sklearn.datasets.make_blobs",
"numpy.dot",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.linalg.eigh",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((256, 276), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (270, 276), True, 'import numpy as np\n'), ((376, 510), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': 'nb_samples', 'n_features': '(2)', 'center_box': '[-1, 1]', 'centers': 'nb_centers', 'cluster_std': '[1.0, 0.6]', 'random_state': '(1000)'}), '(n_samples=nb_samples, n_features=2, center_box=[-1, 1], centers=\n nb_centers, cluster_std=[1.0, 0.6], random_state=1000)\n', (386, 510), False, 'from sklearn.datasets import make_blobs\n'), ((556, 565), 'seaborn.set', 'sns.set', ([], {}), '()\n', (563, 565), True, 'import seaborn as sns\n'), ((581, 610), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 9)'}), '(figsize=(15, 9))\n', (593, 610), True, 'import matplotlib.pyplot as plt\n'), ((740, 750), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (748, 750), True, 'import matplotlib.pyplot as plt\n'), ((783, 833), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': '(2)', 'random_state': '(1000)'}), '(n_components=2, random_state=1000)\n', (798, 833), False, 'from sklearn.mixture import GaussianMixture\n'), ((1245, 1263), 'numpy.linalg.eigh', 'np.linalg.eigh', (['c1'], {}), '(c1)\n', (1259, 1263), True, 'import numpy as np\n'), ((1277, 1295), 'numpy.linalg.eigh', 'np.linalg.eigh', (['c2'], {}), '(c2)\n', (1291, 1295), True, 'import numpy as np\n'), ((1779, 1818), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(2)', 'random_state': '(1000)'}), '(n_clusters=2, random_state=1000)\n', (1785, 1818), False, 'from sklearn.cluster import KMeans\n'), ((1919, 1967), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(22, 9)', 'sharey': '(True)'}), '(1, 2, figsize=(22, 9), sharey=True)\n', (1931, 1967), True, 'import matplotlib.pyplot as plt\n'), ((2167, 2291), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm1', 'width': '(w1[1] * 3)', 'height': '(w1[0] * 3)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a1', 'color': '"""black"""', 'linewidth': '(1)'}), "(xy=m1, width=w1[1] * 3, height=w1[0] * 3, fill=False, linestyle=\n 'dashed', angle=a1, color='black', linewidth=1)\n", (2174, 2291), False, 'from matplotlib.patches import Ellipse\n'), ((2315, 2439), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm1', 'width': '(w1[1] * 2)', 'height': '(w1[0] * 2)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a1', 'color': '"""black"""', 'linewidth': '(2)'}), "(xy=m1, width=w1[1] * 2, height=w1[0] * 2, fill=False, linestyle=\n 'dashed', angle=a1, color='black', linewidth=2)\n", (2322, 2439), False, 'from matplotlib.patches import Ellipse\n'), ((2465, 2593), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm1', 'width': '(w1[1] * 1.4)', 'height': '(w1[0] * 1.4)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a1', 'color': '"""black"""', 'linewidth': '(3)'}), "(xy=m1, width=w1[1] * 1.4, height=w1[0] * 1.4, fill=False, linestyle\n ='dashed', angle=a1, color='black', linewidth=3)\n", (2472, 2593), False, 'from matplotlib.patches import Ellipse\n'), ((2618, 2742), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm2', 'width': '(w2[1] * 3)', 'height': '(w2[0] * 3)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a2', 'color': '"""black"""', 'linewidth': '(1)'}), "(xy=m2, width=w2[1] * 3, height=w2[0] * 3, fill=False, linestyle=\n 'dashed', angle=a2, color='black', linewidth=1)\n", (2625, 2742), False, 'from matplotlib.patches import Ellipse\n'), ((2766, 2890), 
'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm2', 'width': '(w2[1] * 2)', 'height': '(w2[0] * 2)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a2', 'color': '"""black"""', 'linewidth': '(2)'}), "(xy=m2, width=w2[1] * 2, height=w2[0] * 2, fill=False, linestyle=\n 'dashed', angle=a2, color='black', linewidth=2)\n", (2773, 2890), False, 'from matplotlib.patches import Ellipse\n'), ((2916, 3044), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'm2', 'width': '(w2[1] * 1.4)', 'height': '(w2[0] * 1.4)', 'fill': '(False)', 'linestyle': '"""dashed"""', 'angle': 'a2', 'color': '"""black"""', 'linewidth': '(3)'}), "(xy=m2, width=w2[1] * 1.4, height=w2[0] * 1.4, fill=False, linestyle\n ='dashed', angle=a2, color='black', linewidth=3)\n", (2923, 3044), False, 'from matplotlib.patches import Ellipse\n'), ((4127, 4137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4135, 4137), True, 'import matplotlib.pyplot as plt\n'), ((4549, 4584), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 6)'}), '(1, 3, figsize=(20, 6))\n', (4561, 4584), True, 'import matplotlib.pyplot as plt\n'), ((5227, 5237), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5235, 5237), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1330), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (1326, 1330), True, 'import numpy as np\n'), ((1346, 1364), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (1360, 1364), True, 'import numpy as np\n'), ((4323, 4373), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n', 'random_state': '(1000)'}), '(n_components=n, random_state=1000)\n', (4338, 4373), False, 'from sklearn.mixture import GaussianMixture\n'), ((1567, 1596), 'numpy.dot', 'np.dot', (['nv1[:, 1]', '[1.0, 0.0]'], {}), '(nv1[:, 1], [1.0, 0.0])\n', (1573, 1596), True, 'import numpy as np\n'), ((1599, 1624), 'numpy.linalg.norm', 'np.linalg.norm', (['nv1[:, 1]'], {}), '(nv1[:, 1])\n', (1613, 1624), True, 'import numpy as np\n'), ((1661, 1690), 'numpy.dot', 'np.dot', (['nv2[:, 1]', '[1.0, 0.0]'], {}), '(nv2[:, 1], [1.0, 0.0])\n', (1667, 1690), True, 'import numpy as np\n'), ((1693, 1718), 'numpy.linalg.norm', 'np.linalg.norm', (['nv2[:, 1]'], {}), '(nv2[:, 1])\n', (1707, 1718), True, 'import numpy as np\n')]
|
"""
The fit1d package provides an organized toolbox for the different types of
1D fits that can be performed.
New fit types and other functionality are easy to add.
"""
from abc import ABC, abstractmethod
import numpy as np
from typing import List,Tuple
from fit1d.common.model import Model, ModelMock
from fit1d.common.outlier import OutLier
from fit1d.common.fit_data import FitData
class Fit1D(ABC):
"""
This is the main class of the fit1d package. It lets the user execute the
fit and eval methods, in addition to the calc_rms and calc_error services.
Its attributes are the _outlier object, the _use_remove_outliers boolean and
the _fit_data object (which holds the fitted model).
"""
_outlier: OutLier
_use_remove_outliers: bool
_fit_data: FitData
# interface methods
def fit(self, x: np.ndarray, y: np.ndarray) -> FitData:
self._fit_data.x = x
self._fit_data.y = y
if self._use_remove_outliers:
self._remove_outlier()
else:
self._calc_fit_and_update_fit_data()
return self._fit_data
def eval(self, x: np.ndarray = None, model: Model = None) -> np.ndarray:
if x is not None:
self._fit_data.x = x
if model is not None:
self._fit_data.model = model
self._calc_eval()
return self._fit_data.y_fit
def calc_error(self):
"""
Calculate the error vector (y - y_fit) and update _fit_data.
:return: None
"""
if self._fit_data.y is not None and self._fit_data.y_fit is not None:
self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit
def calc_rms(self):
if self._fit_data.error_vector is not None:
self._fit_data.rms = (sum(self._fit_data.error_vector ** 2) / len(self._fit_data.error_vector)) ** 0.5
def get_fit_data(self) -> FitData:
return self._fit_data
# abstract methods
@abstractmethod
def _calc_fit(self):
"""
abstractmethod:
run the fit calculation on the data and update the model in _fit_data.model
:return: None
"""
pass
@abstractmethod
def _calc_eval(self):
"""
abstractmethod:
the subclass evaluates the model for the stored x and model and
updates _fit_data.y_fit
:return: None
"""
pass
# internal methods
def _update_fit_data(self):
self._calc_eval()
self.calc_error()
self.calc_rms()
def _remove_outlier(self):
while True:
self._calc_fit_and_update_fit_data()
indexes_to_remove = self._outlier.find_outliers(self._fit_data.error_vector)
if len(indexes_to_remove) == 0:
break
else:
self._remove_indexes(indexes_to_remove)
def _remove_indexes(self, ind):
self._fit_data.x = np.delete(self._fit_data.x, ind)
self._fit_data.y = np.delete(self._fit_data.y, ind)
def _calc_fit_and_update_fit_data(self):
self._calc_fit()
self._update_fit_data()
class Fit1DMock(Fit1D):
""" Mock class. Used only for tests """
def __init__(self, outlier: OutLier, remove_outliers: bool):
self._fit_data = FitData()
self._outlier = outlier
self._use_remove_outliers = remove_outliers
def _calc_fit(self):
self._fit_data.model = ModelMock({"param1": 5.5})
def _calc_eval(self) -> np.ndarray:
if self._fit_data.y is None or len(self._fit_data.y) == 4:
self._fit_data.y_fit = np.array([11, 22, 33, 44])
else:
self._fit_data.y_fit = np.array([11, 33, 44])
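# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of the fit/eval flow described in the Fit1D docstring, using
# the Fit1DMock test double defined above. Outlier removal is disabled here, so
# the outlier argument is never consulted and passing None is assumed to be safe.
if __name__ == "__main__":
    x = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([10.0, 20.0, 30.0, 40.0])
    mock_fitter = Fit1DMock(outlier=None, remove_outliers=False)
    fit_data = mock_fitter.fit(x, y)   # runs _calc_fit, then eval, error and rms
    print(fit_data.model)              # ModelMock({'param1': 5.5})
    print(fit_data.error_vector)       # y - [11, 22, 33, 44]
    print(fit_data.rms)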
|
[
"numpy.delete",
"numpy.array",
"fit1d.common.fit_data.FitData",
"fit1d.common.model.ModelMock"
] |
[((2867, 2899), 'numpy.delete', 'np.delete', (['self._fit_data.x', 'ind'], {}), '(self._fit_data.x, ind)\n', (2876, 2899), True, 'import numpy as np\n'), ((2927, 2959), 'numpy.delete', 'np.delete', (['self._fit_data.y', 'ind'], {}), '(self._fit_data.y, ind)\n', (2936, 2959), True, 'import numpy as np\n'), ((3223, 3232), 'fit1d.common.fit_data.FitData', 'FitData', ([], {}), '()\n', (3230, 3232), False, 'from fit1d.common.fit_data import FitData\n'), ((3374, 3400), 'fit1d.common.model.ModelMock', 'ModelMock', (["{'param1': 5.5}"], {}), "({'param1': 5.5})\n", (3383, 3400), False, 'from fit1d.common.model import Model, ModelMock\n'), ((3544, 3570), 'numpy.array', 'np.array', (['[11, 22, 33, 44]'], {}), '([11, 22, 33, 44])\n', (3552, 3570), True, 'import numpy as np\n'), ((3620, 3642), 'numpy.array', 'np.array', (['[11, 33, 44]'], {}), '([11, 33, 44])\n', (3628, 3642), True, 'import numpy as np\n')]
|
# Minimize f(x) = (1/2) x'Qx - q'x with gradient descent, with and without Anderson acceleration
from __future__ import print_function
import numpy as np
import aa
dim = 1000
mems = [5, 10, 20, 50, 100]
N = int(1e4)
np.random.seed(1234)
Q = np.random.randn(dim,dim)
Q = Q.T.dot(Q)
q = np.random.randn(dim)
x_0 = np.random.randn(dim)
x_star = np.linalg.solve(Q, q)
step = 0.0005
def f(x):
return 0.5 * x.T @ Q @ x - q.T @ x
f_star = f(x_star)
print('f^* = ', f_star)
print('No acceleration')
x = x_0.copy()
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
for mem in mems:
print('Type-I acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, True, eta=1e-8)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
print('Type-II acceleration, mem:', mem)
x = x_0.copy()
aa_wrk = aa.AndersonAccelerator(dim, mem, False, eta=1e-10)
for i in range(N):
x_prev = np.copy(x)
x -= step * (Q.dot(x) - q)
aa_wrk.apply(x, x_prev)
if i % 1000 == 0:
print('i: ', i,' f - f^*: ', f(x) - f_star)
|
[
"numpy.copy",
"numpy.linalg.solve",
"aa.AndersonAccelerator",
"numpy.random.seed",
"numpy.random.randn"
] |
[((146, 166), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (160, 166), True, 'import numpy as np\n'), ((172, 197), 'numpy.random.randn', 'np.random.randn', (['dim', 'dim'], {}), '(dim, dim)\n', (187, 197), True, 'import numpy as np\n'), ((216, 236), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (231, 236), True, 'import numpy as np\n'), ((243, 263), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (258, 263), True, 'import numpy as np\n'), ((273, 294), 'numpy.linalg.solve', 'np.linalg.solve', (['Q', 'q'], {}), '(Q, q)\n', (288, 294), True, 'import numpy as np\n'), ((475, 485), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (482, 485), True, 'import numpy as np\n'), ((679, 728), 'aa.AndersonAccelerator', 'aa.AndersonAccelerator', (['dim', 'mem', '(True)'], {'eta': '(1e-08)'}), '(dim, mem, True, eta=1e-08)\n', (701, 728), False, 'import aa\n'), ((988, 1038), 'aa.AndersonAccelerator', 'aa.AndersonAccelerator', (['dim', 'mem', '(False)'], {'eta': '(1e-10)'}), '(dim, mem, False, eta=1e-10)\n', (1010, 1038), False, 'import aa\n'), ((764, 774), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (771, 774), True, 'import numpy as np\n'), ((1075, 1085), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (1082, 1085), True, 'import numpy as np\n')]
|
from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader
from paper_1.utils import read_parameter_file, create_experiment_directory
from paper_1.evaluation.eval_utils import init_metrics_object
from paper_1.baseline.main import train as baseline_train
from paper_1.model.model_utils import initialize_model
from torch.utils.tensorboard import SummaryWriter
from train import select_splitted_pseudo_labels
from os.path import dirname, abspath
from torch.optim import Adam
import pandas as pd
import numpy as np
import random
import torch
import os
def main(main_params: dict, data_params: dict, metric_params: dict, model_params: dict,
parent_dir, source_domain: str, target_domain: str):
# clear the cuda memory
torch.cuda.empty_cache()
# get the current validation fold
val_fold = data_params['data']['val_fold']
# read the train params
num_train_iter = main_params['num_train_iter']
experiment_id = main_params['experiment_id']
num_epochs = main_params['num_epochs']
quantiles = main_params['quantiles']
model_dir = main_params['model_dir']
base_dir = main_params['base_dir']
# get the data loader parameters
balance_keys = data_params['data_loader']['balance_keys']
batch_size = data_params['data_loader']['batch_size']
# load the data
data_train_src, data_train_tar = load_train_data(data_params, source_domain, target_domain)
data_list_val = load_val_data(data_params)
num_val_iter_list = [df.shape[0] for df in data_list_val]
validation_domains = data_params['data']['validation']['validation_domains']
val_loader_list = [sequential_data_loader(data_frame) for data_frame in data_list_val]
# path to the pre-trained model
model_path = model_dir + source_domain + '/' + 'None' + '/' + str(val_fold) + '/f1_best.pt'
# load a previously stored model, which is the init point for curriculum labeling
pretrained_model = torch.load(model_path)
mapping = metric_params['inverse_class_mapping']
# initialize the metrics object
metric_object = init_metrics_object(metric_params)
# create a directory for the current experiments
file_names_params = os.listdir(parent_dir + '/parameters/')
file_names_params = [parent_dir + '/parameters/' + x for x in file_names_params]
file_names_baseline = os.listdir(parent_dir + '/baseline/')
file_names_baseline = [parent_dir + '/baseline/' + x for x in file_names_baseline]
file_names = []
file_names.extend(file_names_params)
file_names.extend(file_names_baseline)
file_names = [x for x in file_names if not os.path.isdir(x)]
val_fold = data_params['data']['val_fold']
exp_base_dir = create_experiment_directory(base_dir, source_domain, target_domain, val_fold, file_names, experiment_id)
for quantile in quantiles:
exp_dir = exp_base_dir + str(quantile) + '/'
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
# create a tensorboard writer
writer = SummaryWriter(exp_dir)
# create data loader with current pseudo labels
data_frame_pseudo = select_splitted_pseudo_labels(pretrained_model, data_train_tar, quantile, mapping)
# delete the previously trained model, as it is no longer in use
del pretrained_model
# create the train data loader
data_train = pd.concat([data_train_src, data_frame_pseudo])
train_loader = random_data_loader(data_train, balance_keys, batch_size)
# initialize a new model to train it from scratch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = initialize_model(model_params, parent_dir, device)
model.cuda()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
# create an optimizer for the model
optimizer = Adam(model.parameters(), lr=4e-5, betas=(0.9, 0.999))
# train the newly created model from scratch
baseline_train(model, optimizer, metric_object, num_train_iter, metric_params, train_loader, val_loader_list,
source_domain, writer, num_val_iter_list, validation_domains, num_epochs, exp_dir)
# update the pretrained model
pretrained_model = model
del model
del optimizer
if __name__ == '__main__':
# set the seed for reproducibility
seed_value = 0
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
# get the current and parent directory
current_file = abspath(__file__)
current_dir = dirname(current_file)
parent_dir = dirname(current_dir)
metric_param_file = parent_dir + '/parameters/metric_params.yaml'
model_param_file = parent_dir + '/parameters/model_params.yaml'
data_param_file = parent_dir + '/parameters/data_params.yaml'
main_param_file = current_dir + '/main_params.yaml'
# load the parameters
metric_params = read_parameter_file(metric_param_file)
model_params = read_parameter_file(model_param_file)
main_params = read_parameter_file(main_param_file)
data_params = read_parameter_file(data_param_file)
# define the domains, on which the models should be trained
source_domains = ['Race', 'Religion', 'Sexual Orientation']
target_domains = ['Race', 'Religion', 'Sexual Orientation']
for source_domain in source_domains:
for target_domain in target_domains:
if source_domain != target_domain:
main(main_params, data_params, metric_params, model_params, parent_dir, source_domain, target_domain)
|
[
"paper_1.data.data_loader.sequential_data_loader",
"torch.cuda.is_available",
"paper_1.data.data_loader.load_val_data",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"os.listdir",
"paper_1.baseline.main.train",
"paper_1.utils.read_parameter_file",
"os.path.isdir",
"pandas.concat",
"numpy.random.seed",
"paper_1.utils.create_experiment_directory",
"paper_1.model.model_utils.initialize_model",
"os.path.dirname",
"paper_1.data.data_loader.random_data_loader",
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.makedirs",
"torch.load",
"paper_1.evaluation.eval_utils.init_metrics_object",
"random.seed",
"os.path.abspath",
"paper_1.data.data_loader.load_train_data",
"train.select_splitted_pseudo_labels"
] |
[((787, 811), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (809, 811), False, 'import torch\n'), ((1407, 1465), 'paper_1.data.data_loader.load_train_data', 'load_train_data', (['data_params', 'source_domain', 'target_domain'], {}), '(data_params, source_domain, target_domain)\n', (1422, 1465), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((1486, 1512), 'paper_1.data.data_loader.load_val_data', 'load_val_data', (['data_params'], {}), '(data_params)\n', (1499, 1512), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((1985, 2007), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (1995, 2007), False, 'import torch\n'), ((2118, 2152), 'paper_1.evaluation.eval_utils.init_metrics_object', 'init_metrics_object', (['metric_params'], {}), '(metric_params)\n', (2137, 2152), False, 'from paper_1.evaluation.eval_utils import init_metrics_object\n'), ((2231, 2270), 'os.listdir', 'os.listdir', (["(parent_dir + '/parameters/')"], {}), "(parent_dir + '/parameters/')\n", (2241, 2270), False, 'import os\n'), ((2382, 2419), 'os.listdir', 'os.listdir', (["(parent_dir + '/baseline/')"], {}), "(parent_dir + '/baseline/')\n", (2392, 2419), False, 'import os\n'), ((2744, 2852), 'paper_1.utils.create_experiment_directory', 'create_experiment_directory', (['base_dir', 'source_domain', 'target_domain', 'val_fold', 'file_names', 'experiment_id'], {}), '(base_dir, source_domain, target_domain,\n val_fold, file_names, experiment_id)\n', (2771, 2852), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((4440, 4463), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (4451, 4463), False, 'import random\n'), ((4468, 4494), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (4482, 4494), True, 'import numpy as np\n'), ((4499, 4528), 'torch.manual_seed', 'torch.manual_seed', (['seed_value'], {}), '(seed_value)\n', (4516, 4528), False, 'import torch\n'), ((4533, 4571), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_value'], {}), '(seed_value)\n', (4559, 4571), False, 'import torch\n'), ((4635, 4652), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (4642, 4652), False, 'from os.path import dirname, abspath\n'), ((4671, 4692), 'os.path.dirname', 'dirname', (['current_file'], {}), '(current_file)\n', (4678, 4692), False, 'from os.path import dirname, abspath\n'), ((4710, 4730), 'os.path.dirname', 'dirname', (['current_dir'], {}), '(current_dir)\n', (4717, 4730), False, 'from os.path import dirname, abspath\n'), ((5038, 5076), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['metric_param_file'], {}), '(metric_param_file)\n', (5057, 5076), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((5096, 5133), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['model_param_file'], {}), '(model_param_file)\n', (5115, 5133), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((5152, 5188), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['main_param_file'], {}), '(main_param_file)\n', (5171, 5188), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((5207, 5243), 'paper_1.utils.read_parameter_file', 'read_parameter_file', (['data_param_file'], {}), '(data_param_file)\n', 
(5226, 5243), False, 'from paper_1.utils import read_parameter_file, create_experiment_directory\n'), ((1679, 1713), 'paper_1.data.data_loader.sequential_data_loader', 'sequential_data_loader', (['data_frame'], {}), '(data_frame)\n', (1701, 1713), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((3064, 3086), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['exp_dir'], {}), '(exp_dir)\n', (3077, 3086), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3172, 3258), 'train.select_splitted_pseudo_labels', 'select_splitted_pseudo_labels', (['pretrained_model', 'data_train_tar', 'quantile', 'mapping'], {}), '(pretrained_model, data_train_tar, quantile,\n mapping)\n', (3201, 3258), False, 'from train import select_splitted_pseudo_labels\n'), ((3419, 3465), 'pandas.concat', 'pd.concat', (['[data_train_src, data_frame_pseudo]'], {}), '([data_train_src, data_frame_pseudo])\n', (3428, 3465), True, 'import pandas as pd\n'), ((3489, 3545), 'paper_1.data.data_loader.random_data_loader', 'random_data_loader', (['data_train', 'balance_keys', 'batch_size'], {}), '(data_train, balance_keys, batch_size)\n', (3507, 3545), False, 'from paper_1.data.data_loader import load_val_data, load_train_data, sequential_data_loader, random_data_loader\n'), ((3685, 3735), 'paper_1.model.model_utils.initialize_model', 'initialize_model', (['model_params', 'parent_dir', 'device'], {}), '(model_params, parent_dir, device)\n', (3701, 3735), False, 'from paper_1.model.model_utils import initialize_model\n'), ((4027, 4227), 'paper_1.baseline.main.train', 'baseline_train', (['model', 'optimizer', 'metric_object', 'num_train_iter', 'metric_params', 'train_loader', 'val_loader_list', 'source_domain', 'writer', 'num_val_iter_list', 'validation_domains', 'num_epochs', 'exp_dir'], {}), '(model, optimizer, metric_object, num_train_iter,\n metric_params, train_loader, val_loader_list, source_domain, writer,\n num_val_iter_list, validation_domains, num_epochs, exp_dir)\n', (4041, 4227), True, 'from paper_1.baseline.main import train as baseline_train\n'), ((2950, 2973), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', (2964, 2973), False, 'import os\n'), ((2987, 3007), 'os.makedirs', 'os.makedirs', (['exp_dir'], {}), '(exp_dir)\n', (2998, 3007), False, 'import os\n'), ((3632, 3657), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3655, 3657), False, 'import torch\n'), ((3784, 3809), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3807, 3809), False, 'import torch\n'), ((2659, 2675), 'os.path.isdir', 'os.path.isdir', (['x'], {}), '(x)\n', (2672, 2675), False, 'import os\n')]
|
import tensorflow as tf
import pandas as pd
import numpy as np
import sys
import time
from cflow import ConditionalFlow
from MoINN.modules.subnetworks import DenseSubNet
from utils import train_density_estimation, plot_loss, plot_tau_ratio
# import data
tau1_gen = np.reshape(np.load("../data/tau1s_Pythia_gen.npy"), (-1,1))
tau2_gen = np.reshape(np.load("../data/tau2s_Pythia_gen.npy"), (-1,1))
tau1_sim = np.reshape(np.load("../data/tau1s_Pythia_sim.npy"), (-1,1))
tau2_sim = np.reshape(np.load("../data/tau2s_Pythia_sim.npy"), (-1,1))
data_gen = tf.convert_to_tensor(np.concatenate([tau1_gen,tau2_gen], axis=-1), dtype=tf.float32)
data_sim = tf.convert_to_tensor(np.concatenate([tau1_sim,tau2_sim], axis=-1), dtype=tf.float32)
train_gen, test_gen = np.split(data_gen, 2)
train_sim, test_sim = np.split(data_sim, 2)
# Get the flow
meta = {
"units": 16,
"layers": 4,
"initializer": "glorot_uniform",
"activation": "leakyrelu",
}
cflow = ConditionalFlow(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta, subnet_constructor=DenseSubNet)
# train the network
EPOCHS = 50
BATCH_SIZE = 1000
LR = 5e-3
DECAY_RATE=0.1
ITERS = len(train_gen)//BATCH_SIZE
DECAY_STEP=ITERS
#Prepare the tf.dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_gen, train_sim))
train_dataset = train_dataset.shuffle(buffer_size=500000).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(LR, DECAY_STEP, DECAY_RATE)
opt = tf.keras.optimizers.Adam(lr_schedule)
train_losses = []
#train_all = np.concatenate([train_gen, train_sim], axis=-1)
start_time = time.time()
for e in range(EPOCHS):
batch_train_losses = []
# Iterate over the batches of the dataset.
for step, (batch_gen, batch_sim) in enumerate(train_dataset):
batch_loss = train_density_estimation(cflow, opt, batch_gen, [batch_sim])
batch_train_losses.append(batch_loss)
train_loss = tf.reduce_mean(batch_train_losses)
train_losses.append(train_loss)
if (e + 1) % 1 == 0:
# Print metrics
print(
"Epoch #{}: Loss: {}, Learning_Rate: {}".format(
e + 1, train_losses[-1], opt._decayed_lr(tf.float32)
)
)
end_time = time.time()
print("--- Run time: %s hour ---" % ((end_time - start_time)/60/60))
print("--- Run time: %s mins ---" % ((end_time - start_time)/60))
print("--- Run time: %s secs ---" % ((end_time - start_time)))
# Make plots and sample
plot_loss(train_losses, name="Log-likelihood", log_axis=False)
detector = tf.constant(test_sim, dtype=tf.float32)
unfold_gen = cflow.sample(int(5e5),[detector])
plot_tau_ratio(test_gen, unfold_gen, detector, name="tau_ratio")
unfold_gen = {}
for i in range(10):
unfold_gen[i] = cflow.sample(int(5e5),[detector])
unfold_pythia = np.stack([unfold_gen[i] for i in range(10)])
np.save("inn_pythia",unfold_pythia)
|
[
"cflow.ConditionalFlow",
"tensorflow.keras.optimizers.schedules.InverseTimeDecay",
"tensorflow.data.Dataset.from_tensor_slices",
"utils.train_density_estimation",
"tensorflow.keras.optimizers.Adam",
"numpy.split",
"utils.plot_tau_ratio",
"tensorflow.constant",
"numpy.concatenate",
"tensorflow.reduce_mean",
"numpy.load",
"time.time",
"numpy.save",
"utils.plot_loss"
] |
[((758, 779), 'numpy.split', 'np.split', (['data_gen', '(2)'], {}), '(data_gen, 2)\n', (766, 779), True, 'import numpy as np\n'), ((802, 823), 'numpy.split', 'np.split', (['data_sim', '(2)'], {}), '(data_sim, 2)\n', (810, 823), True, 'import numpy as np\n'), ((986, 1095), 'cflow.ConditionalFlow', 'ConditionalFlow', ([], {'dims_in': '[2]', 'dims_c': '[[2]]', 'n_blocks': '(12)', 'subnet_meta': 'meta', 'subnet_constructor': 'DenseSubNet'}), '(dims_in=[2], dims_c=[[2]], n_blocks=12, subnet_meta=meta,\n subnet_constructor=DenseSubNet)\n', (1001, 1095), False, 'from cflow import ConditionalFlow\n'), ((1262, 1320), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_gen, train_sim)'], {}), '((train_gen, train_sim))\n', (1296, 1320), True, 'import tensorflow as tf\n'), ((1439, 1513), 'tensorflow.keras.optimizers.schedules.InverseTimeDecay', 'tf.keras.optimizers.schedules.InverseTimeDecay', (['LR', 'DECAY_STEP', 'DECAY_RATE'], {}), '(LR, DECAY_STEP, DECAY_RATE)\n', (1485, 1513), True, 'import tensorflow as tf\n'), ((1520, 1557), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr_schedule'], {}), '(lr_schedule)\n', (1544, 1557), True, 'import tensorflow as tf\n'), ((1651, 1662), 'time.time', 'time.time', ([], {}), '()\n', (1660, 1662), False, 'import time\n'), ((2280, 2291), 'time.time', 'time.time', ([], {}), '()\n', (2289, 2291), False, 'import time\n'), ((2516, 2578), 'utils.plot_loss', 'plot_loss', (['train_losses'], {'name': '"""Log-likelihood"""', 'log_axis': '(False)'}), "(train_losses, name='Log-likelihood', log_axis=False)\n", (2525, 2578), False, 'from utils import train_density_estimation, plot_loss, plot_tau_ratio\n'), ((2591, 2630), 'tensorflow.constant', 'tf.constant', (['test_sim'], {'dtype': 'tf.float32'}), '(test_sim, dtype=tf.float32)\n', (2602, 2630), True, 'import tensorflow as tf\n'), ((2678, 2742), 'utils.plot_tau_ratio', 'plot_tau_ratio', (['test_gen', 'unfold_gen', 'detector'], {'name': '"""tau_ratio"""'}), "(test_gen, unfold_gen, detector, name='tau_ratio')\n", (2692, 2742), False, 'from utils import train_density_estimation, plot_loss, plot_tau_ratio\n'), ((2896, 2932), 'numpy.save', 'np.save', (['"""inn_pythia"""', 'unfold_pythia'], {}), "('inn_pythia', unfold_pythia)\n", (2903, 2932), True, 'import numpy as np\n'), ((279, 318), 'numpy.load', 'np.load', (['"""../data/tau1s_Pythia_gen.npy"""'], {}), "('../data/tau1s_Pythia_gen.npy')\n", (286, 318), True, 'import numpy as np\n'), ((350, 389), 'numpy.load', 'np.load', (['"""../data/tau2s_Pythia_gen.npy"""'], {}), "('../data/tau2s_Pythia_gen.npy')\n", (357, 389), True, 'import numpy as np\n'), ((422, 461), 'numpy.load', 'np.load', (['"""../data/tau1s_Pythia_sim.npy"""'], {}), "('../data/tau1s_Pythia_sim.npy')\n", (429, 461), True, 'import numpy as np\n'), ((493, 532), 'numpy.load', 'np.load', (['"""../data/tau2s_Pythia_sim.npy"""'], {}), "('../data/tau2s_Pythia_sim.npy')\n", (500, 532), True, 'import numpy as np\n'), ((575, 620), 'numpy.concatenate', 'np.concatenate', (['[tau1_gen, tau2_gen]'], {'axis': '(-1)'}), '([tau1_gen, tau2_gen], axis=-1)\n', (589, 620), True, 'import numpy as np\n'), ((671, 716), 'numpy.concatenate', 'np.concatenate', (['[tau1_sim, tau2_sim]'], {'axis': '(-1)'}), '([tau1_sim, tau2_sim], axis=-1)\n', (685, 716), True, 'import numpy as np\n'), ((1979, 2013), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['batch_train_losses'], {}), '(batch_train_losses)\n', (1993, 2013), True, 'import tensorflow as tf\n'), ((1854, 1914), 
'utils.train_density_estimation', 'train_density_estimation', (['cflow', 'opt', 'batch_gen', '[batch_sim]'], {}), '(cflow, opt, batch_gen, [batch_sim])\n', (1878, 1914), False, 'from utils import train_density_estimation, plot_loss, plot_tau_ratio\n')]
|
from abc import ABC, abstractmethod
import numpy as np
class SwarmAlgorithm(ABC):
'''
A base abstract class for different swarm algorithms.
Parameters
----------
D : int
Search space dimension.
N : int
Population size.
fit_func : callable
Fitness (objective) function or a function returning multiple values
corresponding to different objectives (for multi-objective problems).
params : array_like
Model behavioral parameters.
bounds : ndarray
A 2 by D matrix containing lower and upper bounds of the search space
for each dimension.
seed : int, optional, default=None
Random generator seed.
max_iter : int, optional, default=100
Maximum number of iterations (generations).
stag_iter : int, optional, default=100
Specifies the allowed number of iterations without the solution improving
by at least the given tolerance. If this number is exceeded, the
optimization process is considered stagnated and the algorithm stops.
e : float, optional, default=1e-5
Tolerance.
Attributes
----------
particles : ndarray
An N by D array representing the swarm of N particles.
scores : ndarray
An array of size N representing the value of the fitness function
for each particle.
gbest : ndarray
The D-dimensional vector representing the position of the current
global best particle.
gbest_score : float
The value of the fitness function for the current global best particle.
eval_num : int
The number of fitness function evaluations.
'''
def __init__(self, D, N, fit_func, params, bounds, seed=None, max_iter=100,
stag_iter=100, e=1e-5):
self.D = D
self.N = N
# Initialize problem parameters.
self.fit_func = fit_func
self.l_bounds = bounds[0]
self.u_bounds = bounds[1]
# Behavioural parameters' initialization.
self.set_params(params)
# Seed the NumPy random number generator so that the results of the
# optimization process are reproducible.
self.seed = seed
# Stopping criteria.
self.max_iter = max_iter
self.stag_iter = stag_iter
self.e = e
self.reset()
@abstractmethod
def set_params(self, new_params):
'''
Initialize the algorithm with a strategy (vector of parameters).
Parameters
----------
new_params : array_like
Returns
-------
No value.
'''
pass
def reset(self):
'''
Resets the algorithm state.
Parameters
----------
No parameters.
Returns
-------
No value.
'''
if self.seed is not None:
np.random.seed(self.seed)
# Generate initial population and particles' velocities.
self.set_population([self.generate_particle()
for _ in range(self.N)])
def generate_particle(self):
'''
Generates a swarm particle within bounds.
Parameters
----------
No parameters.
Returns
-------
ndarray
A vector of size D representing particle's coordinates.
'''
coords_range = self.u_bounds - self.l_bounds
return self.l_bounds + np.random.uniform(size=self.D) * coords_range
def set_population(self, new_population):
'''
Sets a population with a pre-generated one.
Parameters
----------
new_population: array_like
A matrix with dimensions N by D, which represents the coordinates
of each particle.
Returns
-------
No value.
'''
self.eval_num = self.N
self.N = len(new_population)
self.particles = np.copy(new_population)
self.scores = np.array([self.fit_func(p) for p in self.particles])
# Initializing current best.
gbest_index = np.ndarray.argmin(self.scores)
self.gbest = np.copy(self.particles[gbest_index])
self.gbest_score = self.scores[gbest_index]
@abstractmethod
def optimize(self):
'''
Main loop of the algorithm.
Parameters
----------
No parameters.
Returns
-------
ndarray
The coordinates of the global best particle at the end of
the optimization process.
'''
pass
def update_best(self):
'''
Updates global best particle if needed.
Parameters
----------
No parameters.
Returns
-------
No value.
'''
current_best_index = np.argmin(self.scores)
current_best = self.particles[current_best_index]
current_best_score = self.scores[current_best_index]
if current_best_score < self.gbest_score:
self.gbest = np.copy(current_best)
self.gbest_score = current_best_score
def simplebounds(self, coords):
'''
Simple constraint rule for particles' positions
(in-place coordinate modification).
Parameters
----------
coords: ndarray
An array of particles to apply the rule to.
Returns
-------
No value.
'''
l_bounds_tiled = np.tile(self.l_bounds, [coords.shape[0], 1])
u_bounds_tiled = np.tile(self.u_bounds, [coords.shape[0], 1])
lower_bound_indexes = coords < self.l_bounds
upper_bound_indexes = coords > self.u_bounds
coords[lower_bound_indexes] = l_bounds_tiled[lower_bound_indexes]
coords[upper_bound_indexes] = u_bounds_tiled[upper_bound_indexes]
def info(self):
'''
Returns basic information about the algorithm state in a
human-readable representation.
Parameters
----------
No parameters.
Returns
-------
str
Information about current best position, score and
current number of fitness-function evaluations.
'''
info = f'Algorithm: {type(self).__name__}\n'
info += f'Best position: {self.gbest}\n'
info += f'Best score: {self.gbest_score}\n'
info += f'Number of fitness function evaluations: {self.eval_num}'
return info
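# --- Illustrative subclass (added example; not part of the original module) ---
# A deliberately simple concrete implementation of the abstract interface above,
# showing how set_params() and optimize() are meant to be filled in. The single
# behavioural parameter (a step size) and the random-walk update are hypothetical
# choices made for this sketch only.
class RandomWalkSwarm(SwarmAlgorithm):
    def set_params(self, new_params):
        # Assume a single behavioural parameter: the random step size.
        self.step_size = new_params[0]

    def optimize(self):
        best_prev = self.gbest_score
        stagnation = 0
        for _ in range(self.max_iter):
            # Perturb every particle, keep it inside the bounds and re-evaluate.
            steps = np.random.uniform(-self.step_size, self.step_size,
                                      size=(self.N, self.D))
            self.particles = self.particles + steps
            self.simplebounds(self.particles)
            self.scores = np.array([self.fit_func(p) for p in self.particles])
            self.eval_num += self.N
            self.update_best()
            # Stagnation-based stopping criterion from the class docstring.
            if best_prev - self.gbest_score < self.e:
                stagnation += 1
                if stagnation >= self.stag_iter:
                    break
            else:
                stagnation = 0
                best_prev = self.gbest_score
        return self.gbest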
|
[
"numpy.copy",
"numpy.ndarray.argmin",
"numpy.tile",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.argmin"
] |
[((3954, 3977), 'numpy.copy', 'np.copy', (['new_population'], {}), '(new_population)\n', (3961, 3977), True, 'import numpy as np\n'), ((4113, 4143), 'numpy.ndarray.argmin', 'np.ndarray.argmin', (['self.scores'], {}), '(self.scores)\n', (4130, 4143), True, 'import numpy as np\n'), ((4165, 4201), 'numpy.copy', 'np.copy', (['self.particles[gbest_index]'], {}), '(self.particles[gbest_index])\n', (4172, 4201), True, 'import numpy as np\n'), ((4834, 4856), 'numpy.argmin', 'np.argmin', (['self.scores'], {}), '(self.scores)\n', (4843, 4856), True, 'import numpy as np\n'), ((5481, 5525), 'numpy.tile', 'np.tile', (['self.l_bounds', '[coords.shape[0], 1]'], {}), '(self.l_bounds, [coords.shape[0], 1])\n', (5488, 5525), True, 'import numpy as np\n'), ((5551, 5595), 'numpy.tile', 'np.tile', (['self.u_bounds', '[coords.shape[0], 1]'], {}), '(self.u_bounds, [coords.shape[0], 1])\n', (5558, 5595), True, 'import numpy as np\n'), ((2887, 2912), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2901, 2912), True, 'import numpy as np\n'), ((5052, 5073), 'numpy.copy', 'np.copy', (['current_best'], {}), '(current_best)\n', (5059, 5073), True, 'import numpy as np\n'), ((3458, 3488), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.D'}), '(size=self.D)\n', (3475, 3488), True, 'import numpy as np\n')]
|
"""
Generate coulomb matrices for molecules.
See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003.
"""
import numpy as np
from typing import Any, List, Optional
from deepchem.utils.typing import RDKitMol
from deepchem.utils.data_utils import pad_array
from deepchem.feat.base_classes import MolecularFeaturizer
class CoulombMatrix(MolecularFeaturizer):
"""Calculate Coulomb matrices for molecules.
Coulomb matrices provide a representation of the electronic structure of
a molecule. For a molecule with `N` atoms, the Coulomb matrix is a
`N X N` matrix where each element gives the strength of the
electrostatic interaction between two atoms. The method is described
in more detail in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrix(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
Note
----
This class requires RDKit to be installed.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
upper_tri: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
upper_tri: bool, optional (default False)
Generate only upper triangle part of Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.upper_tri = upper_tri
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate Coulomb matrices for molecules. If extra randomized
matrices are generated, they are treated as if they are features
for additional conformers.
Since Coulomb matrices are symmetric, only the (flattened) upper
triangular portion is returned.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule.
The default shape is `(num_confs, max_atoms, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms, max_atoms)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
features = self.coulomb_matrix(datapoint)
if self.upper_tri:
features = [f[np.triu_indices_from(f)] for f in features]
features = np.asarray(features)
if features.shape[0] == 1:
# `(1, max_atoms, max_atoms)` -> `(max_atoms, max_atoms)`
features = np.squeeze(features, axis=0)
return features
def coulomb_matrix(self, mol: RDKitMol) -> np.ndarray:
"""
Generate Coulomb matrices for each conformer of the given molecule.
Parameters
----------
mol: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The coulomb matrices of the given molecule
"""
try:
from rdkit import Chem
from rdkit.Chem import AllChem
except ModuleNotFoundError:
raise ImportError("This class requires RDKit to be installed.")
# Check whether num_confs >=1 or not
num_confs = len(mol.GetConformers())
if num_confs == 0:
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol, AllChem.ETKDG())
if self.remove_hydrogens:
mol = Chem.RemoveHs(mol)
n_atoms = mol.GetNumAtoms()
z = [atom.GetAtomicNum() for atom in mol.GetAtoms()]
rval = []
for conf in mol.GetConformers():
d = self.get_interatomic_distances(conf)
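# Coulomb matrix entries: M_ij = Z_i * Z_j / |R_i - R_j| off the diagonal and M_ii = 0.5 * Z_i ** 2.4 on the diagonal (computed below).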
m = np.outer(z, z) / d
m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4
if self.randomize:
for random_m in self.randomize_coulomb_matrix(m):
random_m = pad_array(random_m, self.max_atoms)
rval.append(random_m)
else:
m = pad_array(m, self.max_atoms)
rval.append(m)
return np.asarray(rval)
def randomize_coulomb_matrix(self, m: np.ndarray) -> List[np.ndarray]:
"""Randomize a Coulomb matrix as decribed in [1]_:
1. Compute row norms for M in a vector row_norms.
2. Sample a zero-mean unit-variance noise vector e with dimension
equal to row_norms.
3. Permute the rows and columns of M with the permutation that
sorts row_norms + e.
Parameters
----------
m: np.ndarray
Coulomb matrix.
Returns
-------
List[np.ndarray]
List of the random coulomb matrix
References
----------
.. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003
"""
rval = []
row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float)
rng = np.random.RandomState(self.seed)
for i in range(self.n_samples):
e = rng.normal(size=row_norms.size)
p = np.argsort(row_norms + e)
new = m[p][:, p] # permute rows first, then columns
rval.append(new)
return rval
@staticmethod
def get_interatomic_distances(conf: Any) -> np.ndarray:
"""
Get interatomic distances for atoms in a molecular conformer.
Parameters
----------
conf: rdkit.Chem.rdchem.Conformer
Molecule conformer.
Returns
-------
np.ndarray
The distances matrix for all atoms in a molecule
"""
n_atoms = conf.GetNumAtoms()
coords = [
# Convert AtomPositions from Angstrom to bohr (atomic units)
conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms)
]
d = np.zeros((n_atoms, n_atoms), dtype=float)
for i in range(n_atoms):
for j in range(i):
d[i, j] = coords[i].Distance(coords[j])
d[j, i] = d[i, j]
return d
class CoulombMatrixEig(CoulombMatrix):
"""Calculate the eigenvalues of Coulomb matrices for molecules.
This featurizer computes the eigenvalues of the Coulomb matrices for provided
molecules. Coulomb matrices are described in [1]_.
Examples
--------
>>> import deepchem as dc
>>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23)
>>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv
>>> tasks = ["atomization_energy"]
>>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers)
>>> dataset = loader.create_dataset(input_file)
References
----------
.. [1] Montavon, Grégoire, et al. "Learning invariant representations of
molecules for atomization energy prediction." Advances in neural information
processing systems. 2012.
"""
def __init__(self,
max_atoms: int,
remove_hydrogens: bool = False,
randomize: bool = False,
n_samples: int = 1,
seed: Optional[int] = None):
"""Initialize this featurizer.
Parameters
----------
max_atoms: int
The maximum number of atoms expected for molecules this featurizer will
process.
remove_hydrogens: bool, optional (default False)
If True, remove hydrogens before processing them.
randomize: bool, optional (default False)
If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices.
n_samples: int, optional (default 1)
If `randomize` is set to True, the number of random samples to draw.
seed: int, optional (default None)
Random seed to use.
"""
self.max_atoms = int(max_atoms)
self.remove_hydrogens = remove_hydrogens
self.randomize = randomize
self.n_samples = n_samples
if seed is not None:
seed = int(seed)
self.seed = seed
def _featurize(self, datapoint: RDKitMol, **kwargs) -> np.ndarray:
"""
Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues
are returned sorted by absolute value in descending order and padded
by max_atoms.
Parameters
----------
datapoint: rdkit.Chem.rdchem.Mol
RDKit Mol object
Returns
-------
np.ndarray
The eigenvalues of Coulomb matrix for molecules.
The default shape is `(num_confs, max_atoms)`.
If num_confs == 1, the shape is `(max_atoms,)`.
"""
if 'mol' in kwargs:
datapoint = kwargs.get("mol")
raise DeprecationWarning(
'Mol is being phased out as a parameter, please pass "datapoint" instead.'
)
cmat = self.coulomb_matrix(datapoint)
features_list = []
for f in cmat:
w, v = np.linalg.eig(f)
w_abs = np.abs(w)
sortidx = np.argsort(w_abs)
sortidx = sortidx[::-1]
w = w[sortidx]
f = pad_array(w, self.max_atoms)
features_list.append(f)
features = np.asarray(features_list)
if features.shape[0] == 1:
# `(1, max_atoms)` -> `(max_atoms,)`
features = np.squeeze(features, axis=0)
return features
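# Hedged illustration (toy numbers, not DeepChem output): the same sort-by-|eigenvalue|
# and zero-padding as above, applied to a 2x2 symmetric matrix with max_atoms = 5.
# toy = np.array([[36.9, 0.5], [0.5, 0.5]])
# w, _ = np.linalg.eig(toy)
# w = w[np.argsort(np.abs(w))[::-1]]  # sort by absolute value, descending
# padded = pad_array(w, 5)            # zero-pad up to max_atoms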
|
[
"numpy.abs",
"numpy.linalg.eig",
"rdkit.Chem.AddHs",
"numpy.asarray",
"numpy.triu_indices_from",
"numpy.squeeze",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"rdkit.Chem.AllChem.ETKDG",
"numpy.linalg.norm",
"rdkit.Chem.RemoveHs",
"numpy.random.RandomState",
"deepchem.utils.data_utils.pad_array"
] |
[((3568, 3588), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (3578, 3588), True, 'import numpy as np\n'), ((5033, 5049), 'numpy.asarray', 'np.asarray', (['rval'], {}), '(rval)\n', (5043, 5049), True, 'import numpy as np\n'), ((5793, 5825), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (5814, 5825), True, 'import numpy as np\n'), ((6596, 6637), 'numpy.zeros', 'np.zeros', (['(n_atoms, n_atoms)'], {'dtype': 'float'}), '((n_atoms, n_atoms), dtype=float)\n', (6604, 6637), True, 'import numpy as np\n'), ((9664, 9689), 'numpy.asarray', 'np.asarray', (['features_list'], {}), '(features_list)\n', (9674, 9689), True, 'import numpy as np\n'), ((3701, 3729), 'numpy.squeeze', 'np.squeeze', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (3711, 3729), True, 'import numpy as np\n'), ((4365, 4380), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (4375, 4380), False, 'from rdkit import Chem\n'), ((4474, 4492), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (4487, 4492), False, 'from rdkit import Chem\n'), ((5914, 5939), 'numpy.argsort', 'np.argsort', (['(row_norms + e)'], {}), '(row_norms + e)\n', (5924, 5939), True, 'import numpy as np\n'), ((9454, 9470), 'numpy.linalg.eig', 'np.linalg.eig', (['f'], {}), '(f)\n', (9467, 9470), True, 'import numpy as np\n'), ((9485, 9494), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (9491, 9494), True, 'import numpy as np\n'), ((9511, 9528), 'numpy.argsort', 'np.argsort', (['w_abs'], {}), '(w_abs)\n', (9521, 9528), True, 'import numpy as np\n'), ((9590, 9618), 'deepchem.utils.data_utils.pad_array', 'pad_array', (['w', 'self.max_atoms'], {}), '(w, self.max_atoms)\n', (9599, 9618), False, 'from deepchem.utils.data_utils import pad_array\n'), ((9781, 9809), 'numpy.squeeze', 'np.squeeze', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (9791, 9809), True, 'import numpy as np\n'), ((4414, 4429), 'rdkit.Chem.AllChem.ETKDG', 'AllChem.ETKDG', ([], {}), '()\n', (4427, 4429), False, 'from rdkit.Chem import AllChem\n'), ((4690, 4704), 'numpy.outer', 'np.outer', (['z', 'z'], {}), '(z, z)\n', (4698, 4704), True, 'import numpy as np\n'), ((4970, 4998), 'deepchem.utils.data_utils.pad_array', 'pad_array', (['m', 'self.max_atoms'], {}), '(m, self.max_atoms)\n', (4979, 4998), False, 'from deepchem.utils.data_utils import pad_array\n'), ((5735, 5754), 'numpy.linalg.norm', 'np.linalg.norm', (['row'], {}), '(row)\n', (5749, 5754), True, 'import numpy as np\n'), ((3509, 3532), 'numpy.triu_indices_from', 'np.triu_indices_from', (['f'], {}), '(f)\n', (3529, 3532), True, 'import numpy as np\n'), ((4757, 4768), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (4765, 4768), True, 'import numpy as np\n'), ((4878, 4913), 'deepchem.utils.data_utils.pad_array', 'pad_array', (['random_m', 'self.max_atoms'], {}), '(random_m, self.max_atoms)\n', (4887, 4913), False, 'from deepchem.utils.data_utils import pad_array\n')]
|
#!/usr/bin/env python
# coding: utf-8
# This script generates a zone plate pattern (based on partial filling) given the material, energy, grid size and number of zones as input
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from numba import njit
from joblib import Parallel, delayed
from tqdm import tqdm, trange
import urllib,os,pickle
from os.path import dirname as up
# Importing all the required libraries. Numba is used to optimize functions.
# In[2]:
def repeat_pattern(X,Y,Z):
flag_ = np.where((X>0)&(Y>0))
flag1 = np.where((X>0)&(Y<0))
flag1 = tuple((flag1[0][::-1],flag1[1]))
Z[flag1] = Z[flag_]
flag2 = np.where((X<0)&(Y>0))
flag2 = tuple((flag2[0],flag2[1][::-1]))
Z[flag2] = Z[flag_]
flag3 = np.where((X<0)&(Y<0))
flag3 = tuple((flag3[0][::-1],flag3[1][::-1]))
Z[flag3] = Z[flag_]
return Z
# *repeat_pattern* : produces the zone plate pattern given the pattern in only one quadrant (X,Y>0) as input.
# * *Inputs* : X and Y grid denoting the coordinates and Z containing the pattern in one quadrant.
# * *Outputs* : Z itself is modified to reflect the repetition. A minimal usage sketch follows below.
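# A minimal sketch of the mirroring step (illustrative grid size, not used anywhere
# later in this notebook): fill one quadrant of a small grid and let repeat_pattern
# copy it into the remaining three quadrants.
demo_x = np.linspace(-1, 1, 32)
demo_X, demo_Y = np.meshgrid(demo_x, demo_x)
demo_Z = np.zeros_like(demo_X)
demo_Z[(demo_X > 0) & (demo_Y > 0)] = 1.0  # pattern known only where X, Y > 0
demo_Z = repeat_pattern(demo_X, demo_Y, demo_Z)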
# In[3]:
def get_property(mat,energy):
url = "http://henke.lbl.gov/cgi-bin/pert_cgi.pl"
data = {'Element':str(mat), 'Energy':str(energy), 'submit':'Submit Query'}
data = urllib.parse.urlencode(data)
data = data.encode('utf-8')
req = urllib.request.Request(url, data)
resp = urllib.request.urlopen(req)
respDat = resp.read()
response = respDat.split()
d = b'g/cm^3<li>Delta'
i = response.index(d)
delta = str(response[i+2])[:str(response[i+2]).index('<li>Beta')][2:]
beta = str(response[i+4])[2:-1]
return float(delta),float(beta)
# *get_property* : gets delta and beta for a given material at the specified energy from Henke et al.
# * *Inputs* : mat - material, energy - energy in eV
# * *Outputs* : delta, beta
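# Illustrative call (needs network access to henke.lbl.gov; this mirrors how the
# function is used further down for gold at 10 keV):
# delta, beta = get_property('Au', 10000)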
# In[4]:
@njit # equivalent to "jit(nopython=True)".
def partial_fill(x,y,step,r1,r2,n):
x_ = np.linspace(x-step/2,x+step/2,n)
y_ = np.linspace(y-step/2,y+step/2,n)
cnts = 0
for i in range(n):
for j in range(n):
z = (x_[i] * x_[i] + y_[j] * y_[j])
if r1*r1 < z < r2*r2:
cnts += 1
fill_factor = cnts/(n*n)
return fill_factor
# *partial_fill* : workhorse function for determining the fill pattern. This function is thus used in a loop. njit is used to optimize the function.
# * *Inputs* : x,y - coordinates of the point, step - step size, r1,r2 - inner and outer radii of ring, n - resolution
# * *Outputs* : fill_factor - value of the pixel based on amount of ring passing through it
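# A quick hedged sanity check (illustrative values; left commented out so that running
# this cell does not trigger an extra numba compilation): a pixel lying entirely inside
# the ring should give a fill factor of ~1.0, a pixel far outside gives 0.0.
# print(partial_fill(0.5, 0.0, 0.01, 0.4, 0.6, 50))  # ~1.0
# print(partial_fill(2.0, 0.0, 0.01, 0.4, 0.6, 50))  # 0.0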
# In[5]:
#find the radius of the nth zone
def zone_radius(n,f,wavel):
return np.sqrt(n*wavel*f + ((n*wavel)/2)**2)
# *zone_radius* : function to find the radius of a zone given the zone number and wavelength
# * *Inputs* : n - zone number, f - focal length, wavel - wavelength
# * *Outputs* : radius of the zone as specified by the inputs
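# Hedged illustration (10 mm focal length at 10 keV, i.e. wavel of roughly 0.124 nm):
# the first zone radius comes out near 1.1 um, and the spacing between consecutive
# radii shrinks with zone number, which the sampling checks further down rely on.
demo_r1 = zone_radius(1, 10e-3, 1.24e-10)
demo_r2 = zone_radius(2, 10e-3, 1.24e-10)
demo_dr = demo_r2 - demo_r1  # much wider than the outermost zones of a 700-zone plate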
# In[6]:
def make_quadrant(X,Y,flag,r1,r2,step,n,zone_number):
z = np.zeros(np.shape(X))
Z = np.sqrt(X**2+Y**2)
for l in range(len(flag[0])):
i = flag[0][l]
j = flag[1][l]
if 0.75*r1< Z[i][j] < 1.25*r2:
x1 = X[i][j]
y1 = Y[i][j]
z[i][j] = partial_fill(x1,y1,step,r1,r2,n)
z[tuple((flag[1],flag[0]))] = z[tuple((flag[0],flag[1]))]
return z
# *make_quadrant* : function used to create a quadrant of a ring given the inner and outer radius and zone number
# * *Inputs* : X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0), r1,r2 - inner and outer radii, n - parameter for the partial_fill function
# * *Outputs* : z - output pattern with one quadrant filled.
# In[7]:
#2D ZP
def make_ring(i):
print(i)
r1 = radius[i-1]
r2 = radius[i]
n = 250
ring = make_quadrant(X,Y,flag,r1,r2,step_xy,n,zone_number = i)
ring = repeat_pattern(X,Y,ring)
ring_ = np.where(ring!=0)
vals_ = ring[ring_]
np.save('ring_locs_'+str(i)+'.npy',ring_)
np.save('ring_vals_'+str(i)+'.npy',vals_)
return
# *make_ring* : function used to create a ring given the relevant parameters
# * *Inputs* : i-zone number,radius - array of radii ,X,Y - grid, flag - specifies the quadrant to be filled (i.e. where X,Y>0),n - parameter for the partial_fill function
# * *Outputs* : None. Saves the rings to memory.
# In[8]:
mat = 'Au'
energy = 10000 #Energy in EV
f = 10e-3 #focal length in meters
wavel = (1239.84/energy)*10**(-9) #Wavelength in meters
delta,beta = get_property(mat,energy)
zones = 700 #number of zones
radius = np.zeros(zones)
# Setting up the parameters and initializing the variables.
# In[9]:
for k in range(zones):
radius[k] = zone_radius(k,f,wavel)
# Filling the radius array with the radius of zones for later use in making the rings.
# In the next few code blocks, we check if the parameters of the simulation make sense. First we print out the input and output pixel sizes assuming we will be using the 1FT propagator. Then we see if the pixel sizes are small enough compared to the outermost zone width. Finally we check if the focal spot can be contained for the given amount of tilt angle.
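# Rough worked numbers for the settings below (approximate, using the 1FT-propagator
# relation L_out = wavel*f/step_xy evaluated in the next cell): the input pixel is
# 262e-6/55296 ~ 4.7 nm, L_out ~ 262 um, and the outermost zone width of a 700-zone
# plate is ~21 nm, so both pixel sizes stay under the 1/4-zone-width limit checked later.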
# In[10]:
grid_size = 55296
input_xrange = 262e-6
step_xy = input_xrange/grid_size
L_out = (1239.84/energy)*10**(-9)*f/(input_xrange/grid_size)
step_xy_output = L_out/grid_size
print(' Output L : ',L_out)
print(' output pixel size(nm) : ',step_xy_output*1e9)
print(' input pixel size(nm) : ',step_xy*1e9)
# In[11]:
drn = radius[-1]-radius[-2]
print(' maximum radius(um) : ',radius[-1]*1e6)
print(' outermost zone width(nm) :',drn*1e9)
# In[12]:
print(' max shift of focal spot(um) : ',(L_out/2)*1e6)
# invert the following to get max tilt allowance
# after which the focal spot falls off the
# simulation plane
# np.sin(theta*(np.pi/180))*f = (L_out/2)
theta_max = np.arcsin((L_out/2)*(1/f))*(180/np.pi)
print(' max wavefield aligned tilt(deg) : ',theta_max)
# In[13]:
if step_xy > 0.25*drn :
print(' WARNING ! input pixel size too large')
print(' ratio of input step size to outermost zone width', step_xy/drn)
if step_xy_output > 0.25*drn :
print(' WARNING ! output pixel size too large')
print(' ratio of output step size to outermost zone width', step_xy_output/drn)
# In[14]:
zones_to_fill = []
for i in range(zones):
if i%2 == 1 :
zones_to_fill.append(i)
zones_to_fill = np.array(zones_to_fill)
# Making a list of zones to fill. (Since only alternate zones are filled in our case. This can be modified as per convenience)
# In[ ]:
try :
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
except :
os.mkdir(up(os.getcwd())+str('/hard_xray_zp'))
os.chdir(up(os.getcwd())+str('/hard_xray_zp'))
# Store the location of each ring of the zone plate separately in a sub-directory. This is more efficient than storing the whole zone plate array! A reassembly sketch is given below.
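# Hedged reassembly sketch (not part of this notebook; run it only after the rings have
# been generated, from the directory holding the .npy files):
# zp = np.zeros((grid_size, grid_size))
# for i in zones_to_fill:
#     locs = np.load('ring_locs_' + str(i) + '.npy')
#     vals = np.load('ring_vals_' + str(i) + '.npy')
#     zp[locs[0], locs[1]] = vals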
# In[ ]:
x1 = input_xrange/2
x = np.linspace(-x1,x1,grid_size)
step_xy = x[-1]-x[-2]
zp_coords =[-x1,x1,-x1,x1]
# In[ ]:
X,Y = np.meshgrid(x,x)
flag = np.where((X>0)&(Y>0)&(X>=Y))
# Creating the input 1D array and setting the parameters for use by the make ring function.
# Note that X,Y,flag and step_xy will be read by multiple processes which we will spawn using joblib.
# In[ ]:
get_ipython().run_cell_magic('capture', '', 'from joblib import Parallel, delayed \nresults = Parallel(n_jobs=5)(delayed(make_ring)(i) for i in zones_to_fill)')
# Creating the rings ! (Adjust the number of jobs depending on CPU cores.)
# In[ ]:
params = {'grid_size':grid_size,'step_xy':step_xy,'energy(in eV)':energy,'wavelength in m':wavel,'focal_length':f,'zp_coords':zp_coords,'delta':delta,'beta':beta}
pickle.dump(params,open('parameters.pickle','wb'))
# Pickling and saving all the associated parameters along with the rings for use in simulation!
|
[
"numpy.sqrt",
"numpy.where",
"urllib.request.Request",
"numpy.arcsin",
"os.getcwd",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"urllib.parse.urlencode",
"numpy.meshgrid",
"numpy.shape",
"urllib.request.urlopen"
] |
[((4720, 4735), 'numpy.zeros', 'np.zeros', (['zones'], {}), '(zones)\n', (4728, 4735), True, 'import numpy as np\n'), ((6546, 6569), 'numpy.array', 'np.array', (['zones_to_fill'], {}), '(zones_to_fill)\n', (6554, 6569), True, 'import numpy as np\n'), ((7066, 7097), 'numpy.linspace', 'np.linspace', (['(-x1)', 'x1', 'grid_size'], {}), '(-x1, x1, grid_size)\n', (7077, 7097), True, 'import numpy as np\n'), ((7164, 7181), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (7175, 7181), True, 'import numpy as np\n'), ((7188, 7226), 'numpy.where', 'np.where', (['((X > 0) & (Y > 0) & (X >= Y))'], {}), '((X > 0) & (Y > 0) & (X >= Y))\n', (7196, 7226), True, 'import numpy as np\n'), ((520, 547), 'numpy.where', 'np.where', (['((X > 0) & (Y > 0))'], {}), '((X > 0) & (Y > 0))\n', (528, 547), True, 'import numpy as np\n'), ((554, 581), 'numpy.where', 'np.where', (['((X > 0) & (Y < 0))'], {}), '((X > 0) & (Y < 0))\n', (562, 581), True, 'import numpy as np\n'), ((657, 684), 'numpy.where', 'np.where', (['((X < 0) & (Y > 0))'], {}), '((X < 0) & (Y > 0))\n', (665, 684), True, 'import numpy as np\n'), ((760, 787), 'numpy.where', 'np.where', (['((X < 0) & (Y < 0))'], {}), '((X < 0) & (Y < 0))\n', (768, 787), True, 'import numpy as np\n'), ((1327, 1355), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['data'], {}), '(data)\n', (1349, 1355), False, 'import urllib, os, pickle\n'), ((1398, 1431), 'urllib.request.Request', 'urllib.request.Request', (['url', 'data'], {}), '(url, data)\n', (1420, 1431), False, 'import urllib, os, pickle\n'), ((1443, 1470), 'urllib.request.urlopen', 'urllib.request.urlopen', (['req'], {}), '(req)\n', (1465, 1470), False, 'import urllib, os, pickle\n'), ((2016, 2058), 'numpy.linspace', 'np.linspace', (['(x - step / 2)', '(x + step / 2)', 'n'], {}), '(x - step / 2, x + step / 2, n)\n', (2027, 2058), True, 'import numpy as np\n'), ((2058, 2100), 'numpy.linspace', 'np.linspace', (['(y - step / 2)', '(y + step / 2)', 'n'], {}), '(y - step / 2, y + step / 2, n)\n', (2069, 2100), True, 'import numpy as np\n'), ((2764, 2809), 'numpy.sqrt', 'np.sqrt', (['(n * wavel * f + (n * wavel / 2) ** 2)'], {}), '(n * wavel * f + (n * wavel / 2) ** 2)\n', (2771, 2809), True, 'import numpy as np\n'), ((3131, 3155), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (3138, 3155), True, 'import numpy as np\n'), ((4010, 4029), 'numpy.where', 'np.where', (['(ring != 0)'], {}), '(ring != 0)\n', (4018, 4029), True, 'import numpy as np\n'), ((5998, 6028), 'numpy.arcsin', 'np.arcsin', (['(L_out / 2 * (1 / f))'], {}), '(L_out / 2 * (1 / f))\n', (6007, 6028), True, 'import numpy as np\n'), ((3110, 3121), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3118, 3121), True, 'import numpy as np\n'), ((6733, 6744), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6742, 6744), False, 'import urllib, os, pickle\n'), ((6793, 6804), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6802, 6804), False, 'import urllib, os, pickle\n'), ((6844, 6855), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6853, 6855), False, 'import urllib, os, pickle\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Paddle-Lite full python api demo
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from paddlelite.lite import *
import numpy as np
import platform
# Command arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", default="", type=str, help="Non-combined Model dir path")
parser.add_argument("--model_file", default="", type=str, help="Model file")
parser.add_argument(
"--param_file", default="", type=str, help="Combined model param file")
parser.add_argument(
"--input_shape",
default=[1, 3, 224, 224],
nargs='+',
type=int,
required=False,
help="Model input shape, eg: 1 3 224 224. Defalut: 1 3 224 224")
parser.add_argument(
"--backend",
default="",
type=str,
help="To use a particular backend for execution. Should be one of: arm|opencl|x86|x86_opencl|metal|nnadapter"
)
parser.add_argument(
"--image_path", default="", type=str, help="The path of test image file")
parser.add_argument(
"--label_path", default="", type=str, help="The path of label file")
parser.add_argument(
"--print_results",
type=bool,
default=False,
help="Print results. Default: False")
parser.add_argument(
"--nnadapter_device_names",
default="",
type=str,
help="Set nnadapter device names")
parser.add_argument(
"--nnadapter_context_properties",
default="",
type=str,
help="Set nnadapter context properties")
parser.add_argument(
"--nnadapter_model_cache_dir",
default="",
type=str,
help="Set nnadapter model cache dir")
parser.add_argument(
"--nnadapter_subgraph_partition_config_path",
default="",
type=str,
help="Set nnadapter subgraph partition config path")
parser.add_argument(
"--nnadapter_mixed_precision_quantization_config_path",
default="",
type=str,
help="Set nnadapter mixed precision quantization config path")
def RunModel(args):
# 1. Set config information
config = CxxConfig()
if args.model_file != '' and args.param_file != '':
config.set_model_file(args.model_file)
config.set_param_file(args.param_file)
else:
config.set_model_dir(args.model_dir)
if platform.machine() in ["x86_64", "x64", "AMD64"]:
platform_place = Place(TargetType.X86, PrecisionType.FP32)
else:
platform_place = Place(TargetType.ARM, PrecisionType.FP32)
if args.backend.upper() in ["ARM"]:
places = [Place(TargetType.ARM, PrecisionType.FP32)]
elif args.backend.upper() in ["X86"]:
places = [Place(TargetType.X86, PrecisionType.FP32)]
elif args.backend.upper() in ["OPENCL", "X86_OPENCL"]:
places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
platform_place, Place(TargetType.Host, PrecisionType.FP32)
]
'''
Set opencl kernel binary.
Considerable additional preparation time is spent selecting algorithms and
building kernels from source code.
This preparation time drops dramatically once the algorithm file and the
OpenCL kernel binary have been built on the first run.
The first run will take a bit longer due to compilation if you don't call
`set_opencl_binary_path_name` explicitly.
So calling `set_opencl_binary_path_name` explicitly is strongly
recommended.
Make sure you have write permission of the binary path.
We strongly recommend each model has a unique binary name.
'''
bin_path = "./"
bin_name = "lite_opencl_kernel.bin"
config.set_opencl_binary_path_name(bin_path, bin_name)
'''
opencl tune option:
CL_TUNE_NONE
CL_TUNE_RAPID
CL_TUNE_NORMAL
CL_TUNE_EXHAUSTIVE
'''
tuned_path = "./"
tuned_name = "lite_opencl_tuned.bin"
config.set_opencl_tune(CLTuneMode.CL_TUNE_NORMAL, tuned_path,
tuned_name, 4)
'''
opencl precision option:
CL_PRECISION_AUTO, first fp16 if valid, default
CL_PRECISION_FP32, force fp32
CL_PRECISION_FP16, force fp16
'''
config.set_opencl_precision(CLPrecisionType.CL_PRECISION_AUTO)
elif args.backend.upper() in ["METAL"]:
# set metallib path
import paddlelite, os
module_path = os.path.dirname(paddlelite.__file__)
config.set_metal_lib_path(module_path + "/libs/lite.metallib")
config.set_metal_use_mps(True)
# set places for Metal
places = [
Place(TargetType.Metal, PrecisionType.FP32,
DataLayoutType.MetalTexture2DArray),
Place(TargetType.Metal, PrecisionType.FP16,
DataLayoutType.MetalTexture2DArray), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
elif args.backend.upper() in ["NNADAPTER"]:
places = [
Place(TargetType.NNAdapter, PrecisionType.FP32), platform_place,
Place(TargetType.Host, PrecisionType.FP32)
]
if args.nnadapter_device_names == "":
print(
"Please set nnadapter_device_names when backend = nnadapter!")
return
config.set_nnadapter_device_names(
args.nnadapter_device_names.split(","))
config.set_nnadapter_context_properties(
args.nnadapter_context_properties)
config.set_nnadapter_model_cache_dir(args.nnadapter_model_cache_dir)
config.set_nnadapter_subgraph_partition_config_path(
args.nnadapter_subgraph_partition_config_path)
config.set_nnadapter_mixed_precision_quantization_config_path(
args.nnadapter_mixed_precision_quantization_config_path)
else:
raise ValueError("Unsupported backend: %s." % args.backend)
config.set_valid_places(places)
# 2. Create paddle predictor
predictor = create_paddle_predictor(config)
optimized_model_dir = "opt_" + args.backend
predictor.save_optimized_model(optimized_model_dir)
# 3. Set input data
input_tensor = predictor.get_input(0)
c, h, w = args.input_shape[1], args.input_shape[2], args.input_shape[3]
read_image = len(args.image_path) != 0 and len(args.label_path) != 0
if read_image == True:
import cv2
with open(args.label_path, "r") as f:
label_list = f.readlines()
image_mean = [0.485, 0.456, 0.406]
image_std = [0.229, 0.224, 0.225]
image_data = cv2.imread(args.image_path)
image_data = cv2.resize(image_data, (h, w))
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)
image_data = image_data.transpose((2, 0, 1)) / 255.0
image_data = (image_data - np.array(image_mean).reshape(
(3, 1, 1))) / np.array(image_std).reshape((3, 1, 1))
image_data = image_data.reshape([1, c, h, w]).astype('float32')
input_tensor.from_numpy(image_data)
else:
input_tensor.from_numpy(np.ones((1, c, h, w)).astype("float32"))
# 4. Run model
predictor.run()
# 5. Get output data
output_tensor = predictor.get_output(0)
output_data = output_tensor.numpy()
if args.print_results == True:
print("result data:\n{}".format(output_data))
print("mean:{:.6e}, std:{:.6e}, min:{:.6e}, max:{:.6e}".format(
np.mean(output_data),
np.std(output_data), np.min(output_data), np.max(output_data)))
# 6. Post-process
if read_image == True:
output_data = output_data.flatten()
class_id = np.argmax(output_data)
class_name = label_list[class_id]
score = output_data[class_id]
print("class_name: {} score: {}".format(class_name, score))
if __name__ == '__main__':
args = parser.parse_args()
RunModel(args)
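# Example invocation (illustrative only: the script name and file paths are
# placeholders, the flags are the ones defined above):
# python3 lite_full_api_demo.py --model_dir ./mobilenet_v1 --backend arm \
#     --image_path ./test.jpg --label_path ./labels.txt --print_results True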
|
[
"numpy.mean",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.min",
"numpy.max",
"os.path.dirname",
"numpy.array",
"cv2.cvtColor",
"numpy.std",
"platform.machine",
"cv2.resize",
"cv2.imread"
] |
[((873, 898), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (896, 898), False, 'import argparse\n'), ((2870, 2888), 'platform.machine', 'platform.machine', ([], {}), '()\n', (2886, 2888), False, 'import platform\n'), ((7710, 7737), 'cv2.imread', 'cv2.imread', (['args.image_path'], {}), '(args.image_path)\n', (7720, 7737), False, 'import cv2\n'), ((7759, 7789), 'cv2.resize', 'cv2.resize', (['image_data', '(h, w)'], {}), '(image_data, (h, w))\n', (7769, 7789), False, 'import cv2\n'), ((7811, 7854), 'cv2.cvtColor', 'cv2.cvtColor', (['image_data', 'cv2.COLOR_BGR2RGB'], {}), '(image_data, cv2.COLOR_BGR2RGB)\n', (7823, 7854), False, 'import cv2\n'), ((8767, 8789), 'numpy.argmax', 'np.argmax', (['output_data'], {}), '(output_data)\n', (8776, 8789), True, 'import numpy as np\n'), ((8560, 8580), 'numpy.mean', 'np.mean', (['output_data'], {}), '(output_data)\n', (8567, 8580), True, 'import numpy as np\n'), ((8590, 8609), 'numpy.std', 'np.std', (['output_data'], {}), '(output_data)\n', (8596, 8609), True, 'import numpy as np\n'), ((8611, 8630), 'numpy.min', 'np.min', (['output_data'], {}), '(output_data)\n', (8617, 8630), True, 'import numpy as np\n'), ((8632, 8651), 'numpy.max', 'np.max', (['output_data'], {}), '(output_data)\n', (8638, 8651), True, 'import numpy as np\n'), ((5556, 5592), 'os.path.dirname', 'os.path.dirname', (['paddlelite.__file__'], {}), '(paddlelite.__file__)\n', (5571, 5592), False, 'import paddlelite, os\n'), ((8007, 8026), 'numpy.array', 'np.array', (['image_std'], {}), '(image_std)\n', (8015, 8026), True, 'import numpy as np\n'), ((8204, 8225), 'numpy.ones', 'np.ones', (['(1, c, h, w)'], {}), '((1, c, h, w))\n', (8211, 8225), True, 'import numpy as np\n'), ((7951, 7971), 'numpy.array', 'np.array', (['image_mean'], {}), '(image_mean)\n', (7959, 7971), True, 'import numpy as np\n')]
|
""" Waymo dataset with votes.
Author: <NAME>
Date: 2020
"""
import os
import sys
import numpy as np
import pickle
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '..', 'utils'))
from box_util import get_corners_from_labels_array
import pc_util
import waymo_utils
from model_util_waymo import WaymoDatasetConfig
DC = WaymoDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 128 # maximum number of objects allowed per scene
# RAW_LABELS = {0: 'TYPE_UNKNOWN', 1: 'TYPE_VEHICLE' , 2: 'TYPE_PEDESTRIAN', 3: 'TYPE_SIGN', 4: 'TYPE_CYCLIST'}
class WaymoDetectionVotesDataset(Dataset):
def __init__(self, split_set='train', num_points=180000,
use_height=False,
augment=False,
verbose:bool = True):
# self.mapping_labels = {1:0,2:1,4:2} # map dataset labels to our labels to handle discarded classes
# self.excluded_labels = [0,3] # exclude unknowns and signs labels
self.split_set = split_set
self.type2class = {0: 'TYPE_UNKNOWN', 1: 'TYPE_VEHICLE' , 2: 'TYPE_PEDESTRIAN', 3: 'TYPE_SIGN', 4: 'TYPE_CYCLIST'}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.classes = ['TYPE_VEHICLE'] #, 'TYPE_PEDESTRIAN', 'TYPE_CYCLIST']
self.data_path = os.path.join(BASE_DIR,
'dataset') # TODO: rename to votes data path
# self.raw_data_path = os.path.join(BASE_DIR, 'dataset')
# access segments dictionary list
# load segments_dict_list dictionary
self.segments_dict_list_path = os.path.join(self.data_path, split_set, 'segments_dict_list')
if not os.path.exists(self.segments_dict_list_path):
raise ValueError('segments Dictionary list is not found, make sure to preprocess the data first')
with open(self.segments_dict_list_path, 'rb') as f:
self.segments_dict_list = pickle.load(f)
self.num_segments = len(self.segments_dict_list)
if verbose: print("No of segments in the dataset is {}".format(len(self.segments_dict_list)))
self.num_frames = 0
for segment_dict in self.segments_dict_list:
# add total number of frames in every segment
self.num_frames += segment_dict['frame_count']
# self.scan_names = sorted(list(set([os.path.basename(x).split("_")[1].split('.')[0] for x in os.listdir(os.path.join(self.data_path, 'training', 'votes'))])))
self.num_points = num_points
self.augment = augment
self.use_height = use_height
def __len__(self):
return self.num_frames
def resolve_idx_to_frame_path(self, idx):
''' Get global idx and transform it into the corresponding segment frame path
'''
frame_idx = idx
for segment_dict in self.segments_dict_list:
if frame_idx >= segment_dict['frame_count']:
frame_idx -= segment_dict['frame_count']
else:
frames_list = os.listdir(os.path.join(self.data_path, self.split_set, segment_dict['id']))
frame_path = os.path.join(self.data_path, self.split_set, segment_dict['id'], frames_list[frame_idx])
if not os.path.exists(frame_path):
raise ValueError("Frame path doesn't exist, error in idx_to_frame_path function")
return frame_path
def filtrate_objects(self, labels):
'''
labels: Nx8 array containing all annotated objects
'''
type_whitelist = [self.class2type[i] for i in self.classes]
# remove unwanted classes
rows_to_be_deleted = []
for i in range(labels.shape[0]):
if not labels[i,0] in type_whitelist:
rows_to_be_deleted.append(i)
labels = np.delete(labels, rows_to_be_deleted, 0)
return labels
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
if there is only one vote than X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
frame_data_path = self.resolve_idx_to_frame_path(idx)
segment_id = frame_data_path.split('/')[-2]
frame_idx = frame_data_path.split('/')[-1].split('_')[-1].split('.')[0]
# print('data idx is ', idx)
# print('extracted segment id is ', segment_id)
# print('extracted frame idx is ', frame_idx)
# print("path is ", frame_data_path)
point_cloud = np.load(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx)))['pc'] # Nx3
if not os.path.exists(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx))):
print('this path does not exist !!')
print(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id), '{}_{}_pc.npz'.format(segment_id, frame_idx)))
assert point_cloud.shape[1] == 3
frame_data_path = os.path.join(self.data_path, self.split_set,'{}'.format(segment_id) ,'{}_{}.npz'.format(segment_id, frame_idx))
frame_data = np.load(frame_data_path)
labels = frame_data['labels']
assert labels.shape[1] == 8
# print('labels types before filterations ', labels[:,0])
labels = self.filtrate_objects(labels)
# print('labels types after filterations ', labels[:,0])
# create bboxes matrix
bboxes = np.zeros_like(labels)
for i in range(labels.shape[0]):
# if labels[i,0] in self.excluded_labels: # skip signs and unknown labels
# continue
bboxes[i, 0:3] = labels[i,4:7] #centers
bboxes[i, 3:6] = labels[i,1:4] #lwh
bboxes[i, 6] = labels[i,7] # heading
bboxes[i, 7] = DC.raw2used_labels[labels[i,0]] #label
point_votes = np.load(os.path.join(self.data_path, self.split_set, 'votes', '{}'.format(segment_id) ,'{}_{}_votes.npz'.format(segment_id, frame_idx)))['point_votes'] # Nx10
assert point_votes.shape[1] == 10
point_cloud = point_cloud[:,0:3]
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
raise NotImplementedError
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = waymo_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
# NOTE: The mean size stored in size2class is of full length of box edges,
# while in sunrgbd_data.py data dumping we dumped *half* length l,w,h.. so have to time it by 2 here
box3d_size = bbox[3:6]
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = np.transpose(get_corners_from_labels_array(bbox)) # 8 x 3
# import pdb; pdb.set_trace()
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:bboxes.shape[0]] = bboxes[:,-1] # from 0 to 4
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
# ret_dict['scan_idx'] = np.array(idx).astype(np.int64) # TODO: wrong indicator, add frame name and segment name instead
# ret_dict['max_gt_bboxes'] = max_bboxes #ABAHNASY: not used parameter
return ret_dict
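# Hedged usage sketch (batch size is an arbitrary illustration value): every entry in
# ret_dict is a fixed-size numpy array, so the dataset can be consumed directly by a
# standard PyTorch DataLoader with its default collation:
# from torch.utils.data import DataLoader
# loader = DataLoader(WaymoDetectionVotesDataset(use_height=True), batch_size=8)
# batch = next(iter(loader))  # e.g. batch['center_label'].shape == (8, MAX_NUM_OBJ, 3)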
def viz_votes(pc, point_votes, point_votes_mask):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_obj_voted2 = pc_obj + point_votes[inds,3:6]
pc_obj_voted3 = pc_obj + point_votes[inds,6:9]
pc_util.write_ply(pc_obj, 'pc_obj.ply')
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1.ply')
pc_util.write_ply(pc_obj_voted2, 'pc_obj_voted2.ply')
pc_util.write_ply(pc_obj_voted3, 'pc_obj_voted3.ply')
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = DC.class2angle(angle_classes[i], angle_residuals[i])
box_size = DC.class2size(size_classes[i], size_residuals[i])
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs.ply')
pc_util.write_ply(label[mask==1,:], 'gt_centroids.ply')
def get_sem_cls_statistics():
""" Compute number of objects for each semantic class """
d = WaymoDetectionVotesDataset(use_height=True, augment=False)
sem_cls_cnt = {}
for i in range(len(d)):
if i%10==0: print(i)
sample = d[i]
pc = sample['point_clouds']
sem_cls = sample['sem_cls_label']
mask = sample['box_label_mask']
for j in range(len(sem_cls)):
if mask[j] == 0: continue
if sem_cls[j] not in sem_cls_cnt:
sem_cls_cnt[sem_cls[j]] = 0
sem_cls_cnt[sem_cls[j]] += 1
print(sem_cls_cnt)
if __name__=='__main__':
d = WaymoDetectionVotesDataset(use_height=True, augment=False)
# for i in range(len(d)):
sample = d[0]
print(sample['vote_label'].shape, sample['vote_label_mask'].shape)
pc_util.write_ply(sample['point_clouds'], 'pc.ply')
viz_votes(sample['point_clouds'], sample['vote_label'], sample['vote_label_mask'])
viz_obb(sample['point_clouds'], sample['center_label'], sample['box_label_mask'],
sample['heading_class_label'], sample['heading_residual_label'],
sample['size_class_label'], sample['size_residual_label'])
|
[
"numpy.array",
"sys.path.append",
"os.path.exists",
"box_util.get_corners_from_labels_array",
"numpy.random.random",
"numpy.delete",
"numpy.max",
"pc_util.random_sampling",
"numpy.min",
"numpy.tile",
"pc_util.write_oriented_bbox",
"pickle.load",
"model_util_waymo.WaymoDatasetConfig",
"numpy.transpose",
"pc_util.write_ply",
"os.path.join",
"waymo_utils.rotz",
"numpy.zeros",
"numpy.expand_dims",
"os.path.abspath",
"numpy.percentile",
"numpy.load",
"numpy.zeros_like"
] |
[((307, 332), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (322, 332), False, 'import sys\n'), ((527, 547), 'model_util_waymo.WaymoDatasetConfig', 'WaymoDatasetConfig', ([], {}), '()\n', (545, 547), False, 'from model_util_waymo import WaymoDatasetConfig\n'), ((241, 266), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n'), ((349, 386), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""', '"""utils"""'], {}), "(BASE_DIR, '..', 'utils')\n", (361, 386), False, 'import os\n'), ((12906, 12945), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj', '"""pc_obj.ply"""'], {}), "(pc_obj, 'pc_obj.ply')\n", (12923, 12945), False, 'import pc_util\n'), ((12950, 13003), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj_voted1', '"""pc_obj_voted1.ply"""'], {}), "(pc_obj_voted1, 'pc_obj_voted1.ply')\n", (12967, 13003), False, 'import pc_util\n'), ((13008, 13061), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj_voted2', '"""pc_obj_voted2.ply"""'], {}), "(pc_obj_voted2, 'pc_obj_voted2.ply')\n", (13025, 13061), False, 'import pc_util\n'), ((13066, 13119), 'pc_util.write_ply', 'pc_util.write_ply', (['pc_obj_voted3', '"""pc_obj_voted3.ply"""'], {}), "(pc_obj_voted3, 'pc_obj_voted3.ply')\n", (13083, 13119), False, 'import pc_util\n'), ((13871, 13929), 'pc_util.write_oriented_bbox', 'pc_util.write_oriented_bbox', (['oriented_boxes', '"""gt_obbs.ply"""'], {}), "(oriented_boxes, 'gt_obbs.ply')\n", (13898, 13929), False, 'import pc_util\n'), ((13934, 13992), 'pc_util.write_ply', 'pc_util.write_ply', (['label[mask == 1, :]', '"""gt_centroids.ply"""'], {}), "(label[mask == 1, :], 'gt_centroids.ply')\n", (13951, 13992), False, 'import pc_util\n'), ((14807, 14858), 'pc_util.write_ply', 'pc_util.write_ply', (["sample['point_clouds']", '"""pc.ply"""'], {}), "(sample['point_clouds'], 'pc.ply')\n", (14824, 14858), False, 'import pc_util\n'), ((1456, 1489), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""dataset"""'], {}), "(BASE_DIR, 'dataset')\n", (1468, 1489), False, 'import os\n'), ((1751, 1812), 'os.path.join', 'os.path.join', (['self.data_path', 'split_set', '"""segments_dict_list"""'], {}), "(self.data_path, split_set, 'segments_dict_list')\n", (1763, 1812), False, 'import os\n'), ((3968, 4008), 'numpy.delete', 'np.delete', (['labels', 'rows_to_be_deleted', '(0)'], {}), '(labels, rows_to_be_deleted, 0)\n', (3977, 4008), True, 'import numpy as np\n'), ((6136, 6160), 'numpy.load', 'np.load', (['frame_data_path'], {}), '(frame_data_path)\n', (6143, 6160), True, 'import numpy as np\n'), ((6469, 6490), 'numpy.zeros_like', 'np.zeros_like', (['labels'], {}), '(labels)\n', (6482, 6490), True, 'import numpy as np\n'), ((9181, 9207), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 3)'], {}), '((MAX_NUM_OBJ, 3))\n', (9189, 9207), True, 'import numpy as np\n'), ((9230, 9256), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 3)'], {}), '((MAX_NUM_OBJ, 3))\n', (9238, 9256), True, 'import numpy as np\n'), ((9281, 9305), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ,)'], {}), '((MAX_NUM_OBJ,))\n', (9289, 9305), True, 'import numpy as np\n'), ((9332, 9356), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ,)'], {}), '((MAX_NUM_OBJ,))\n', (9340, 9356), True, 'import numpy as np\n'), ((9380, 9404), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ,)'], {}), '((MAX_NUM_OBJ,))\n', (9388, 9404), True, 'import numpy as np\n'), ((9430, 9456), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 3)'], {}), '((MAX_NUM_OBJ, 3))\n', (9438, 9456), True, 'import numpy as np\n'), 
((9478, 9499), 'numpy.zeros', 'np.zeros', (['MAX_NUM_OBJ'], {}), '(MAX_NUM_OBJ)\n', (9486, 9499), True, 'import numpy as np\n'), ((9565, 9591), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 8)'], {}), '((MAX_NUM_OBJ, 8))\n', (9573, 9591), True, 'import numpy as np\n'), ((10517, 10543), 'numpy.zeros', 'np.zeros', (['(MAX_NUM_OBJ, 6)'], {}), '((MAX_NUM_OBJ, 6))\n', (10525, 10543), True, 'import numpy as np\n'), ((11228, 11302), 'pc_util.random_sampling', 'pc_util.random_sampling', (['point_cloud', 'self.num_points'], {'return_choices': '(True)'}), '(point_cloud, self.num_points, return_choices=True)\n', (11251, 11302), False, 'import pc_util\n'), ((11892, 11913), 'numpy.zeros', 'np.zeros', (['MAX_NUM_OBJ'], {}), '(MAX_NUM_OBJ)\n', (11900, 11913), True, 'import numpy as np\n'), ((13559, 13570), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (13567, 13570), True, 'import numpy as np\n'), ((1828, 1872), 'os.path.exists', 'os.path.exists', (['self.segments_dict_list_path'], {}), '(self.segments_dict_list_path)\n', (1842, 1872), False, 'import os\n'), ((2082, 2096), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2093, 2096), False, 'import pickle\n'), ((7209, 7247), 'numpy.percentile', 'np.percentile', (['point_cloud[:, 2]', '(0.99)'], {}), '(point_cloud[:, 2], 0.99)\n', (7222, 7247), True, 'import numpy as np\n'), ((7695, 7722), 'waymo_utils.rotz', 'waymo_utils.rotz', (['rot_angle'], {}), '(rot_angle)\n', (7711, 7722), False, 'import waymo_utils\n'), ((7754, 7780), 'numpy.zeros_like', 'np.zeros_like', (['point_votes'], {}), '(point_votes)\n', (7767, 7780), True, 'import numpy as np\n'), ((10797, 10821), 'numpy.min', 'np.min', (['corners_3d[:, 0]'], {}), '(corners_3d[:, 0])\n', (10803, 10821), True, 'import numpy as np\n'), ((10840, 10864), 'numpy.min', 'np.min', (['corners_3d[:, 1]'], {}), '(corners_3d[:, 1])\n', (10846, 10864), True, 'import numpy as np\n'), ((10883, 10907), 'numpy.min', 'np.min', (['corners_3d[:, 2]'], {}), '(corners_3d[:, 2])\n', (10889, 10907), True, 'import numpy as np\n'), ((10926, 10950), 'numpy.max', 'np.max', (['corners_3d[:, 0]'], {}), '(corners_3d[:, 0])\n', (10932, 10950), True, 'import numpy as np\n'), ((10969, 10993), 'numpy.max', 'np.max', (['corners_3d[:, 1]'], {}), '(corners_3d[:, 1])\n', (10975, 10993), True, 'import numpy as np\n'), ((11012, 11036), 'numpy.max', 'np.max', (['corners_3d[:, 2]'], {}), '(corners_3d[:, 2])\n', (11018, 11036), True, 'import numpy as np\n'), ((11062, 11172), 'numpy.array', 'np.array', (['[(xmin + xmax) / 2, (ymin + ymax) / 2, (zmin + zmax) / 2, xmax - xmin, ymax -\n ymin, zmax - zmin]'], {}), '([(xmin + xmax) / 2, (ymin + ymax) / 2, (zmin + zmax) / 2, xmax -\n xmin, ymax - ymin, zmax - zmin])\n', (11070, 11172), True, 'import numpy as np\n'), ((3271, 3363), 'os.path.join', 'os.path.join', (['self.data_path', 'self.split_set', "segment_dict['id']", 'frames_list[frame_idx]'], {}), "(self.data_path, self.split_set, segment_dict['id'],\n frames_list[frame_idx])\n", (3283, 3363), False, 'import os\n'), ((7866, 7887), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (7878, 7887), True, 'import numpy as np\n'), ((7974, 7995), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (7986, 7995), True, 'import numpy as np\n'), ((8084, 8105), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (8096, 8105), True, 'import numpy as np\n'), ((8168, 8189), 'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (8180, 8189), True, 'import numpy as np\n'), ((8241, 8262), 
'numpy.transpose', 'np.transpose', (['rot_mat'], {}), '(rot_mat)\n', (8253, 8262), True, 'import numpy as np\n'), ((8697, 8720), 'numpy.tile', 'np.tile', (['scale_ratio', '(3)'], {}), '(scale_ratio, 3)\n', (8704, 8720), True, 'import numpy as np\n'), ((10652, 10687), 'box_util.get_corners_from_labels_array', 'get_corners_from_labels_array', (['bbox'], {}), '(bbox)\n', (10681, 10687), False, 'from box_util import get_corners_from_labels_array\n'), ((3176, 3240), 'os.path.join', 'os.path.join', (['self.data_path', 'self.split_set', "segment_dict['id']"], {}), "(self.data_path, self.split_set, segment_dict['id'])\n", (3188, 3240), False, 'import os\n'), ((3383, 3409), 'os.path.exists', 'os.path.exists', (['frame_path'], {}), '(frame_path)\n', (3397, 3409), False, 'import os\n'), ((7354, 7379), 'numpy.expand_dims', 'np.expand_dims', (['height', '(1)'], {}), '(height, 1)\n', (7368, 7379), True, 'import numpy as np\n'), ((8628, 8646), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8644, 8646), True, 'import numpy as np\n'), ((7616, 7634), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7632, 7634), True, 'import numpy as np\n')]
|
import numpy as np
class Agent:
def __init__(self):
self.q_table = np.zeros(shape=(3, ))
self.rewards = []
self.averaged_rewards = []
self.total_rewards = 0
self.action_cursor = 1
class HystereticAgentMatrix:
def __init__(self, environment, increasing_learning_rate=0.9, decreasing_learning_rate=0.1,
discount_factor=0.9, exploration_rate=0.01):
self.environment = environment
self.discount_factor = discount_factor
self.exploration_rate = exploration_rate
self.increasing_learning_rate = increasing_learning_rate
self.decreasing_learning_rate = decreasing_learning_rate
# Setup q_table
self.num_of_action = self.environment.actions.n
self.states_dim_x = self.environment.states.dim_x
self.states_dim_y = self.environment.states.dim_y
# Agents
self.num_of_agents = 2
self.agents = []
for i in range(self.num_of_agents):
self.agents.append(Agent())
self.steps = 1
def step(self):
actions = []
for agent in self.agents:
# Determine Actions
action = self.get_action(agent)
actions.append(action)
# Take action and update
for agent in self.agents:
# Previous State capture (Previous q value, previous position)
q_p = agent.q_table[agent.action_cursor]
# Take action
obs, reward, done, valid = self.environment.step(action=actions, agent_id=0)
# Update Q-table
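# Hysteretic rule: use the larger "increasing" learning rate when the TD error is
# non-negative and the smaller "decreasing" rate otherwise, so a teammate's
# exploratory (reward-lowering) actions drag the Q-values down more slowly.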
bellman_value = reward + self.discount_factor * (np.max(agent.q_table[agent.action_cursor]) - q_p)
if bellman_value >= 0:
new_q = q_p + self.increasing_learning_rate * bellman_value
else:
new_q = q_p + self.decreasing_learning_rate * bellman_value
agent.q_table[agent.action_cursor] = new_q
# self.exploration_rate = self.exploration_rate / self.steps
agent.total_rewards += reward
agent.rewards.append(reward)
if self.steps > 1:
agent.averaged_rewards.append(agent.total_rewards / (self.steps + 5))
self.steps += 1
def set_exploration_rate(self, rate):
self.exploration_rate = rate
def get_action(self, agent):
if np.random.randint(0, 100) / 100 < self.exploration_rate:
# Explore
action = np.random.randint(0, self.num_of_action)
else:
action = np.argmax(agent.q_table)
agent.action_cursor = action
return action
def get_averaged_rewards(self, agent_id=0):
return self.agents[agent_id].averaged_rewards, self.agents[agent_id + 1].averaged_rewards
def get_rewards(self):
return self.agents[0].rewards, self.agents[1].rewards
def reset_reward(self):
for agent in self.agents:
agent.rewards = []
agent.averaged_rewards = []
|
[
"numpy.zeros",
"numpy.argmax",
"numpy.random.randint",
"numpy.max"
] |
[((80, 100), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (88, 100), True, 'import numpy as np\n'), ((2484, 2524), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_of_action'], {}), '(0, self.num_of_action)\n', (2501, 2524), True, 'import numpy as np\n'), ((2560, 2584), 'numpy.argmax', 'np.argmax', (['agent.q_table'], {}), '(agent.q_table)\n', (2569, 2584), True, 'import numpy as np\n'), ((2384, 2409), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (2401, 2409), True, 'import numpy as np\n'), ((1646, 1688), 'numpy.max', 'np.max', (['agent.q_table[agent.action_cursor]'], {}), '(agent.q_table[agent.action_cursor])\n', (1652, 1688), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of TensorFlow kernel layers.
Classes:
GroupAttention: A simple group-specific attention layer.
Kernel: A kernel that allows the user to separately specify a
distance and similarity function.
AttentionKernel: A kernel that uses group-specific attention
weights and allows the user to separately specify a distance
and similarity function.
GroupAttentionVariational: A variational group attention layer.
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
import psiz.keras.constraints as pk_constraints
import psiz.keras.initializers as pk_initializers
from psiz.keras.layers.variational import Variational
from psiz.keras.layers.distances.minkowski import WeightedMinkowski
from psiz.models.base import GroupLevel
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='GroupAttention'
)
class GroupAttention(tf.keras.layers.Layer):
"""Group-specific attention weights."""
def __init__(
self, n_group=1, n_dim=None, fit_group=None,
embeddings_initializer=None, embeddings_regularizer=None,
embeddings_constraint=None, **kwargs):
"""Initialize.
Arguments:
n_dim: An integer indicating the dimensionality of the
embeddings. Must be equal to or greater than one.
n_group (optional): An integer indicating the number of
different population groups in the embedding. A
separate set of attention weights will be inferred for
each group. Must be equal to or greater than one.
fit_group: Boolean indicating if variable is trainable.
shape=(n_group,)
Raises:
ValueError: If `n_dim` or `n_group` arguments are invalid.
"""
super(GroupAttention, self).__init__(**kwargs)
if (n_group < 1):
raise ValueError(
"The number of groups (`n_group`) must be an integer greater "
"than 0."
)
self.n_group = n_group
if (n_dim < 1):
raise ValueError(
"The dimensionality (`n_dim`) must be an integer "
"greater than 0."
)
self.n_dim = n_dim
# Handle initializer.
if embeddings_initializer is None:
if self.n_group == 1:
embeddings_initializer = tf.keras.initializers.Ones()
else:
scale = self.n_dim
alpha = np.ones((self.n_dim))
embeddings_initializer = pk_initializers.RandomAttention(
alpha, scale
)
self.embeddings_initializer = tf.keras.initializers.get(
embeddings_initializer
)
# Handle regularizer.
self.embeddings_regularizer = tf.keras.regularizers.get(
embeddings_regularizer
)
# Handle constraints.
if embeddings_constraint is None:
embeddings_constraint = pk_constraints.NonNegNorm(
scale=self.n_dim
)
self.embeddings_constraint = tf.keras.constraints.get(
embeddings_constraint
)
if fit_group is None:
if self.n_group == 1:
fit_group = False # TODO default should always be train
else:
fit_group = True
self.fit_group = fit_group
self.embeddings = self.add_weight(
shape=(self.n_group, self.n_dim),
initializer=self.embeddings_initializer,
trainable=fit_group, name='w', dtype=K.floatx(),
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint
)
self.mask_zero = False
def call(self, inputs):
"""Call.
Inflate weights by `group_id`.
Arguments:
inputs: A Tensor denoting `group_id`.
"""
output = tf.gather(self.embeddings, inputs)
# Add singleton dimension for sample_size.
output = tf.expand_dims(output, axis=0)
return output
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'n_group': int(self.n_group),
'n_dim': int(self.n_dim),
'fit_group': self.fit_group,
'embeddings_initializer':
tf.keras.initializers.serialize(self.embeddings_initializer),
'embeddings_regularizer':
tf.keras.regularizers.serialize(self.embeddings_regularizer),
'embeddings_constraint':
tf.keras.constraints.serialize(self.embeddings_constraint)
})
return config
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='Kernel'
)
class Kernel(GroupLevel):
"""A basic population-wide kernel."""
def __init__(self, distance=None, similarity=None, **kwargs):
"""Initialize."""
super(Kernel, self).__init__(**kwargs)
if distance is None:
distance = WeightedMinkowski()
self.distance = distance
if similarity is None:
similarity = ExponentialSimilarity()
self.similarity = similarity
# Gather all pointers to theta-associated variables.
theta = self.distance.theta
theta.update(self.similarity.theta)
self.theta = theta
self._n_sample = ()
self._kl_weight = 0
@property
def n_sample(self):
return self._n_sample
@n_sample.setter
def n_sample(self, n_sample):
self._n_sample = n_sample
self.distance.n_sample = n_sample
self.similarity.n_sample = n_sample
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, kl_weight):
self._kl_weight = kl_weight
# Set kl_weight of constituent layers. # TODO MAYBE use `_layers`?
self.distance.kl_weight = kl_weight
self.similarity.kl_weight = kl_weight
def call(self, inputs):
"""Call.
Compute k(z_0, z_1), where `k` is the similarity kernel.
Note: Broadcasting rules are used to compute similarity between
`z_0` and `z_1`.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
"""
z_0 = inputs[0]
z_1 = inputs[1]
# group = inputs[-1][:, self.group_level]
# Create identity attention weights.
attention = tf.ones_like(z_0)
# Compute distance between query and references.
dist_qr = self.distance([z_0, z_1, attention])
# Compute similarity.
sim_qr = self.similarity(dist_qr)
return sim_qr
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'distance': tf.keras.utils.serialize_keras_object(self.distance),
'similarity': tf.keras.utils.serialize_keras_object(
self.similarity
),
})
return config
@classmethod
def from_config(cls, config):
"""Create from configuration."""
config['distance'] = tf.keras.layers.deserialize(config['distance'])
config['similarity'] = tf.keras.layers.deserialize(
config['similarity']
)
return cls(**config)
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='AttentionKernel'
)
class AttentionKernel(GroupLevel):
"""Attention kernel container."""
def __init__(
self, n_dim=None, attention=None, distance=None, similarity=None,
**kwargs):
"""Initialize.
Arguments:
n_dim: The dimensionality of the attention weights. This
should match the dimensionality of the embedding.
attention: A attention layer. If this is specified, the
argument `n_dim` is ignored.
distance: A distance layer.
similarity: A similarity layer.
"""
super(AttentionKernel, self).__init__(**kwargs)
if attention is None:
attention = GroupAttention(n_dim=n_dim, n_group=1)
self.attention = attention
if distance is None:
distance = WeightedMinkowski()
self.distance = distance
if similarity is None:
similarity = ExponentialSimilarity()
self.similarity = similarity
# Gather all pointers to theta-associated variables.
theta = self.distance.theta
theta.update(self.similarity.theta)
self.theta = theta
self._n_sample = ()
self._kl_weight = 0
def call(self, inputs):
"""Call.
Compute k(z_0, z_1), where `k` is the similarity kernel.
Note: Broadcasting rules are used to compute similarity between
`z_0` and `z_1`.
Arguments:
inputs:
z_0: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
z_1: A tf.Tensor denoting a set of vectors.
shape = (batch_size, [n, m, ...] n_dim)
group: A tf.Tensor denoting group assignments.
shape = (batch_size, k)
"""
z_0 = inputs[0]
z_1 = inputs[1]
group = inputs[-1]
# Expand attention weights.
attention = self.attention(group[:, self.group_level])
# Add singleton inner dimensions that are not related to sample_size,
# batch_size or vector dimensionality.
attention_shape = tf.shape(attention)
sample_size = tf.expand_dims(attention_shape[0], axis=0)
batch_size = tf.expand_dims(attention_shape[1], axis=0)
dim_size = tf.expand_dims(attention_shape[-1], axis=0)
n_expand = tf.rank(z_0) - tf.rank(attention)
shape_exp = tf.ones(n_expand, dtype=attention_shape[0].dtype)
shape_exp = tf.concat(
(sample_size, batch_size, shape_exp, dim_size), axis=0
)
attention = tf.reshape(attention, shape_exp)
# Compute distance between query and references.
dist_qr = self.distance([z_0, z_1, attention])
# Compute similarity.
sim_qr = self.similarity(dist_qr)
return sim_qr
# @property
# def n_dim(self):
# """Getter method for n_dim."""
# return self.attention.n_dim
@property
def n_sample(self):
return self._n_sample
@n_sample.setter
def n_sample(self, n_sample):
self._n_sample = n_sample
self.attention.n_sample = n_sample
self.distance.n_sample = n_sample
self.similarity.n_sample = n_sample
@property
def kl_weight(self):
return self._kl_weight
@kl_weight.setter
def kl_weight(self, kl_weight):
self._kl_weight = kl_weight
# Set kl_weight of constituent layers. # TODO MAYBE use `_layers`?
self.attention.kl_weight = kl_weight
self.distance.kl_weight = kl_weight
self.similarity.kl_weight = kl_weight
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
# 'n_dim': int(self.n_dim),
'attention': tf.keras.utils.serialize_keras_object(self.attention),
'distance': tf.keras.utils.serialize_keras_object(self.distance),
'similarity': tf.keras.utils.serialize_keras_object(
self.similarity
),
})
return config
@classmethod
def from_config(cls, config):
"""Create from configuration."""
config['attention'] = tf.keras.layers.deserialize(config['attention'])
config['distance'] = tf.keras.layers.deserialize(config['distance'])
config['similarity'] = tf.keras.layers.deserialize(
config['similarity']
)
return cls(**config)
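# A minimal usage sketch (illustrative): group-specific attention weights are looked
# up from `group`, broadcast against the coordinate tensors, and then passed through
# the same distance -> similarity pipeline as `Kernel`, e.g.
#     attn_kernel = AttentionKernel(n_dim=3)
#     sim_qr = attn_kernel([z_q, z_r, group])  # group: int tensor, shape (batch_size, k)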
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='GroupAttentionVariational'
)
class GroupAttentionVariational(Variational):
"""Variational analog of group-specific attention weights."""
def __init__(self, **kwargs):
"""Initialize.
Arguments:
kwargs: Additional key-word arguments.
"""
super(GroupAttentionVariational, self).__init__(**kwargs)
def call(self, inputs):
"""Call.
Grab `group_id` only.
Arguments:
inputs: A Tensor denoting a trial's group membership.
"""
# Run forward pass through variational posterior layer.
outputs = self.posterior(inputs)
# Apply KL divergence between posterior and prior.
self.add_kl_loss(self.posterior.embeddings, self.prior.embeddings)
return outputs
@property
def n_group(self):
"""Getter method for `n_group`"""
# TODO need better decoupling, not all distributions will have loc.
return self.posterior.embeddings.distribution.loc.shape[0]
@property
def n_dim(self):
"""Getter method for `n_group`"""
# TODO need better decoupling, not all distributions will have loc.
return self.posterior.embeddings.distribution.loc.shape[1]
@property
def mask_zero(self):
"""Getter method for embeddings `mask_zero`."""
return self.posterior.mask_zero
@property
def embeddings(self):
"""Getter method for embeddings posterior mode."""
return self.posterior.embeddings
|
[
"tensorflow.shape",
"psiz.keras.initializers.RandomAttention",
"tensorflow.keras.utils.serialize_keras_object",
"tensorflow.keras.initializers.Ones",
"tensorflow.ones_like",
"tensorflow.keras.initializers.serialize",
"tensorflow.rank",
"tensorflow.concat",
"tensorflow.keras.regularizers.get",
"tensorflow.keras.constraints.serialize",
"psiz.keras.layers.distances.minkowski.WeightedMinkowski",
"numpy.ones",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.gather",
"tensorflow.python.keras.backend.floatx",
"tensorflow.reshape",
"psiz.keras.constraints.NonNegNorm",
"tensorflow.expand_dims",
"tensorflow.keras.constraints.get",
"tensorflow.keras.regularizers.serialize",
"tensorflow.ones",
"tensorflow.keras.initializers.get",
"tensorflow.keras.layers.deserialize"
] |
[((1520, 1618), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""GroupAttention"""'}), "(package='psiz.keras.layers',\n name='GroupAttention')\n", (1562, 1618), True, 'import tensorflow as tf\n'), ((5502, 5592), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""Kernel"""'}), "(package='psiz.keras.layers',\n name='Kernel')\n", (5544, 5592), True, 'import tensorflow as tf\n'), ((8384, 8483), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""AttentionKernel"""'}), "(package='psiz.keras.layers',\n name='AttentionKernel')\n", (8426, 8483), True, 'import tensorflow as tf\n'), ((12988, 13097), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""psiz.keras.layers"""', 'name': '"""GroupAttentionVariational"""'}), "(package='psiz.keras.layers',\n name='GroupAttentionVariational')\n", (13030, 13097), True, 'import tensorflow as tf\n'), ((3453, 3502), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['embeddings_initializer'], {}), '(embeddings_initializer)\n', (3478, 3502), True, 'import tensorflow as tf\n'), ((3594, 3643), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['embeddings_regularizer'], {}), '(embeddings_regularizer)\n', (3619, 3643), True, 'import tensorflow as tf\n'), ((3886, 3933), 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['embeddings_constraint'], {}), '(embeddings_constraint)\n', (3910, 3933), True, 'import tensorflow as tf\n'), ((4714, 4748), 'tensorflow.gather', 'tf.gather', (['self.embeddings', 'inputs'], {}), '(self.embeddings, inputs)\n', (4723, 4748), True, 'import tensorflow as tf\n'), ((4817, 4847), 'tensorflow.expand_dims', 'tf.expand_dims', (['output'], {'axis': '(0)'}), '(output, axis=0)\n', (4831, 4847), True, 'import tensorflow as tf\n'), ((7500, 7517), 'tensorflow.ones_like', 'tf.ones_like', (['z_0'], {}), '(z_0)\n', (7512, 7517), True, 'import tensorflow as tf\n'), ((8201, 8248), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['distance']"], {}), "(config['distance'])\n", (8228, 8248), True, 'import tensorflow as tf\n'), ((8280, 8329), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['similarity']"], {}), "(config['similarity'])\n", (8307, 8329), True, 'import tensorflow as tf\n'), ((10643, 10662), 'tensorflow.shape', 'tf.shape', (['attention'], {}), '(attention)\n', (10651, 10662), True, 'import tensorflow as tf\n'), ((10685, 10727), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_shape[0]'], {'axis': '(0)'}), '(attention_shape[0], axis=0)\n', (10699, 10727), True, 'import tensorflow as tf\n'), ((10749, 10791), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_shape[1]'], {'axis': '(0)'}), '(attention_shape[1], axis=0)\n', (10763, 10791), True, 'import tensorflow as tf\n'), ((10811, 10854), 'tensorflow.expand_dims', 'tf.expand_dims', (['attention_shape[-1]'], {'axis': '(0)'}), '(attention_shape[-1], axis=0)\n', (10825, 10854), True, 'import tensorflow as tf\n'), ((10929, 10978), 'tensorflow.ones', 'tf.ones', (['n_expand'], {'dtype': 'attention_shape[0].dtype'}), '(n_expand, dtype=attention_shape[0].dtype)\n', (10936, 10978), True, 'import tensorflow 
as tf\n'), ((10999, 11064), 'tensorflow.concat', 'tf.concat', (['(sample_size, batch_size, shape_exp, dim_size)'], {'axis': '(0)'}), '((sample_size, batch_size, shape_exp, dim_size), axis=0)\n', (11008, 11064), True, 'import tensorflow as tf\n'), ((11107, 11139), 'tensorflow.reshape', 'tf.reshape', (['attention', 'shape_exp'], {}), '(attention, shape_exp)\n', (11117, 11139), True, 'import tensorflow as tf\n'), ((12727, 12775), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['attention']"], {}), "(config['attention'])\n", (12754, 12775), True, 'import tensorflow as tf\n'), ((12805, 12852), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['distance']"], {}), "(config['distance'])\n", (12832, 12852), True, 'import tensorflow as tf\n'), ((12884, 12933), 'tensorflow.keras.layers.deserialize', 'tf.keras.layers.deserialize', (["config['similarity']"], {}), "(config['similarity'])\n", (12911, 12933), True, 'import tensorflow as tf\n'), ((3775, 3818), 'psiz.keras.constraints.NonNegNorm', 'pk_constraints.NonNegNorm', ([], {'scale': 'self.n_dim'}), '(scale=self.n_dim)\n', (3800, 3818), True, 'import psiz.keras.constraints as pk_constraints\n'), ((5856, 5875), 'psiz.keras.layers.distances.minkowski.WeightedMinkowski', 'WeightedMinkowski', ([], {}), '()\n', (5873, 5875), False, 'from psiz.keras.layers.distances.minkowski import WeightedMinkowski\n'), ((9305, 9324), 'psiz.keras.layers.distances.minkowski.WeightedMinkowski', 'WeightedMinkowski', ([], {}), '()\n', (9322, 9324), False, 'from psiz.keras.layers.distances.minkowski import WeightedMinkowski\n'), ((10875, 10887), 'tensorflow.rank', 'tf.rank', (['z_0'], {}), '(z_0)\n', (10882, 10887), True, 'import tensorflow as tf\n'), ((10890, 10908), 'tensorflow.rank', 'tf.rank', (['attention'], {}), '(attention)\n', (10897, 10908), True, 'import tensorflow as tf\n'), ((3162, 3190), 'tensorflow.keras.initializers.Ones', 'tf.keras.initializers.Ones', ([], {}), '()\n', (3188, 3190), True, 'import tensorflow as tf\n'), ((3268, 3287), 'numpy.ones', 'np.ones', (['self.n_dim'], {}), '(self.n_dim)\n', (3275, 3287), True, 'import numpy as np\n'), ((3331, 3376), 'psiz.keras.initializers.RandomAttention', 'pk_initializers.RandomAttention', (['alpha', 'scale'], {}), '(alpha, scale)\n', (3362, 3376), True, 'import psiz.keras.initializers as pk_initializers\n'), ((4372, 4382), 'tensorflow.python.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4380, 4382), True, 'from tensorflow.python.keras import backend as K\n'), ((5176, 5236), 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self.embeddings_initializer'], {}), '(self.embeddings_initializer)\n', (5207, 5236), True, 'import tensorflow as tf\n'), ((5292, 5352), 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self.embeddings_regularizer'], {}), '(self.embeddings_regularizer)\n', (5323, 5352), True, 'import tensorflow as tf\n'), ((5407, 5465), 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self.embeddings_constraint'], {}), '(self.embeddings_constraint)\n', (5437, 5465), True, 'import tensorflow as tf\n'), ((7880, 7932), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.distance'], {}), '(self.distance)\n', (7917, 7932), True, 'import tensorflow as tf\n'), ((7960, 8014), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.similarity'], {}), '(self.similarity)\n', 
(7997, 8014), True, 'import tensorflow as tf\n'), ((12326, 12379), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.attention'], {}), '(self.attention)\n', (12363, 12379), True, 'import tensorflow as tf\n'), ((12405, 12457), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.distance'], {}), '(self.distance)\n', (12442, 12457), True, 'import tensorflow as tf\n'), ((12485, 12539), 'tensorflow.keras.utils.serialize_keras_object', 'tf.keras.utils.serialize_keras_object', (['self.similarity'], {}), '(self.similarity)\n', (12522, 12539), True, 'import tensorflow as tf\n')]
|
import os.path as osp
import numpy as np
import math
from tqdm import tqdm
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from torchvision import transforms, datasets
from ofa.utils import AverageMeter, accuracy
from ofa.model_zoo import ofa_specialized
from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics
import copy
import random
def evaluate_ofa_resnet_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['w']) == 6 and len(net_config['e']) == 18 and len(net_config['d']) == 5
ofa_net.set_active_subnet(w=net_config['w'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, 224, batch_size)
top1 = validate(subnet, path, 224, data_loader, batch_size, device)
return top1
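# Hypothetical usage sketch: `net_config` mirrors the asserts above, i.e. lists of
# length 6 ('w'), 18 ('e'), and 5 ('d') accepted by `ofa_net.set_active_subnet`.
#     top1 = evaluate_ofa_resnet_subnet(ofa_resnet, imagenet_path, net_config,
#                                       data_loader, batch_size=250)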
def evaluate_ofa_resnet_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'w' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['w']) == 6 and len(net_config1['e']) == 18 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(w=net_config1['w'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, 224, batch_size)
ofa_net.set_active_subnet(w=net_config2['w'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, 224, batch_size)
# assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, 224, data_loader, batch_size, device)
return top1
def evaluate_ofa_subnet(ofa_net, path, net_config, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config and 'd' in net_config and 'e' in net_config
assert len(net_config['ks']) == 20 and len(net_config['e']) == 20 and len(net_config['d']) == 5
ofa_net.set_active_subnet(ks=net_config['ks'], d=net_config['d'], e=net_config['e'])
subnet = ofa_net.get_active_subnet().to(device)
calib_bn(subnet, path, net_config['r'][0], batch_size)
top1 = validate(subnet, path, net_config['r'][0], data_loader, batch_size, device)
return top1
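# Hypothetical usage sketch (the super-network constructor and id below are
# illustrative, not defined in this file):
#     from ofa.model_zoo import ofa_net
#     ofa_network = ofa_net('ofa_mbv3_d234_e346_k357_w1.0', pretrained=True)
#     net_config = {'ks': [7] * 20, 'e': [6] * 20, 'd': [4] * 5, 'r': [224]}
#     top1 = evaluate_ofa_subnet(ofa_network, imagenet_path, net_config,
#                                data_loader, batch_size=250)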
def evaluate_ofa_ensemble_subnet(ofa_net, path, net_config1, net_config2, data_loader, batch_size, device='cuda:0'):
assert 'ks' in net_config1 and 'd' in net_config1 and 'e' in net_config1
assert len(net_config1['ks']) == 20 and len(net_config1['e']) == 20 and len(net_config1['d']) == 5
ofa_net.set_active_subnet(ks=net_config1['ks'], d=net_config1['d'], e=net_config1['e'])
subnet1 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet1, path, net_config1['r'][0], batch_size)
ofa_net.set_active_subnet(ks=net_config2['ks'], d=net_config2['d'], e=net_config2['e'])
subnet2 = ofa_net.get_active_subnet().to(device)
calib_bn(subnet2, path, net_config2['r'][0], batch_size)
assert net_config2['r'][0]==net_config1['r'][0]
subnets = []
subnets.append(subnet2)
subnets.append(subnet1)
top1 = ensemble_validate(subnets, path, net_config2['r'][0], data_loader, batch_size, device)
return top1
def calib_bn(net, path, image_size, batch_size, num_images=2000):
# print('Creating dataloader for resetting BN running statistics...')
dataset = datasets.ImageFolder(
osp.join(
path,
'train'),
transforms.Compose([
transforms.RandomResizedCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=32. / 255., saturation=0.5),
transforms.ToTensor(),
transforms.Normalize(
mean=[
0.485,
0.456,
0.406],
std=[
0.229,
0.224,
0.225]
),
])
)
chosen_indexes = np.random.choice(list(range(len(dataset))), num_images)
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sub_sampler,
batch_size=batch_size,
num_workers=16,
pin_memory=True,
drop_last=False,
)
# print('Resetting BN running statistics (this may take 10-20 seconds)...')
set_running_statistics(net, data_loader)
def ensemble_validate(nets, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
print('use cuda')
for net in nets:
net = torch.nn.DataParallel(net).to(device)
else:
for net in nets:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
for net in nets:
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
                n = len(nets)
                output = 0
                # average the member networks' outputs (use `k` so the dataloader
                # index `i` from the enclosing loop is not shadowed)
                for k, net in enumerate(nets):
                    if k == 0:
                        output = net(images)
                    else:
                        output += net(images)
                output = output / n
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.3f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def validate(net, path, image_size, data_loader, batch_size=100, device='cuda:0'):
if 'cuda' in device:
net = torch.nn.DataParallel(net).to(device)
else:
net = net.to(device)
data_loader.dataset.transform = transforms.Compose([
transforms.Resize(int(math.ceil(image_size / 0.875))),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss().to(device)
net.eval()
net = net.to(device)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
with torch.no_grad():
with tqdm(total=len(data_loader), desc='Validate') as t:
for i, (images, labels) in enumerate(data_loader):
images, labels = images.to(device), labels.to(device)
# compute output
output = net(images)
loss = criterion(output, labels)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0].item(), images.size(0))
top5.update(acc5[0].item(), images.size(0))
t.set_postfix({
'loss': losses.avg,
'top1': top1.avg,
'top5': top5.avg,
'img_size': images.size(2),
})
t.update(1)
print('Results: loss=%.5f,\t top1=%.1f,\t top5=%.1f' % (losses.avg, top1.avg, top5.avg))
return top1.avg
def evaluate_ofa_specialized(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
def select_platform_name():
valid_platform_name = [
'pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops'
]
print("Please select a hardware platform from ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
while True:
platform_name = input()
platform_name = platform_name.lower()
if platform_name in valid_platform_name:
return platform_name
print("Platform name is invalid! Please select in ('pixel1', 'pixel2', 'note10', 'note8', 's7edge', 'lg-g8', '1080ti', 'v100', 'tx2', 'cpu', 'flops')!\n")
def select_netid(platform_name):
platform_efficiency_map = {
'pixel1': {
143: 'pixel1_lat@[email protected]_finetune@75',
132: 'pixel1_lat@[email protected]_finetune@75',
79: 'pixel1_lat@[email protected]_finetune@75',
58: 'pixel1_lat@[email protected]_finetune@75',
40: 'pixel1_lat@[email protected]_finetune@25',
28: 'pixel1_lat@[email protected]_finetune@25',
20: 'pixel1_lat@[email protected]_finetune@25',
},
'pixel2': {
62: 'pixel2_lat@[email protected]_finetune@25',
50: 'pixel2_lat@[email protected]_finetune@25',
35: 'pixel2_lat@[email protected]_finetune@25',
25: 'pixel2_lat@[email protected]_finetune@25',
},
'note10': {
64: 'note10_lat@[email protected]_finetune@75',
50: 'note10_lat@[email protected]_finetune@75',
41: 'note10_lat@[email protected]_finetune@75',
30: 'note10_lat@[email protected]_finetune@75',
22: 'note10_lat@[email protected]_finetune@25',
16: 'note10_lat@[email protected]_finetune@25',
11: 'note10_lat@[email protected]_finetune@25',
8: 'note10_lat@[email protected]_finetune@25',
},
'note8': {
65: 'note8_lat@[email protected]_finetune@25',
49: 'note8_lat@[email protected]_finetune@25',
31: 'note8_lat@[email protected]_finetune@25',
22: 'note8_lat@[email protected]_finetune@25',
},
's7edge': {
88: 's7edge_lat@[email protected]_finetune@25',
58: 's7edge_lat@[email protected]_finetune@25',
41: 's7edge_lat@[email protected]_finetune@25',
29: 's7edge_lat@[email protected]_finetune@25',
},
'lg-g8': {
24: 'LG-G8_lat@[email protected]_finetune@25',
16: 'LG-G8_lat@[email protected]_finetune@25',
11: 'LG-G8_lat@[email protected]_finetune@25',
8: 'LG-G8_lat@[email protected]_finetune@25',
},
'1080ti': {
27: '1080ti_gpu64@[email protected]_finetune@25',
22: '1080ti_gpu64@[email protected]_finetune@25',
15: '1080ti_gpu64@[email protected]_finetune@25',
12: '1080ti_gpu64@[email protected]_finetune@25',
},
'v100': {
11: 'v100_gpu64@[email protected]_finetune@25',
9: 'v100_gpu64@[email protected]_finetune@25',
6: 'v100_gpu64@[email protected]_finetune@25',
5: 'v100_gpu64@[email protected]_finetune@25',
},
'tx2': {
96: 'tx2_gpu16@[email protected]_finetune@25',
80: 'tx2_gpu16@[email protected]_finetune@25',
47: 'tx2_gpu16@[email protected]_finetune@25',
35: 'tx2_gpu16@[email protected]_finetune@25',
},
'cpu': {
17: 'cpu_lat@[email protected]_finetune@25',
15: 'cpu_lat@[email protected]_finetune@25',
11: 'cpu_lat@[email protected]_finetune@25',
10: 'cpu_lat@[email protected]_finetune@25',
},
'flops': {
595: 'flops@[email protected]_finetune@75',
482: 'flops@[email protected]_finetune@75',
389: 'flops@[email protected]_finetune@75',
}
}
sub_efficiency_map = platform_efficiency_map[platform_name]
if not platform_name == 'flops':
print("Now, please specify a latency constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'ms. (Please just input the number.) \n')
else:
print("Now, please specify a FLOPs constraint for model specialization among", sorted(list(sub_efficiency_map.keys())), 'MFLOPs. (Please just input the number.) \n')
while True:
efficiency_constraint = input()
if not efficiency_constraint.isdigit():
print('Sorry, please input an integer! \n')
continue
efficiency_constraint = int(efficiency_constraint)
if not efficiency_constraint in sub_efficiency_map.keys():
print('Sorry, please choose a value from: ', sorted(list(sub_efficiency_map.keys())), '.\n')
continue
return sub_efficiency_map[efficiency_constraint]
if not ensemble:
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
validate(net, path, image_size, data_loader, batch_size, device)
else:
nets = []
for i in range(2):
print('{}model'.format(i))
platform_name = select_platform_name()
net_id = select_netid(platform_name)
net, image_size = ofa_specialized(net_id=net_id, pretrained=True)
nets.append(net)
ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
return net_id
net_id = ['pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'pixel2_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'note10_lat@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
'LG-G8_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25',
'tx2_gpu16@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'flops@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', ]
def evaluate_ofa_space(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
for i in range(1, n):
for j in range(i):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return net_id[best_team[0]], net_id[best_team[1]]
def evaluate_ofa_best_acc_team(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
space = []
best_team =[]
i = n-1
for j in range(18, n):
nets = []
team = []
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
if acc>best_acc:
best_acc=acc
best_team = team
print('space {} best_acc{}'.format(i+1, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
def evaluate_ofa_random_sample(path, data_loader, batch_size=100, device='cuda:0', ensemble=False):
net_acc=[]
for i, id in enumerate(net_id):
acc=""
for j in range(2, len(id)):
if id[j]=='.':
acc=id[j-2]+id[j-1]+id[j]+id[j+1]
net_acc.append(acc)
id =np.argsort(np.array(net_acc))
new_net_id = copy.deepcopy(net_id)
for i, sortid in enumerate(id):
new_net_id[i] = net_id[sortid]
print('new_net_id', new_net_id)
n = len(net_id)
best_acc = 0
acc_list = []
space = []
best_team =[]
for k in range(20):
nets = []
team = []
i = random.randint(0, n-1)
j = (i + random.randint(1, n-1)) % n
print('i:{} j:{}'.format(i, j))
team.append(j)
team.append(i)
net, image_size = ofa_specialized(net_id=new_net_id[j], pretrained=True)
nets.append(net)
net, image_size = ofa_specialized(net_id=new_net_id[i], pretrained=True)
nets.append(net)
acc = ensemble_validate(nets, path, image_size, data_loader, batch_size, device)
print('net i:{} netj:{} acc:{}'.format(new_net_id[i], new_net_id[j], acc))
acc_list.append(acc)
if acc>best_acc:
best_acc=acc
best_team = team
avg_acc = np.mean(acc_list)
std_acc = np.std(acc_list, ddof=1)
var_acc = np.var(acc_list)
print("avg{} var{} std{}".format(avg_acc, std_acc, var_acc))
print('best_random_team best_acc{}'.format(best_team, best_acc))
space.append(best_acc)
print('space:{}'.format(space))
return new_net_id[best_team[0]], new_net_id[best_team[1]]
sort_net_id=['tx2_gpu16@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25',
'note10_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25',
             'cpu_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25',
             'tx2_gpu16@[email protected]_finetune@25', 'v100_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
             'note10_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25', 'cpu_lat@[email protected]_finetune@25',
's7edge_lat@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'note8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@25', '1080ti_gpu64@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'note10_lat@[email protected]_finetune@25',
'cpu_lat@[email protected]_finetune@25', 'tx2_gpu16@[email protected]_finetune@25', 'pixel2_lat@[email protected]_finetune@25',
'v100_gpu64@[email protected]_finetune@25', 'note8_lat@[email protected]_finetune@25', 's7edge_lat@[email protected]_finetune@25',
'1080ti_gpu64@[email protected]_finetune@25', 'LG-G8_lat@[email protected]_finetune@25', 'pixel1_lat@[email protected]_finetune@75',
'pixel1_lat@[email protected]_finetune@75', 'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75',
'flops@[email protected]_finetune@75', 'pixel1_lat@[email protected]_finetune@75', 'note10_lat@[email protected]_finetune@75']
|
[
"torch.nn.CrossEntropyLoss",
"ofa.model_zoo.ofa_specialized",
"numpy.array",
"torchvision.transforms.ColorJitter",
"copy.deepcopy",
"numpy.mean",
"ofa.utils.accuracy",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomResizedCrop",
"random.randint",
"torchvision.transforms.RandomHorizontalFlip",
"ofa.imagenet_classification.elastic_nn.utils.set_running_statistics",
"torchvision.transforms.Normalize",
"numpy.std",
"torchvision.transforms.CenterCrop",
"math.ceil",
"os.path.join",
"ofa.utils.AverageMeter",
"numpy.var"
] |
[((4567, 4607), 'ofa.imagenet_classification.elastic_nn.utils.set_running_statistics', 'set_running_statistics', (['net', 'data_loader'], {}), '(net, data_loader)\n', (4589, 4607), False, 'from ofa.imagenet_classification.elastic_nn.utils import set_running_statistics\n'), ((5384, 5398), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5396, 5398), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((5410, 5424), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5422, 5424), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((5436, 5450), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (5448, 5450), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7349, 7363), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7361, 7363), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7375, 7389), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7387, 7389), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7401, 7415), 'ofa.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7413, 7415), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((17124, 17145), 'copy.deepcopy', 'copy.deepcopy', (['net_id'], {}), '(net_id)\n', (17137, 17145), False, 'import copy\n'), ((18434, 18455), 'copy.deepcopy', 'copy.deepcopy', (['net_id'], {}), '(net_id)\n', (18447, 18455), False, 'import copy\n'), ((19766, 19787), 'copy.deepcopy', 'copy.deepcopy', (['net_id'], {}), '(net_id)\n', (19779, 19787), False, 'import copy\n'), ((20719, 20736), 'numpy.mean', 'np.mean', (['acc_list'], {}), '(acc_list)\n', (20726, 20736), True, 'import numpy as np\n'), ((20751, 20775), 'numpy.std', 'np.std', (['acc_list'], {'ddof': '(1)'}), '(acc_list, ddof=1)\n', (20757, 20775), True, 'import numpy as np\n'), ((20790, 20806), 'numpy.var', 'np.var', (['acc_list'], {}), '(acc_list)\n', (20796, 20806), True, 'import numpy as np\n'), ((3559, 3582), 'os.path.join', 'osp.join', (['path', '"""train"""'], {}), "(path, 'train')\n", (3567, 3582), True, 'import os.path as osp\n'), ((14066, 14113), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'net_id', 'pretrained': '(True)'}), '(net_id=net_id, pretrained=True)\n', (14081, 14113), False, 'from ofa.model_zoo import ofa_specialized\n'), ((17088, 17105), 'numpy.array', 'np.array', (['net_acc'], {}), '(net_acc)\n', (17096, 17105), True, 'import numpy as np\n'), ((18398, 18415), 'numpy.array', 'np.array', (['net_acc'], {}), '(net_acc)\n', (18406, 18415), True, 'import numpy as np\n'), ((18784, 18838), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[j]', 'pretrained': '(True)'}), '(net_id=new_net_id[j], pretrained=True)\n', (18799, 18838), False, 'from ofa.model_zoo import ofa_specialized\n'), ((18890, 18944), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[i]', 'pretrained': '(True)'}), '(net_id=new_net_id[i], pretrained=True)\n', (18905, 18944), False, 'from ofa.model_zoo import ofa_specialized\n'), ((19730, 19747), 'numpy.array', 'np.array', (['net_acc'], {}), '(net_acc)\n', (19738, 19747), True, 'import numpy as np\n'), ((20059, 20083), 'random.randint', 'random.randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (20073, 20083), False, 'import random\n'), ((20239, 20293), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[j]', 'pretrained': '(True)'}), '(net_id=new_net_id[j], pretrained=True)\n', (20254, 20293), False, 'from ofa.model_zoo import ofa_specialized\n'), ((20345, 20399), 
'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[i]', 'pretrained': '(True)'}), '(net_id=new_net_id[i], pretrained=True)\n', (20360, 20399), False, 'from ofa.model_zoo import ofa_specialized\n'), ((5032, 5065), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (5053, 5065), False, 'from torchvision import transforms, datasets\n'), ((5075, 5096), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5094, 5096), False, 'from torchvision import transforms, datasets\n'), ((5106, 5181), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (5126, 5181), False, 'from torchvision import transforms, datasets\n'), ((5268, 5289), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5287, 5289), True, 'import torch.nn as nn\n'), ((7026, 7059), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['image_size'], {}), '(image_size)\n', (7047, 7059), False, 'from torchvision import transforms, datasets\n'), ((7069, 7090), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7088, 7090), False, 'from torchvision import transforms, datasets\n'), ((7100, 7175), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7120, 7175), False, 'from torchvision import transforms, datasets\n'), ((7262, 7283), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7281, 7283), True, 'import torch.nn as nn\n'), ((14411, 14458), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'net_id', 'pretrained': '(True)'}), '(net_id=net_id, pretrained=True)\n', (14426, 14458), False, 'from ofa.model_zoo import ofa_specialized\n'), ((17508, 17562), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[j]', 'pretrained': '(True)'}), '(net_id=new_net_id[j], pretrained=True)\n', (17523, 17562), False, 'from ofa.model_zoo import ofa_specialized\n'), ((17622, 17676), 'ofa.model_zoo.ofa_specialized', 'ofa_specialized', ([], {'net_id': 'new_net_id[i]', 'pretrained': '(True)'}), '(net_id=new_net_id[i], pretrained=True)\n', (17637, 17676), False, 'from ofa.model_zoo import ofa_specialized\n'), ((3650, 3690), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['image_size'], {}), '(image_size)\n', (3678, 3690), False, 'from torchvision import transforms, datasets\n'), ((3704, 3737), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3735, 3737), False, 'from torchvision import transforms, datasets\n'), ((3751, 3814), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(32.0 / 255.0)', 'saturation': '(0.5)'}), '(brightness=32.0 / 255.0, saturation=0.5)\n', (3773, 3814), False, 'from torchvision import transforms, datasets\n'), ((3826, 3847), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3845, 3847), False, 'from torchvision import transforms, datasets\n'), ((3861, 3936), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3881, 3936), False, 'from torchvision import 
transforms, datasets\n'), ((6121, 6158), 'ofa.utils.accuracy', 'accuracy', (['output', 'labels'], {'topk': '(1, 5)'}), '(output, labels, topk=(1, 5))\n', (6129, 6158), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((7840, 7877), 'ofa.utils.accuracy', 'accuracy', (['output', 'labels'], {'topk': '(1, 5)'}), '(output, labels, topk=(1, 5))\n', (7848, 7877), False, 'from ofa.utils import AverageMeter, accuracy\n'), ((20099, 20123), 'random.randint', 'random.randint', (['(1)', '(n - 1)'], {}), '(1, n - 1)\n', (20113, 20123), False, 'import random\n'), ((4991, 5020), 'math.ceil', 'math.ceil', (['(image_size / 0.875)'], {}), '(image_size / 0.875)\n', (5000, 5020), False, 'import math\n'), ((6985, 7014), 'math.ceil', 'math.ceil', (['(image_size / 0.875)'], {}), '(image_size / 0.875)\n', (6994, 7014), False, 'import math\n')]
|
from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.cm as cm
import numpy as np
#-------------------------------------------------------------
def plot_subfigure(axis, array, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, cmin, cmax, cmap):
xMin = 1.0e30
xMax = -1.0e30
yMin = 1.0e30
yMax = -1.0e30
cmap = plt.get_cmap(cmap)
patches = []
colors = []
for iCell in range(0,nCells):
if (yCell[iCell] > 0.0):
vertices = []
for iVertexOnCell in range(0,nEdgesOnCell[iCell]):
iVertex = verticesOnCell[iCell,iVertexOnCell]
vertices.append((xVertex[iVertex],zVertex[iVertex]))
colors.append(array[iCell])
patches.append(Polygon(vertices))
xMin = min(xMin,xVertex[iVertex])
xMax = max(xMax,xVertex[iVertex])
yMin = min(yMin,zVertex[iVertex])
yMax = max(yMax,zVertex[iVertex])
pc = PatchCollection(patches, cmap=cmap)
pc.set_array(np.array(colors))
pc.set_clim(cmin, cmax)
axis.add_collection(pc)
axis.set_xlim(xMin,xMax)
axis.set_ylim(yMin,yMax)
axis.set_aspect("equal")
axis.ticklabel_format(style='plain')
axis.tick_params(axis='x', \
which='both', \
bottom=False, \
top=False, \
labelbottom=False)
axis.tick_params(axis='y', \
which='both', \
left=False, \
right=False, \
labelleft=False)
#-------------------------------------------------------------
def plot_testcase():
nGrids = [2562,10242,40962,163842]
testTypes = ["cosine_bell","slotted_cylinder"]
methods = ["IR","IR","upwind"]
iTimes = [0,-1,-1]
for nGrid in nGrids:
print("nGrid: ", nGrid)
fig, axes = plt.subplots(3,4)
iTestType = -1
for testType in testTypes:
iTestType += 1
print(" Test type: ", testType)
iMethod = -1
for method, iTime in zip(methods,iTimes):
iMethod += 1
print(" Method: ", method, ", iTime: ", iTime)
filenamein = "./output_%s_%s_%i/output.2000.nc" %(method,testType,nGrid)
filein = Dataset(filenamein,"r")
nCells = len(filein.dimensions["nCells"])
nEdgesOnCell = filein.variables["nEdgesOnCell"][:]
verticesOnCell = filein.variables["verticesOnCell"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
zCell = filein.variables["zCell"][:]
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
zVertex = filein.variables["zVertex"][:]
verticesOnCell[:] = verticesOnCell[:] - 1
iceAreaCategory = filein.variables["iceAreaCategory"][:]
filein.close()
iceAreaCell = np.sum(iceAreaCategory,axis=(2,3))
plot_subfigure(axes[iMethod,iTestType*2], iceAreaCell[iTime], nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, 0.0, 1.0, "viridis")
iceAreaCellDiff = iceAreaCell[iTime] - iceAreaCell[0]
if (iMethod != 0):
plot_subfigure(axes[iMethod,iTestType*2+1], iceAreaCellDiff, nCells, nEdgesOnCell, verticesOnCell, xCell, yCell, zCell, xVertex, yVertex, zVertex, -1.0, 1.0, "bwr")
else:
axes[iMethod,iTestType*2+1].axis('off')
plt.savefig("advection_%6.6i.png" %(nGrid),dpi=300)
plt.cla()
plt.close(fig)
#-------------------------------------------------------------------------------
if __name__ == "__main__":
plot_testcase()
|
[
"matplotlib.pyplot.savefig",
"netCDF4.Dataset",
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.get_cmap"
] |
[((505, 523), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (517, 523), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1167), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'cmap': 'cmap'}), '(patches, cmap=cmap)\n', (1147, 1167), False, 'from matplotlib.collections import PatchCollection\n'), ((1185, 1201), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (1193, 1201), True, 'import numpy as np\n'), ((2065, 2083), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(4)'], {}), '(3, 4)\n', (2077, 2083), True, 'import matplotlib.pyplot as plt\n'), ((3858, 3909), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('advection_%6.6i.png' % nGrid)"], {'dpi': '(300)'}), "('advection_%6.6i.png' % nGrid, dpi=300)\n", (3869, 3909), True, 'import matplotlib.pyplot as plt\n'), ((3918, 3927), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3925, 3927), True, 'import matplotlib.pyplot as plt\n'), ((3936, 3950), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3945, 3950), True, 'import matplotlib.pyplot as plt\n'), ((917, 934), 'matplotlib.patches.Polygon', 'Polygon', (['vertices'], {}), '(vertices)\n', (924, 934), False, 'from matplotlib.patches import Polygon\n'), ((2507, 2531), 'netCDF4.Dataset', 'Dataset', (['filenamein', '"""r"""'], {}), "(filenamein, 'r')\n", (2514, 2531), False, 'from netCDF4 import Dataset\n'), ((3254, 3290), 'numpy.sum', 'np.sum', (['iceAreaCategory'], {'axis': '(2, 3)'}), '(iceAreaCategory, axis=(2, 3))\n', (3260, 3290), True, 'import numpy as np\n')]
|
import glob
import json
import os
import subprocess
import time
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
import geopandas as gpd
import rasterio
import numpy as np
from shapely.geometry import Polygon
class PipelineError(RuntimeError):
def __init__(self, message):
self.message = message
def listlike(arg):
'''Checks whether an argument is list-like, returns boolean'''
return not hasattr(arg, "strip") and (hasattr(arg, "__getitem__")
or hasattr(arg, "__iter__"))
def clean_dir(dir_to_clean, file_extensions):
'''Deletes files with specified extension(s) from a directory.
    This function is intended to help clean up outputs from command line
tools that we do not want to keep. Files to be deleted will be
identified using a wildcard with that file extension in dir_to_clean.
Parameters
----------
dir_to_clean: string, path
path to directory to delete files from
    file_extensions: string or list-like of strings
file extensions that will be used for identifying files to remove,
such as ['.tfw', '.kml'].
'''
if listlike(file_extensions):
for ext in file_extensions:
to_rem = glob.glob(os.path.join(dir_to_clean, '*{}'.format(ext)))
for file in to_rem:
os.remove(file)
print("Removed {:,d} files with extension {}.".format(
len(to_rem), ext))
    elif isinstance(file_extensions, str):
        to_rem = glob.glob(os.path.join(dir_to_clean, '*{}'.format(file_extensions)))
        for file in to_rem:
            os.remove(file)
        print("Removed {:,d} files with extension {}.".format(
            len(to_rem), file_extensions))
    else:
        raise TypeError(
            'file_extensions needs to be a string or list-like of strings.')
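# Example usage (path is illustrative), removing the sidecar files mentioned in the
# docstring above:
#     clean_dir('/path/to/tile_outputs', ['.tfw', '.kml'])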
def clean_buffer_polys(poly_shp,
tile_shp,
odir,
simp_tol=None,
simp_topol=None):
"""Removes polygons within the buffer zone of a tile.
This function removes polygons from a shapefile that fall in the buffered
    area of a point cloud tile. When building footprints or tree crowns (for
    example) are delineated from a point cloud, a buffer around the tile is
    generally used to avoid edge effects. This tool computes the centroid of
each polygon and determines whether it falls within the bounds of the
unbuffered tile. It outputs a new shapefile containing only those polygons
whose centroids fall within the unbuffered tile.
The polygons may be simplified using optional arguments simp_tol and
simp_topol to reduce the number of points that define their boundaries.
Parameters
----------
    poly_shp: string, path to shapefile (required)
A shapefile containing the polygons delineated within a buffered tile.
tile_shp: string, path to shapefile (required)
A shapefile containing the bounds of the tile WITHOUT buffers
odir: string, path to directory (required)
Path to the output directory for the new shapefile
    simp_tol: numeric (optional)
        Tolerance level for simplification. All points within a simplified
        geometry will be no more than simp_tol from the original.
    simp_topol: boolean (optional)
Whether or not to preserve topology of polygons. If False, a quicker
algorithm will be used, but may produce self-intersecting or otherwise
invalid geometries.
"""
fname = os.path.basename(poly_shp)
outfile = os.path.join(odir, fname)
os.makedirs(odir, exist_ok=True)
tile_boundary = gpd.read_file(tile_shp)
polys = gpd.read_file(poly_shp)
# boolean indicator of whether each polygon falls within tile boundary
clean_polys_ix = polys.centroid.within(tile_boundary.loc[0].geometry)
# retrieve the polygons within the boundary
clean_polys = polys[clean_polys_ix]
if simp_tol:
clean_polys = clean_polys.simplify(simp_tol, simp_topol)
if len(clean_polys) > 0:
clean_polys.to_file(outfile)
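# Example usage (paths are illustrative): keep only polygons whose centroids fall
# inside the unbuffered tile, simplifying boundaries to within 0.5 map units.
#     clean_buffer_polys('crowns_buffered.shp', 'tile_bounds.shp', 'cleaned/',
#                        simp_tol=0.5, simp_topol=True)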
def clip_tile_from_shp(in_raster, in_shp, odir, buffer=0):
'''Clips a raster image to the bounding box of a shapefile.
The input raster will be clipped using a rasterio command line tool. The
output raster will have the same name and file type as the input raster, and
will be written to the output directory, odir. The process is executed using
subprocess.run().
Parameters
----------
in_raster: string, path to file
raster image to be clipped
in_shp: string, path to file
shapefile from which bounding box is calculated to clip the raster
odir: string, path
output directory where clipped raster will be stored
buffer: numeric
additional buffer to add to total bounding box of shapefile when
clipping the raster
Returns
-------
proc_clip: CompletedProcess
The result of executing subprocess.run using the rio clip command.
'''
basename = os.path.basename(in_raster)
# read the shapefile using geopandas and calculate its bounds
gdf = gpd.read_file(in_shp)
tile_bnds = ' '.join(str(x) for x in gdf.buffer(buffer).total_bounds)
# create the output directory if it doesn't already exist
os.makedirs(odir, exist_ok=True)
outfile = os.path.join(odir, basename)
# clip the raster
proc_clip = subprocess.run(
['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return proc_clip
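# Example usage (paths are illustrative); this is roughly equivalent to running
# `rio clip in.tif odir/in.tif --bounds "<xmin ymin xmax ymax>"`, with the bounds
# taken from the buffered extent of the shapefile.
#     clip_tile_from_shp('ortho/in.tif', 'tiles/tile_001.shp', 'clipped/', buffer=50)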
def convert_project(infile, outfile, crs):
'''Converts a raster to another format and specifies its projection.
Uses rasterio command line tool executed using subprocess. The file
generated will have the same name and be in the same folder as the input
file.
Parameters
----------
infile: string, path to file
input raster to be converted
outfile: string, path to file
output raster to be generated
crs: string
specification of coordinate reference system to use following rasterio
command line tool (RIO) formatting (e.g., 'EPSG:3857')
Returns
-------
proc_convert: CompletedProcess
result of executing subprocess.run using rio convert
proc_project: CompletedProcess
result of executing subprocess.run using rio edit-info
'''
# convert the file to the new format
proc_convert = subprocess.run(['rio', 'convert', infile, outfile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
# add the projection info
proc_project = subprocess.run(['rio', 'edit-info', '--crs', crs, outfile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
return proc_convert, proc_project
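# Example usage (filenames are illustrative): convert a raster to GeoTIFF and stamp
# its coordinate reference system.
#     convert_project('dsm.asc', 'dsm.tif', 'EPSG:3857')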
def validation_summary(xml_dir, verbose=False):
'''
Generates a summary of validation results for a directory of lidar files
Parameters
----------
xml_dir : string, path to directory
directory containing xml files produced by LASvalidate
verbose : boolean
whether or not to include the messages describing why any files
produced warning or failed validation.
Returns
-------
summary_report : a printed report
'''
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
passed = 0
warnings = 0
failed = 0
parse_errors = 0
warning_messages = []
failed_messages = []
for validation_report in xmls:
try:
tile_id = os.path.basename(validation_report).split('.')[0]
tree = ET.parse(validation_report)
root = tree.getroot()
result = root.find('report').find('summary').text.strip()
if result == 'pass':
passed += 1
else:
variable = root.find('report').find('details').find(
result).find('variable').text
note = root.find('report').find('details').find(result).find(
'note').text
if result == 'fail':
failed += 1
failed_messages.append('{} -> {} | {} : {}'.format(
tile_id, result, variable, note))
elif result == 'warning':
warnings += 1
warning_messages.append('{} -> {} | {} : {}'.format(
tile_id, result, variable, note))
except ParseError:
parse_errors += 1
summary = '''LASvalidate Summary
====================
Passed: {:,d}
Failed: {:,d}
Warnings: {:,d}
ParseErrors: {:,d}
'''.format(passed, failed, warnings, parse_errors)
details = '''Details
========
{}
{}
'''.format('\n'.join(failed_messages), '\n'.join(warning_messages))
print(summary)
if verbose:
print(details)
def move_invalid_tiles(xml_dir, dest_dir):
'''Moves lidar data that fail validation checks into a new directory
Parameters
----------
xml_dir : string, path to directory
where the xml reports produced by LASvalidate can be found
dest_dir : str, path to directory
where you would like the point cloud and associated files to be moved
Returns
-------
A printed statement about how many tiles were moved.
'''
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
invalid_dir = dest_dir
num_invalid = 0
for validation_report in xmls:
tile_id = os.path.basename(validation_report).split('.')[0]
tree = ET.parse(validation_report)
root = tree.getroot()
result = root.find('report').find('summary').text.strip()
if result == 'fail':
# move the lidar file to a different folder
os.makedirs(invalid_dir, exist_ok=True)
for invalid_file in glob.glob(
os.path.join(xml_dir, tile_id + '*')):
basename = os.path.basename(invalid_file)
os.rename(invalid_file, os.path.join(invalid_dir, basename))
num_invalid += 1
print('Moved files for {} invalid tiles to {}'.format(
num_invalid, invalid_dir))
def get_bbox_as_poly(infile, epsg=None):
"""Uses PDAL's info tool to extract the bounding box of a file as a
shapely Polygon. If an EPSG code is provided, a GeoDataFrame is returned.
Parameters
----------
infile : str, path to file
path to input file that PDAL can read
epsg : int
EPSG code defining the coordinate reference system. Optional.
Returns
-------
bbox_poly : Polygon or GeoDataFrame
By default (no EPSG is provided), a shapely Polygon with the bounding
box as its coordinates is returned. If an EPSG code is specified,
bbox_poly is returned as a GeoPandas GeoDataFrame.
"""
result = subprocess.run(['pdal', 'info', infile],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
json_result = json.loads(result.stdout.decode())
coords = json_result['stats']['bbox']['native']['boundary']['coordinates']
geometry = Polygon(*coords)
if epsg:
bbox_poly = gpd.GeoDataFrame(
geometry=[geometry], crs={'init': 'epsg:{}'.format(epsg)})
else:
        bbox_poly = geometry
return bbox_poly
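# Example usage (filename and EPSG code are illustrative): returns a shapely Polygon
# by default, or a GeoDataFrame in the given CRS when `epsg` is provided.
#     poly = get_bbox_as_poly('tile.laz')
#     gdf = get_bbox_as_poly('tile.laz', epsg=26910)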
def fname(path):
"""returns the filename as basename split from extension.
Parameters
-----------
path : str, path to file
filepath from which filename will be sliced
Returns
--------
filename : str
name of file, split from extension
"""
filename = os.path.basename(path).split('.')[0]
return filename
def annulus(inner_radius, outer_radius, dtype=np.uint8):
"""Generates a flat, donut-shaped (annular) structuring element.
A pixel is within the neighborhood if the euclidean distance between
it and the origin falls between the inner and outer radii (inclusive).
Parameters
----------
inner_radius : int
The inner radius of the annular structuring element
outer_radius : int
The outer radius of the annular structuring element
dtype : data-type
The data type of the structuring element
Returns
-------
selem : ndarray
The structuring element where elements of the neighborhood are 1
and 0 otherwise
"""
L = np.arange(-outer_radius, outer_radius + 1)
X, Y = np.meshgrid(L, L)
selem = np.array(
((X**2 + Y**2) <= outer_radius**2) * (
(X**2 + Y**2) >= inner_radius**2),
dtype=dtype)
return selem
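# Worked example: annulus(1, 2) produces a 5x5 ring in which the center pixel is
# excluded (distance 0 < inner radius) and the four corners are excluded
# (distance sqrt(8) > outer radius):
#     array([[0, 0, 1, 0, 0],
#            [0, 1, 1, 1, 0],
#            [1, 1, 0, 1, 1],
#            [0, 1, 1, 1, 0],
#            [0, 0, 1, 0, 0]], dtype=uint8)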
def inspect_failures(failed_dir):
"""Prints error messages reported for tiles that failed in the lidar
processing pipeline.
Parameters
----------
failed_dir : string, path to directory
path to directory containing text files indicating any tiles which
failed processing
"""
failed = glob.glob(os.path.join(failed_dir, '*.txt'))
for filename in failed:
with open(filename) as f:
print([line for line in f.readlines() if line.rstrip() != ''])
print('----------------------')
def processing_summary(all_tiles, already_finished, processing_tiles,
finished_dir, failed_dir, start_time):
"""Prints a summary indicating progress of a lidar processing pipeline.
Parameters
----------
all_tiles : list-like
all tiles within a lidar acquisition
already_finished : list-like
tiles which were successfully processed in a previous execution of the
processing pipeline
processing_tiles : list-like
tiles which are being processed during the currently executing pipeline
finished_dir : string, path to directory
path to directory containing text files indicating any tiles which have
finished processing
failed_dir : string, path to directory
path to directory containing text files indicating any tiles which
failed processing
start_time : float
time the pipeline execution began, produced by time.time()
"""
failed = glob.glob(os.path.join(failed_dir, '*.txt'))
finished = glob.glob(os.path.join(finished_dir, '*.txt'))
summary = '''
Processing Summary
-------------------
{:>5,d} tiles in acquisition
{:>5,d} tiles previously finished in acquisition
{:>5,d} tiles being processed in this run
{:>5,d} tiles from this run finished
{:>5,d} tiles failed
'''.format(
len(all_tiles), len(already_finished), len(processing_tiles),
len(finished) - (len(all_tiles) - len(processing_tiles)), len(failed))
total_percent_unfinished = int(70 * (1 - len(finished) / len(all_tiles)))
total_percent_finished = int(70 * len(finished) / len(all_tiles))
total_percent_failed = int(70 * len(failed) / len(all_tiles))
    this_run_done = len(finished) - (len(all_tiles) - len(processing_tiles))
    this_run_unfinished = int(70 - 70 * this_run_done / len(processing_tiles))
    this_run_finished = int(70 * this_run_done / len(processing_tiles))
    progress_bars = (
        '|' + '=' * this_run_finished + ' ' * this_run_unfinished +
        '!' * total_percent_failed +
        '| {:.1%} this run\n'.format(this_run_done / len(processing_tiles)) +
        '|' + '=' * total_percent_finished + ' ' * total_percent_unfinished +
        '!' * total_percent_failed +
        '| {:.1%} total'.format(len(finished) / len(all_tiles)))
print(summary)
print(progress_bars)
time_to_complete(start_time, len(processing_tiles),
len(finished) - (len(all_tiles) - len(processing_tiles)))
def print_dhms(s):
"""Prints number of days, hours, minutes, and seconds
represented by number of seconds provided as input.
Parameters
----------
s : numeric
seconds
"""
days = s // (24 * 3600)
s = s % (24 * 3600)
hours = s // 3600
s %= 3600
minutes = s // 60
s %= 60
seconds = s
if days > 0:
print(f'{days:2.0f}d {hours:2.0f}h {minutes:2.0f}m {seconds:2.0f}s')
elif hours > 0:
print(f' {hours:2.0f}h {minutes:2.0f}m {seconds:2.0f}s')
else:
print(f' {minutes:2.0f}m {seconds:2.0f}s')
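# Example: print_dhms(90061) prints ' 1d  1h  1m  1s' (90061 seconds is one
# day, one hour, one minute and one second).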
def time_to_complete(start_time, num_jobs, jobs_completed):
"""Prints elapsed time and estimated time of completion.
Parameters
----------
start_time : float
time the pipeline execution began, produced by time.time()
num_jobs : int
total number of jobs to be completed
jobs_completed : int
number of jobs completed so far
"""
if jobs_completed == 0:
print('\nNo jobs completed yet.')
else:
time_now = time.time()
elapsed = time_now - start_time
prop_complete = jobs_completed / num_jobs
est_completion = elapsed / prop_complete
time_left = est_completion - elapsed
print('\nelapsed: ', end='\t')
print_dhms(elapsed)
print('remaining: ', end='\t')
print_dhms(time_left)
def make_buffered_fishnet(xmin, ymin, xmax, ymax, crs, spacing=1000,
buffer=50):
"""Makes a GeoDataFrame with a fishnet grid that has overlapping edges.
Converts an existing lidar tiling scheme into one that has overlapping
tiles and which is aligned with a grid based on the spacing parameter.
Parameters
----------
xmin, ymin, xmax, ymax : numeric
Values indicating the extent of the existing lidar data.
crs : Coordinate Reference System
Must be readable by GeoPandas to create a GeoDataFrame.
spacing : int
Length and width of tiles in new tiling scheme prior to buffering
buffer : int
Amount of overlap between neighboring tiles.
"""
xmin, ymin = (
np.floor(np.array([xmin, ymin]) // spacing) * spacing).astype(int)
xmax, ymax = (
np.ceil(np.array([xmax, ymax]) // spacing) * spacing).astype(int) + spacing
xx, yy = np.meshgrid(
np.arange(xmin, xmax + spacing, spacing),
np.arange(ymin, ymax + spacing, spacing))
xx_leftbuff = xx[:, :-1] - buffer
xx_rightbuff = xx[:, 1:] + buffer
yy_downbuff = yy[:-1, :] - buffer
yy_upbuff = yy[1:, :] + buffer
ll = np.stack((
xx_leftbuff[1:, :].ravel(), # skip top row
yy_downbuff[:, :-1].ravel())).T # skip right-most column
ul = np.stack((
xx_leftbuff[:-1, :].ravel(), # skip bottom row
yy_upbuff[:, :-1].ravel())).T # skip right-most column
ur = np.stack((
xx_rightbuff[:-1, :].ravel(), # skip bottom row
yy_upbuff[:, 1:].ravel())).T # skip left-most column
lr = np.stack((
xx_rightbuff[1:, :].ravel(), # skip top row
yy_downbuff[:, 1:].ravel())).T # skip left-most column
buff_fishnet = np.stack([ll, ul, ur, lr])
polys = [
Polygon(buff_fishnet[:, i, :]) for i in range(buff_fishnet.shape[1])
]
ll_names = [x for x in (ll + buffer).astype(int).astype(str)]
tile_ids = [
'_'.join(tile) + '_{}'.format(str(spacing)) for tile in ll_names
]
buff_fishnet_gdf = gpd.GeoDataFrame(geometry=polys, crs=crs)
buff_fishnet_gdf['tile_id'] = tile_ids
return buff_fishnet_gdf.set_index('tile_id')
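# Illustrative usage (sketch; the extent and EPSG code are made up):
#   fishnet = make_buffered_fishnet(521370, 5201840, 528930, 5209650,
#                                   crs='epsg:26910')
#   # Index entries look like '521000_5201000_1000'; each 1000 m cell is grown
#   # by 50 m on every side, so neighbouring tiles overlap by 100 m.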
def get_intersecting_tiles(src_tiles, new_tiles):
"""Identifies tiles from src that intersect tiles in new_tiles.
This function is intended to identify the files which should be read for
retiling a lidar acquisition into the new_tiles layout.
src_tiles is expected to have a 'file_name' field.
Parameters
----------
src_tiles : GeoDataFrame
Original tiling scheme for lidar acquisition
new_tiles : GeoDataFrame
New tiling scheme for lidar acquisition, such as one created by the
make_buffered_fishnet function
Returns
-------
joined_tiles : GeoDataFrame
Each row shows a tile from new_tiles that intersected with one or more
tiles from src_tiles. The list of tiles from src_tiles that intersect
each tile in new_tiles are formatted as a space-delimited string.
"""
joined = gpd.sjoin(new_tiles, src_tiles)
joined_tiles = joined.groupby(level=0)['file_name'].apply(list).apply(
' '.join).to_frame()
joined_tiles.index.name = 'tile_id'
joined_tiles = joined_tiles.rename({
'file_name': 'intersecting_files'
},
axis=1)
return joined_tiles
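# Illustrative usage (sketch; assumes src_tiles is a GeoDataFrame of the
# original tile footprints with a 'file_name' column, and the file names shown
# are hypothetical):
#   new_tiles = make_buffered_fishnet(*src_tiles.total_bounds, crs=src_tiles.crs)
#   lookup = get_intersecting_tiles(src_tiles, new_tiles)
#   lookup.loc['521000_5201000_1000', 'intersecting_files']
#   # -> 'tile_042.laz tile_043.laz'  (space-delimited source files)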
def parse_coords_from_tileid(tile_id):
"""Get the coordinates of the lower left corner of the tile, assuming the
tile has been named in the pattern {XMIN}_{YMIN}_{LENGTH}.
Parameters
----------
tile_id : string
assumed tile_id follows the naming convention of {LLX}_{LLY}_{LENGTH}
where:
LLX = x-coordinate of lower-left corner of tile (in projected units)
LLY = y-coordinate of lower-left corner of tile (in projected units)
LENGTH = length of the raster (in projected units), assumed to be a
square tile shape
Returns
-------
llx, lly, length : int
x- and y- coordinates of lower-left corner and length of raster
"""
tile_parts = tile_id.split('_')
if len(tile_parts) == 2:
llx, lly = [int(coord) for coord in tile_parts]
length = 1000 # assumed tile width if not explicit in tile_id
    elif len(tile_parts) == 3:
        llx, lly, length = [int(coord) for coord in tile_parts]
    else:
        raise ValueError(
            'tile_id {} does not follow the expected '
            '{{LLX}}_{{LLY}}_{{LENGTH}} naming convention'.format(tile_id))
    return llx, lly, length
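# Examples: parse_coords_from_tileid('521000_5201000_1000') returns
# (521000, 5201000, 1000); parse_coords_from_tileid('521000_5201000') falls
# back to the assumed 1000-unit tile width.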
|
[
"geopandas.sjoin",
"xml.etree.ElementTree.parse",
"geopandas.read_file",
"os.makedirs",
"subprocess.run",
"os.path.join",
"numpy.array",
"shapely.geometry.Polygon",
"numpy.stack",
"os.path.basename",
"time.time",
"numpy.meshgrid",
"geopandas.GeoDataFrame",
"numpy.arange",
"os.remove"
] |
[((3566, 3592), 'os.path.basename', 'os.path.basename', (['poly_shp'], {}), '(poly_shp)\n', (3582, 3592), False, 'import os\n'), ((3607, 3632), 'os.path.join', 'os.path.join', (['odir', 'fname'], {}), '(odir, fname)\n', (3619, 3632), False, 'import os\n'), ((3637, 3669), 'os.makedirs', 'os.makedirs', (['odir'], {'exist_ok': '(True)'}), '(odir, exist_ok=True)\n', (3648, 3669), False, 'import os\n'), ((3691, 3714), 'geopandas.read_file', 'gpd.read_file', (['tile_shp'], {}), '(tile_shp)\n', (3704, 3714), True, 'import geopandas as gpd\n'), ((3727, 3750), 'geopandas.read_file', 'gpd.read_file', (['poly_shp'], {}), '(poly_shp)\n', (3740, 3750), True, 'import geopandas as gpd\n'), ((5096, 5123), 'os.path.basename', 'os.path.basename', (['in_raster'], {}), '(in_raster)\n', (5112, 5123), False, 'import os\n'), ((5200, 5221), 'geopandas.read_file', 'gpd.read_file', (['in_shp'], {}), '(in_shp)\n', (5213, 5221), True, 'import geopandas as gpd\n'), ((5363, 5395), 'os.makedirs', 'os.makedirs', (['odir'], {'exist_ok': '(True)'}), '(odir, exist_ok=True)\n', (5374, 5395), False, 'import os\n'), ((5410, 5438), 'os.path.join', 'os.path.join', (['odir', 'basename'], {}), '(odir, basename)\n', (5422, 5438), False, 'import os\n'), ((5477, 5603), 'subprocess.run', 'subprocess.run', (["['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['rio', 'clip', in_raster, outfile, '--bounds', tile_bnds],\n stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n", (5491, 5603), False, 'import subprocess\n'), ((6542, 6645), 'subprocess.run', 'subprocess.run', (["['rio', 'convert', infile, outfile]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['rio', 'convert', infile, outfile], stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n", (6556, 6645), False, 'import subprocess\n'), ((6759, 6871), 'subprocess.run', 'subprocess.run', (["['rio', 'edit-info', '--crs', crs, outfile]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['rio', 'edit-info', '--crs', crs, outfile], stderr=\n subprocess.PIPE, stdout=subprocess.PIPE)\n", (6773, 6871), False, 'import subprocess\n'), ((11001, 11094), 'subprocess.run', 'subprocess.run', (["['pdal', 'info', infile]"], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['pdal', 'info', infile], stderr=subprocess.PIPE, stdout=\n subprocess.PIPE)\n", (11015, 11094), False, 'import subprocess\n'), ((11295, 11311), 'shapely.geometry.Polygon', 'Polygon', (['*coords'], {}), '(*coords)\n', (11302, 11311), False, 'from shapely.geometry import Polygon\n'), ((12570, 12612), 'numpy.arange', 'np.arange', (['(-outer_radius)', '(outer_radius + 1)'], {}), '(-outer_radius, outer_radius + 1)\n', (12579, 12612), True, 'import numpy as np\n'), ((12624, 12641), 'numpy.meshgrid', 'np.meshgrid', (['L', 'L'], {}), '(L, L)\n', (12635, 12641), True, 'import numpy as np\n'), ((12654, 12761), 'numpy.array', 'np.array', (['((X ** 2 + Y ** 2 <= outer_radius ** 2) * (X ** 2 + Y ** 2 >= inner_radius **\n 2))'], {'dtype': 'dtype'}), '((X ** 2 + Y ** 2 <= outer_radius ** 2) * (X ** 2 + Y ** 2 >= \n inner_radius ** 2), dtype=dtype)\n', (12662, 12761), True, 'import numpy as np\n'), ((19114, 19140), 'numpy.stack', 'np.stack', (['[ll, ul, ur, lr]'], {}), '([ll, ul, ur, lr])\n', (19122, 19140), True, 'import numpy as np\n'), ((19425, 19466), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'geometry': 'polys', 'crs': 'crs'}), '(geometry=polys, crs=crs)\n', (19441, 19466), True, 'import geopandas as gpd\n'), ((20428, 
20459), 'geopandas.sjoin', 'gpd.sjoin', (['new_tiles', 'src_tiles'], {}), '(new_tiles, src_tiles)\n', (20437, 20459), True, 'import geopandas as gpd\n'), ((7475, 7505), 'os.path.join', 'os.path.join', (['xml_dir', '"""*.xml"""'], {}), "(xml_dir, '*.xml')\n", (7487, 7505), False, 'import os\n'), ((9495, 9525), 'os.path.join', 'os.path.join', (['xml_dir', '"""*.xml"""'], {}), "(xml_dir, '*.xml')\n", (9507, 9525), False, 'import os\n'), ((9694, 9721), 'xml.etree.ElementTree.parse', 'ET.parse', (['validation_report'], {}), '(validation_report)\n', (9702, 9721), True, 'import xml.etree.ElementTree as ET\n'), ((11465, 11481), 'shapely.geometry.Polygon', 'Polygon', (['*coords'], {}), '(*coords)\n', (11472, 11481), False, 'from shapely.geometry import Polygon\n'), ((13138, 13171), 'os.path.join', 'os.path.join', (['failed_dir', '"""*.txt"""'], {}), "(failed_dir, '*.txt')\n", (13150, 13171), False, 'import os\n'), ((14335, 14368), 'os.path.join', 'os.path.join', (['failed_dir', '"""*.txt"""'], {}), "(failed_dir, '*.txt')\n", (14347, 14368), False, 'import os\n'), ((14395, 14430), 'os.path.join', 'os.path.join', (['finished_dir', '"""*.txt"""'], {}), "(finished_dir, '*.txt')\n", (14407, 14430), False, 'import os\n'), ((16995, 17006), 'time.time', 'time.time', ([], {}), '()\n', (17004, 17006), False, 'import time\n'), ((18294, 18334), 'numpy.arange', 'np.arange', (['xmin', '(xmax + spacing)', 'spacing'], {}), '(xmin, xmax + spacing, spacing)\n', (18303, 18334), True, 'import numpy as np\n'), ((18344, 18384), 'numpy.arange', 'np.arange', (['ymin', '(ymax + spacing)', 'spacing'], {}), '(ymin, ymax + spacing, spacing)\n', (18353, 18384), True, 'import numpy as np\n'), ((19164, 19194), 'shapely.geometry.Polygon', 'Polygon', (['buff_fishnet[:, i, :]'], {}), '(buff_fishnet[:, i, :])\n', (19171, 19194), False, 'from shapely.geometry import Polygon\n'), ((7766, 7793), 'xml.etree.ElementTree.parse', 'ET.parse', (['validation_report'], {}), '(validation_report)\n', (7774, 7793), True, 'import xml.etree.ElementTree as ET\n'), ((9916, 9955), 'os.makedirs', 'os.makedirs', (['invalid_dir'], {'exist_ok': '(True)'}), '(invalid_dir, exist_ok=True)\n', (9927, 9955), False, 'import os\n'), ((1373, 1388), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1382, 1388), False, 'import os\n'), ((1643, 1658), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1652, 1658), False, 'import os\n'), ((10019, 10055), 'os.path.join', 'os.path.join', (['xml_dir', "(tile_id + '*')"], {}), "(xml_dir, tile_id + '*')\n", (10031, 10055), False, 'import os\n'), ((10085, 10115), 'os.path.basename', 'os.path.basename', (['invalid_file'], {}), '(invalid_file)\n', (10101, 10115), False, 'import os\n'), ((11810, 11832), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (11826, 11832), False, 'import os\n'), ((9629, 9664), 'os.path.basename', 'os.path.basename', (['validation_report'], {}), '(validation_report)\n', (9645, 9664), False, 'import os\n'), ((10156, 10191), 'os.path.join', 'os.path.join', (['invalid_dir', 'basename'], {}), '(invalid_dir, basename)\n', (10168, 10191), False, 'import os\n'), ((7697, 7732), 'os.path.basename', 'os.path.basename', (['validation_report'], {}), '(validation_report)\n', (7713, 7732), False, 'import os\n'), ((18098, 18120), 'numpy.array', 'np.array', (['[xmin, ymin]'], {}), '([xmin, ymin])\n', (18106, 18120), True, 'import numpy as np\n'), ((18191, 18213), 'numpy.array', 'np.array', (['[xmax, ymax]'], {}), '([xmax, ymax])\n', (18199, 18213), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
####################
def merge_dicts(list_of_dicts):
results = {}
for d in list_of_dicts:
for key in d.keys():
if key in results.keys():
results[key].append(d[key])
else:
results[key] = [d[key]]
return results
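# Example (sketch): values of repeated keys are gathered into lists, e.g.
#   merge_dicts([{'cycle': 3}, {'cycle': 5, 'stall': 1}])
#   # -> {'cycle': [3, 5], 'stall': [1]}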
####################
comp_pJ = 22. * 1e-12 / 32. / 16.
num_layers = 6
num_comparator = 8
results = np.load('results.npy', allow_pickle=True).item()
y_mean = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_std = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_mac_per_pJ = np.zeros(shape=(2, 2, 2, 2, num_layers))
cycle = np.zeros(shape=(2, 2, 2, 2, num_layers))
nmac = np.zeros(shape=(2, 2, 2, 2, num_layers))
array = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_ron = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_roff = np.zeros(shape=(2, 2, 2, 2, num_layers))
y_adc = np.zeros(shape=(2, 2, 2, 2, num_layers, num_comparator))
y_energy = np.zeros(shape=(2, 2, 2, 2, num_layers))
array_util = np.zeros(shape=(2, 2, 2, 2, num_layers))
for key in sorted(results.keys()):
(skip, cards, alloc, profile) = key
alloc = 1 if alloc == 'block' else 0
layer_results = results[key]
max_cycle = 0
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_mean[skip][cards][alloc][profile][layer] = np.mean(rdict['mean'])
y_std[skip][cards][alloc][profile][layer] = np.mean(rdict['std'])
############################
y_ron[skip][cards][alloc][profile][layer] = np.sum(rdict['ron'])
y_roff[skip][cards][alloc][profile][layer] = np.sum(rdict['roff'])
y_adc[skip][cards][alloc][profile][layer] = np.sum(rdict['adc'], axis=0)
y_energy[skip][cards][alloc][profile][layer] += y_ron[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += y_roff[skip][cards][alloc][profile][layer] * 2e-16
y_energy[skip][cards][alloc][profile][layer] += np.sum(y_adc[skip][cards][alloc][profile][layer] * np.array([1,2,3,4,5,6,7,8]) * comp_pJ)
y_mac_per_cycle[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / np.sum(rdict['cycle'])
y_mac_per_pJ[skip][cards][alloc][profile][layer] = np.sum(rdict['nmac']) / 1e12 / np.sum(y_energy[skip][cards][alloc][profile][layer])
############################
cycle[skip][cards][alloc][profile][layer] = np.mean(rdict['cycle'])
nmac[skip][cards][alloc][profile][layer] = np.mean(rdict['nmac'])
array[skip][cards][alloc][profile][layer] = np.mean(rdict['array'])
############################
max_cycle = max(max_cycle, np.mean(rdict['cycle']))
############################
for layer in range(num_layers):
rdict = merge_dicts(layer_results[layer])
############################
y_cycle = np.mean(rdict['cycle'])
y_stall = np.mean(rdict['stall'])
y_array = np.mean(rdict['array'])
array_util[skip][cards][alloc][profile][layer] = (y_array * y_cycle - y_stall) / (y_array * max_cycle)
############################
####################
layers = np.array(range(1, 6+1))
skip_none = int(np.max(cycle[1, 0, 0, 0]))
skip_layer = int(np.max(cycle[1, 0, 0, 1]))
skip_block = int(np.max(cycle[1, 0, 1, 1]))
cards_none = int(np.max(cycle[1, 1, 0, 0]))
cards_layer = int(np.max(cycle[1, 1, 0, 1]))
cards_block = int(np.max(cycle[1, 1, 1, 1]))
height = [skip_none, skip_layer, skip_block, cards_none, cards_layer, cards_block]
x = ['skip/none', 'skip/layer', 'skip/block', 'cards/none', 'cards/layer', 'cards/block']
####################
plt.rcParams.update({'font.size': 12})
####################
plt.cla()
plt.clf()
plt.close()
plt.ylabel('# Cycles')
# plt.xlabel('Method')
plt.xticks(range(len(x)), x, rotation=45)
width = 0.2
plt.bar(x=x, height=height, width=width)
ax = plt.gca()
for i, h in enumerate(height):
# print (i, h)
ax.text(i - width, h + np.min(height)*0.02, str(h), fontdict={'size': 12})
fig = plt.gcf()
fig.set_size_inches(9, 5)
plt.tight_layout()
fig.savefig('cycles.png', dpi=300)
####################
|
[
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.zeros",
"matplotlib.pyplot.bar",
"numpy.sum",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"numpy.load",
"matplotlib.pyplot.cla"
] |
[((502, 542), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (510, 542), True, 'import numpy as np\n'), ((551, 591), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (559, 591), True, 'import numpy as np\n'), ((611, 651), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (619, 651), True, 'import numpy as np\n'), ((667, 707), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (675, 707), True, 'import numpy as np\n'), ((717, 757), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (725, 757), True, 'import numpy as np\n'), ((765, 805), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (773, 805), True, 'import numpy as np\n'), ((814, 854), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (822, 854), True, 'import numpy as np\n'), ((864, 904), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (872, 904), True, 'import numpy as np\n'), ((914, 954), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (922, 954), True, 'import numpy as np\n'), ((963, 1019), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers, num_comparator)'}), '(shape=(2, 2, 2, 2, num_layers, num_comparator))\n', (971, 1019), True, 'import numpy as np\n'), ((1031, 1071), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (1039, 1071), True, 'import numpy as np\n'), ((1086, 1126), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 2, 2, 2, num_layers)'}), '(shape=(2, 2, 2, 2, num_layers))\n', (1094, 1126), True, 'import numpy as np\n'), ((3861, 3899), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (3880, 3899), True, 'import matplotlib.pyplot as plt\n'), ((3923, 3932), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3930, 3932), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3942), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3940, 3942), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3954), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3952, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3956, 3978), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# Cycles"""'], {}), "('# Cycles')\n", (3966, 3978), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4098), 'matplotlib.pyplot.bar', 'plt.bar', ([], {'x': 'x', 'height': 'height', 'width': 'width'}), '(x=x, height=height, width=width)\n', (4065, 4098), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4114), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4112, 4114), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4260), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4258, 4260), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4305), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4303, 4305), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3438), 'numpy.max', 'np.max', (['cycle[1, 0, 0, 0]'], {}), '(cycle[1, 0, 0, 0])\n', (3419, 3438), True, 'import numpy as np\n'), ((3457, 3482), 'numpy.max', 'np.max', 
(['cycle[1, 0, 0, 1]'], {}), '(cycle[1, 0, 0, 1])\n', (3463, 3482), True, 'import numpy as np\n'), ((3501, 3526), 'numpy.max', 'np.max', (['cycle[1, 0, 1, 1]'], {}), '(cycle[1, 0, 1, 1])\n', (3507, 3526), True, 'import numpy as np\n'), ((3547, 3572), 'numpy.max', 'np.max', (['cycle[1, 1, 0, 0]'], {}), '(cycle[1, 1, 0, 0])\n', (3553, 3572), True, 'import numpy as np\n'), ((3592, 3617), 'numpy.max', 'np.max', (['cycle[1, 1, 0, 1]'], {}), '(cycle[1, 1, 0, 1])\n', (3598, 3617), True, 'import numpy as np\n'), ((3637, 3662), 'numpy.max', 'np.max', (['cycle[1, 1, 1, 1]'], {}), '(cycle[1, 1, 1, 1])\n', (3643, 3662), True, 'import numpy as np\n'), ((443, 484), 'numpy.load', 'np.load', (['"""results.npy"""'], {'allow_pickle': '(True)'}), "('results.npy', allow_pickle=True)\n", (450, 484), True, 'import numpy as np\n'), ((1495, 1517), 'numpy.mean', 'np.mean', (["rdict['mean']"], {}), "(rdict['mean'])\n", (1502, 1517), True, 'import numpy as np\n'), ((1570, 1591), 'numpy.mean', 'np.mean', (["rdict['std']"], {}), "(rdict['std'])\n", (1577, 1591), True, 'import numpy as np\n'), ((1691, 1711), 'numpy.sum', 'np.sum', (["rdict['ron']"], {}), "(rdict['ron'])\n", (1697, 1711), True, 'import numpy as np\n'), ((1765, 1786), 'numpy.sum', 'np.sum', (["rdict['roff']"], {}), "(rdict['roff'])\n", (1771, 1786), True, 'import numpy as np\n'), ((1839, 1867), 'numpy.sum', 'np.sum', (["rdict['adc']"], {'axis': '(0)'}), "(rdict['adc'], axis=0)\n", (1845, 1867), True, 'import numpy as np\n'), ((2588, 2611), 'numpy.mean', 'np.mean', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (2595, 2611), True, 'import numpy as np\n'), ((2663, 2685), 'numpy.mean', 'np.mean', (["rdict['nmac']"], {}), "(rdict['nmac'])\n", (2670, 2685), True, 'import numpy as np\n'), ((2738, 2761), 'numpy.mean', 'np.mean', (["rdict['array']"], {}), "(rdict['array'])\n", (2745, 2761), True, 'import numpy as np\n'), ((3068, 3091), 'numpy.mean', 'np.mean', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (3075, 3091), True, 'import numpy as np\n'), ((3110, 3133), 'numpy.mean', 'np.mean', (["rdict['stall']"], {}), "(rdict['stall'])\n", (3117, 3133), True, 'import numpy as np\n'), ((3152, 3175), 'numpy.mean', 'np.mean', (["rdict['array']"], {}), "(rdict['array'])\n", (3159, 3175), True, 'import numpy as np\n'), ((2291, 2312), 'numpy.sum', 'np.sum', (["rdict['nmac']"], {}), "(rdict['nmac'])\n", (2297, 2312), True, 'import numpy as np\n'), ((2315, 2337), 'numpy.sum', 'np.sum', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (2321, 2337), True, 'import numpy as np\n'), ((2428, 2480), 'numpy.sum', 'np.sum', (['y_energy[skip][cards][alloc][profile][layer]'], {}), '(y_energy[skip][cards][alloc][profile][layer])\n', (2434, 2480), True, 'import numpy as np\n'), ((2844, 2867), 'numpy.mean', 'np.mean', (["rdict['cycle']"], {}), "(rdict['cycle'])\n", (2851, 2867), True, 'import numpy as np\n'), ((2397, 2418), 'numpy.sum', 'np.sum', (["rdict['nmac']"], {}), "(rdict['nmac'])\n", (2403, 2418), True, 'import numpy as np\n'), ((4192, 4206), 'numpy.min', 'np.min', (['height'], {}), '(height)\n', (4198, 4206), True, 'import numpy as np\n'), ((2188, 2222), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8])\n', (2196, 2222), True, 'import numpy as np\n')]
|
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests the open source construction environments."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import dm_construction
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string("backend", "docker", "")
def _make_random_action(action_spec, observation):
"""Makes a random action given an action spec and observation."""
# Sample the random action.
action = {}
for name, spec in action_spec.items():
if name == "Index":
value = np.random.randint(observation["n_edge"])
elif spec.dtype in (np.int32, np.int64, int):
value = np.random.randint(spec.minimum, spec.maximum + 1)
else:
value = np.random.uniform(spec.minimum, spec.maximum)
action[name] = value
return action
def _random_unroll(env, seed=1234, num_steps=10, difficulty=5,
random_choice_before_reset=False):
"""Take random actions in the given environment."""
np.random.seed(seed)
action_spec = env.action_spec()
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
trajectory = [timestep]
actions = [None]
for _ in range(num_steps):
if timestep.last():
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
action = _make_random_action(action_spec, timestep.observation)
timestep = env.step(action)
trajectory.append(timestep)
actions.append(action)
return trajectory, actions
class TestEnvironments(parameterized.TestCase):
def _make_environment(
self, problem_type, curriculum_sample, wrapper_type, backend_type=None):
"""Make the new version of the construction task."""
if backend_type is None:
backend_type = FLAGS.backend
return dm_construction.get_environment(
problem_type,
unity_environment=self._unity_envs[backend_type],
wrapper_type=wrapper_type,
curriculum_sample=curriculum_sample)
@classmethod
def setUpClass(cls):
super(TestEnvironments, cls).setUpClass()
# Construct the unity environment.
cls._unity_envs = {
"docker": dm_construction.get_unity_environment("docker"),
}
@classmethod
def tearDownClass(cls):
super(TestEnvironments, cls).tearDownClass()
for env in cls._unity_envs.values():
env.close()
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_discrete_relative_environments_curriculum_sample(self, name):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, True, "discrete_relative")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_continuous_absolute_environments_curriculum_sample(self, name):
"""Smoke test for continuous absolute wrapper w/ curriculum_sample=True."""
env = self._make_environment(name, True, "continuous_absolute")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("connecting_additional_layer", "connecting", "additional_layer"),
("connecting_mixed_height_targets", "connecting", "mixed_height_targets"),
("silhouette_double_the_targets", "silhouette", "double_the_targets"),)
def test_generalization_modes(self, name, generalization_mode):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, False, "discrete_relative")
_random_unroll(env, difficulty=generalization_mode)
if __name__ == "__main__":
absltest.main()
|
[
"dm_construction.get_unity_environment",
"numpy.random.choice",
"absl.testing.parameterized.named_parameters",
"absl.testing.absltest.main",
"dm_construction.get_environment",
"numpy.random.randint",
"numpy.random.seed",
"numpy.random.uniform",
"absl.flags.DEFINE_string"
] |
[((850, 894), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""backend"""', '"""docker"""', '""""""'], {}), "('backend', 'docker', '')\n", (869, 894), False, 'from absl import flags\n'), ((1580, 1600), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1594, 1600), True, 'import numpy as np\n'), ((3014, 3204), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('covering', 'covering')", "('covering_hard', 'covering_hard')", "('connecting', 'connecting')", "('silhouette', 'silhouette')", "('marble_run', 'marble_run')"], {}), "(('covering', 'covering'), ('covering_hard',\n 'covering_hard'), ('connecting', 'connecting'), ('silhouette',\n 'silhouette'), ('marble_run', 'marble_run'))\n", (3044, 3204), False, 'from absl.testing import parameterized\n'), ((3515, 3705), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('covering', 'covering')", "('covering_hard', 'covering_hard')", "('connecting', 'connecting')", "('silhouette', 'silhouette')", "('marble_run', 'marble_run')"], {}), "(('covering', 'covering'), ('covering_hard',\n 'covering_hard'), ('connecting', 'connecting'), ('silhouette',\n 'silhouette'), ('marble_run', 'marble_run'))\n", (3545, 3705), False, 'from absl.testing import parameterized\n'), ((4020, 4275), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('connecting_additional_layer', 'connecting', 'additional_layer')", "('connecting_mixed_height_targets', 'connecting', 'mixed_height_targets')", "('silhouette_double_the_targets', 'silhouette', 'double_the_targets')"], {}), "(('connecting_additional_layer', 'connecting',\n 'additional_layer'), ('connecting_mixed_height_targets', 'connecting',\n 'mixed_height_targets'), ('silhouette_double_the_targets', 'silhouette',\n 'double_the_targets'))\n", (4050, 4275), False, 'from absl.testing import parameterized\n'), ((4584, 4599), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4597, 4599), False, 'from absl.testing import absltest\n'), ((1672, 1702), 'numpy.random.choice', 'np.random.choice', (['[8]'], {'p': '[1.0]'}), '([8], p=[1.0])\n', (1688, 1702), True, 'import numpy as np\n'), ((2446, 2615), 'dm_construction.get_environment', 'dm_construction.get_environment', (['problem_type'], {'unity_environment': 'self._unity_envs[backend_type]', 'wrapper_type': 'wrapper_type', 'curriculum_sample': 'curriculum_sample'}), '(problem_type, unity_environment=self.\n _unity_envs[backend_type], wrapper_type=wrapper_type, curriculum_sample\n =curriculum_sample)\n', (2477, 2615), False, 'import dm_construction\n'), ((1139, 1179), 'numpy.random.randint', 'np.random.randint', (["observation['n_edge']"], {}), "(observation['n_edge'])\n", (1156, 1179), True, 'import numpy as np\n'), ((2805, 2852), 'dm_construction.get_unity_environment', 'dm_construction.get_unity_environment', (['"""docker"""'], {}), "('docker')\n", (2842, 2852), False, 'import dm_construction\n'), ((1244, 1293), 'numpy.random.randint', 'np.random.randint', (['spec.minimum', '(spec.maximum + 1)'], {}), '(spec.minimum, spec.maximum + 1)\n', (1261, 1293), True, 'import numpy as np\n'), ((1318, 1363), 'numpy.random.uniform', 'np.random.uniform', (['spec.minimum', 'spec.maximum'], {}), '(spec.minimum, spec.maximum)\n', (1335, 1363), True, 'import numpy as np\n'), ((1891, 1921), 'numpy.random.choice', 'np.random.choice', (['[8]'], {'p': '[1.0]'}), '([8], p=[1.0])\n', (1907, 1921), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 13:30:53 2017
@author: laoj
"""
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape
from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky
from pymc3.math import tround
#%% n scalar, p 1D
#n = 183
n = np.array([[106],
[143],
[102],
[116],
[183],
[150]])
p = np.array([[ 0.21245365, 0.41223126, 0.37531509],
[ 0.13221011, 0.50537169, 0.3624182 ],
[ 0.08813779, 0.54447146, 0.36739075],
[ 0.18932804, 0.4630365, 0.34763546],
[ 0.11006472, 0.49227755, 0.39765773],
[ 0.17886852, 0.41098834, 0.41014314]])
# p = np.array([ 0.21245365, 0.41223126, 0.37531509])
n = tt.as_tensor_variable(n)
p = tt.as_tensor_variable(p)
n = np.squeeze(n)
n = tt.shape_padright(n) if n.ndim == 1 else tt.as_tensor_variable(n)
n.ndim
n * p
#%%
n = np.array([[106],
[143],
[102],
[116],
[183],
[150]])
#n = 183
p = np.array([[ 0.21245365, 0.41223126, 0.37531509],
[ 0.13221011, 0.50537169, 0.3624182 ],
[ 0.08813779, 0.54447146, 0.36739075],
[ 0.18932804, 0.4630365, 0.34763546],
[ 0.11006472, 0.49227755, 0.39765773],
[ 0.17886852, 0.41098834, 0.41014314]])
#p = np.array([[ 0.21245365, 0.41223126, 0.37531509]])
#n = tt.as_tensor_variable(n)
p = tt.as_tensor_variable(p)
#%%
class Multinomial(Discrete):
def __init__(self, n, p, *args, **kwargs):
super(Multinomial, self).__init__(*args, **kwargs)
p = p / tt.sum(p, axis=-1, keepdims=True)
n = np.squeeze(n) # works also if n is a tensor
if len(self.shape) > 1:
m = self.shape[-2]
try:
assert n.shape == (m,)
except (AttributeError, AssertionError):
n = n * tt.ones(m)
self.n = tt.shape_padright(n)
self.p = p if p.ndim > 1 else tt.shape_padleft(p)
elif n.ndim == 1:
self.n = tt.shape_padright(n)
self.p = p if p.ndim > 1 else tt.shape_padleft(p)
else:
# n is a scalar, p is a 1d array
self.n = tt.as_tensor_variable(n)
self.p = tt.as_tensor_variable(p)
self.mean = self.n * self.p
mode = tt.cast(tt.round(self.mean), 'int32')
diff = self.n - tt.sum(mode, axis=-1, keepdims=True)
inc_bool_arr = tt.abs_(diff) > 0
mode = tt.inc_subtensor(mode[inc_bool_arr.nonzero()],
diff[inc_bool_arr.nonzero()])
self.mode = mode
def _random(self, n, p, size=None):
original_dtype = p.dtype
# Set float type to float64 for numpy. This change is related to numpy issue #8317 (https://github.com/numpy/numpy/issues/8317)
p = p.astype('float64')
# Now, re-normalize all of the values in float64 precision. This is done inside the conditionals
if size == p.shape:
size = None
if (p.ndim == 1) and (n.ndim == 0):
p = p / p.sum()
randnum = np.random.multinomial(n, p.squeeze(), size=size)
else:
p = p / p.sum(axis=1, keepdims=True)
if n.shape[0] > p.shape[0]:
randnum = np.asarray([
np.random.multinomial(nn, p.squeeze(), size=size)
for nn in n
])
elif n.shape[0] < p.shape[0]:
randnum = np.asarray([
np.random.multinomial(n.squeeze(), pp, size=size)
for pp in p
])
else:
randnum = np.asarray([
np.random.multinomial(nn, pp, size=size)
for (nn, pp) in zip(n, p)
])
return randnum.astype(original_dtype)
def random(self, point=None, size=None):
n, p = draw_values([self.n, self.p], point=point)
samples = generate_samples(self._random, n, p,
dist_shape=self.shape,
size=size)
return samples
def logp(self, x):
n = self.n
p = self.p
return bound(
tt.sum(factln(n)) - tt.sum(factln(x)) + tt.sum(x * tt.log(p)),
tt.all(x >= 0),
tt.all(tt.eq(tt.sum(x, axis=-1, keepdims=True), n)),
tt.all(p <= 1),
tt.all(tt.eq(tt.sum(p, axis=-1), 1)),
tt.all(tt.ge(n, 0)),
broadcast_conditions=False
)
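# Note: logp above is the standard multinomial log-pmf,
#   log P(x | n, p) = logGamma(n + 1) - sum_i logGamma(x_i + 1) + sum_i x_i * log(p_i),
# with bound() returning -inf whenever a support constraint (non-negative
# counts summing to n, probabilities in [0, 1] summing to 1) is violated.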
Multinomial.dist(1,np.ones(3)/3,shape=(6, 3)).mode.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).p.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).n.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).mean.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).random()
#%%
counts =np.asarray([[19, 50, 37],
[21, 67, 55],
[11, 53, 38],
[17, 54, 45],
[24, 93, 66],
[27, 53, 70]])
Multinomial.dist(n,p,shape=(6, 3)).logp(x=counts).eval()
#%%
with pm.Model() as model:
like = Multinomial('obs_ABC', n, p, observed=counts, shape=counts.shape)
#%%
paramall = (
[[.25, .25, .25, .25], 4, 2],
[[.25, .25, .25, .25], (1, 4), 3],
# 3: expect to fail
# [[.25, .25, .25, .25], (10, 4)],
[[.25, .25, .25, .25], (10, 1, 4), 5],
# 5: expect to fail
# [[[.25, .25, .25, .25]], (2, 4), [7, 11]],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (2, 4), 13],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (2, 4), [17, 19]],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (1, 2, 4), [23, 29]],
[[[.25, .25, .25, .25],
[.25, .25, .25, .25]], (10, 2, 4), [31, 37]],
)
for p, shape, n in paramall:
with pm.Model() as model:
m = Multinomial('m', n=n, p=np.asarray(p), shape=shape)
print(m.random().shape)
#%%
counts =np.asarray([[19, 50, 37],
[21, 67, 55],
[11, 53, 38],
[17, 54, 45],
[24, 93, 66],
[27, 53, 70]])
n = np.array([[106],
[143],
[102],
[116],
[183],
[150]])
sparsity=1 #not zero
beta=np.ones(counts.shape) #input for dirichlet
with pm.Model() as model:
theta=pm.Dirichlet('theta',beta/sparsity, shape = counts.shape)
transition=pm.Multinomial('transition',n,theta,observed=counts)
trace=pm.sample(1000)
#%%
import numpy as np
import pymc3 as pm
import theano.tensor as tt
def norm_simplex(p):
"""Sum-to-zero transformation."""
return (p.T / p.sum(axis=-1)).T
def ccmodel(beta, x):
"""Community composition model."""
return norm_simplex(tt.exp(tt.dot(x, tt.log(beta))))
class DirichletMultinomial(pm.Discrete):
"""Dirichlet Multinomial Model
"""
def __init__(self, alpha, *args, **kwargs):
super(DirichletMultinomial, self).__init__(*args, **kwargs)
self.alpha = alpha
def logp(self, x):
alpha = self.alpha
n = tt.sum(x, axis=-1)
sum_alpha = tt.sum(alpha, axis=-1)
const = (tt.gammaln(n + 1) + tt.gammaln(sum_alpha)) - tt.gammaln(n + sum_alpha)
series = tt.gammaln(x + alpha) - (tt.gammaln(x + 1) + tt.gammaln(alpha))
result = const + tt.sum(series, axis=-1)
return result
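# Note: logp above is the Dirichlet-multinomial log-pmf,
#   log P(x | alpha) = logGamma(n + 1) + logGamma(A) - logGamma(n + A)
#       + sum_i [logGamma(x_i + alpha_i) - logGamma(x_i + 1) - logGamma(alpha_i)],
# where n = sum_i x_i and A = sum_i alpha_i.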
def as_col(x):
if isinstance(x, tt.TensorVariable):
return x.dimshuffle(0, 'x')
else:
return np.asarray(x).reshape(-1, 1)
def as_row(x):
if isinstance(x, tt.TensorVariable):
return x.dimshuffle('x', 0)
else:
return np.asarray(x).reshape(1, -1)
n, k, r = 25, 10, 2
x = np.random.randint(0, 1000, size=(n, k))
y = np.random.randint(0, 1000, size=n)
design = np.vstack((np.ones(25), np.random.randint(2, size=n))).T
with pm.Model() as model:
# Community composition
pi = pm.Dirichlet('pi', np.ones(k), shape=(r, k))
comp = pm.Deterministic('comp', ccmodel(pi, design))
# Inferred population density of observed taxa (hierarchical model)
rho = pm.Normal('rho', shape=r)
tau = pm.Lognormal('tau')
dens = pm.Lognormal('dens', tt.dot(design, rho), tau=tau, shape=n)
# Community composition *with* the spike
expected_recovery = as_col(1 / dens)
_comp = norm_simplex(tt.concatenate((comp, expected_recovery), axis=1))
# Variability
mu = pm.Lognormal('mu')
# Data
obs = DirichletMultinomial('obs', _comp * mu,
observed=tt.concatenate((x, as_col(y)), axis=1))
pm.sample(1000)
|
[
"pymc3.distributions.dist_math.factln",
"theano.tensor.ones",
"theano.tensor.all",
"theano.tensor.abs_",
"numpy.array",
"pymc3.sample",
"pymc3.distributions.distribution.generate_samples",
"theano.tensor.dot",
"theano.tensor.shape_padleft",
"theano.tensor.log",
"numpy.asarray",
"numpy.random.multinomial",
"theano.tensor.round",
"pymc3.distributions.distribution.draw_values",
"theano.tensor.as_tensor_variable",
"theano.tensor.concatenate",
"numpy.ones",
"theano.tensor.sum",
"numpy.squeeze",
"theano.tensor.shape_padright",
"pymc3.Model",
"pymc3.Normal",
"pymc3.Lognormal",
"pymc3.Dirichlet",
"pymc3.Multinomial",
"numpy.random.randint",
"theano.tensor.ge",
"theano.tensor.gammaln"
] |
[((406, 458), 'numpy.array', 'np.array', (['[[106], [143], [102], [116], [183], [150]]'], {}), '([[106], [143], [102], [116], [183], [150]])\n', (414, 458), True, 'import numpy as np\n'), ((468, 719), 'numpy.array', 'np.array', (['[[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, 0.3624182],\n [0.08813779, 0.54447146, 0.36739075], [0.18932804, 0.4630365, \n 0.34763546], [0.11006472, 0.49227755, 0.39765773], [0.17886852, \n 0.41098834, 0.41014314]]'], {}), '([[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, \n 0.3624182], [0.08813779, 0.54447146, 0.36739075], [0.18932804, \n 0.4630365, 0.34763546], [0.11006472, 0.49227755, 0.39765773], [\n 0.17886852, 0.41098834, 0.41014314]])\n', (476, 719), True, 'import numpy as np\n'), ((790, 814), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['n'], {}), '(n)\n', (811, 814), True, 'import theano.tensor as tt\n'), ((819, 843), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['p'], {}), '(p)\n', (840, 843), True, 'import theano.tensor as tt\n'), ((848, 861), 'numpy.squeeze', 'np.squeeze', (['n'], {}), '(n)\n', (858, 861), True, 'import numpy as np\n'), ((953, 1005), 'numpy.array', 'np.array', (['[[106], [143], [102], [116], [183], [150]]'], {}), '([[106], [143], [102], [116], [183], [150]])\n', (961, 1005), True, 'import numpy as np\n'), ((1025, 1276), 'numpy.array', 'np.array', (['[[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, 0.3624182],\n [0.08813779, 0.54447146, 0.36739075], [0.18932804, 0.4630365, \n 0.34763546], [0.11006472, 0.49227755, 0.39765773], [0.17886852, \n 0.41098834, 0.41014314]]'], {}), '([[0.21245365, 0.41223126, 0.37531509], [0.13221011, 0.50537169, \n 0.3624182], [0.08813779, 0.54447146, 0.36739075], [0.18932804, \n 0.4630365, 0.34763546], [0.11006472, 0.49227755, 0.39765773], [\n 0.17886852, 0.41098834, 0.41014314]])\n', (1033, 1276), True, 'import numpy as np\n'), ((1378, 1402), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['p'], {}), '(p)\n', (1399, 1402), True, 'import theano.tensor as tt\n'), ((4786, 4886), 'numpy.asarray', 'np.asarray', (['[[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93, 66], [27,\n 53, 70]]'], {}), '([[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93,\n 66], [27, 53, 70]])\n', (4796, 4886), True, 'import numpy as np\n'), ((5894, 5994), 'numpy.asarray', 'np.asarray', (['[[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93, 66], [27,\n 53, 70]]'], {}), '([[19, 50, 37], [21, 67, 55], [11, 53, 38], [17, 54, 45], [24, 93,\n 66], [27, 53, 70]])\n', (5904, 5994), True, 'import numpy as np\n'), ((6040, 6092), 'numpy.array', 'np.array', (['[[106], [143], [102], [116], [183], [150]]'], {}), '([[106], [143], [102], [116], [183], [150]])\n', (6048, 6092), True, 'import numpy as np\n'), ((6124, 6145), 'numpy.ones', 'np.ones', (['counts.shape'], {}), '(counts.shape)\n', (6131, 6145), True, 'import numpy as np\n'), ((7556, 7595), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': '(n, k)'}), '(0, 1000, size=(n, k))\n', (7573, 7595), True, 'import numpy as np\n'), ((7600, 7634), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': 'n'}), '(0, 1000, size=n)\n', (7617, 7634), True, 'import numpy as np\n'), ((866, 886), 'theano.tensor.shape_padright', 'tt.shape_padright', (['n'], {}), '(n)\n', (883, 886), True, 'import theano.tensor as tt\n'), ((907, 931), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['n'], {}), '(n)\n', (928, 931), 
True, 'import theano.tensor as tt\n'), ((4994, 5004), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5002, 5004), True, 'import pymc3 as pm\n'), ((6173, 6183), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (6181, 6183), True, 'import pymc3 as pm\n'), ((6204, 6262), 'pymc3.Dirichlet', 'pm.Dirichlet', (['"""theta"""', '(beta / sparsity)'], {'shape': 'counts.shape'}), "('theta', beta / sparsity, shape=counts.shape)\n", (6216, 6262), True, 'import pymc3 as pm\n'), ((6277, 6332), 'pymc3.Multinomial', 'pm.Multinomial', (['"""transition"""', 'n', 'theta'], {'observed': 'counts'}), "('transition', n, theta, observed=counts)\n", (6291, 6332), True, 'import pymc3 as pm\n'), ((6340, 6355), 'pymc3.sample', 'pm.sample', (['(1000)'], {}), '(1000)\n', (6349, 6355), True, 'import pymc3 as pm\n'), ((7707, 7717), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (7715, 7717), True, 'import pymc3 as pm\n'), ((7950, 7975), 'pymc3.Normal', 'pm.Normal', (['"""rho"""'], {'shape': 'r'}), "('rho', shape=r)\n", (7959, 7975), True, 'import pymc3 as pm\n'), ((7986, 8005), 'pymc3.Lognormal', 'pm.Lognormal', (['"""tau"""'], {}), "('tau')\n", (7998, 8005), True, 'import pymc3 as pm\n'), ((8270, 8288), 'pymc3.Lognormal', 'pm.Lognormal', (['"""mu"""'], {}), "('mu')\n", (8282, 8288), True, 'import pymc3 as pm\n'), ((8440, 8455), 'pymc3.sample', 'pm.sample', (['(1000)'], {}), '(1000)\n', (8449, 8455), True, 'import pymc3 as pm\n'), ((1607, 1620), 'numpy.squeeze', 'np.squeeze', (['n'], {}), '(n)\n', (1617, 1620), True, 'import numpy as np\n'), ((3882, 3924), 'pymc3.distributions.distribution.draw_values', 'draw_values', (['[self.n, self.p]'], {'point': 'point'}), '([self.n, self.p], point=point)\n', (3893, 3924), False, 'from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape\n'), ((3943, 4013), 'pymc3.distributions.distribution.generate_samples', 'generate_samples', (['self._random', 'n', 'p'], {'dist_shape': 'self.shape', 'size': 'size'}), '(self._random, n, p, dist_shape=self.shape, size=size)\n', (3959, 4013), False, 'from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape\n'), ((5769, 5779), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5777, 5779), True, 'import pymc3 as pm\n'), ((6932, 6950), 'theano.tensor.sum', 'tt.sum', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (6938, 6950), True, 'import theano.tensor as tt\n'), ((6971, 6993), 'theano.tensor.sum', 'tt.sum', (['alpha'], {'axis': '(-1)'}), '(alpha, axis=-1)\n', (6977, 6993), True, 'import theano.tensor as tt\n'), ((7784, 7794), 'numpy.ones', 'np.ones', (['k'], {}), '(k)\n', (7791, 7794), True, 'import numpy as np\n'), ((8038, 8057), 'theano.tensor.dot', 'tt.dot', (['design', 'rho'], {}), '(design, rho)\n', (8044, 8057), True, 'import theano.tensor as tt\n'), ((8189, 8238), 'theano.tensor.concatenate', 'tt.concatenate', (['(comp, expected_recovery)'], {'axis': '(1)'}), '((comp, expected_recovery), axis=1)\n', (8203, 8238), True, 'import theano.tensor as tt\n'), ((1561, 1594), 'theano.tensor.sum', 'tt.sum', (['p'], {'axis': '(-1)', 'keepdims': '(True)'}), '(p, axis=-1, keepdims=True)\n', (1567, 1594), True, 'import theano.tensor as tt\n'), ((1880, 1900), 'theano.tensor.shape_padright', 'tt.shape_padright', (['n'], {}), '(n)\n', (1897, 1900), True, 'import theano.tensor as tt\n'), ((2304, 2323), 'theano.tensor.round', 'tt.round', (['self.mean'], {}), '(self.mean)\n', (2312, 2323), True, 'import theano.tensor as tt\n'), ((2358, 2394), 'theano.tensor.sum', 'tt.sum', (['mode'], {'axis': 
'(-1)', 'keepdims': '(True)'}), '(mode, axis=-1, keepdims=True)\n', (2364, 2394), True, 'import theano.tensor as tt\n'), ((2418, 2431), 'theano.tensor.abs_', 'tt.abs_', (['diff'], {}), '(diff)\n', (2425, 2431), True, 'import theano.tensor as tt\n'), ((4279, 4293), 'theano.tensor.all', 'tt.all', (['(x >= 0)'], {}), '(x >= 0)\n', (4285, 4293), True, 'import theano.tensor as tt\n'), ((4372, 4386), 'theano.tensor.all', 'tt.all', (['(p <= 1)'], {}), '(p <= 1)\n', (4378, 4386), True, 'import theano.tensor as tt\n'), ((7057, 7082), 'theano.tensor.gammaln', 'tt.gammaln', (['(n + sum_alpha)'], {}), '(n + sum_alpha)\n', (7067, 7082), True, 'import theano.tensor as tt\n'), ((7100, 7121), 'theano.tensor.gammaln', 'tt.gammaln', (['(x + alpha)'], {}), '(x + alpha)\n', (7110, 7121), True, 'import theano.tensor as tt\n'), ((7189, 7212), 'theano.tensor.sum', 'tt.sum', (['series'], {'axis': '(-1)'}), '(series, axis=-1)\n', (7195, 7212), True, 'import theano.tensor as tt\n'), ((7655, 7666), 'numpy.ones', 'np.ones', (['(25)'], {}), '(25)\n', (7662, 7666), True, 'import numpy as np\n'), ((7668, 7696), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'n'}), '(2, size=n)\n', (7685, 7696), True, 'import numpy as np\n'), ((1943, 1962), 'theano.tensor.shape_padleft', 'tt.shape_padleft', (['p'], {}), '(p)\n', (1959, 1962), True, 'import theano.tensor as tt\n'), ((2010, 2030), 'theano.tensor.shape_padright', 'tt.shape_padright', (['n'], {}), '(n)\n', (2027, 2030), True, 'import theano.tensor as tt\n'), ((2173, 2197), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['n'], {}), '(n)\n', (2194, 2197), True, 'import theano.tensor as tt\n'), ((2219, 2243), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['p'], {}), '(p)\n', (2240, 2243), True, 'import theano.tensor as tt\n'), ((4457, 4468), 'theano.tensor.ge', 'tt.ge', (['n', '(0)'], {}), '(n, 0)\n', (4462, 4468), True, 'import theano.tensor as tt\n'), ((5826, 5839), 'numpy.asarray', 'np.asarray', (['p'], {}), '(p)\n', (5836, 5839), True, 'import numpy as np\n'), ((6624, 6636), 'theano.tensor.log', 'tt.log', (['beta'], {}), '(beta)\n', (6630, 6636), True, 'import theano.tensor as tt\n'), ((7012, 7029), 'theano.tensor.gammaln', 'tt.gammaln', (['(n + 1)'], {}), '(n + 1)\n', (7022, 7029), True, 'import theano.tensor as tt\n'), ((7032, 7053), 'theano.tensor.gammaln', 'tt.gammaln', (['sum_alpha'], {}), '(sum_alpha)\n', (7042, 7053), True, 'import theano.tensor as tt\n'), ((7125, 7142), 'theano.tensor.gammaln', 'tt.gammaln', (['(x + 1)'], {}), '(x + 1)\n', (7135, 7142), True, 'import theano.tensor as tt\n'), ((7145, 7162), 'theano.tensor.gammaln', 'tt.gammaln', (['alpha'], {}), '(alpha)\n', (7155, 7162), True, 'import theano.tensor as tt\n'), ((7353, 7366), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7363, 7366), True, 'import numpy as np\n'), ((7500, 7513), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7510, 7513), True, 'import numpy as np\n'), ((2073, 2092), 'theano.tensor.shape_padleft', 'tt.shape_padleft', (['p'], {}), '(p)\n', (2089, 2092), True, 'import theano.tensor as tt\n'), ((4320, 4353), 'theano.tensor.sum', 'tt.sum', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (4326, 4353), True, 'import theano.tensor as tt\n'), ((4413, 4431), 'theano.tensor.sum', 'tt.sum', (['p'], {'axis': '(-1)'}), '(p, axis=-1)\n', (4419, 4431), True, 'import theano.tensor as tt\n'), ((4540, 4550), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (4547, 4550), True, 'import numpy as np\n'), ((1848, 
1858), 'theano.tensor.ones', 'tt.ones', (['m'], {}), '(m)\n', (1855, 1858), True, 'import theano.tensor as tt\n'), ((4211, 4220), 'pymc3.distributions.dist_math.factln', 'factln', (['n'], {}), '(n)\n', (4217, 4220), False, 'from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky\n'), ((4231, 4240), 'pymc3.distributions.dist_math.factln', 'factln', (['x'], {}), '(x)\n', (4237, 4240), False, 'from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky\n'), ((4255, 4264), 'theano.tensor.log', 'tt.log', (['p'], {}), '(p)\n', (4261, 4264), True, 'import theano.tensor as tt\n'), ((3669, 3709), 'numpy.random.multinomial', 'np.random.multinomial', (['nn', 'pp'], {'size': 'size'}), '(nn, pp, size=size)\n', (3690, 3709), True, 'import numpy as np\n')]
|
from typing import Callable, Optional, Sequence, Tuple, Union
import numpy
from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice
from dexp.processing.utils.normalise import Normalise
from dexp.utils import xpArray
from dexp.utils.backends import Backend
def scatter_gather_i2i(
function: Callable,
image: xpArray,
tiles: Union[int, Tuple[int, ...]],
margins: Optional[Union[int, Tuple[int, ...]]] = None,
normalise: bool = False,
clip: bool = False,
to_numpy: bool = True,
internal_dtype: Optional[numpy.dtype] = None,
) -> xpArray:
"""
Image-2-image scatter-gather.
'Scatters' computation of a given unary function by splitting the input array into tiles,
computing using a given backend, and reassembling the tiles into a single array of same
shape as the inpout that is either backed by the same backend than that of the input image,
or that is backed by numpy -- usefull when the compute backend cannot hold the whole input and output
images in memory.
Parameters
----------
function : unary function
    image : input image (can be from any backend, including numpy)
tiles : tile sizes to cut input image into, can be a single integer or a tuple of integers.
margins : margins to add to each tile, can be a single integer or a tuple of integers.
if None, no margins are added.
normalise : normalises the input image.
clip : clip after normalisation/denormalisation
    to_numpy : should the result be a numpy array? Very useful when the compute backend
cannot hold the whole input and output images in memory.
internal_dtype : internal dtype for computation
Returns
-------
    Result of applying the unary function to the input image; if to_numpy==True the result is
    returned as a numpy array, otherwise it stays on the compute backend.
"""
if internal_dtype is None:
internal_dtype = image.dtype
if type(tiles) == int:
tiles = (tiles,) * image.ndim
    # If None is passed for a tile, that means we don't tile along that axis; we also clip the tile size:
tiles = tuple((length if tile is None else min(length, tile)) for tile, length in zip(tiles, image.shape))
if margins is None:
margins = (0,) * image.ndim
if type(margins) == int:
margins = (margins,) * image.ndim
if to_numpy:
result = numpy.empty(shape=image.shape, dtype=internal_dtype)
else:
result = Backend.get_xp_module(image).empty_like(image, dtype=internal_dtype)
# Normalise:
norm = Normalise(Backend.to_backend(image), do_normalise=normalise, clip=clip, quantile=0.005)
# image shape:
shape = image.shape
# We compute the slices objects to cut the input and target images into batches:
tile_slices = list(nd_split_slices(shape, chunks=tiles, margins=margins))
tile_slices_no_margins = list(nd_split_slices(shape, chunks=tiles))
# Zipping together slices with and without margins:
slices = zip(tile_slices, tile_slices_no_margins)
# Number of tiles:
number_of_tiles = len(tile_slices)
if number_of_tiles == 1:
# If there is only one tile, let's not be complicated about it:
result = norm.backward(function(norm.forward(image)))
if to_numpy:
result = Backend.to_numpy(result, dtype=internal_dtype)
else:
result = Backend.to_backend(result, dtype=internal_dtype)
else:
_scatter_gather_loop(
norm.backward, function, image, internal_dtype, norm.forward, result, shape, slices, to_numpy
)
return result
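# Illustrative usage (sketch; `my_denoise_fn` is a hypothetical unary image
# function): process a large volume in 320-voxel tiles with 16-voxel margins
# so tile seams are hidden, gathering the result back into a numpy array.
#   denoised = scatter_gather_i2i(my_denoise_fn, big_image,
#                                 tiles=320, margins=16, to_numpy=True)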
def _scatter_gather_loop(
denorm_fun: Callable,
function: Callable,
image: xpArray,
internal_dtype: numpy.dtype,
norm_fun: Callable,
result: Callable,
shape: Tuple[int, ...],
slices: Sequence[Tuple[slice, ...]],
to_numpy: bool,
) -> None:
for tile_slice, tile_slice_no_margins in slices:
image_tile = image[tile_slice]
image_tile = Backend.to_backend(image_tile, dtype=internal_dtype)
image_tile = denorm_fun(function(norm_fun(image_tile)))
if to_numpy:
image_tile = Backend.to_numpy(image_tile, dtype=internal_dtype)
else:
image_tile = Backend.to_backend(image_tile, dtype=internal_dtype)
remove_margin_slice_tuple = remove_margin_slice(shape, tile_slice, tile_slice_no_margins)
image_tile = image_tile[remove_margin_slice_tuple]
result[tile_slice_no_margins] = image_tile
# Dask turned out not too work great here, HUGE overhead compared to the light approach above.
# def scatter_gather_dask(backend: Backend,
# function,
# image,
# chunks,
# margins=None):
# boundary=None
# trim=True
# align_arrays=True
#
# image_d = from_array(image, chunks=chunks, asarray=False)
#
# def function_numpy(_image):
# print(_image.shape)
# return backend.to_numpy(function(_image))
#
# #func, *args, depth=None, boundary=None, trim=True, align_arrays=True, **kwargs
# computation= map_overlap(function_numpy,
# image_d,
# depth=margins,
# boundary=boundary,
# trim=trim,
# align_arrays=align_arrays,
# dtype=image.dtype
# )
#
# #computation.visualize(filename='transpose.png')
# result = computation.compute()
#
# return result
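# A minimal, numpy-only sketch of the tile / margin / crop / reassemble idea
# implemented above (illustration only: it does not use the dexp backends or the
# nd_slice helpers, and the name `tiled_apply_1d` is made up). Uncomment to run:
#
# import numpy
#
# def tiled_apply_1d(function, image, tile=64, margin=8):
#     result = numpy.empty_like(image)
#     for start in range(0, image.shape[0], tile):
#         end = min(image.shape[0], start + tile)
#         lo = max(0, start - margin)
#         hi = min(image.shape[0], end + margin)
#         piece = function(image[lo:hi])                   # compute on tile + margins
#         result[start:end] = piece[start - lo:end - lo]   # crop margins, write back
#     return result
#
# x = numpy.random.rand(1000).astype(numpy.float32)
# y = tiled_apply_1d(lambda a: a ** 2, x)
# assert numpy.allclose(y, x ** 2)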
|
[
"dexp.processing.utils.nd_slice.nd_split_slices",
"dexp.utils.backends.Backend.get_xp_module",
"dexp.utils.backends.Backend.to_backend",
"dexp.processing.utils.nd_slice.remove_margin_slice",
"dexp.utils.backends.Backend.to_numpy",
"numpy.empty"
] |
[((2346, 2398), 'numpy.empty', 'numpy.empty', ([], {'shape': 'image.shape', 'dtype': 'internal_dtype'}), '(shape=image.shape, dtype=internal_dtype)\n', (2357, 2398), False, 'import numpy\n'), ((2534, 2559), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['image'], {}), '(image)\n', (2552, 2559), False, 'from dexp.utils.backends import Backend\n'), ((2765, 2818), 'dexp.processing.utils.nd_slice.nd_split_slices', 'nd_split_slices', (['shape'], {'chunks': 'tiles', 'margins': 'margins'}), '(shape, chunks=tiles, margins=margins)\n', (2780, 2818), False, 'from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice\n'), ((2854, 2890), 'dexp.processing.utils.nd_slice.nd_split_slices', 'nd_split_slices', (['shape'], {'chunks': 'tiles'}), '(shape, chunks=tiles)\n', (2869, 2890), False, 'from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice\n'), ((3969, 4021), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['image_tile'], {'dtype': 'internal_dtype'}), '(image_tile, dtype=internal_dtype)\n', (3987, 4021), False, 'from dexp.utils.backends import Backend\n'), ((4312, 4373), 'dexp.processing.utils.nd_slice.remove_margin_slice', 'remove_margin_slice', (['shape', 'tile_slice', 'tile_slice_no_margins'], {}), '(shape, tile_slice, tile_slice_no_margins)\n', (4331, 4373), False, 'from dexp.processing.utils.nd_slice import nd_split_slices, remove_margin_slice\n'), ((3272, 3318), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['result'], {'dtype': 'internal_dtype'}), '(result, dtype=internal_dtype)\n', (3288, 3318), False, 'from dexp.utils.backends import Backend\n'), ((3354, 3402), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['result'], {'dtype': 'internal_dtype'}), '(result, dtype=internal_dtype)\n', (3372, 3402), False, 'from dexp.utils.backends import Backend\n'), ((4132, 4182), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['image_tile'], {'dtype': 'internal_dtype'}), '(image_tile, dtype=internal_dtype)\n', (4148, 4182), False, 'from dexp.utils.backends import Backend\n'), ((4222, 4274), 'dexp.utils.backends.Backend.to_backend', 'Backend.to_backend', (['image_tile'], {'dtype': 'internal_dtype'}), '(image_tile, dtype=internal_dtype)\n', (4240, 4274), False, 'from dexp.utils.backends import Backend\n'), ((2426, 2454), 'dexp.utils.backends.Backend.get_xp_module', 'Backend.get_xp_module', (['image'], {}), '(image)\n', (2447, 2454), False, 'from dexp.utils.backends import Backend\n')]
|
import unittest
import csv
import numpy as np
from viroconcom.fitting import Fit
def read_benchmark_dataset(path='tests/testfiles/1year_dataset_A.txt'):
"""
    Reads a dataset provided for the environmental contour benchmark.
Parameters
----------
path : string
        Path to the dataset including the file name, defaults to 'tests/testfiles/1year_dataset_A.txt'.
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
x_label : str
        Label of the environmental variable 1.
y_label : str
Label of the environmental variable 2.
"""
x = list()
y = list()
x_label = None
y_label = None
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx == 0:
x_label = row[1][
1:] # Ignore first char (is a white space).
y_label = row[2][
1:] # Ignore first char (is a white space).
if idx > 0: # Ignore the header
x.append(float(row[1]))
y.append(float(row[2]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y, x_label, y_label)
class FittingTest(unittest.TestCase):
def test_2d_fit(self):
"""
2-d Fit with Weibull and Lognormal distribution.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_1 = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents spectral peak period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.4165147571863412, places=5)
self.assertAlmostEqual(dist0.scale(0), 2.833833521811032, places=5)
self.assertAlmostEqual(dist0.loc(0), 0.07055663251419833, places=5)
self.assertAlmostEqual(dist1.shape(0), 0.17742685807554776 , places=5)
#self.assertAlmostEqual(dist1.scale, 7.1536437634240135+2.075539206642004e^{0.1515051024957754x}, places=5)
self.assertAlmostEqual(dist1.loc, None, places=5)
# Now use a 2-parameter Weibull distribution instead of 3-p distr.
dist_description_0 = {'name': 'Weibull_2p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
self.assertEqual(str(my_fit)[0:5], 'Fit()')
def test_2d_benchmark_case(self):
"""
Reproduces the baseline results presented in doi: 10.1115/OMAE2019-96523 .
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset(
path='tests/testfiles/allyears_dataset_A.txt')
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 0.5}
dist_description_1 = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
'functions': ('exp3', None, 'power3')} # Shape, location, scale.
# Compute the fit.
my_fit = Fit((sample_hs, sample_tz),
(dist_description_0, dist_description_1))
# Evaluate the fitted parameters.
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.48, delta=0.02)
self.assertAlmostEqual(dist0.scale(0), 0.944, delta=0.01)
self.assertAlmostEqual(dist0.loc(0), 0.0981, delta=0.001)
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.001)
self.assertAlmostEqual(dist1.shape.b, 0.308, delta=0.002)
self.assertAlmostEqual(dist1.shape.c, -0.250, delta=0.002)
self.assertAlmostEqual(dist1.scale.a, 1.47 , delta=0.02)
self.assertAlmostEqual(dist1.scale.b, 0.214, delta=0.002)
self.assertAlmostEqual(dist1.scale.c, 0.641, delta=0.002)
self.assertAlmostEqual(dist1.scale(0), 4.3 , delta=0.1)
self.assertAlmostEqual(dist1.scale(2), 6, delta=0.1)
self.assertAlmostEqual(dist1.scale(5), 8, delta=0.1)
def test_2d_exponentiated_wbl_fit(self):
"""
Tests if a 2D fit that includes an exp. Weibull distribution works.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_hs = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents zero-upcrossing period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_tz = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_hs]
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'power3')
# Shape, Location, Scale
}
# Fit the model to the data, first test a 1D fit.
fit = Fit(sample_hs, dist_description_hs)
# Now perform the 2D fit.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
dist0 = fit.mul_var_dist.distributions[0]
self.assertGreater(dist0.shape(0), 1) # Should be about 1.5.
self.assertLess(dist0.shape(0), 2)
self.assertIsNone(dist0.loc(0)) # Has no location parameter, should be None.
self.assertGreater(dist0.scale(0), 2) # Should be about 3.
self.assertLess(dist0.scale(0), 4)
self.assertGreater(dist0.shape2(0), 0.5) # Should be about 1.
self.assertLess(dist0.shape2(0), 2)
def test_fit_lnsquare2(self):
"""
Tests a 2D fit that includes an logarithm square dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.scale.a, 1) # Should be about 1-5
self.assertLess(dist1.scale.a, 5) # Should be about 1-5
self.assertGreater(dist1.scale.b, 2) # Should be about 2-10
self.assertLess(dist1.scale.b, 10) # Should be about 2-10
self.assertGreater(dist1.scale(0), 0.1)
self.assertLess(dist1.scale(0), 10)
self.assertEqual(dist1.scale.func_name, 'lnsquare2')
def test_fit_powerdecrease3(self):
"""
Tests a 2D fit that includes an powerdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('powerdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.shape.a, -0.1) # Should be about 0
self.assertLess(dist1.shape.a, 0.1) # Should be about 0
self.assertGreater(dist1.shape.b, 1.5) # Should be about 2-5
self.assertLess(dist1.shape.b, 6) # Should be about 2-10
self.assertGreater(dist1.shape.c, 0.8) # Should be about 1.1
self.assertLess(dist1.shape.c, 2) # Should be about 1.1
self.assertGreater(dist1.shape(0), 0.25) # Should be about 0.35
self.assertLess(dist1.shape(0), 0.4) # Should be about 0.35
self.assertEqual(dist1.shape.func_name, 'powerdecrease3')
def test_fit_asymdecrease3(self):
"""
Tests a 2D fit that includes an asymdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('asymdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.1) # Should be about 0
self.assertAlmostEqual(dist1.shape.b, 0.35, delta=0.4) # Should be about 0.35
self.assertAlmostEqual(np.abs(dist1.shape.c), 0.45, delta=0.2) # Should be about 0.45
        self.assertAlmostEqual(dist1.shape(0), 0.35, delta=0.2)  # Should be about 0.35
def test_min_number_datapoints_for_fit(self):
"""
Tests if the minimum number of datapoints required for a fit works.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 10
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_10 = dist1.scale.a
# Now require more datapoints for a fit.
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 500
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_500 = dist1.scale.a
# Because in case 2 fewer bins have been used we should get different
# coefficients for the dependence function.
self.assertNotEqual(a_min_10, a_min_500)
    def test_multi_processing(self):
"""
2-d Fit with multiprocessing (specified by setting a value for timeout)
"""
# Define a sample and a fit.
prng = np.random.RandomState(42)
sample_1 = prng.weibull(1.5, 1000)*3
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
dist_description_0 = {'name': 'Weibull',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1),
timeout=10)
def test_wbl_fit_with_negative_location(self):
"""
Tests fitting a translated Weibull distribution which would result
in a negative location parameter.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_3p',
'dependency': (None, None, None)}
# Fit the model to the data.
fit = Fit((sample_hs, ),
(dist_description_hs, ))
# Correct values for 10 years of data can be found in
# 10.1115/OMAE2019-96523 . Here we used 1 year of data.
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
self.assertGreater(dist0.loc(0), 0.0) # Should be 0.0981
self.assertLess(dist0.loc(0), 0.3) # Should be 0.0981
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
        # Shift the wave data by -2 m and fit again.
sample_hs = sample_hs - 2
# Negative location values will be set to zero instead and a
# warning will be raised.
with self.assertWarns(RuntimeWarning):
fit = Fit((sample_hs, ),
(dist_description_hs, ))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
# Should be estimated to be 0.0981 - 2 and corrected to be 0.
self.assertEqual(dist0.loc(0), 0)
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
def test_omae2020_wind_wave_model(self):
"""
Tests fitting the wind-wave model that was used in the publication
'Global hierarchical models for wind and wave contours' on dataset D.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0), 2.42, delta=1)
self.assertAlmostEqual(dist0.scale(0), 10.0, delta=2)
self.assertAlmostEqual(dist0.shape2(0), 0.761, delta=0.5)
dist1 = fit.mul_var_dist.distributions[1]
self.assertEqual(dist1.shape2(0), 5)
inspection_data1 = fit.multiple_fit_inspection_data[1]
self.assertEqual(inspection_data1.shape2_value[0], 5)
self.assertAlmostEqual(inspection_data1.shape_value[0], 0.8, delta=0.5) # interval centered at 1
self.assertAlmostEqual(inspection_data1.shape_value[4], 1.5, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.shape_value[9], 2.5, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.shape(0), 0.8, delta=0.3)
self.assertAlmostEqual(dist1.shape(10), 1.6, delta=0.5)
self.assertAlmostEqual(dist1.shape(20), 2.3, delta=0.7)
self.assertAlmostEqual(dist1.shape.a, 0.582, delta=0.5)
self.assertAlmostEqual(dist1.shape.b, 1.90, delta=1)
self.assertAlmostEqual(dist1.shape.c, 0.248, delta=0.5)
self.assertAlmostEqual(dist1.shape.d, 8.49, delta=5)
self.assertAlmostEqual(inspection_data1.scale_value[0], 0.15, delta=0.2) # interval centered at 1
self.assertAlmostEqual(inspection_data1.scale_value[4], 1, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.scale_value[9], 4, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.scale(0), 0.15, delta=0.5)
self.assertAlmostEqual(dist1.scale(10), 1, delta=0.5)
self.assertAlmostEqual(dist1.scale(20), 4, delta=1)
self.assertAlmostEqual(dist1.scale.a, 0.394, delta=0.5)
self.assertAlmostEqual(dist1.scale.b, 0.0178, delta=0.1)
self.assertAlmostEqual(dist1.scale.c, 1.88, delta=0.8)
def test_wrong_model(self):
"""
        Tests whether errors are raised when incorrect fitting models are
specified.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
        # This structure is incorrect as there is no distribution called 'something'.
dist_description_v = {'name': 'something',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, None, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
        # This structure is incorrect as there is no dependence function called 'something'.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('something', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as there will be only 1 or 2 intervals
# that fit 2000 datapoints.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 2000}
with self.assertRaises(RuntimeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as alpha3 is only compatible with
# logistics4 .
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('power3', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(TypeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Lognormal',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, 5, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, 5, None), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
def test_weighting_of_dependence_function(self):
"""
Tests if using weights when the dependence function is fitted works
correctly.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': False}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_no_weights = fit.mul_var_dist.distributions[1]
# Now perform a fit with weights.
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': True}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_with_weights = fit.mul_var_dist.distributions[1]
        # Make sure the two fitted dependence functions are different.
d = np.abs(dist1_with_weights.scale(0) - dist1_no_weights.scale(0)) / \
np.abs(dist1_no_weights.scale(0))
self.assertGreater(d, 0.01)
# Make sure they are not too different.
d = np.abs(dist1_with_weights.scale(20) - dist1_no_weights.scale(20)) / \
np.abs(dist1_no_weights.scale(20))
self.assertLess(d, 0.5)
|
[
"numpy.abs",
"numpy.asarray",
"numpy.exp",
"viroconcom.fitting.Fit",
"csv.reader",
"numpy.random.RandomState"
] |
[((1299, 1312), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1309, 1312), True, 'import numpy as np\n'), ((1321, 1334), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1331, 1334), True, 'import numpy as np\n'), ((825, 860), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""";"""'}), "(csv_file, delimiter=';')\n", (835, 860), False, 'import csv\n'), ((1535, 1560), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (1556, 1560), True, 'import numpy as np\n'), ((2523, 2590), 'viroconcom.fitting.Fit', 'Fit', (['(sample_1, sample_2)', '(dist_description_0, dist_description_1)'], {}), '((sample_1, sample_2), (dist_description_0, dist_description_1))\n', (2526, 2590), False, 'from viroconcom.fitting import Fit\n'), ((3641, 3708), 'viroconcom.fitting.Fit', 'Fit', (['(sample_1, sample_2)', '(dist_description_0, dist_description_1)'], {}), '((sample_1, sample_2), (dist_description_0, dist_description_1))\n', (3644, 3708), False, 'from viroconcom.fitting import Fit\n'), ((4569, 4638), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_0, dist_description_1)'], {}), '((sample_hs, sample_tz), (dist_description_0, dist_description_1))\n', (4572, 4638), False, 'from viroconcom.fitting import Fit\n'), ((5746, 5771), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (5767, 5771), True, 'import numpy as np\n'), ((7030, 7065), 'viroconcom.fitting.Fit', 'Fit', (['sample_hs', 'dist_description_hs'], {}), '(sample_hs, dist_description_hs)\n', (7033, 7065), False, 'from viroconcom.fitting import Fit\n'), ((7114, 7185), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (7117, 7185), False, 'from viroconcom.fitting import Fit\n'), ((8633, 8704), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (8636, 8704), False, 'from viroconcom.fitting import Fit\n'), ((10232, 10303), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (10235, 10303), False, 'from viroconcom.fitting import Fit\n'), ((12016, 12087), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (12019, 12087), False, 'from viroconcom.fitting import Fit\n'), ((13604, 13675), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (13607, 13675), False, 'from viroconcom.fitting import Fit\n'), ((14347, 14418), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs, sample_tz)', '(dist_description_hs, dist_description_tz)'], {}), '((sample_hs, sample_tz), (dist_description_hs, dist_description_tz))\n', (14350, 14418), False, 'from viroconcom.fitting import Fit\n'), ((14967, 14992), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (14988, 14992), True, 'import numpy as np\n'), ((15548, 15627), 'viroconcom.fitting.Fit', 'Fit', (['(sample_1, sample_2)', '(dist_description_0, dist_description_1)'], {'timeout': '(10)'}), '((sample_1, sample_2), (dist_description_0, 
dist_description_1), timeout=10)\n', (15551, 15627), False, 'from viroconcom.fitting import Fit\n'), ((16216, 16257), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs,)', '(dist_description_hs,)'], {}), '((sample_hs,), (dist_description_hs,))\n', (16219, 16257), False, 'from viroconcom.fitting import Fit\n'), ((18521, 18590), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (18524, 18590), False, 'from viroconcom.fitting import Fit\n'), ((26684, 26753), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (26687, 26753), False, 'from viroconcom.fitting import Fit\n'), ((27483, 27552), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (27486, 27552), False, 'from viroconcom.fitting import Fit\n'), ((12424, 12445), 'numpy.abs', 'np.abs', (['dist1.shape.c'], {}), '(dist1.shape.c)\n', (12430, 12445), True, 'import numpy as np\n'), ((16979, 17020), 'viroconcom.fitting.Fit', 'Fit', (['(sample_hs,)', '(dist_description_hs,)'], {}), '((sample_hs,), (dist_description_hs,))\n', (16982, 17020), False, 'from viroconcom.fitting import Fit\n'), ((21264, 21303), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v,)', '(dist_description_v,)'], {}), '((sample_v,), (dist_description_v,))\n', (21267, 21303), False, 'from viroconcom.fitting import Fit\n'), ((22045, 22114), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (22048, 22114), False, 'from viroconcom.fitting import Fit\n'), ((22878, 22947), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (22881, 22947), False, 'from viroconcom.fitting import Fit\n'), ((23822, 23891), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (23825, 23891), False, 'from viroconcom.fitting import Fit\n'), ((24442, 24481), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v,)', '(dist_description_v,)'], {}), '((sample_v,), (dist_description_v,))\n', (24445, 24481), False, 'from viroconcom.fitting import Fit\n'), ((25409, 25478), 'viroconcom.fitting.Fit', 'Fit', (['(sample_v, sample_hs)', '(dist_description_v, dist_description_hs)'], {}), '((sample_v, sample_hs), (dist_description_v, dist_description_hs))\n', (25412, 25478), False, 'from viroconcom.fitting import Fit\n'), ((1967, 1986), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (1973, 1986), True, 'import numpy as np\n'), ((6182, 6201), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (6188, 6201), True, 'import numpy as np\n'), ((15070, 15089), 'numpy.exp', 'np.exp', (['(0.2 * point)'], {}), '(0.2 * point)\n', (15076, 15089), True, 'import numpy as np\n')]
|
import numpy as np
"""
Contains preprocessing code for creating additional information based on MRI volumes and true segmentation maps (asegs).
Eg. weight masks for median frequency class weighing, edge weighing etc.
"""
def create_weight_mask(aseg):
"""
    Main function for calculating the weight mask of a segmentation map for the loss function. Currently only Median
    Frequency Weighing is implemented; other types of weights can be added additively to the 'weights' variable.
Args:
aseg (numpy.ndarray): Segmentation map with shape l x w x d
Returns:
numpy.ndarray: Weight Mask of same shape as aseg
"""
if len(aseg.shape)==4:
_, h,w,d = aseg.shape
elif len(aseg.shape)==3:
h,w,d = aseg.shape
weights = np.zeros((h,w,d), dtype=float) # Container ndarray of zeros for weights
weights += median_freq_class_weighing(aseg) # Add median frequency weights
# Further weights (eg. extra weights for region borders) can be added here
# Eg. weights += edge_weights(aseg)
return weights
def median_freq_class_weighing(aseg):
"""
    Median Frequency Weighing, guarded against the absence of certain classes in the sample.
Args:
aseg (numpy.ndarray): Segmentation map with shape l x w x d
Returns:
numpy.ndarray: Median frequency weighted mask of same shape as aseg
"""
# Calculates median frequency based weighing for classes
unique, counts = np.unique(aseg, return_counts=True)
if len(aseg.shape)==4:
_, h,w,d = aseg.shape
elif len(aseg.shape)==3:
h,w,d = aseg.shape
class_wise_weights = np.median(counts)/counts
aseg = aseg.astype(int)
# Guards against the absence of certain classes in sample
discon_guard_lut = np.zeros(int(max(unique))+1)-1
for idx, val in enumerate(unique):
discon_guard_lut[int(val)] = idx
discon_guard_lut = discon_guard_lut.astype(int)
# Assigns weights to w_mask and resets the missing classes
w_mask = np.reshape(class_wise_weights[discon_guard_lut[aseg.ravel()]], (h, w, d))
return w_mask
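# A small illustration of the weighting behaviour (not part of the original
# module; uncomment to run): under median frequency weighing a rare class
# receives a larger weight than a frequent one.
#
# _aseg = np.zeros((4, 4, 4), dtype=int)
# _aseg[0, 0, 0] = 1                     # class 1 occurs once, class 0 occurs 63 times
# _w = create_weight_mask(_aseg)
# assert _w[0, 0, 0] > _w[1, 1, 1]       # the rare class is weighted more heavily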
# Label mapping functions (to aparc (eval) and to label (train))
def map_label2aparc_aseg(mapped_aseg):
"""
Function to perform look-up table mapping from label space to aparc.DKTatlas+aseg space
:param np.ndarray mapped_aseg: label space segmentation (aparc.DKTatlas + aseg)
    :return: np.ndarray: segmentation mapped back to the aparc.DKTatlas+aseg label space
"""
aseg = np.zeros_like(mapped_aseg)
labels = np.array([0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14,
15, 16, 17, 18, 24, 26, 28, 31, 41, 43, 44,
46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63,
77, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1034, 1035,
2002, 2005, 2010, 2012, 2013, 2014, 2016, 2017, 2021, 2022, 2023,
2024, 2025, 2028])
h, w, d = aseg.shape
aseg = labels[mapped_aseg.ravel()]
aseg = aseg.reshape((h, w, d))
return aseg
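# A small illustration of the look-up table mapping (not part of the original
# module; uncomment to run): label-space index 1 maps to value 2 of the
# aparc.DKTatlas+aseg label set above, and index 0 stays 0.
#
# _mapped = np.zeros((2, 2, 2), dtype=int)
# _mapped[0, 0, 0] = 1
# _aseg = map_label2aparc_aseg(_mapped)
# assert _aseg[0, 0, 0] == 2 and _aseg[1, 1, 1] == 0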
# if __name__ == "__main__":
# #a = np.random.randint(0, 5, size=(10,10,10))
# #b = np.random.randint(5, 10, size=(10000))
#
# #map_masks_into_5_classes(np.random.randint(0, 250, size=(256, 256, 256)))
#
# import nibabel as nib
# from data_utils.process_mgz_into_hdf5 import map_aparc_aseg2label, map_aseg2label
# path = r"abide_ii/sub-28675/mri/aparc.DKTatlas+aseg.mgz"
# aseg = nib.load(path).get_data()
# labels_full, _ = map_aparc_aseg2label(aseg) # only for 79 classes case
# # labels_full, _ = map_aseg2label(aseg) # only for 37 classes case
# aseg = labels_full
# # print(aseg.shape)
# median_freq_class_weighing(aseg)
# # print(edge_weighing(aseg, 1.5))
|
[
"numpy.median",
"numpy.unique",
"numpy.array",
"numpy.zeros",
"numpy.zeros_like"
] |
[((756, 788), 'numpy.zeros', 'np.zeros', (['(h, w, d)'], {'dtype': 'float'}), '((h, w, d), dtype=float)\n', (764, 788), True, 'import numpy as np\n'), ((1441, 1476), 'numpy.unique', 'np.unique', (['aseg'], {'return_counts': '(True)'}), '(aseg, return_counts=True)\n', (1450, 1476), True, 'import numpy as np\n'), ((2410, 2436), 'numpy.zeros_like', 'np.zeros_like', (['mapped_aseg'], {}), '(mapped_aseg)\n', (2423, 2436), True, 'import numpy as np\n'), ((2450, 2881), 'numpy.array', 'np.array', (['[0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 26, 28, 31, 41, \n 43, 44, 46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63, 77, 1002, 1003, \n 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016,\n 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028,\n 1029, 1030, 1031, 1034, 1035, 2002, 2005, 2010, 2012, 2013, 2014, 2016,\n 2017, 2021, 2022, 2023, 2024, 2025, 2028]'], {}), '([0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 26, 28,\n 31, 41, 43, 44, 46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63, 77, 1002, \n 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,\n 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027,\n 1028, 1029, 1030, 1031, 1034, 1035, 2002, 2005, 2010, 2012, 2013, 2014,\n 2016, 2017, 2021, 2022, 2023, 2024, 2025, 2028])\n', (2458, 2881), True, 'import numpy as np\n'), ((1616, 1633), 'numpy.median', 'np.median', (['counts'], {}), '(counts)\n', (1625, 1633), True, 'import numpy as np\n')]
|
import os
import scipy
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
def predict_batch(net, inputs):
v = Variable(inputs.cuda(), volatile=True)
return net(v).data.cpu().numpy()
def get_probabilities(model, loader):
model.eval()
    return np.vstack([predict_batch(model, data[0]) for data in loader])
def get_predictions(probs, thresholds):
preds = np.copy(probs)
preds[preds >= thresholds] = 1
preds[preds < thresholds] = 0
return preds.astype('uint8')
def get_argmax(output):
val,idx = torch.max(output, dim=1)
return idx.data.cpu().view(-1).numpy()
def get_targets(loader):
targets = None
for data in loader:
if targets is None:
shape = list(data[1].size())
shape[0] = 0
targets = np.empty(shape)
target = data[1]
if len(target.size()) == 1:
target = target.view(-1,1)
target = target.numpy()
targets = np.vstack([targets, target])
return targets
def ensemble_with_method(arr, method):
    # `c` is expected to be the project's constants module (providing MEAN,
    # GMEAN and VOTE); its import is not shown in this file.
    if method == c.MEAN:
return np.mean(arr, axis=0)
elif method == c.GMEAN:
return scipy.stats.mstats.gmean(arr, axis=0)
elif method == c.VOTE:
return scipy.stats.mode(arr, axis=0)[0][0]
raise Exception("Operation not found")
|
[
"numpy.copy",
"numpy.mean",
"scipy.stats.mode",
"torch.max",
"numpy.empty",
"numpy.vstack",
"scipy.stats.mstats.gmean"
] |
[((411, 425), 'numpy.copy', 'np.copy', (['probs'], {}), '(probs)\n', (418, 425), True, 'import numpy as np\n'), ((568, 592), 'torch.max', 'torch.max', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (577, 592), False, 'import torch\n'), ((988, 1016), 'numpy.vstack', 'np.vstack', (['[targets, target]'], {}), '([targets, target])\n', (997, 1016), True, 'import numpy as np\n'), ((1117, 1137), 'numpy.mean', 'np.mean', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1124, 1137), True, 'import numpy as np\n'), ((822, 837), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (830, 837), True, 'import numpy as np\n'), ((1181, 1218), 'scipy.stats.mstats.gmean', 'scipy.stats.mstats.gmean', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1205, 1218), False, 'import scipy\n'), ((1261, 1290), 'scipy.stats.mode', 'scipy.stats.mode', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (1277, 1290), False, 'import scipy\n')]
|
#!/usr/bin/env python3
#
# Author: <NAME>
# License: BSD 2-clause
# Last Change: Sun May 09, 2021 at 02:52 AM +0200
import numpy as np
ARRAY_TYPE = 'np'
def read_branch(ntp, tree, branch, idx=None):
data = ntp[tree][branch].array(library=ARRAY_TYPE)
return data if not idx else data[idx]
def read_branches_dict(ntp, tree, branches):
return ntp[tree].arrays(branches, library=ARRAY_TYPE)
def read_branches(ntp, tree, branches, idx=None, transpose=False):
data = list(ntp[tree].arrays(branches, library=ARRAY_TYPE).values())
if idx is not None:
data = [d[idx] for d in data]
return np.column_stack(data) if transpose else data
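# Example usage (a sketch: 'sample.root', the tree name 'tree' and the branch
# names 'px'/'py' are hypothetical -- any ROOT ntuple readable with uproot will
# do). Uncomment to run:
#
# import uproot
# ntp = uproot.open('sample.root')
# px = read_branch(ntp, 'tree', 'px')                            # 1D array
# xy = read_branches(ntp, 'tree', ['px', 'py'], transpose=True)  # shape (n, 2)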
|
[
"numpy.column_stack"
] |
[((623, 644), 'numpy.column_stack', 'np.column_stack', (['data'], {}), '(data)\n', (638, 644), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Test code for the BBox Object
"""
import numpy as np
import pytest
from geometry_utils.bound_box import (BBox,
asBBox,
NullBBox,
InfBBox,
fromBBArray,
from_points,
)
class TestConstructors():
def test_creates(self):
B = BBox(((0, 0), (5, 5)))
assert isinstance(B, BBox)
def test_type(self):
B = np.array(((0, 0), (5, 5)))
assert not isinstance(B, BBox)
def testDataType(self):
B = BBox(((0, 0), (5, 5)))
assert B.dtype == np.float
def testShape(self):
B = BBox((0, 0, 5, 5))
assert B.shape == (2, 2)
def testShape2(self):
with pytest.raises(ValueError):
BBox((0, 0, 5))
def testShape3(self):
with pytest.raises(ValueError):
BBox((0, 0, 5, 6, 7))
def testArrayConstruction(self):
A = np.array(((4, 5), (10, 12)), np.float_)
B = BBox(A)
assert isinstance(B, BBox)
def testMinMax(self):
with pytest.raises(ValueError):
BBox((0, 0, -1, 6))
def testMinMax2(self):
with pytest.raises(ValueError):
BBox((0, 0, 1, -6))
def testMinMax3(self):
# OK to have a zero-sized BB
B = BBox(((0, 0), (0, 5)))
assert isinstance(B, BBox)
def testMinMax4(self):
# OK to have a zero-sized BB
B = BBox(((10., -34), (10., -34.0)))
assert isinstance(B, BBox)
def testMinMax5(self):
# OK to have a tiny BB
B = BBox(((0, 0), (1e-20, 5)))
assert isinstance(B, BBox)
def testMinMax6(self):
# Should catch tiny difference
with pytest.raises(ValueError):
BBox(((0, 0), (-1e-20, 5)))
class TestAsBBox():
def testPassThrough(self):
B = BBox(((0, 0), (5, 5)))
C = asBBox(B)
assert B is C
def testPassThrough2(self):
B = ((0, 0), (5, 5))
C = asBBox(B)
assert B is not C
def testPassArray(self):
# Different data type
A = np.array(((0, 0), (5, 5)))
C = asBBox(A)
assert A is not C
def testPassArray2(self):
# same data type -- should be a view
A = np.array(((0, 0), (5, 5)), np.float_)
C = asBBox(A)
A[0, 0] = -10
assert C[0, 0] == A[0, 0]
class TestIntersect():
def testSame(self):
B = BBox(((-23.5, 456), (56, 532.0)))
C = BBox(((-23.5, 456), (56, 532.0)))
assert B.Overlaps(C)
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((0, 12), (10, 32.0)))
assert B.Overlaps(C)
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 12), (25, 32.0)))
assert B.Overlaps(C)
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 5), (25, 15)))
assert B.Overlaps(C)
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 15)))
assert B.Overlaps(C)
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 9.2)))
assert not B.Overlaps(C)
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 25.001), (8.5, 32)))
assert not B.Overlaps(C)
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((4, 8), (4.95, 32)))
assert not B.Overlaps(C)
def testRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((17.1, 8), (17.95, 32)))
assert not B.Overlaps(C)
def testInside(self):
B = BBox(((-15, -25), (-5, -10)))
C = BBox(((-12, -22), (-6, -8)))
assert B.Overlaps(C)
def testOutside(self):
B = BBox(((-15, -25), (-5, -10)))
C = BBox(((-17, -26), (3, 0)))
assert B.Overlaps(C)
def testTouch(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 8), (17.95, 32)))
assert B.Overlaps(C)
def testCorner(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 25), (17.95, 32)))
assert B.Overlaps(C)
def testZeroSize(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 25), (15, 25)))
assert B.Overlaps(C)
def testZeroSize2(self):
B = BBox(((5, 10), (5, 10)))
C = BBox(((15, 25), (15, 25)))
assert not B.Overlaps(C)
def testZeroSize3(self):
B = BBox(((5, 10), (5, 10)))
C = BBox(((0, 8), (10, 12)))
assert B.Overlaps(C)
def testZeroSize4(self):
B = BBox(((5, 1), (10, 25)))
C = BBox(((8, 8), (8, 8)))
assert B.Overlaps(C)
class TestEquality():
def testSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.)))
assert B == C
def testIdentical(self):
B = BBox(((1.0, 2.0), (5., 10.)))
assert B == B
def testNotSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.1)))
assert not B == C
def testWithArray(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.0, 2.0), (5., 10.)))
assert B == C
def testWithArray2(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.0, 2.0), (5., 10.)))
assert C == B
def testWithArray3(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.01, 2.0), (5., 10.)))
assert not C == B
class TestInside():
def testSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.)))
assert B.Inside(C)
def testPoint(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((3.0, 4.0), (3.0, 4.0)))
assert B.Inside(C)
def testPointOutside(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((-3.0, 4.0), (0.10, 4.0)))
assert not B.Inside(C)
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((0, 12), (10, 32.0)))
assert not B.Inside(C)
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 12), (25, 32.0)))
assert not B.Inside(C)
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 5), (25, 15)))
assert not B.Inside(C)
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 15)))
assert not (B.Inside(C))
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 9.2)))
assert not (B.Inside(C))
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 25.001), (8.5, 32)))
assert not (B.Inside(C))
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((4, 8), (4.95, 32)))
assert not (B.Inside(C))
def testRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((17.1, 8), (17.95, 32)))
assert not (B.Inside(C))
class TestPointInside():
def testPointIn(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 4.0)
assert (B.PointInside(P))
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (4, 30)
assert not (B.PointInside(P))
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
P = (16, 30)
assert not (B.PointInside(P))
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
P = (16, 4)
assert not (B.PointInside(P))
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (-10, 5)
assert not (B.PointInside(P))
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
P = (10, 5)
assert not (B.PointInside(P))
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
P = (10, 25.001)
assert not (B.PointInside(P))
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (4, 12)
assert not (B.PointInside(P))
def testRight(self):
B = BBox(((5, 10), (15, 25)))
P = (17.1, 12.3)
assert not (B.PointInside(P))
def testPointOnTopLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 10.)
assert (B.PointInside(P))
def testPointLeftTopLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (-3.0, 10.)
assert not (B.PointInside(P))
def testPointOnBottomLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 5.)
assert (B.PointInside(P))
def testPointOnLeft(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-10, -5.)
assert (B.PointInside(P))
def testPointOnRight(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-1, -5.)
assert (B.PointInside(P))
def testPointOnBottomRight(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-1, -10.)
assert (B.PointInside(P))
class Test_from_points():
def testCreate(self):
Pts = np.array(((5, 2), (3, 4), (1, 6)), np.float64)
B = from_points(Pts)
assert (B[0, 0] == 1.0 and
B[0, 1] == 2.0 and
B[1, 0] == 5.0 and
B[1, 1] == 6.0)
def testCreateInts(self):
Pts = np.array(((5, 2), (3, 4), (1, 6)))
B = from_points(Pts)
assert (B[0, 0] == 1.0 and
B[0, 1] == 2.0 and
B[1, 0] == 5.0 and
B[1, 1] == 6.0)
def testSinglePoint(self):
Pts = np.array((5, 2), np.float_)
B = from_points(Pts)
assert (B[0, 0] == 5. and
B[0, 1] == 2.0 and
B[1, 0] == 5. and
B[1, 1] == 2.0)
def testListTuples(self):
Pts = [(3, 6.5), (13, 43.2), (-4.32, -4), (65, -23), (-0.0001,
23.432)]
B = from_points(Pts)
assert (B[0, 0] == -4.32 and
B[0, 1] == -23.0 and
B[1, 0] == 65.0 and
B[1, 1] == 43.2)
class TestMerge():
A = BBox(((-23.5, 456), (56, 532.0)))
B = BBox(((-20.3, 460), (54, 465))) # B should be completely inside A
C = BBox(((-23.5, 456), (58, 540.))) # up and to the right or A
D = BBox(((-26.5, 12), (56, 532.0)))
def testInside(self):
C = self.A.copy()
C.Merge(self.B)
assert (C == self.A)
def testFullOutside(self):
C = self.B.copy()
C.Merge(self.A)
assert (C == self.A)
def testUpRight(self):
A = self.A.copy()
A.Merge(self.C)
assert (A[0] == self.A[0] and A[1] == self.C[1])
def testDownLeft(self):
A = self.A.copy()
A.Merge(self.D)
assert (A[0] == self.D[0] and A[1] == self.A[1])
class TestWidthHeight():
B = BBox(((1.0, 2.0), (5., 10.)))
def testWidth(self):
assert (self.B.Width == 4.0)
def testWidth2(self):
assert (self.B.Height == 8.0)
def testSetW(self):
with pytest.raises(AttributeError):
self.B.Height = 6
def testSetH(self):
with pytest.raises(AttributeError):
self.B.Width = 6
class TestCenter():
B = BBox(((1.0, 2.0), (5., 10.)))
def testCenter(self):
assert ((self.B.Center == (3.0, 6.0)).all())
def testSetCenter(self):
with pytest.raises(AttributeError):
self.B.Center = (6, 5)
class TestBBarray():
BBarray = np.array((((-23.5, 456), (56, 532.0)), ((-20.3, 460),
(54, 465)), ((-23.5, 456), (58, 540.)), ((-26.5,
12), (56, 532.0))), dtype=np.float)
BB = asBBox(((-26.5, 12.), (58., 540.)))
def testJoin(self):
BB = fromBBArray(self.BBarray)
assert BB == self.BB
class TestNullBBox():
B1 = NullBBox()
B2 = NullBBox()
B3 = BBox(((1.0, 2.0), (5., 10.)))
def testValues(self):
assert (np.alltrue(np.isnan(self.B1)))
def testIsNull(self):
assert (self.B1.IsNull)
def testEquals(self):
assert ((self.B1 == self.B2) is True)
def testNotEquals(self):
assert not self.B1 == self.B3
def testNotEquals2(self):
assert not self.B3 == self.B1
def testMerge(self):
C = self.B1.copy()
C.Merge(self.B3)
assert C == self.B3, 'merge failed, got: %s' % C
def testOverlaps(self):
assert self.B1.Overlaps(self.B3) is False
def testOverlaps2(self):
assert self.B3.Overlaps(self.B1) is False
class TestInfBBox():
B1 = InfBBox()
B2 = InfBBox()
B3 = BBox(((1.0, 2.0), (5., 10.)))
NB = NullBBox()
def testValues(self):
assert (np.alltrue(np.isinf(self.B1)))
# def testIsNull(self):
# assert ( self.B1.IsNull )
def testEquals(self):
assert self.B1 == self.B2
def testNotEquals(self):
assert not self.B1 == self.B3
def testNotEquals2(self):
assert self.B1 != self.B3
def testNotEquals3(self):
assert not self.B3 == self.B1
def testMerge(self):
C = self.B1.copy()
C.Merge(self.B3)
assert C == self.B2, 'merge failed, got: %s' % C
def testMerge2(self):
C = self.B3.copy()
C.Merge(self.B1)
assert C == self.B1, 'merge failed, got: %s' % C
def testOverlaps(self):
assert (self.B1.Overlaps(self.B2) is True)
def testOverlaps2(self):
assert (self.B3.Overlaps(self.B1) is True)
def testOverlaps3(self):
assert (self.B1.Overlaps(self.B3) is True)
def testOverlaps4(self):
assert (self.B1.Overlaps(self.NB) is True)
def testOverlaps5(self):
assert (self.NB.Overlaps(self.B1) is True)
class TestSides():
B = BBox(((1.0, 2.0), (5., 10.)))
def testLeft(self):
assert self.B.Left == 1.0
def testRight(self):
assert self.B.Right == 5.0
def testBottom(self):
assert self.B.Bottom == 2.0
def testTop(self):
assert self.B.Top == 10.0
class TestAsPoly():
B = BBox(((5, 0), (10, 20)))
corners = np.array([(5., 0.), (5., 20.), (10., 20.), (10., 0.)],
dtype=np.float64)
def testCorners(self):
print(self.B.AsPoly())
assert np.array_equal(self.B.AsPoly(), self.corners)
|
[
"geometry_utils.bound_box.fromBBArray",
"geometry_utils.bound_box.InfBBox",
"geometry_utils.bound_box.BBox",
"numpy.array",
"geometry_utils.bound_box.from_points",
"pytest.raises",
"numpy.isnan",
"numpy.isinf",
"geometry_utils.bound_box.NullBBox",
"geometry_utils.bound_box.asBBox"
] |
[((10354, 10387), 'geometry_utils.bound_box.BBox', 'BBox', (['((-23.5, 456), (56, 532.0))'], {}), '(((-23.5, 456), (56, 532.0)))\n', (10358, 10387), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((10396, 10427), 'geometry_utils.bound_box.BBox', 'BBox', (['((-20.3, 460), (54, 465))'], {}), '(((-20.3, 460), (54, 465)))\n', (10400, 10427), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((10472, 10505), 'geometry_utils.bound_box.BBox', 'BBox', (['((-23.5, 456), (58, 540.0))'], {}), '(((-23.5, 456), (58, 540.0)))\n', (10476, 10505), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((10541, 10573), 'geometry_utils.bound_box.BBox', 'BBox', (['((-26.5, 12), (56, 532.0))'], {}), '(((-26.5, 12), (56, 532.0)))\n', (10545, 10573), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((11098, 11129), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (11102, 11129), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((11484, 11515), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (11488, 11515), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((11741, 11885), 'numpy.array', 'np.array', (['(((-23.5, 456), (56, 532.0)), ((-20.3, 460), (54, 465)), ((-23.5, 456), (58,\n 540.0)), ((-26.5, 12), (56, 532.0)))'], {'dtype': 'np.float'}), '((((-23.5, 456), (56, 532.0)), ((-20.3, 460), (54, 465)), ((-23.5, \n 456), (58, 540.0)), ((-26.5, 12), (56, 532.0))), dtype=np.float)\n', (11749, 11885), True, 'import numpy as np\n'), ((11935, 11973), 'geometry_utils.bound_box.asBBox', 'asBBox', (['((-26.5, 12.0), (58.0, 540.0))'], {}), '(((-26.5, 12.0), (58.0, 540.0)))\n', (11941, 11973), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12098, 12108), 'geometry_utils.bound_box.NullBBox', 'NullBBox', ([], {}), '()\n', (12106, 12108), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12118, 12128), 'geometry_utils.bound_box.NullBBox', 'NullBBox', ([], {}), '()\n', (12126, 12128), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12138, 12169), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (12142, 12169), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12838, 12847), 'geometry_utils.bound_box.InfBBox', 'InfBBox', ([], {}), '()\n', (12845, 12847), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12857, 12866), 'geometry_utils.bound_box.InfBBox', 'InfBBox', ([], {}), '()\n', (12864, 12866), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12876, 12907), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (12880, 12907), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12915, 12925), 'geometry_utils.bound_box.NullBBox', 
'NullBBox', ([], {}), '()\n', (12923, 12925), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((14031, 14062), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (14035, 14062), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((14333, 14357), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 0), (10, 20))'], {}), '(((5, 0), (10, 20)))\n', (14337, 14357), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((14372, 14457), 'numpy.array', 'np.array', (['[(5.0, 0.0), (5.0, 20.0), (10.0, 20.0), (10.0, 0.0)]'], {'dtype': 'np.float64'}), '([(5.0, 0.0), (5.0, 20.0), (10.0, 20.0), (10.0, 0.0)], dtype=np.float64\n )\n', (14380, 14457), True, 'import numpy as np\n'), ((492, 514), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 0), (5, 5))'], {}), '(((0, 0), (5, 5)))\n', (496, 514), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((588, 614), 'numpy.array', 'np.array', (['((0, 0), (5, 5))'], {}), '(((0, 0), (5, 5)))\n', (596, 614), True, 'import numpy as np\n'), ((695, 717), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 0), (5, 5))'], {}), '(((0, 0), (5, 5)))\n', (699, 717), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((791, 809), 'geometry_utils.bound_box.BBox', 'BBox', (['(0, 0, 5, 5)'], {}), '((0, 0, 5, 5))\n', (795, 809), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1089, 1128), 'numpy.array', 'np.array', (['((4, 5), (10, 12))', 'np.float_'], {}), '(((4, 5), (10, 12)), np.float_)\n', (1097, 1128), True, 'import numpy as np\n'), ((1141, 1148), 'geometry_utils.bound_box.BBox', 'BBox', (['A'], {}), '(A)\n', (1145, 1148), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1461, 1483), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 0), (0, 5))'], {}), '(((0, 0), (0, 5)))\n', (1465, 1483), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1596, 1630), 'geometry_utils.bound_box.BBox', 'BBox', (['((10.0, -34), (10.0, -34.0))'], {}), '(((10.0, -34), (10.0, -34.0)))\n', (1600, 1630), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1735, 1761), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 0), (1e-20, 5))'], {}), '(((0, 0), (1e-20, 5)))\n', (1739, 1761), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2010, 2032), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 0), (5, 5))'], {}), '(((0, 0), (5, 5)))\n', (2014, 2032), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2045, 2054), 'geometry_utils.bound_box.asBBox', 'asBBox', (['B'], {}), '(B)\n', (2051, 2054), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2151, 2160), 'geometry_utils.bound_box.asBBox', 'asBBox', (['B'], {}), '(B)\n', (2157, 2160), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2259, 2285), 'numpy.array', 'np.array', (['((0, 0), (5, 5))'], {}), '(((0, 0), (5, 5)))\n', (2267, 2285), True, 'import 
numpy as np\n'), ((2298, 2307), 'geometry_utils.bound_box.asBBox', 'asBBox', (['A'], {}), '(A)\n', (2304, 2307), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2422, 2459), 'numpy.array', 'np.array', (['((0, 0), (5, 5))', 'np.float_'], {}), '(((0, 0), (5, 5)), np.float_)\n', (2430, 2459), True, 'import numpy as np\n'), ((2472, 2481), 'geometry_utils.bound_box.asBBox', 'asBBox', (['A'], {}), '(A)\n', (2478, 2481), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2600, 2633), 'geometry_utils.bound_box.BBox', 'BBox', (['((-23.5, 456), (56, 532.0))'], {}), '(((-23.5, 456), (56, 532.0)))\n', (2604, 2633), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2646, 2679), 'geometry_utils.bound_box.BBox', 'BBox', (['((-23.5, 456), (56, 532.0))'], {}), '(((-23.5, 456), (56, 532.0)))\n', (2650, 2679), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2751, 2776), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (2755, 2776), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2789, 2816), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 12), (10, 32.0))'], {}), '(((0, 12), (10, 32.0)))\n', (2793, 2816), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2889, 2914), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (2893, 2914), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((2927, 2955), 'geometry_utils.bound_box.BBox', 'BBox', (['((12, 12), (25, 32.0))'], {}), '(((12, 12), (25, 32.0)))\n', (2931, 2955), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3028, 3053), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (3032, 3053), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3066, 3091), 'geometry_utils.bound_box.BBox', 'BBox', (['((12, 5), (25, 15))'], {}), '(((12, 5), (25, 15)))\n', (3070, 3091), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3163, 3188), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (3167, 3188), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3201, 3228), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10, 5), (8.5, 15))'], {}), '(((-10, 5), (8.5, 15)))\n', (3205, 3228), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3296, 3321), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (3300, 3321), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3334, 3362), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10, 5), (8.5, 9.2))'], {}), '(((-10, 5), (8.5, 9.2)))\n', (3338, 3362), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3434, 3459), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], 
{}), '(((5, 10), (15, 25)))\n', (3438, 3459), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3472, 3504), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10, 25.001), (8.5, 32))'], {}), '(((-10, 25.001), (8.5, 32)))\n', (3476, 3504), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3575, 3600), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (3579, 3600), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3613, 3639), 'geometry_utils.bound_box.BBox', 'BBox', (['((4, 8), (4.95, 32))'], {}), '(((4, 8), (4.95, 32)))\n', (3617, 3639), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3711, 3736), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (3715, 3736), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3749, 3779), 'geometry_utils.bound_box.BBox', 'BBox', (['((17.1, 8), (17.95, 32))'], {}), '(((17.1, 8), (17.95, 32)))\n', (3753, 3779), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3852, 3881), 'geometry_utils.bound_box.BBox', 'BBox', (['((-15, -25), (-5, -10))'], {}), '(((-15, -25), (-5, -10)))\n', (3856, 3881), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3894, 3922), 'geometry_utils.bound_box.BBox', 'BBox', (['((-12, -22), (-6, -8))'], {}), '(((-12, -22), (-6, -8)))\n', (3898, 3922), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((3992, 4021), 'geometry_utils.bound_box.BBox', 'BBox', (['((-15, -25), (-5, -10))'], {}), '(((-15, -25), (-5, -10)))\n', (3996, 4021), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4034, 4060), 'geometry_utils.bound_box.BBox', 'BBox', (['((-17, -26), (3, 0))'], {}), '(((-17, -26), (3, 0)))\n', (4038, 4060), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4128, 4153), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (4132, 4153), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4166, 4194), 'geometry_utils.bound_box.BBox', 'BBox', (['((15, 8), (17.95, 32))'], {}), '(((15, 8), (17.95, 32)))\n', (4170, 4194), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4263, 4288), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (4267, 4288), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4301, 4330), 'geometry_utils.bound_box.BBox', 'BBox', (['((15, 25), (17.95, 32))'], {}), '(((15, 25), (17.95, 32)))\n', (4305, 4330), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4401, 4426), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (4405, 4426), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4439, 4465), 
'geometry_utils.bound_box.BBox', 'BBox', (['((15, 25), (15, 25))'], {}), '(((15, 25), (15, 25)))\n', (4443, 4465), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4537, 4561), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (5, 10))'], {}), '(((5, 10), (5, 10)))\n', (4541, 4561), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4574, 4600), 'geometry_utils.bound_box.BBox', 'BBox', (['((15, 25), (15, 25))'], {}), '(((15, 25), (15, 25)))\n', (4578, 4600), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4676, 4700), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (5, 10))'], {}), '(((5, 10), (5, 10)))\n', (4680, 4700), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4713, 4737), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 8), (10, 12))'], {}), '(((0, 8), (10, 12)))\n', (4717, 4737), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4809, 4833), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 1), (10, 25))'], {}), '(((5, 1), (10, 25)))\n', (4813, 4833), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4846, 4868), 'geometry_utils.bound_box.BBox', 'BBox', (['((8, 8), (8, 8))'], {}), '(((8, 8), (8, 8)))\n', (4850, 4868), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((4959, 4990), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (4963, 4990), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5001, 5032), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5005, 5032), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5095, 5126), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5099, 5126), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5187, 5218), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5191, 5218), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5229, 5260), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.1))'], {}), '(((1.0, 2.0), (5.0, 10.1)))\n', (5233, 5260), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5328, 5359), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5332, 5359), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5370, 5405), 'numpy.array', 'np.array', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5378, 5405), True, 'import numpy as np\n'), ((5469, 5500), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5473, 5500), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5511, 5546), 'numpy.array', 
'np.array', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5519, 5546), True, 'import numpy as np\n'), ((5610, 5641), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5614, 5641), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5652, 5688), 'numpy.array', 'np.array', (['((1.01, 2.0), (5.0, 10.0))'], {}), '(((1.01, 2.0), (5.0, 10.0)))\n', (5660, 5688), True, 'import numpy as np\n'), ((5772, 5803), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5776, 5803), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5814, 5845), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5818, 5845), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5909, 5940), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (5913, 5940), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((5951, 5981), 'geometry_utils.bound_box.BBox', 'BBox', (['((3.0, 4.0), (3.0, 4.0))'], {}), '(((3.0, 4.0), (3.0, 4.0)))\n', (5955, 5981), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6054, 6085), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (6058, 6085), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6096, 6127), 'geometry_utils.bound_box.BBox', 'BBox', (['((-3.0, 4.0), (0.1, 4.0))'], {}), '(((-3.0, 4.0), (0.1, 4.0)))\n', (6100, 6127), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6202, 6227), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (6206, 6227), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6240, 6267), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 12), (10, 32.0))'], {}), '(((0, 12), (10, 32.0)))\n', (6244, 6267), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6342, 6367), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (6346, 6367), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6380, 6408), 'geometry_utils.bound_box.BBox', 'BBox', (['((12, 12), (25, 32.0))'], {}), '(((12, 12), (25, 32.0)))\n', (6384, 6408), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6483, 6508), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (6487, 6508), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6521, 6546), 'geometry_utils.bound_box.BBox', 'BBox', (['((12, 5), (25, 15))'], {}), '(((12, 5), (25, 15)))\n', (6525, 6546), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6620, 6645), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 
25)))\n', (6624, 6645), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6658, 6685), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10, 5), (8.5, 15))'], {}), '(((-10, 5), (8.5, 15)))\n', (6662, 6685), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6757, 6782), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (6761, 6782), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6795, 6823), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10, 5), (8.5, 9.2))'], {}), '(((-10, 5), (8.5, 9.2)))\n', (6799, 6823), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6895, 6920), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (6899, 6920), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((6933, 6965), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10, 25.001), (8.5, 32))'], {}), '(((-10, 25.001), (8.5, 32)))\n', (6937, 6965), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7036, 7061), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7040, 7061), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7074, 7100), 'geometry_utils.bound_box.BBox', 'BBox', (['((4, 8), (4.95, 32))'], {}), '(((4, 8), (4.95, 32)))\n', (7078, 7100), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7172, 7197), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7176, 7197), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7210, 7240), 'geometry_utils.bound_box.BBox', 'BBox', (['((17.1, 8), (17.95, 32))'], {}), '(((17.1, 8), (17.95, 32)))\n', (7214, 7240), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7341, 7372), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (7345, 7372), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7470, 7495), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7474, 7495), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7597, 7622), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7601, 7622), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7725, 7750), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7729, 7750), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7851, 7876), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7855, 7876), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((7974, 7999), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), 
(15, 25))'], {}), '(((5, 10), (15, 25)))\n', (7978, 7999), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8096, 8121), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (8100, 8121), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8222, 8247), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (8226, 8247), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8344, 8369), 'geometry_utils.bound_box.BBox', 'BBox', (['((5, 10), (15, 25))'], {}), '(((5, 10), (15, 25)))\n', (8348, 8369), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8480, 8511), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (8484, 8511), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8616, 8647), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (8620, 8647), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8758, 8789), 'geometry_utils.bound_box.BBox', 'BBox', (['((1.0, 2.0), (5.0, 10.0))'], {}), '(((1.0, 2.0), (5.0, 10.0)))\n', (8762, 8789), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((8888, 8924), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10.0, -10.0), (-1.0, -1.0))'], {}), '(((-10.0, -10.0), (-1.0, -1.0)))\n', (8892, 8924), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((9025, 9061), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10.0, -10.0), (-1.0, -1.0))'], {}), '(((-10.0, -10.0), (-1.0, -1.0)))\n', (9029, 9061), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((9167, 9203), 'geometry_utils.bound_box.BBox', 'BBox', (['((-10.0, -10.0), (-1.0, -1.0))'], {}), '(((-10.0, -10.0), (-1.0, -1.0)))\n', (9171, 9203), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((9328, 9374), 'numpy.array', 'np.array', (['((5, 2), (3, 4), (1, 6))', 'np.float64'], {}), '(((5, 2), (3, 4), (1, 6)), np.float64)\n', (9336, 9374), True, 'import numpy as np\n'), ((9387, 9403), 'geometry_utils.bound_box.from_points', 'from_points', (['Pts'], {}), '(Pts)\n', (9398, 9403), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((9587, 9621), 'numpy.array', 'np.array', (['((5, 2), (3, 4), (1, 6))'], {}), '(((5, 2), (3, 4), (1, 6)))\n', (9595, 9621), True, 'import numpy as np\n'), ((9634, 9650), 'geometry_utils.bound_box.from_points', 'from_points', (['Pts'], {}), '(Pts)\n', (9645, 9650), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((9834, 9861), 'numpy.array', 'np.array', (['(5, 2)', 'np.float_'], {}), '((5, 2), np.float_)\n', (9842, 9861), True, 'import numpy as np\n'), ((9874, 9890), 'geometry_utils.bound_box.from_points', 'from_points', (['Pts'], {}), '(Pts)\n', (9885, 9890), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((10164, 10180), 
'geometry_utils.bound_box.from_points', 'from_points', (['Pts'], {}), '(Pts)\n', (10175, 10180), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((12009, 12034), 'geometry_utils.bound_box.fromBBArray', 'fromBBArray', (['self.BBarray'], {}), '(self.BBarray)\n', (12020, 12034), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((883, 908), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (896, 908), False, 'import pytest\n'), ((922, 937), 'geometry_utils.bound_box.BBox', 'BBox', (['(0, 0, 5)'], {}), '((0, 0, 5))\n', (926, 937), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((978, 1003), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (991, 1003), False, 'import pytest\n'), ((1017, 1038), 'geometry_utils.bound_box.BBox', 'BBox', (['(0, 0, 5, 6, 7)'], {}), '((0, 0, 5, 6, 7))\n', (1021, 1038), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1224, 1249), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1237, 1249), False, 'import pytest\n'), ((1263, 1282), 'geometry_utils.bound_box.BBox', 'BBox', (['(0, 0, -1, 6)'], {}), '((0, 0, -1, 6))\n', (1267, 1282), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1324, 1349), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1337, 1349), False, 'import pytest\n'), ((1363, 1382), 'geometry_utils.bound_box.BBox', 'BBox', (['(0, 0, 1, -6)'], {}), '((0, 0, 1, -6))\n', (1367, 1382), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((1877, 1902), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1890, 1902), False, 'import pytest\n'), ((1916, 1943), 'geometry_utils.bound_box.BBox', 'BBox', (['((0, 0), (-1e-20, 5))'], {}), '(((0, 0), (-1e-20, 5)))\n', (1920, 1943), False, 'from geometry_utils.bound_box import BBox, asBBox, NullBBox, InfBBox, fromBBArray, from_points\n'), ((11294, 11323), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (11307, 11323), False, 'import pytest\n'), ((11393, 11422), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (11406, 11422), False, 'import pytest\n'), ((11637, 11666), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (11650, 11666), False, 'import pytest\n'), ((12222, 12239), 'numpy.isnan', 'np.isnan', (['self.B1'], {}), '(self.B1)\n', (12230, 12239), True, 'import numpy as np\n'), ((12980, 12997), 'numpy.isinf', 'np.isinf', (['self.B1'], {}), '(self.B1)\n', (12988, 12997), True, 'import numpy as np\n')]
|
import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import math
from functools import wraps
def clip(img, dtype, maxval):
return np.clip(img, 0, maxval).astype(dtype)
def clipped(func):
"""
    Wrapper that clips the result of a transform back to the input image's dtype value range.
"""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
dtype, maxval = img.dtype, np.max(img)
return clip(func(img, *args, **kwargs), dtype, maxval)
return wrapped_function
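# Illustrative usage of the decorator (a hedged sketch, not part of the original
# module): any per-pixel transform can be wrapped so its result is clipped back
# to the input's dtype and value range, e.g.
#
#     @clipped
#     def gain(img, alpha):
#         return img * alpha   # may exceed the valid range; the wrapper clips it
#
# Note that the upper bound used by the wrapper is np.max() of the *input* image.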
def fix_shift_values(img, *args):
"""
    Shift values are normally specified for uint8 images; if your data is float, the shift values need to be remapped to the [0, 1] range accordingly.
"""
if img.dtype == np.float32:
return list(map(lambda x: x / 255, args))
return args
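# Example (illustrative, not from the original file): for a float32 image,
# fix_shift_values(img, 10, 20) returns [10/255, 20/255]; for uint8 input the
# shift values (10, 20) are passed through unchanged.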
def vflip(img):
return cv2.flip(img, 0)
def hflip(img):
return cv2.flip(img, 1)
def flip(img, code):
return cv2.flip(img, code)
def transpose(img):
return img.transpose(1, 0, 2) if len(img.shape) > 2 else img.transpose(1, 0)
def rot90(img, times):
img = np.rot90(img, times)
return np.ascontiguousarray(img)
def rotate(img, angle):
"""
    Rotate the image by the specified angle about its centre.
:param angle: angle in degrees
"""
height, width = img.shape[0:2]
mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
img = cv2.warpAffine(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
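# Illustrative call (hedged example, not part of the original module): rotating
# a dummy RGB image by 15 degrees about its centre keeps the input shape and
# reflects the borders (BORDER_REFLECT_101), e.g.
#
#     img = np.zeros((100, 200, 3), dtype=np.uint8)
#     out = rotate(img, 15)   # out.shape == (100, 200, 3)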
def shift_scale_rotate(img, angle, scale, dx, dy):
"""
:param angle: in degrees
:param scale: relative scale
"""
height, width = img.shape[:2]
cc = math.cos(angle/180*math.pi) * scale
ss = math.sin(angle/180*math.pi) * scale
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width/2, height/2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
img = cv2.warpPerspective(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
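# Hedged usage sketch (not part of the original code): rotate by 10 degrees,
# zoom in by 10% and shift by 5% of the width/height; the output keeps the
# input size because warpPerspective is given the original (width, height):
#
#     out = shift_scale_rotate(img, angle=10, scale=1.1, dx=0.05, dy=0.05)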
def center_crop(img, height, width):
h, w, c = img.shape
dy = (h-height)//2
dx = (w-width)//2
y1 = dy
y2 = y1 + height
x1 = dx
x2 = x1 + width
img = img[y1:y2, x1:x2, :]
return img
def shift_hsv(img, hue_shift, sat_shift, val_shift):
dtype = img.dtype
maxval = np.max(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
h, s, v = cv2.split(img)
h = cv2.add(h, hue_shift)
h = np.where(h < 0, maxval - h, h)
h = np.where(h > maxval, h - maxval, h)
h = h.astype(dtype)
s = clip(cv2.add(s, sat_shift), dtype, maxval)
v = clip(cv2.add(v, val_shift), dtype, maxval)
img = cv2.merge((h, s, v)).astype(dtype)
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
return img
def shift_channels(img, r_shift, g_shift, b_shift):
img[...,0] = clip(img[...,0] + r_shift, np.uint8, 255)
img[...,1] = clip(img[...,1] + g_shift, np.uint8, 255)
img[...,2] = clip(img[...,2] + b_shift, np.uint8, 255)
return img
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
return img_output
def blur(img, ksize):
return cv2.blur(img, (ksize, ksize))
def invert(img):
return 255 - img
def channel_shuffle(img):
ch_arr = [0, 1, 2]
np.random.shuffle(ch_arr)
img = img[..., ch_arr]
return img
def img_to_tensor(im, verbose=False):
'''AVE edit'''
im_out = np.moveaxis(im / (255. if im.dtype == np.uint8 else 1), -1, 0).astype(np.float32)
if verbose:
        print("augmentations.functional.py.img_to_tensor(): im_out.shape:", im_out.shape)
        print("im_out.unique:", np.unique(im_out))
return im_out
def mask_to_tensor(mask, num_classes, verbose=False):
'''AVE edit'''
if num_classes > 1:
mask = img_to_tensor(mask)
else:
mask = np.expand_dims(mask / (255. if mask.dtype == np.uint8 else 1), 0).astype(np.float32)
if verbose:
        print("augmentations.functional.py.mask_to_tensor(): mask.shape:", mask.shape)
        print("mask.unique:", np.unique(mask))
return mask
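# Shape summary (an explanatory note added here, derived from the code above):
# img_to_tensor maps a uint8 (H, W, C) image to a float32 (C, H, W) array in
# [0, 1]; mask_to_tensor with num_classes == 1 maps an (H, W) mask to (1, H, W),
# while num_classes > 1 expects a channel-last (H, W, C) mask and reuses img_to_tensor.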
|
[
"numpy.clip",
"numpy.ascontiguousarray",
"math.cos",
"numpy.array",
"cv2.warpPerspective",
"numpy.rot90",
"numpy.moveaxis",
"cv2.ocl.setUseOpenCL",
"numpy.where",
"functools.wraps",
"numpy.max",
"numpy.dot",
"cv2.blur",
"cv2.add",
"cv2.merge",
"cv2.warpAffine",
"cv2.getPerspectiveTransform",
"cv2.split",
"cv2.cvtColor",
"cv2.getRotationMatrix2D",
"cv2.setNumThreads",
"cv2.flip",
"numpy.unique",
"cv2.createCLAHE",
"numpy.expand_dims",
"math.sin",
"numpy.random.shuffle"
] |
[((12, 32), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (29, 32), False, 'import cv2\n'), ((34, 61), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (54, 61), False, 'import cv2\n'), ((326, 337), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (331, 337), False, 'from functools import wraps\n'), ((822, 838), 'cv2.flip', 'cv2.flip', (['img', '(0)'], {}), '(img, 0)\n', (830, 838), False, 'import cv2\n'), ((872, 888), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (880, 888), False, 'import cv2\n'), ((927, 946), 'cv2.flip', 'cv2.flip', (['img', 'code'], {}), '(img, code)\n', (935, 946), False, 'import cv2\n'), ((1093, 1113), 'numpy.rot90', 'np.rot90', (['img', 'times'], {}), '(img, times)\n', (1101, 1113), True, 'import numpy as np\n'), ((1126, 1151), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1146, 1151), True, 'import numpy as np\n'), ((1319, 1379), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(width / 2, height / 2)', 'angle', '(1.0)'], {}), '((width / 2, height / 2), angle, 1.0)\n', (1342, 1379), False, 'import cv2\n'), ((1387, 1491), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'mat', '(width, height)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_REFLECT_101'}), '(img, mat, (width, height), flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_REFLECT_101)\n', (1401, 1491), False, 'import cv2\n'), ((1844, 1875), 'numpy.array', 'np.array', (['[[cc, -ss], [ss, cc]]'], {}), '([[cc, -ss], [ss, cc]])\n', (1852, 1875), True, 'import numpy as np\n'), ((1890, 1950), 'numpy.array', 'np.array', (['[[0, 0], [width, 0], [width, height], [0, height]]'], {}), '([[0, 0], [width, 0], [width, height], [0, height]])\n', (1898, 1950), True, 'import numpy as np\n'), ((2181, 2220), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['box0', 'box1'], {}), '(box0, box1)\n', (2208, 2220), False, 'import cv2\n'), ((2232, 2341), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'mat', '(width, height)'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_REFLECT_101'}), '(img, mat, (width, height), flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_REFLECT_101)\n', (2251, 2341), False, 'import cv2\n'), ((2744, 2755), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2750, 2755), True, 'import numpy as np\n'), ((2836, 2850), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (2845, 2850), False, 'import cv2\n'), ((2860, 2881), 'cv2.add', 'cv2.add', (['h', 'hue_shift'], {}), '(h, hue_shift)\n', (2867, 2881), False, 'import cv2\n'), ((2891, 2921), 'numpy.where', 'np.where', (['(h < 0)', '(maxval - h)', 'h'], {}), '(h < 0, maxval - h, h)\n', (2899, 2921), True, 'import numpy as np\n'), ((2931, 2966), 'numpy.where', 'np.where', (['(h > maxval)', '(h - maxval)', 'h'], {}), '(h > maxval, h - maxval, h)\n', (2939, 2966), True, 'import numpy as np\n'), ((3153, 3189), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_HSV2RGB'], {}), '(img, cv2.COLOR_HSV2RGB)\n', (3165, 3189), False, 'import cv2\n'), ((3530, 3566), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2LAB'], {}), '(img, cv2.COLOR_RGB2LAB)\n', (3542, 3566), False, 'import cv2\n'), ((3580, 3643), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': 'clipLimit', 'tileGridSize': 'tileGridSize'}), '(clipLimit=clipLimit, tileGridSize=tileGridSize)\n', (3595, 3643), False, 'import cv2\n'), ((3716, 3756), 'cv2.cvtColor', 'cv2.cvtColor', (['img_yuv', 'cv2.COLOR_LAB2RGB'], {}), '(img_yuv, cv2.COLOR_LAB2RGB)\n', 
(3728, 3756), False, 'import cv2\n'), ((3819, 3848), 'cv2.blur', 'cv2.blur', (['img', '(ksize, ksize)'], {}), '(img, (ksize, ksize))\n', (3827, 3848), False, 'import cv2\n'), ((3953, 3978), 'numpy.random.shuffle', 'np.random.shuffle', (['ch_arr'], {}), '(ch_arr)\n', (3970, 3978), True, 'import numpy as np\n'), ((1741, 1772), 'math.cos', 'math.cos', (['(angle / 180 * math.pi)'], {}), '(angle / 180 * math.pi)\n', (1749, 1772), False, 'import math\n'), ((1787, 1818), 'math.sin', 'math.sin', (['(angle / 180 * math.pi)'], {}), '(angle / 180 * math.pi)\n', (1795, 1818), False, 'import math\n'), ((1973, 2006), 'numpy.array', 'np.array', (['[width / 2, height / 2]'], {}), '([width / 2, height / 2])\n', (1981, 2006), True, 'import numpy as np\n'), ((2015, 2044), 'numpy.dot', 'np.dot', (['box1', 'rotate_matrix.T'], {}), '(box1, rotate_matrix.T)\n', (2021, 2044), True, 'import numpy as np\n'), ((2047, 2107), 'numpy.array', 'np.array', (['[width / 2 + dx * width, height / 2 + dy * height]'], {}), '([width / 2 + dx * width, height / 2 + dy * height])\n', (2055, 2107), True, 'import numpy as np\n'), ((3006, 3027), 'cv2.add', 'cv2.add', (['s', 'sat_shift'], {}), '(s, sat_shift)\n', (3013, 3027), False, 'import cv2\n'), ((3058, 3079), 'cv2.add', 'cv2.add', (['v', 'val_shift'], {}), '(v, val_shift)\n', (3065, 3079), False, 'import cv2\n'), ((171, 194), 'numpy.clip', 'np.clip', (['img', '(0)', 'maxval'], {}), '(img, 0, maxval)\n', (178, 194), True, 'import numpy as np\n'), ((423, 434), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (429, 434), True, 'import numpy as np\n'), ((2767, 2803), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (2779, 2803), False, 'import cv2\n'), ((3107, 3127), 'cv2.merge', 'cv2.merge', (['(h, s, v)'], {}), '((h, s, v))\n', (3116, 3127), False, 'import cv2\n'), ((4100, 4163), 'numpy.moveaxis', 'np.moveaxis', (['(im / (255.0 if im.dtype == np.uint8 else 1))', '(-1)', '(0)'], {}), '(im / (255.0 if im.dtype == np.uint8 else 1), -1, 0)\n', (4111, 4163), True, 'import numpy as np\n'), ((4324, 4341), 'numpy.unique', 'np.unique', (['im_out'], {}), '(im_out)\n', (4333, 4341), True, 'import numpy as np\n'), ((4750, 4765), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (4759, 4765), True, 'import numpy as np\n'), ((4529, 4595), 'numpy.expand_dims', 'np.expand_dims', (['(mask / (255.0 if mask.dtype == np.uint8 else 1))', '(0)'], {}), '(mask / (255.0 if mask.dtype == np.uint8 else 1), 0)\n', (4543, 4595), True, 'import numpy as np\n')]
|
import numpy as np
def get_conf_thresholded(conf, thresh_log_conf, dtype_np):
"""Normalizes a confidence score to (0..1).
Args:
conf (float):
Unnormalized confidence.
        thresh_log_conf (float):
            Confidence threshold parameter; not used by this implementation.
        dtype_np (type):
Desired return type.
Returns:
confidence (np.float32):
Normalized joint confidence.
"""
# 1. / (1. + np.exp(-5000. * conf + 5))
# https://www.desmos.com/calculator/olqbvoffua
# + 9.5: 0.0019 => 0.5
# + 5 : 0.0010 => 0.5
# + 6.5: 0.0013 => 0.5
return np.where(
conf < dtype_np(0.),
dtype_np(0.),
dtype_np(1.) /
(dtype_np(1.) + np.exp(dtype_np(-5000.) * conf + dtype_np(9.5)))
).astype(dtype_np)
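# Worked example (illustrative, not part of the original source): with the
# "+ 9.5" offset used above, a raw confidence of 0.0019 maps to
#     1 / (1 + exp(-5000 * 0.0019 + 9.5)) = 1 / (1 + exp(0)) = 0.5,
# and any negative confidence is clamped to 0 by the np.where branch.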
def get_confs(query_2d_full, frame_id, thresh_log_conf, mx_conf, dtype_np):
"""
Args:
query_2d_full (stealth.logic.skeleton.Skeleton):
Skeleton with confidences.
frame_id (int):
Frame id.
Returns:
confs (List[float]):
Confidences at frame_id.
"""
confs = np.zeros(query_2d_full.poses.shape[-1],
dtype=dtype_np)
is_normalized = query_2d_full.is_confidence_normalized()
if query_2d_full.has_confidence(frame_id):
for joint, conf in query_2d_full.confidence[frame_id].items():
cnf = dtype_np(conf) \
if is_normalized \
else get_conf_thresholded(conf, thresh_log_conf, dtype_np)
if mx_conf is not None and mx_conf < cnf:
mx_conf = dtype_np(cnf)
confs[joint] = dtype_np(cnf)
if mx_conf is None:
return confs
else:
assert isinstance(mx_conf, dtype_np)
return confs, mx_conf
|
[
"numpy.zeros"
] |
[((1052, 1107), 'numpy.zeros', 'np.zeros', (['query_2d_full.poses.shape[-1]'], {'dtype': 'dtype_np'}), '(query_2d_full.poses.shape[-1], dtype=dtype_np)\n', (1060, 1107), True, 'import numpy as np\n')]
|
# encoding: utf-8
import datetime
import numpy as np
import pandas as pd
def get_next_period_day(current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
current_dt = convert_int_to_datetime(current)
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
# offset = offsets.Day
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
# offset = offsets.MonthBegin
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
next_dt = current_dt + offset
if extra_offset:
next_dt = next_dt + extra_offset * pd.tseries.offsets.BDay()
nxt = convert_datetime_to_int(next_dt)
return nxt
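# Illustrative call (hedged example; the date is made up): starting from
# 20180228 with period='month' and n=1, the BMonthBegin offset moves to the
# first business day of March, so the call should return 20180301.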
def convert_int_to_datetime(dt):
"""Convert int date (%Y%m%d) to datetime.datetime object."""
if isinstance(dt, pd.Series):
dt = dt.astype(str)
elif isinstance(dt, int):
dt = str(dt)
return pd.to_datetime(dt, format="%Y%m%d")
def convert_datetime_to_int(dt):
f = lambda x: x.year * 10000 + x.month * 100 + x.day
if isinstance(dt, (datetime.datetime, datetime.date)):
dt = pd.Timestamp(dt)
res = f(dt)
elif isinstance(dt, np.datetime64):
dt = pd.Timestamp(dt)
res = f(dt)
else:
dt = pd.Series(dt)
res = dt.apply(f)
return res
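# Round-trip example (illustrative, not from the original file):
#     convert_int_to_datetime(20170825)                    -> Timestamp('2017-08-25')
#     convert_datetime_to_int(pd.Timestamp('2017-08-25'))  -> 20170825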
def shift(date, n_weeks=0):
    """Shift date backward or forward by n weeks.
Parameters
----------
date : int or datetime
The date to be shifted.
n_weeks : int, optional
Positive for increasing date, negative for decreasing date.
Default 0 (no shift).
Returns
-------
res : int or datetime
"""
delta = pd.Timedelta(weeks=n_weeks)
is_int = isinstance(date, (int, np.integer))
if is_int:
dt = convert_int_to_datetime(date)
else:
dt = date
res = dt + delta
if is_int:
res = convert_datetime_to_int(res)
return res
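# Example (hedged, not part of the original source): shifting an int date
# forward by one week,
#     shift(20170825, n_weeks=1)  -> 20170901
# since 2017-08-25 plus 7 days is 2017-09-01; the int form is restored on return.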
def combine_date_time(date, time):
return np.int64(date) * 1000000 + np.int64(time)
def split_date_time(dt):
date = dt // 1000000
time = dt % 1000000
return date, time
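# Worked example (illustrative, not part of the original file):
#     combine_date_time(20170825, 93000)  -> 20170825093000
#     split_date_time(20170825093000)     -> (20170825, 93000)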
def date_to_month(ser):
# ser = pd.Series(ser)
res = ser % 10000 // 100
MONTH_MAP = {1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec'}
# res = res.replace(MONTH_MAP)
return res
def date_to_year(ser):
return ser // 10000
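# Worked example (illustrative, not part of the original file): for ser = 20170825,
#     date_to_month: 20170825 % 10000 = 825, then 825 // 100 = 8   (August)
#     date_to_year:  20170825 // 10000 = 2017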
|
[
"pandas.Series",
"numpy.int64",
"pandas.Timedelta",
"pandas.tseries.offsets.BMonthBegin",
"pandas.tseries.offsets.Week",
"pandas.tseries.offsets.BDay",
"pandas.Timestamp",
"pandas.to_datetime"
] |
[((1491, 1526), 'pandas.to_datetime', 'pd.to_datetime', (['dt'], {'format': '"""%Y%m%d"""'}), "(dt, format='%Y%m%d')\n", (1505, 1526), True, 'import pandas as pd\n'), ((2277, 2304), 'pandas.Timedelta', 'pd.Timedelta', ([], {'weeks': 'n_weeks'}), '(weeks=n_weeks)\n', (2289, 2304), True, 'import pandas as pd\n'), ((618, 643), 'pandas.tseries.offsets.BDay', 'pd.tseries.offsets.BDay', ([], {}), '()\n', (641, 643), True, 'import pandas as pd\n'), ((1691, 1707), 'pandas.Timestamp', 'pd.Timestamp', (['dt'], {}), '(dt)\n', (1703, 1707), True, 'import pandas as pd\n'), ((2614, 2628), 'numpy.int64', 'np.int64', (['time'], {}), '(time)\n', (2622, 2628), True, 'import numpy as np\n'), ((748, 782), 'pandas.tseries.offsets.Week', 'pd.tseries.offsets.Week', ([], {'weekday': '(0)'}), '(weekday=0)\n', (771, 782), True, 'import pandas as pd\n'), ((1781, 1797), 'pandas.Timestamp', 'pd.Timestamp', (['dt'], {}), '(dt)\n', (1793, 1797), True, 'import pandas as pd\n'), ((1841, 1854), 'pandas.Series', 'pd.Series', (['dt'], {}), '(dt)\n', (1850, 1854), True, 'import pandas as pd\n'), ((2587, 2601), 'numpy.int64', 'np.int64', (['date'], {}), '(date)\n', (2595, 2601), True, 'import numpy as np\n'), ((851, 883), 'pandas.tseries.offsets.BMonthBegin', 'pd.tseries.offsets.BMonthBegin', ([], {}), '()\n', (881, 883), True, 'import pandas as pd\n'), ((1183, 1208), 'pandas.tseries.offsets.BDay', 'pd.tseries.offsets.BDay', ([], {}), '()\n', (1206, 1208), True, 'import pandas as pd\n')]
|
import deepchem as dc
import numpy as np
import tensorflow as tf
import deepchem.models.tensorgraph.layers as layers
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
class TestLayersEager(test_util.TensorFlowTestCase):
"""
Test that layers function in eager mode.
"""
def test_conv_1d(self):
"""Test invoking Conv1D in eager mode."""
with context.eager_mode():
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, width, in_channels).astype(np.float32)
layer = layers.Conv1D(filters, kernel_size)
result = layer(input)
self.assertEqual(result.shape[0], batch_size)
self.assertEqual(result.shape[2], filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv1D(filters, kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_dense(self):
"""Test invoking Dense in eager mode."""
with context.eager_mode():
in_dim = 2
out_dim = 3
batch_size = 10
input = np.random.rand(batch_size, in_dim).astype(np.float32)
layer = layers.Dense(out_dim)
result = layer(input)
assert result.shape == (batch_size, out_dim)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Dense(out_dim)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_highway(self):
"""Test invoking Highway in eager mode."""
with context.eager_mode():
width = 5
batch_size = 10
input = np.random.rand(batch_size, width).astype(np.float32)
layer = layers.Highway()
result = layer(input)
assert result.shape == (batch_size, width)
assert len(layer.trainable_variables) == 4
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Highway()
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_flatten(self):
"""Test invoking Flatten in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Flatten()(input)
assert result.shape == (5, 40)
def test_reshape(self):
"""Test invoking Reshape in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Reshape((100, 2))(input)
assert result.shape == (100, 2)
def test_cast(self):
"""Test invoking Cast in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 3)
result = layers.Cast(dtype=tf.float32)(input)
assert result.dtype == tf.float32
def test_squeeze(self):
"""Test invoking Squeeze in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 1, 4).astype(np.float32)
result = layers.Squeeze()(input)
assert result.shape == (5, 4)
def test_transpose(self):
"""Test invoking Transpose in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10, 4).astype(np.float32)
result = layers.Transpose((1, 2, 0))(input)
assert result.shape == (10, 4, 5)
def test_combine_mean_std(self):
"""Test invoking CombineMeanStd in eager mode."""
with context.eager_mode():
mean = np.random.rand(5, 3).astype(np.float32)
std = np.random.rand(5, 3).astype(np.float32)
layer = layers.CombineMeanStd(training_only=True, noise_epsilon=0.01)
result1 = layer(mean, std, training=False)
assert np.array_equal(result1, mean) # No noise in test mode
result2 = layer(mean, std, training=True)
assert not np.array_equal(result2, mean)
assert np.allclose(result2, mean, atol=0.1)
def test_repeat(self):
"""Test invoking Repeat in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 4).astype(np.float32)
result = layers.Repeat(3)(input)
assert result.shape == (5, 3, 4)
assert np.array_equal(result[:, 0, :], result[:, 1, :])
def test_gather(self):
"""Test invoking Gather in eager mode."""
with context.eager_mode():
input = np.random.rand(5).astype(np.float32)
indices = [[1], [3]]
result = layers.Gather()(input, indices)
assert np.array_equal(result, [input[1], input[3]])
def test_gru(self):
"""Test invoking GRU in eager mode."""
with context.eager_mode():
batch_size = 10
n_hidden = 7
in_channels = 4
n_steps = 6
input = np.random.rand(batch_size, n_steps,
in_channels).astype(np.float32)
layer = layers.GRU(n_hidden, batch_size)
result, state = layer(input)
assert result.shape == (batch_size, n_steps, n_hidden)
assert len(layer.trainable_variables) == 3
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GRU(n_hidden, batch_size)
result2, state2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3, state3 = layer(input)
assert np.allclose(result, result3)
# But if we specify a different starting state, that should produce a
# different result.
result4, state4 = layer(input, initial_state=state3)
assert not np.allclose(result, result4)
def test_lstm(self):
"""Test invoking LSTM in eager mode."""
with context.eager_mode():
batch_size = 10
n_hidden = 7
in_channels = 4
n_steps = 6
input = np.random.rand(batch_size, n_steps,
in_channels).astype(np.float32)
layer = layers.LSTM(n_hidden, batch_size)
result, state = layer(input)
assert result.shape == (batch_size, n_steps, n_hidden)
assert len(layer.trainable_variables) == 3
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.LSTM(n_hidden, batch_size)
result2, state2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3, state3 = layer(input)
assert np.allclose(result, result3)
# But if we specify a different starting state, that should produce a
# different result.
result4, state4 = layer(input, initial_state=state3)
assert not np.allclose(result, result4)
def test_time_series_dense(self):
"""Test invoking TimeSeriesDense in eager mode."""
with context.eager_mode():
in_dim = 2
out_dim = 3
n_steps = 6
batch_size = 10
input = np.random.rand(batch_size, n_steps, in_dim).astype(np.float32)
layer = layers.TimeSeriesDense(out_dim)
result = layer(input)
assert result.shape == (batch_size, n_steps, out_dim)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.TimeSeriesDense(out_dim)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_l1_loss(self):
"""Test invoking L1Loss in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.L1Loss()(input1, input2)
expected = np.mean(np.abs(input1 - input2), axis=1)
assert np.allclose(result, expected)
def test_l2_loss(self):
"""Test invoking L2Loss in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.L2Loss()(input1, input2)
expected = np.mean((input1 - input2)**2, axis=1)
assert np.allclose(result, expected)
def test_softmax(self):
"""Test invoking SoftMax in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.SoftMax()(input)
expected = tf.nn.softmax(input)
assert np.allclose(result, expected)
def test_sigmoid(self):
"""Test invoking Sigmoid in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.Sigmoid()(input)
expected = tf.nn.sigmoid(input)
assert np.allclose(result, expected)
def test_relu(self):
"""Test invoking ReLU in eager mode."""
with context.eager_mode():
input = np.random.normal(size=(5, 10)).astype(np.float32)
result = layers.ReLU()(input)
expected = tf.nn.relu(input)
assert np.allclose(result, expected)
def test_concat(self):
"""Test invoking Concat in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Concat()(input1, input2)
assert result.shape == (5, 14)
assert np.array_equal(input1, result[:, :10])
assert np.array_equal(input2, result[:, 10:])
def test_stack(self):
"""Test invoking Stack in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 4).astype(np.float32)
input2 = np.random.rand(5, 4).astype(np.float32)
result = layers.Stack()(input1, input2)
assert result.shape == (5, 2, 4)
assert np.array_equal(input1, result[:, 0, :])
assert np.array_equal(input2, result[:, 1, :])
def test_constant(self):
"""Test invoking Constant in eager mode."""
with context.eager_mode():
value = np.random.rand(5, 4).astype(np.float32)
result = layers.Constant(value)()
assert np.array_equal(result, value)
def test_variable(self):
"""Test invoking Variable in eager mode."""
with context.eager_mode():
value = np.random.rand(5, 4).astype(np.float32)
layer = layers.Variable(value)
result = layer()
assert np.array_equal(result.numpy(), value)
assert len(layer.trainable_variables) == 1
def test_add(self):
"""Test invoking Add in eager mode."""
with context.eager_mode():
result = layers.Add()([1, 2], [3, 4])
assert np.array_equal(result, [4, 6])
def test_multiply(self):
"""Test invoking Multiply in eager mode."""
with context.eager_mode():
result = layers.Multiply()([1, 2], [3, 4])
assert np.array_equal(result, [3, 8])
def test_divide(self):
"""Test invoking Divide in eager mode."""
with context.eager_mode():
result = layers.Divide()([1, 2], [2, 5])
assert np.allclose(result, [0.5, 0.4])
def test_log(self):
"""Test invoking Log in eager mode."""
with context.eager_mode():
result = layers.Log()(2.5)
assert np.allclose(result, np.log(2.5))
def test_exp(self):
"""Test invoking Exp in eager mode."""
with context.eager_mode():
result = layers.Exp()(2.5)
assert np.allclose(result, np.exp(2.5))
def test_interatomic_l2_distances(self):
"""Test invoking InteratomicL2Distances in eager mode."""
with context.eager_mode():
atoms = 5
neighbors = 2
coords = np.random.rand(atoms, 3)
neighbor_list = np.random.randint(0, atoms, size=(atoms, neighbors))
layer = layers.InteratomicL2Distances(atoms, neighbors, 3)
result = layer(coords, neighbor_list)
assert result.shape == (atoms, neighbors)
for atom in range(atoms):
for neighbor in range(neighbors):
delta = coords[atom] - coords[neighbor_list[atom, neighbor]]
dist2 = np.dot(delta, delta)
assert np.allclose(dist2, result[atom, neighbor])
def test_sparse_softmax_cross_entropy(self):
"""Test invoking SparseSoftMaxCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.rand(batch_size).astype(np.int32)
result = layers.SparseSoftMaxCrossEntropy()(labels, logits)
expected = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_softmax_cross_entropy(self):
"""Test invoking SoftMaxCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.rand(batch_size, n_features).astype(np.float32)
result = layers.SoftMaxCrossEntropy()(labels, logits)
expected = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_sigmoid_cross_entropy(self):
"""Test invoking SigmoidCrossEntropy in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
logits = np.random.rand(batch_size, n_features).astype(np.float32)
labels = np.random.randint(0, 2,
(batch_size, n_features)).astype(np.float32)
result = layers.SigmoidCrossEntropy()(labels, logits)
expected = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits)
assert np.allclose(result, expected)
def test_reduce_mean(self):
"""Test invoking ReduceMean in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceMean(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.mean(input, axis=1))
def test_reduce_max(self):
"""Test invoking ReduceMax in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceMax(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.max(input, axis=1))
def test_reduce_sum(self):
"""Test invoking ReduceSum in eager mode."""
with context.eager_mode():
input = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceSum(axis=1)(input)
assert result.shape == (5,)
assert np.allclose(result, np.sum(input, axis=1))
def test_reduce_square_difference(self):
"""Test invoking ReduceSquareDifference in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.ReduceSquareDifference(axis=1)(input1, input2)
assert result.shape == (5,)
assert np.allclose(result, np.mean((input1 - input2)**2, axis=1))
def test_conv_2d(self):
"""Test invoking Conv2D in eager mode."""
with context.eager_mode():
length = 4
width = 5
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, length, width,
in_channels).astype(np.float32)
layer = layers.Conv2D(filters, kernel_size=kernel_size)
result = layer(input)
assert result.shape == (batch_size, length, width, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv2D(filters, kernel_size=kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_3d(self):
"""Test invoking Conv3D in eager mode."""
with context.eager_mode():
length = 4
width = 5
depth = 6
in_channels = 2
filters = 3
kernel_size = 2
batch_size = 10
input = np.random.rand(batch_size, length, width, depth,
in_channels).astype(np.float32)
layer = layers.Conv3D(filters, kernel_size=kernel_size)
result = layer(input)
assert result.shape == (batch_size, length, width, depth, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv3D(filters, kernel_size=kernel_size)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_2d_transpose(self):
"""Test invoking Conv2DTranspose in eager mode."""
with context.eager_mode():
length = 4
width = 5
in_channels = 2
filters = 3
kernel_size = 2
stride = 2
batch_size = 10
input = np.random.rand(batch_size, length, width,
in_channels).astype(np.float32)
layer = layers.Conv2DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result = layer(input)
assert result.shape == (batch_size, length * stride, width * stride,
filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv2DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_conv_3d_transpose(self):
"""Test invoking Conv3DTranspose in eager mode."""
with context.eager_mode():
length = 4
width = 5
depth = 6
in_channels = 2
filters = 3
kernel_size = 2
stride = 2
batch_size = 10
input = np.random.rand(batch_size, length, width, depth,
in_channels).astype(np.float32)
layer = layers.Conv3DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result = layer(input)
assert result.shape == (batch_size, length * stride, width * stride,
depth * stride, filters)
assert len(layer.trainable_variables) == 2
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.Conv3DTranspose(
filters, kernel_size=kernel_size, stride=stride)
result2 = layer2(input)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input)
assert np.allclose(result, result3)
def test_max_pool_1d(self):
"""Test invoking MaxPool1D in eager mode."""
with context.eager_mode():
input = np.random.rand(4, 6, 8).astype(np.float32)
result = layers.MaxPool1D(strides=2)(input)
assert result.shape == (4, 3, 8)
def test_max_pool_2d(self):
"""Test invoking MaxPool2D in eager mode."""
with context.eager_mode():
input = np.random.rand(2, 4, 6, 8).astype(np.float32)
result = layers.MaxPool2D()(input)
assert result.shape == (2, 2, 3, 8)
def test_max_pool_3d(self):
"""Test invoking MaxPool3D in eager mode."""
with context.eager_mode():
input = np.random.rand(2, 4, 6, 8, 2).astype(np.float32)
result = layers.MaxPool3D()(input)
assert result.shape == (2, 2, 3, 4, 2)
def test_graph_conv(self):
"""Test invoking GraphConv in eager mode."""
with context.eager_mode():
out_channels = 2
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
layer = layers.GraphConv(out_channels)
result = layer(*args)
assert result.shape == (n_atoms, out_channels)
assert len(layer.trainable_variables) == 2 * layer.num_deg
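      # GraphConv appears to keep one weight matrix and one bias per atom degree,
      # which would account for the 2 * num_deg trainable variables checked above.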
def test_graph_pool(self):
"""Test invoking GraphPool in eager mode."""
with context.eager_mode():
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphPool()(*args)
assert result.shape[0] == n_atoms
# TODO What should shape[1] be? It's not documented.
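      # GraphPool presumably max-pools each atom's features over its neighbors,
      # so shape[1] would be expected to match the input feature width
      # (75 for ConvMolFeaturizer), though that is not asserted here.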
def test_graph_gather(self):
"""Test invoking GraphGather in eager mode."""
with context.eager_mode():
batch_size = 2
n_features = 75
n_atoms = 4 # In CCC and C, there are 4 atoms
raw_smiles = ['CCC', 'C']
import rdkit
mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
featurizer = dc.feat.graph_features.ConvMolFeaturizer()
mols = featurizer.featurize(mols)
multi_mol = dc.feat.mol_graphs.ConvMol.agglomerate_mols(mols)
atom_features = multi_mol.get_atom_features().astype(np.float32)
degree_slice = multi_mol.deg_slice
membership = multi_mol.membership
deg_adjs = multi_mol.get_deg_adjacency_lists()[1:]
args = [atom_features, degree_slice, membership] + deg_adjs
result = layers.GraphGather(batch_size)(*args)
# TODO(rbharath): Why is it 2*n_features instead of n_features?
assert result.shape == (batch_size, 2 * n_features)
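      # A plausible explanation for the 2 * n_features width: GraphGather appears
      # to concatenate two per-molecule reductions of the atom features (e.g. a
      # sum-pool and a max-pool), doubling the feature dimension.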
def test_lstm_step(self):
"""Test invoking LSTMStep in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_feat = 10
y = np.random.rand(n_test, 2 * n_feat).astype(np.float32)
state_zero = np.random.rand(n_test, n_feat).astype(np.float32)
state_one = np.random.rand(n_test, n_feat).astype(np.float32)
layer = layers.LSTMStep(n_feat, 2 * n_feat)
result = layer(y, state_zero, state_one)
h_out, h_copy_out, c_out = (result[0], result[1][0], result[1][1])
assert h_out.shape == (n_test, n_feat)
assert h_copy_out.shape == (n_test, n_feat)
assert c_out.shape == (n_test, n_feat)
assert len(layer.trainable_variables) == 3
def test_attn_lstm_embedding(self):
"""Test invoking AttnLSTMEmbedding in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer(test, support)
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 7
def test_iter_ref_lstm_embedding(self):
"""Test invoking AttnLSTMEmbedding in eager mode."""
with context.eager_mode():
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.IterRefLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer(test, support)
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 12
def test_batch_norm(self):
"""Test invoking BatchNorm in eager mode."""
with context.eager_mode():
batch_size = 10
n_features = 5
input = np.random.rand(batch_size, n_features).astype(np.float32)
layer = layers.BatchNorm()
result = layer(input)
assert result.shape == (batch_size, n_features)
assert len(layer.trainable_variables) == 2
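      # The two trainable variables are presumably the batch-norm scale (gamma)
      # and offset (beta) vectors, one entry of each per feature.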
def test_weighted_error(self):
"""Test invoking WeightedError in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
result = layers.WeightedError()(input1, input2)
expected = np.sum(input1 * input2)
assert np.allclose(result, expected)
def test_vina_free_energy(self):
"""Test invoking VinaFreeEnergy in eager mode."""
with context.eager_mode():
n_atoms = 5
m_nbrs = 1
ndim = 3
nbr_cutoff = 1
start = 0
stop = 4
X = np.random.rand(n_atoms, ndim).astype(np.float32)
Z = np.random.randint(0, 2, (n_atoms)).astype(np.float32)
layer = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(X, Z)
assert len(layer.trainable_variables) == 6
assert result.shape == tuple()
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.VinaFreeEnergy(n_atoms, m_nbrs, ndim, nbr_cutoff, start,
stop)
result2 = layer2(X, Z)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(X, Z)
assert np.allclose(result, result3)
def test_weighted_linear_combo(self):
"""Test invoking WeightedLinearCombo in eager mode."""
with context.eager_mode():
input1 = np.random.rand(5, 10).astype(np.float32)
input2 = np.random.rand(5, 10).astype(np.float32)
layer = layers.WeightedLinearCombo()
result = layer(input1, input2)
assert len(layer.trainable_variables) == 2
expected = input1 * layer.trainable_variables[0] + input2 * layer.trainable_variables[1]
assert np.allclose(result, expected)
def test_neighbor_list(self):
"""Test invoking NeighborList in eager mode."""
with context.eager_mode():
N_atoms = 5
start = 0
stop = 12
nbr_cutoff = 3
ndim = 3
M_nbrs = 2
coords = start + np.random.rand(N_atoms, ndim) * (stop - start)
coords = tf.cast(tf.stack(coords), tf.float32)
layer = layers.NeighborList(N_atoms, M_nbrs, ndim, nbr_cutoff, start,
stop)
result = layer(coords)
assert result.shape == (N_atoms, M_nbrs)
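      # Each row of the result presumably holds the indices of the M_nbrs nearest
      # neighbors found for the corresponding atom within nbr_cutoff.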
def test_dropout(self):
"""Test invoking Dropout in eager mode."""
with context.eager_mode():
rate = 0.5
input = np.random.rand(5, 10).astype(np.float32)
layer = layers.Dropout(rate)
result1 = layer(input, training=False)
assert np.allclose(result1, input)
result2 = layer(input, training=True)
assert not np.allclose(result2, input)
nonzero = result2.numpy() != 0
assert np.allclose(result2.numpy()[nonzero], input[nonzero] / rate)
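      # Caveat: TensorFlow-style dropout rescales retained units by 1 / (1 - rate);
      # dividing by rate above only matches because rate == 0.5 here, where the
      # two scale factors coincide.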
def test_atomic_convolution(self):
"""Test invoking AtomicConvolution in eager mode."""
with context.eager_mode():
batch_size = 4
max_atoms = 5
max_neighbors = 2
dimensions = 3
params = [[5.0, 2.0, 0.5], [10.0, 2.0, 0.5]]
input1 = np.random.rand(batch_size, max_atoms,
dimensions).astype(np.float32)
input2 = np.random.randint(
max_atoms, size=(batch_size, max_atoms, max_neighbors))
input3 = np.random.randint(
1, 10, size=(batch_size, max_atoms, max_neighbors))
layer = layers.AtomicConvolution(radial_params=params)
result = layer(input1, input2, input3)
assert result.shape == (batch_size, max_atoms, len(params))
assert len(layer.trainable_variables) == 3
def test_alpha_share_layer(self):
"""Test invoking AlphaShareLayer in eager mode."""
with context.eager_mode():
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.AlphaShareLayer()
result = layer(input1, input2)
assert input1.shape == result[0].shape
assert input2.shape == result[1].shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.AlphaShareLayer()
result2 = layer2(input1, input2)
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input1, input2)
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
def test_sluice_loss(self):
"""Test invoking SluiceLoss in eager mode."""
with context.eager_mode():
input1 = np.ones((3, 4)).astype(np.float32)
input2 = np.ones((2, 2)).astype(np.float32)
result = layers.SluiceLoss()(input1, input2)
assert np.allclose(result, 40.0)
def test_beta_share(self):
"""Test invoking BetaShare in eager mode."""
with context.eager_mode():
batch_size = 10
length = 6
input1 = np.random.rand(batch_size, length).astype(np.float32)
input2 = np.random.rand(batch_size, length).astype(np.float32)
layer = layers.BetaShare()
result = layer(input1, input2)
assert input1.shape == result.shape
assert input2.shape == result.shape
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.BetaShare()
result2 = layer2(input1, input2)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(input1, input2)
assert np.allclose(result, result3)
def test_ani_feat(self):
"""Test invoking ANIFeat in eager mode."""
with context.eager_mode():
batch_size = 10
max_atoms = 5
input = np.random.rand(batch_size, max_atoms, 4).astype(np.float32)
layer = layers.ANIFeat(max_atoms=max_atoms)
result = layer(input)
# TODO What should the output shape be? It's not documented, and there
# are no other test cases for it.
def test_graph_embed_pool_layer(self):
"""Test invoking GraphEmbedPoolLayer in eager mode."""
with context.eager_mode():
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
layer = layers.GraphEmbedPoolLayer(num_vertices=6)
result = layer(V, adjs)
assert result[0].shape == (10, 6, 50)
assert result[1].shape == (10, 6, 5, 6)
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GraphEmbedPoolLayer(num_vertices=6)
result2 = layer2(V, adjs)
assert not np.allclose(result[0], result2[0])
assert not np.allclose(result[1], result2[1])
# But evaluating the first layer again should produce the same result as before.
result3 = layer(V, adjs)
assert np.allclose(result[0], result3[0])
assert np.allclose(result[1], result3[1])
def test_graph_cnn(self):
"""Test invoking GraphCNN in eager mode."""
with context.eager_mode():
V = np.random.uniform(size=(10, 100, 50)).astype(np.float32)
adjs = np.random.uniform(size=(10, 100, 5, 100)).astype(np.float32)
layer = layers.GraphCNN(num_filters=6)
result = layer(V, adjs)
assert result.shape == (10, 100, 6)
# Creating a second layer should produce different results, since it has
# different random weights.
layer2 = layers.GraphCNN(num_filters=6)
result2 = layer2(V, adjs)
assert not np.allclose(result, result2)
# But evaluating the first layer again should produce the same result as before.
result3 = layer(V, adjs)
assert np.allclose(result, result3)
def test_hinge_loss(self):
"""Test invoking HingeLoss in eager mode."""
with context.eager_mode():
n_labels = 1
n_logits = 1
logits = np.random.rand(n_logits).astype(np.float32)
labels = np.random.rand(n_labels).astype(np.float32)
result = layers.HingeLoss()(labels, logits)
assert result.shape == (n_labels,)
|
[
"numpy.log",
"deepchem.models.tensorgraph.layers.MaxPool1D",
"deepchem.models.tensorgraph.layers.BatchNorm",
"deepchem.models.tensorgraph.layers.Conv3D",
"deepchem.models.tensorgraph.layers.ReduceMax",
"deepchem.models.tensorgraph.layers.SoftMax",
"deepchem.models.tensorgraph.layers.Gather",
"deepchem.models.tensorgraph.layers.TimeSeriesDense",
"numpy.exp",
"deepchem.models.tensorgraph.layers.AtomicConvolution",
"deepchem.models.tensorgraph.layers.BetaShare",
"deepchem.models.tensorgraph.layers.Conv2D",
"deepchem.models.tensorgraph.layers.GraphPool",
"deepchem.models.tensorgraph.layers.Conv3DTranspose",
"deepchem.feat.graph_features.ConvMolFeaturizer",
"deepchem.models.tensorgraph.layers.Conv2DTranspose",
"deepchem.models.tensorgraph.layers.WeightedLinearCombo",
"deepchem.models.tensorgraph.layers.AlphaShareLayer",
"numpy.sum",
"numpy.random.randint",
"deepchem.models.tensorgraph.layers.Exp",
"deepchem.models.tensorgraph.layers.GraphConv",
"deepchem.models.tensorgraph.layers.Transpose",
"numpy.mean",
"deepchem.models.tensorgraph.layers.ReLU",
"deepchem.models.tensorgraph.layers.IterRefLSTMEmbedding",
"deepchem.models.tensorgraph.layers.ReduceMean",
"deepchem.models.tensorgraph.layers.SigmoidCrossEntropy",
"numpy.max",
"deepchem.models.tensorgraph.layers.MaxPool2D",
"tensorflow.nn.sigmoid",
"numpy.dot",
"deepchem.models.tensorgraph.layers.Concat",
"deepchem.models.tensorgraph.layers.GraphGather",
"numpy.random.normal",
"numpy.ones",
"deepchem.models.tensorgraph.layers.Conv1D",
"deepchem.models.tensorgraph.layers.VinaFreeEnergy",
"deepchem.models.tensorgraph.layers.Flatten",
"deepchem.models.tensorgraph.layers.Cast",
"rdkit.Chem.MolFromSmiles",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"deepchem.models.tensorgraph.layers.Sigmoid",
"deepchem.models.tensorgraph.layers.AttnLSTMEmbedding",
"numpy.random.rand",
"deepchem.models.tensorgraph.layers.Add",
"deepchem.models.tensorgraph.layers.Divide",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"deepchem.models.tensorgraph.layers.Squeeze",
"deepchem.models.tensorgraph.layers.WeightedError",
"deepchem.models.tensorgraph.layers.GRU",
"tensorflow.nn.softmax",
"deepchem.models.tensorgraph.layers.HingeLoss",
"deepchem.models.tensorgraph.layers.LSTMStep",
"deepchem.feat.mol_graphs.ConvMol.agglomerate_mols",
"tensorflow.stack",
"deepchem.models.tensorgraph.layers.ReduceSquareDifference",
"deepchem.models.tensorgraph.layers.Reshape",
"deepchem.models.tensorgraph.layers.Variable",
"deepchem.models.tensorgraph.layers.Highway",
"deepchem.models.tensorgraph.layers.Dense",
"deepchem.models.tensorgraph.layers.L2Loss",
"deepchem.models.tensorgraph.layers.L1Loss",
"deepchem.models.tensorgraph.layers.Dropout",
"numpy.array_equal",
"deepchem.models.tensorgraph.layers.LSTM",
"deepchem.models.tensorgraph.layers.Stack",
"deepchem.models.tensorgraph.layers.SparseSoftMaxCrossEntropy",
"deepchem.models.tensorgraph.layers.GraphEmbedPoolLayer",
"deepchem.models.tensorgraph.layers.ANIFeat",
"deepchem.models.tensorgraph.layers.MaxPool3D",
"deepchem.models.tensorgraph.layers.ReduceSum",
"deepchem.models.tensorgraph.layers.NeighborList",
"deepchem.models.tensorgraph.layers.CombineMeanStd",
"deepchem.models.tensorgraph.layers.Repeat",
"deepchem.models.tensorgraph.layers.InteratomicL2Distances",
"deepchem.models.tensorgraph.layers.SoftMaxCrossEntropy",
"numpy.abs",
"tensorflow.python.eager.context.eager_mode",
"numpy.allclose",
"deepchem.models.tensorgraph.layers.Constant",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"deepchem.models.tensorgraph.layers.SluiceLoss",
"deepchem.models.tensorgraph.layers.Multiply",
"tensorflow.nn.relu",
"numpy.random.uniform",
"deepchem.models.tensorgraph.layers.GraphCNN",
"deepchem.models.tensorgraph.layers.Log"
] |
[((403, 423), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (421, 423), False, 'from tensorflow.python.eager import context\n'), ((619, 654), 'deepchem.models.tensorgraph.layers.Conv1D', 'layers.Conv1D', (['filters', 'kernel_size'], {}), '(filters, kernel_size)\n', (632, 654), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((963, 998), 'deepchem.models.tensorgraph.layers.Conv1D', 'layers.Conv1D', (['filters', 'kernel_size'], {}), '(filters, kernel_size)\n', (976, 998), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((1206, 1234), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (1217, 1234), True, 'import numpy as np\n'), ((1314, 1334), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (1332, 1334), False, 'from tensorflow.python.eager import context\n'), ((1475, 1496), 'deepchem.models.tensorgraph.layers.Dense', 'layers.Dense', (['out_dim'], {}), '(out_dim)\n', (1487, 1496), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((1755, 1776), 'deepchem.models.tensorgraph.layers.Dense', 'layers.Dense', (['out_dim'], {}), '(out_dim)\n', (1767, 1776), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((1984, 2012), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (1995, 2012), True, 'import numpy as np\n'), ((2096, 2116), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (2114, 2116), False, 'from tensorflow.python.eager import context\n'), ((2237, 2253), 'deepchem.models.tensorgraph.layers.Highway', 'layers.Highway', ([], {}), '()\n', (2251, 2253), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((2510, 2526), 'deepchem.models.tensorgraph.layers.Highway', 'layers.Highway', ([], {}), '()\n', (2524, 2526), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((2734, 2762), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (2745, 2762), True, 'import numpy as np\n'), ((2846, 2866), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (2864, 2866), False, 'from tensorflow.python.eager import context\n'), ((3085, 3105), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (3103, 3105), False, 'from tensorflow.python.eager import context\n'), ((3327, 3347), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (3345, 3347), False, 'from tensorflow.python.eager import context\n'), ((3363, 3383), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (3377, 3383), True, 'import numpy as np\n'), ((3559, 3579), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (3577, 3579), False, 'from tensorflow.python.eager import context\n'), ((3800, 3820), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (3818, 3820), False, 'from tensorflow.python.eager import context\n'), ((4069, 4089), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (4087, 4089), False, 'from tensorflow.python.eager import context\n'), ((4210, 4271), 'deepchem.models.tensorgraph.layers.CombineMeanStd', 'layers.CombineMeanStd', ([], {'training_only': '(True)', 'noise_epsilon': '(0.01)'}), '(training_only=True, noise_epsilon=0.01)\n', (4231, 4271), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((4334, 
4363), 'numpy.array_equal', 'np.array_equal', (['result1', 'mean'], {}), '(result1, mean)\n', (4348, 4363), True, 'import numpy as np\n'), ((4497, 4533), 'numpy.allclose', 'np.allclose', (['result2', 'mean'], {'atol': '(0.1)'}), '(result2, mean, atol=0.1)\n', (4508, 4533), True, 'import numpy as np\n'), ((4615, 4635), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (4633, 4635), False, 'from tensorflow.python.eager import context\n'), ((4782, 4830), 'numpy.array_equal', 'np.array_equal', (['result[:, 0, :]', 'result[:, 1, :]'], {}), '(result[:, 0, :], result[:, 1, :])\n', (4796, 4830), True, 'import numpy as np\n'), ((4912, 4932), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (4930, 4932), False, 'from tensorflow.python.eager import context\n'), ((5072, 5116), 'numpy.array_equal', 'np.array_equal', (['result', '[input[1], input[3]]'], {}), '(result, [input[1], input[3]])\n', (5086, 5116), True, 'import numpy as np\n'), ((5192, 5212), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (5210, 5212), False, 'from tensorflow.python.eager import context\n'), ((5420, 5452), 'deepchem.models.tensorgraph.layers.GRU', 'layers.GRU', (['n_hidden', 'batch_size'], {}), '(n_hidden, batch_size)\n', (5430, 5452), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((5728, 5760), 'deepchem.models.tensorgraph.layers.GRU', 'layers.GRU', (['n_hidden', 'batch_size'], {}), '(n_hidden, batch_size)\n', (5738, 5760), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((5984, 6012), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (5995, 6012), True, 'import numpy as np\n'), ((6299, 6319), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (6317, 6319), False, 'from tensorflow.python.eager import context\n'), ((6527, 6560), 'deepchem.models.tensorgraph.layers.LSTM', 'layers.LSTM', (['n_hidden', 'batch_size'], {}), '(n_hidden, batch_size)\n', (6538, 6560), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((6836, 6869), 'deepchem.models.tensorgraph.layers.LSTM', 'layers.LSTM', (['n_hidden', 'batch_size'], {}), '(n_hidden, batch_size)\n', (6847, 6869), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((7093, 7121), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (7104, 7121), True, 'import numpy as np\n'), ((7432, 7452), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (7450, 7452), False, 'from tensorflow.python.eager import context\n'), ((7620, 7651), 'deepchem.models.tensorgraph.layers.TimeSeriesDense', 'layers.TimeSeriesDense', (['out_dim'], {}), '(out_dim)\n', (7642, 7651), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((7919, 7950), 'deepchem.models.tensorgraph.layers.TimeSeriesDense', 'layers.TimeSeriesDense', (['out_dim'], {}), '(out_dim)\n', (7941, 7950), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((8158, 8186), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (8169, 8186), True, 'import numpy as np\n'), ((8269, 8289), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (8287, 8289), False, 'from tensorflow.python.eager import context\n'), ((8521, 8550), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (8532, 8550), True, 'import numpy as np\n'), ((8633, 
8653), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (8651, 8653), False, 'from tensorflow.python.eager import context\n'), ((8831, 8870), 'numpy.mean', 'np.mean', (['((input1 - input2) ** 2)'], {'axis': '(1)'}), '((input1 - input2) ** 2, axis=1)\n', (8838, 8870), True, 'import numpy as np\n'), ((8882, 8911), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (8893, 8911), True, 'import numpy as np\n'), ((8995, 9015), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (9013, 9015), False, 'from tensorflow.python.eager import context\n'), ((9128, 9148), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['input'], {}), '(input)\n', (9141, 9148), True, 'import tensorflow as tf\n'), ((9162, 9191), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (9173, 9191), True, 'import numpy as np\n'), ((9275, 9295), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (9293, 9295), False, 'from tensorflow.python.eager import context\n'), ((9408, 9428), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['input'], {}), '(input)\n', (9421, 9428), True, 'import tensorflow as tf\n'), ((9442, 9471), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (9453, 9471), True, 'import numpy as np\n'), ((9549, 9569), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (9567, 9569), False, 'from tensorflow.python.eager import context\n'), ((9688, 9705), 'tensorflow.nn.relu', 'tf.nn.relu', (['input'], {}), '(input)\n', (9698, 9705), True, 'import tensorflow as tf\n'), ((9719, 9748), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (9730, 9748), True, 'import numpy as np\n'), ((9830, 9850), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (9848, 9850), False, 'from tensorflow.python.eager import context\n'), ((10060, 10098), 'numpy.array_equal', 'np.array_equal', (['input1', 'result[:, :10]'], {}), '(input1, result[:, :10])\n', (10074, 10098), True, 'import numpy as np\n'), ((10112, 10150), 'numpy.array_equal', 'np.array_equal', (['input2', 'result[:, 10:]'], {}), '(input2, result[:, 10:])\n', (10126, 10150), True, 'import numpy as np\n'), ((10230, 10250), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (10248, 10250), False, 'from tensorflow.python.eager import context\n'), ((10460, 10499), 'numpy.array_equal', 'np.array_equal', (['input1', 'result[:, 0, :]'], {}), '(input1, result[:, 0, :])\n', (10474, 10499), True, 'import numpy as np\n'), ((10513, 10552), 'numpy.array_equal', 'np.array_equal', (['input2', 'result[:, 1, :]'], {}), '(input2, result[:, 1, :])\n', (10527, 10552), True, 'import numpy as np\n'), ((10638, 10658), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (10656, 10658), False, 'from tensorflow.python.eager import context\n'), ((10767, 10796), 'numpy.array_equal', 'np.array_equal', (['result', 'value'], {}), '(result, value)\n', (10781, 10796), True, 'import numpy as np\n'), ((10882, 10902), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (10900, 10902), False, 'from tensorflow.python.eager import context\n'), ((10972, 10994), 'deepchem.models.tensorgraph.layers.Variable', 'layers.Variable', (['value'], {}), '(value)\n', (10987, 10994), True, 'import deepchem.models.tensorgraph.layers 
as layers\n'), ((11193, 11213), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (11211, 11213), False, 'from tensorflow.python.eager import context\n'), ((11272, 11302), 'numpy.array_equal', 'np.array_equal', (['result', '[4, 6]'], {}), '(result, [4, 6])\n', (11286, 11302), True, 'import numpy as np\n'), ((11388, 11408), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (11406, 11408), False, 'from tensorflow.python.eager import context\n'), ((11472, 11502), 'numpy.array_equal', 'np.array_equal', (['result', '[3, 8]'], {}), '(result, [3, 8])\n', (11486, 11502), True, 'import numpy as np\n'), ((11584, 11604), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (11602, 11604), False, 'from tensorflow.python.eager import context\n'), ((11666, 11697), 'numpy.allclose', 'np.allclose', (['result', '[0.5, 0.4]'], {}), '(result, [0.5, 0.4])\n', (11677, 11697), True, 'import numpy as np\n'), ((11773, 11793), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (11791, 11793), False, 'from tensorflow.python.eager import context\n'), ((11949, 11969), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (11967, 11969), False, 'from tensorflow.python.eager import context\n'), ((12165, 12185), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (12183, 12185), False, 'from tensorflow.python.eager import context\n'), ((12238, 12262), 'numpy.random.rand', 'np.random.rand', (['atoms', '(3)'], {}), '(atoms, 3)\n', (12252, 12262), True, 'import numpy as np\n'), ((12285, 12337), 'numpy.random.randint', 'np.random.randint', (['(0)', 'atoms'], {'size': '(atoms, neighbors)'}), '(0, atoms, size=(atoms, neighbors))\n', (12302, 12337), True, 'import numpy as np\n'), ((12352, 12402), 'deepchem.models.tensorgraph.layers.InteratomicL2Distances', 'layers.InteratomicL2Distances', (['atoms', 'neighbors', '(3)'], {}), '(atoms, neighbors, 3)\n', (12381, 12402), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((12861, 12881), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (12879, 12881), False, 'from tensorflow.python.eager import context\n'), ((13141, 13217), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (13187, 13217), True, 'import tensorflow as tf\n'), ((13242, 13271), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (13253, 13271), True, 'import numpy as np\n'), ((13381, 13401), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (13399, 13401), False, 'from tensorflow.python.eager import context\n'), ((13669, 13741), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (13711, 13741), True, 'import tensorflow as tf\n'), ((13766, 13795), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (13777, 13795), True, 'import numpy as np\n'), ((13905, 13925), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (13923, 13925), False, 'from tensorflow.python.eager import context\n'), ((14237, 14306), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 
'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (14276, 14306), True, 'import tensorflow as tf\n'), ((14331, 14360), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (14342, 14360), True, 'import numpy as np\n'), ((14451, 14471), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (14469, 14471), False, 'from tensorflow.python.eager import context\n'), ((14755, 14775), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (14773, 14775), False, 'from tensorflow.python.eager import context\n'), ((15057, 15077), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (15075, 15077), False, 'from tensorflow.python.eager import context\n'), ((15386, 15406), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (15404, 15406), False, 'from tensorflow.python.eager import context\n'), ((15777, 15797), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (15795, 15797), False, 'from tensorflow.python.eager import context\n'), ((16047, 16094), 'deepchem.models.tensorgraph.layers.Conv2D', 'layers.Conv2D', (['filters'], {'kernel_size': 'kernel_size'}), '(filters, kernel_size=kernel_size)\n', (16060, 16094), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((16368, 16415), 'deepchem.models.tensorgraph.layers.Conv2D', 'layers.Conv2D', (['filters'], {'kernel_size': 'kernel_size'}), '(filters, kernel_size=kernel_size)\n', (16381, 16415), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((16623, 16651), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (16634, 16651), True, 'import numpy as np\n'), ((16734, 16754), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (16752, 16754), False, 'from tensorflow.python.eager import context\n'), ((17027, 17074), 'deepchem.models.tensorgraph.layers.Conv3D', 'layers.Conv3D', (['filters'], {'kernel_size': 'kernel_size'}), '(filters, kernel_size=kernel_size)\n', (17040, 17074), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((17355, 17402), 'deepchem.models.tensorgraph.layers.Conv3D', 'layers.Conv3D', (['filters'], {'kernel_size': 'kernel_size'}), '(filters, kernel_size=kernel_size)\n', (17368, 17402), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((17610, 17638), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (17621, 17638), True, 'import numpy as np\n'), ((17740, 17760), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (17758, 17760), False, 'from tensorflow.python.eager import context\n'), ((18027, 18098), 'deepchem.models.tensorgraph.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['filters'], {'kernel_size': 'kernel_size', 'stride': 'stride'}), '(filters, kernel_size=kernel_size, stride=stride)\n', (18049, 18098), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((18431, 18502), 'deepchem.models.tensorgraph.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['filters'], {'kernel_size': 'kernel_size', 'stride': 'stride'}), '(filters, kernel_size=kernel_size, stride=stride)\n', (18453, 18502), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((18721, 18749), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (18732, 
18749), True, 'import numpy as np\n'), ((18851, 18871), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (18869, 18871), False, 'from tensorflow.python.eager import context\n'), ((19161, 19232), 'deepchem.models.tensorgraph.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['filters'], {'kernel_size': 'kernel_size', 'stride': 'stride'}), '(filters, kernel_size=kernel_size, stride=stride)\n', (19183, 19232), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((19581, 19652), 'deepchem.models.tensorgraph.layers.Conv3DTranspose', 'layers.Conv3DTranspose', (['filters'], {'kernel_size': 'kernel_size', 'stride': 'stride'}), '(filters, kernel_size=kernel_size, stride=stride)\n', (19603, 19652), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((19871, 19899), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (19882, 19899), True, 'import numpy as np\n'), ((19989, 20009), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (20007, 20009), False, 'from tensorflow.python.eager import context\n'), ((20246, 20266), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (20264, 20266), False, 'from tensorflow.python.eager import context\n'), ((20500, 20520), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (20518, 20520), False, 'from tensorflow.python.eager import context\n'), ((20759, 20779), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (20777, 20779), False, 'from tensorflow.python.eager import context\n'), ((20990, 21032), 'deepchem.feat.graph_features.ConvMolFeaturizer', 'dc.feat.graph_features.ConvMolFeaturizer', ([], {}), '()\n', (21030, 21032), True, 'import deepchem as dc\n'), ((21091, 21140), 'deepchem.feat.mol_graphs.ConvMol.agglomerate_mols', 'dc.feat.mol_graphs.ConvMol.agglomerate_mols', (['mols'], {}), '(mols)\n', (21134, 21140), True, 'import deepchem as dc\n'), ((21430, 21460), 'deepchem.models.tensorgraph.layers.GraphConv', 'layers.GraphConv', (['out_channels'], {}), '(out_channels)\n', (21446, 21460), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((21695, 21715), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (21713, 21715), False, 'from tensorflow.python.eager import context\n'), ((21903, 21945), 'deepchem.feat.graph_features.ConvMolFeaturizer', 'dc.feat.graph_features.ConvMolFeaturizer', ([], {}), '()\n', (21943, 21945), True, 'import deepchem as dc\n'), ((22004, 22053), 'deepchem.feat.mol_graphs.ConvMol.agglomerate_mols', 'dc.feat.mol_graphs.ConvMol.agglomerate_mols', (['mols'], {}), '(mols)\n', (22047, 22053), True, 'import deepchem as dc\n'), ((22562, 22582), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (22580, 22582), False, 'from tensorflow.python.eager import context\n'), ((22813, 22855), 'deepchem.feat.graph_features.ConvMolFeaturizer', 'dc.feat.graph_features.ConvMolFeaturizer', ([], {}), '()\n', (22853, 22855), True, 'import deepchem as dc\n'), ((22914, 22963), 'deepchem.feat.mol_graphs.ConvMol.agglomerate_mols', 'dc.feat.mol_graphs.ConvMol.agglomerate_mols', (['mols'], {}), '(mols)\n', (22957, 22963), True, 'import deepchem as dc\n'), ((23506, 23526), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (23524, 23526), False, 'from tensorflow.python.eager import context\n'), ((23798, 23833), 
'deepchem.models.tensorgraph.layers.LSTMStep', 'layers.LSTMStep', (['n_feat', '(2 * n_feat)'], {}), '(n_feat, 2 * n_feat)\n', (23813, 23833), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((24248, 24268), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (24266, 24268), False, 'from tensorflow.python.eager import context\n'), ((24492, 24554), 'deepchem.models.tensorgraph.layers.AttnLSTMEmbedding', 'layers.AttnLSTMEmbedding', (['n_test', 'n_support', 'n_feat', 'max_depth'], {}), '(n_test, n_support, n_feat, max_depth)\n', (24516, 24554), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((24866, 24886), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (24884, 24886), False, 'from tensorflow.python.eager import context\n'), ((25110, 25175), 'deepchem.models.tensorgraph.layers.IterRefLSTMEmbedding', 'layers.IterRefLSTMEmbedding', (['n_test', 'n_support', 'n_feat', 'max_depth'], {}), '(n_test, n_support, n_feat, max_depth)\n', (25137, 25175), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((25467, 25487), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (25485, 25487), False, 'from tensorflow.python.eager import context\n'), ((25618, 25636), 'deepchem.models.tensorgraph.layers.BatchNorm', 'layers.BatchNorm', ([], {}), '()\n', (25634, 25636), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((25864, 25884), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (25882, 25884), False, 'from tensorflow.python.eager import context\n'), ((26069, 26092), 'numpy.sum', 'np.sum', (['(input1 * input2)'], {}), '(input1 * input2)\n', (26075, 26092), True, 'import numpy as np\n'), ((26106, 26135), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (26117, 26135), True, 'import numpy as np\n'), ((26235, 26255), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (26253, 26255), False, 'from tensorflow.python.eager import context\n'), ((26496, 26565), 'deepchem.models.tensorgraph.layers.VinaFreeEnergy', 'layers.VinaFreeEnergy', (['n_atoms', 'm_nbrs', 'ndim', 'nbr_cutoff', 'start', 'stop'], {}), '(n_atoms, m_nbrs, ndim, nbr_cutoff, start, stop)\n', (26517, 26565), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((26845, 26914), 'deepchem.models.tensorgraph.layers.VinaFreeEnergy', 'layers.VinaFreeEnergy', (['n_atoms', 'm_nbrs', 'ndim', 'nbr_cutoff', 'start', 'stop'], {}), '(n_atoms, m_nbrs, ndim, nbr_cutoff, start, stop)\n', (26866, 26914), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((27157, 27185), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (27168, 27185), True, 'import numpy as np\n'), ((27295, 27315), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (27313, 27315), False, 'from tensorflow.python.eager import context\n'), ((27443, 27471), 'deepchem.models.tensorgraph.layers.WeightedLinearCombo', 'layers.WeightedLinearCombo', ([], {}), '()\n', (27469, 27471), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((27666, 27695), 'numpy.allclose', 'np.allclose', (['result', 'expected'], {}), '(result, expected)\n', (27677, 27695), True, 'import numpy as np\n'), ((27790, 27810), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (27808, 27810), False, 'from tensorflow.python.eager 
import context\n'), ((28052, 28119), 'deepchem.models.tensorgraph.layers.NeighborList', 'layers.NeighborList', (['N_atoms', 'M_nbrs', 'ndim', 'nbr_cutoff', 'start', 'stop'], {}), '(N_atoms, M_nbrs, ndim, nbr_cutoff, start, stop)\n', (28071, 28119), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((28313, 28333), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (28331, 28333), False, 'from tensorflow.python.eager import context\n'), ((28421, 28441), 'deepchem.models.tensorgraph.layers.Dropout', 'layers.Dropout', (['rate'], {}), '(rate)\n', (28435, 28441), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((28500, 28527), 'numpy.allclose', 'np.allclose', (['result1', 'input'], {}), '(result1, input)\n', (28511, 28527), True, 'import numpy as np\n'), ((28832, 28852), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (28850, 28852), False, 'from tensorflow.python.eager import context\n'), ((29120, 29193), 'numpy.random.randint', 'np.random.randint', (['max_atoms'], {'size': '(batch_size, max_atoms, max_neighbors)'}), '(max_atoms, size=(batch_size, max_atoms, max_neighbors))\n', (29137, 29193), True, 'import numpy as np\n'), ((29220, 29289), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': '(batch_size, max_atoms, max_neighbors)'}), '(1, 10, size=(batch_size, max_atoms, max_neighbors))\n', (29237, 29289), True, 'import numpy as np\n'), ((29315, 29361), 'deepchem.models.tensorgraph.layers.AtomicConvolution', 'layers.AtomicConvolution', ([], {'radial_params': 'params'}), '(radial_params=params)\n', (29339, 29361), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((29623, 29643), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (29641, 29643), False, 'from tensorflow.python.eager import context\n'), ((29836, 29860), 'deepchem.models.tensorgraph.layers.AlphaShareLayer', 'layers.AlphaShareLayer', ([], {}), '()\n', (29858, 29860), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((30118, 30142), 'deepchem.models.tensorgraph.layers.AlphaShareLayer', 'layers.AlphaShareLayer', ([], {}), '()\n', (30140, 30142), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((30426, 30460), 'numpy.allclose', 'np.allclose', (['result[0]', 'result3[0]'], {}), '(result[0], result3[0])\n', (30437, 30460), True, 'import numpy as np\n'), ((30474, 30508), 'numpy.allclose', 'np.allclose', (['result[1]', 'result3[1]'], {}), '(result[1], result3[1])\n', (30485, 30508), True, 'import numpy as np\n'), ((30599, 30619), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (30617, 30619), False, 'from tensorflow.python.eager import context\n'), ((30785, 30810), 'numpy.allclose', 'np.allclose', (['result', '(40.0)'], {}), '(result, 40.0)\n', (30796, 30810), True, 'import numpy as np\n'), ((30899, 30919), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (30917, 30919), False, 'from tensorflow.python.eager import context\n'), ((31112, 31130), 'deepchem.models.tensorgraph.layers.BetaShare', 'layers.BetaShare', ([], {}), '()\n', (31128, 31130), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((31382, 31400), 'deepchem.models.tensorgraph.layers.BetaShare', 'layers.BetaShare', ([], {}), '()\n', (31398, 31400), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((31626, 31654), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), 
'(result, result3)\n', (31637, 31654), True, 'import numpy as np\n'), ((31739, 31759), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (31757, 31759), False, 'from tensorflow.python.eager import context\n'), ((31891, 31926), 'deepchem.models.tensorgraph.layers.ANIFeat', 'layers.ANIFeat', ([], {'max_atoms': 'max_atoms'}), '(max_atoms=max_atoms)\n', (31905, 31926), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((32183, 32203), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (32201, 32203), False, 'from tensorflow.python.eager import context\n'), ((32360, 32402), 'deepchem.models.tensorgraph.layers.GraphEmbedPoolLayer', 'layers.GraphEmbedPoolLayer', ([], {'num_vertices': '(6)'}), '(num_vertices=6)\n', (32386, 32402), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((32653, 32695), 'deepchem.models.tensorgraph.layers.GraphEmbedPoolLayer', 'layers.GraphEmbedPoolLayer', ([], {'num_vertices': '(6)'}), '(num_vertices=6)\n', (32679, 32695), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((32965, 32999), 'numpy.allclose', 'np.allclose', (['result[0]', 'result3[0]'], {}), '(result[0], result3[0])\n', (32976, 32999), True, 'import numpy as np\n'), ((33013, 33047), 'numpy.allclose', 'np.allclose', (['result[1]', 'result3[1]'], {}), '(result[1], result3[1])\n', (33024, 33047), True, 'import numpy as np\n'), ((33134, 33154), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (33152, 33154), False, 'from tensorflow.python.eager import context\n'), ((33311, 33341), 'deepchem.models.tensorgraph.layers.GraphCNN', 'layers.GraphCNN', ([], {'num_filters': '(6)'}), '(num_filters=6)\n', (33326, 33341), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((33544, 33574), 'deepchem.models.tensorgraph.layers.GraphCNN', 'layers.GraphCNN', ([], {'num_filters': '(6)'}), '(num_filters=6)\n', (33559, 33574), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((33786, 33814), 'numpy.allclose', 'np.allclose', (['result', 'result3'], {}), '(result, result3)\n', (33797, 33814), True, 'import numpy as np\n'), ((33903, 33923), 'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), '()\n', (33921, 33923), False, 'from tensorflow.python.eager import context\n'), ((1046, 1074), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (1057, 1074), True, 'import numpy as np\n'), ((1824, 1852), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (1835, 1852), True, 'import numpy as np\n'), ((2574, 2602), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (2585, 2602), True, 'import numpy as np\n'), ((2941, 2957), 'deepchem.models.tensorgraph.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2955, 2957), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((3180, 3204), 'deepchem.models.tensorgraph.layers.Reshape', 'layers.Reshape', (['(100, 2)'], {}), '((100, 2))\n', (3194, 3204), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((3399, 3428), 'deepchem.models.tensorgraph.layers.Cast', 'layers.Cast', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (3410, 3428), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((3653, 3669), 'deepchem.models.tensorgraph.layers.Squeeze', 'layers.Squeeze', ([], {}), '()\n', (3667, 3669), True, 'import deepchem.models.tensorgraph.layers as layers\n'), 
((3895, 3922), 'deepchem.models.tensorgraph.layers.Transpose', 'layers.Transpose', (['(1, 2, 0)'], {}), '((1, 2, 0))\n', (3911, 3922), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((4454, 4483), 'numpy.array_equal', 'np.array_equal', (['result2', 'mean'], {}), '(result2, mean)\n', (4468, 4483), True, 'import numpy as np\n'), ((4706, 4722), 'deepchem.models.tensorgraph.layers.Repeat', 'layers.Repeat', (['(3)'], {}), '(3)\n', (4719, 4722), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((5027, 5042), 'deepchem.models.tensorgraph.layers.Gather', 'layers.Gather', ([], {}), '()\n', (5040, 5042), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((5816, 5844), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (5827, 5844), True, 'import numpy as np\n'), ((6193, 6221), 'numpy.allclose', 'np.allclose', (['result', 'result4'], {}), '(result, result4)\n', (6204, 6221), True, 'import numpy as np\n'), ((6925, 6953), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (6936, 6953), True, 'import numpy as np\n'), ((7302, 7330), 'numpy.allclose', 'np.allclose', (['result', 'result4'], {}), '(result, result4)\n', (7313, 7330), True, 'import numpy as np\n'), ((7998, 8026), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (8009, 8026), True, 'import numpy as np\n'), ((8418, 8433), 'deepchem.models.tensorgraph.layers.L1Loss', 'layers.L1Loss', ([], {}), '()\n', (8431, 8433), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((8475, 8498), 'numpy.abs', 'np.abs', (['(input1 - input2)'], {}), '(input1 - input2)\n', (8481, 8498), True, 'import numpy as np\n'), ((8782, 8797), 'deepchem.models.tensorgraph.layers.L2Loss', 'layers.L2Loss', ([], {}), '()\n', (8795, 8797), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((9087, 9103), 'deepchem.models.tensorgraph.layers.SoftMax', 'layers.SoftMax', ([], {}), '()\n', (9101, 9103), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((9367, 9383), 'deepchem.models.tensorgraph.layers.Sigmoid', 'layers.Sigmoid', ([], {}), '()\n', (9381, 9383), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((9650, 9663), 'deepchem.models.tensorgraph.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (9661, 9663), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((9978, 9993), 'deepchem.models.tensorgraph.layers.Concat', 'layers.Concat', ([], {}), '()\n', (9991, 9993), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((10377, 10391), 'deepchem.models.tensorgraph.layers.Stack', 'layers.Stack', ([], {}), '()\n', (10389, 10391), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((10729, 10751), 'deepchem.models.tensorgraph.layers.Constant', 'layers.Constant', (['value'], {}), '(value)\n', (10744, 10751), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((11230, 11242), 'deepchem.models.tensorgraph.layers.Add', 'layers.Add', ([], {}), '()\n', (11240, 11242), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((11425, 11442), 'deepchem.models.tensorgraph.layers.Multiply', 'layers.Multiply', ([], {}), '()\n', (11440, 11442), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((11621, 11636), 'deepchem.models.tensorgraph.layers.Divide', 'layers.Divide', ([], {}), '()\n', (11634, 11636), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((11810, 11822), 'deepchem.models.tensorgraph.layers.Log', 
'layers.Log', ([], {}), '()\n', (11820, 11822), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((11861, 11872), 'numpy.log', 'np.log', (['(2.5)'], {}), '(2.5)\n', (11867, 11872), True, 'import numpy as np\n'), ((11986, 11998), 'deepchem.models.tensorgraph.layers.Exp', 'layers.Exp', ([], {}), '()\n', (11996, 11998), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((12037, 12048), 'numpy.exp', 'np.exp', (['(2.5)'], {}), '(2.5)\n', (12043, 12048), True, 'import numpy as np\n'), ((13073, 13107), 'deepchem.models.tensorgraph.layers.SparseSoftMaxCrossEntropy', 'layers.SparseSoftMaxCrossEntropy', ([], {}), '()\n', (13105, 13107), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((13607, 13635), 'deepchem.models.tensorgraph.layers.SoftMaxCrossEntropy', 'layers.SoftMaxCrossEntropy', ([], {}), '()\n', (13633, 13635), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((14175, 14203), 'deepchem.models.tensorgraph.layers.SigmoidCrossEntropy', 'layers.SigmoidCrossEntropy', ([], {}), '()\n', (14201, 14203), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((14543, 14568), 'deepchem.models.tensorgraph.layers.ReduceMean', 'layers.ReduceMean', ([], {'axis': '(1)'}), '(axis=1)\n', (14560, 14568), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((14643, 14665), 'numpy.mean', 'np.mean', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (14650, 14665), True, 'import numpy as np\n'), ((14847, 14871), 'deepchem.models.tensorgraph.layers.ReduceMax', 'layers.ReduceMax', ([], {'axis': '(1)'}), '(axis=1)\n', (14863, 14871), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((14946, 14967), 'numpy.max', 'np.max', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (14952, 14967), True, 'import numpy as np\n'), ((15149, 15173), 'deepchem.models.tensorgraph.layers.ReduceSum', 'layers.ReduceSum', ([], {'axis': '(1)'}), '(axis=1)\n', (15165, 15173), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((15248, 15269), 'numpy.sum', 'np.sum', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (15254, 15269), True, 'import numpy as np\n'), ((15535, 15572), 'deepchem.models.tensorgraph.layers.ReduceSquareDifference', 'layers.ReduceSquareDifference', ([], {'axis': '(1)'}), '(axis=1)\n', (15564, 15572), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((15656, 15695), 'numpy.mean', 'np.mean', (['((input1 - input2) ** 2)'], {'axis': '(1)'}), '((input1 - input2) ** 2, axis=1)\n', (15663, 15695), True, 'import numpy as np\n'), ((16463, 16491), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (16474, 16491), True, 'import numpy as np\n'), ((17450, 17478), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (17461, 17478), True, 'import numpy as np\n'), ((18561, 18589), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (18572, 18589), True, 'import numpy as np\n'), ((19711, 19739), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (19722, 19739), True, 'import numpy as np\n'), ((20083, 20110), 'deepchem.models.tensorgraph.layers.MaxPool1D', 'layers.MaxPool1D', ([], {'strides': '(2)'}), '(strides=2)\n', (20099, 20110), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((20343, 20361), 'deepchem.models.tensorgraph.layers.MaxPool2D', 'layers.MaxPool2D', ([], {}), '()\n', (20359, 20361), True, 'import deepchem.models.tensorgraph.layers as layers\n'), 
((20600, 20618), 'deepchem.models.tensorgraph.layers.MaxPool3D', 'layers.MaxPool3D', ([], {}), '()\n', (20616, 20618), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((20922, 20949), 'rdkit.Chem.MolFromSmiles', 'rdkit.Chem.MolFromSmiles', (['s'], {}), '(s)\n', (20946, 20949), False, 'import rdkit\n'), ((21835, 21862), 'rdkit.Chem.MolFromSmiles', 'rdkit.Chem.MolFromSmiles', (['s'], {}), '(s)\n', (21859, 21862), False, 'import rdkit\n'), ((22344, 22362), 'deepchem.models.tensorgraph.layers.GraphPool', 'layers.GraphPool', ([], {}), '()\n', (22360, 22362), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((22745, 22772), 'rdkit.Chem.MolFromSmiles', 'rdkit.Chem.MolFromSmiles', (['s'], {}), '(s)\n', (22769, 22772), False, 'import rdkit\n'), ((23254, 23284), 'deepchem.models.tensorgraph.layers.GraphGather', 'layers.GraphGather', (['batch_size'], {}), '(batch_size)\n', (23272, 23284), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((26013, 26035), 'deepchem.models.tensorgraph.layers.WeightedError', 'layers.WeightedError', ([], {}), '()\n', (26033, 26035), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((26998, 27026), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (27009, 27026), True, 'import numpy as np\n'), ((28008, 28024), 'tensorflow.stack', 'tf.stack', (['coords'], {}), '(coords)\n', (28016, 28024), True, 'import tensorflow as tf\n'), ((28589, 28616), 'numpy.allclose', 'np.allclose', (['result2', 'input'], {}), '(result2, input)\n', (28600, 28616), True, 'import numpy as np\n'), ((30199, 30233), 'numpy.allclose', 'np.allclose', (['result[0]', 'result2[0]'], {}), '(result[0], result2[0])\n', (30210, 30233), True, 'import numpy as np\n'), ((30251, 30285), 'numpy.allclose', 'np.allclose', (['result[1]', 'result2[1]'], {}), '(result[1], result2[1])\n', (30262, 30285), True, 'import numpy as np\n'), ((30736, 30755), 'deepchem.models.tensorgraph.layers.SluiceLoss', 'layers.SluiceLoss', ([], {}), '()\n', (30753, 30755), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((31457, 31485), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (31468, 31485), True, 'import numpy as np\n'), ((32745, 32779), 'numpy.allclose', 'np.allclose', (['result[0]', 'result2[0]'], {}), '(result[0], result2[0])\n', (32756, 32779), True, 'import numpy as np\n'), ((32797, 32831), 'numpy.allclose', 'np.allclose', (['result[1]', 'result2[1]'], {}), '(result[1], result2[1])\n', (32808, 32831), True, 'import numpy as np\n'), ((33624, 33652), 'numpy.allclose', 'np.allclose', (['result', 'result2'], {}), '(result, result2)\n', (33635, 33652), True, 'import numpy as np\n'), ((34096, 34114), 'deepchem.models.tensorgraph.layers.HingeLoss', 'layers.HingeLoss', ([], {}), '()\n', (34112, 34114), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((539, 585), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'width', 'in_channels'], {}), '(batch_size, width, in_channels)\n', (553, 585), True, 'import numpy as np\n'), ((1407, 1441), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'in_dim'], {}), '(batch_size, in_dim)\n', (1421, 1441), True, 'import numpy as np\n'), ((2170, 2203), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'width'], {}), '(batch_size, width)\n', (2184, 2203), True, 'import numpy as np\n'), ((2882, 2906), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)', '(4)'], {}), '(5, 10, 4)\n', (2896, 2906), True, 'import numpy as np\n'), 
((3121, 3145), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)', '(4)'], {}), '(5, 10, 4)\n', (3135, 3145), True, 'import numpy as np\n'), ((3595, 3618), 'numpy.random.rand', 'np.random.rand', (['(5)', '(1)', '(4)'], {}), '(5, 1, 4)\n', (3609, 3618), True, 'import numpy as np\n'), ((3836, 3860), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)', '(4)'], {}), '(5, 10, 4)\n', (3850, 3860), True, 'import numpy as np\n'), ((4104, 4124), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (4118, 4124), True, 'import numpy as np\n'), ((4156, 4176), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (4170, 4176), True, 'import numpy as np\n'), ((4651, 4671), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (4665, 4671), True, 'import numpy as np\n'), ((4948, 4965), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (4962, 4965), True, 'import numpy as np\n'), ((5309, 5357), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_steps', 'in_channels'], {}), '(batch_size, n_steps, in_channels)\n', (5323, 5357), True, 'import numpy as np\n'), ((6416, 6464), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_steps', 'in_channels'], {}), '(batch_size, n_steps, in_channels)\n', (6430, 6464), True, 'import numpy as np\n'), ((7543, 7586), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_steps', 'in_dim'], {}), '(batch_size, n_steps, in_dim)\n', (7557, 7586), True, 'import numpy as np\n'), ((8306, 8327), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (8320, 8327), True, 'import numpy as np\n'), ((8362, 8383), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (8376, 8383), True, 'import numpy as np\n'), ((8670, 8691), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (8684, 8691), True, 'import numpy as np\n'), ((8726, 8747), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (8740, 8747), True, 'import numpy as np\n'), ((9031, 9052), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (9045, 9052), True, 'import numpy as np\n'), ((9311, 9332), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (9325, 9332), True, 'import numpy as np\n'), ((9585, 9615), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(5, 10)'}), '(size=(5, 10))\n', (9601, 9615), True, 'import numpy as np\n'), ((9867, 9888), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (9881, 9888), True, 'import numpy as np\n'), ((9923, 9943), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (9937, 9943), True, 'import numpy as np\n'), ((10267, 10287), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10281, 10287), True, 'import numpy as np\n'), ((10322, 10342), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10336, 10342), True, 'import numpy as np\n'), ((10674, 10694), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10688, 10694), True, 'import numpy as np\n'), ((10918, 10938), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10932, 10938), True, 'import numpy as np\n'), ((12658, 12678), 'numpy.dot', 'np.dot', (['delta', 'delta'], {}), '(delta, delta)\n', (12664, 12678), True, 'import numpy as np\n'), ((12696, 12738), 'numpy.allclose', 'np.allclose', (['dist2', 'result[atom, neighbor]'], {}), '(dist2, result[atom, neighbor])\n', (12707, 
12738), True, 'import numpy as np\n'), ((12941, 12979), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_features'], {}), '(batch_size, n_features)\n', (12955, 12979), True, 'import numpy as np\n'), ((13014, 13040), 'numpy.random.rand', 'np.random.rand', (['batch_size'], {}), '(batch_size)\n', (13028, 13040), True, 'import numpy as np\n'), ((13461, 13499), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_features'], {}), '(batch_size, n_features)\n', (13475, 13499), True, 'import numpy as np\n'), ((13534, 13572), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_features'], {}), '(batch_size, n_features)\n', (13548, 13572), True, 'import numpy as np\n'), ((13985, 14023), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_features'], {}), '(batch_size, n_features)\n', (13999, 14023), True, 'import numpy as np\n'), ((14058, 14107), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(batch_size, n_features)'], {}), '(0, 2, (batch_size, n_features))\n', (14075, 14107), True, 'import numpy as np\n'), ((14487, 14508), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (14501, 14508), True, 'import numpy as np\n'), ((14791, 14812), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (14805, 14812), True, 'import numpy as np\n'), ((15093, 15114), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (15107, 15114), True, 'import numpy as np\n'), ((15423, 15444), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (15437, 15444), True, 'import numpy as np\n'), ((15479, 15500), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (15493, 15500), True, 'import numpy as np\n'), ((15930, 15984), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length', 'width', 'in_channels'], {}), '(batch_size, length, width, in_channels)\n', (15944, 15984), True, 'import numpy as np\n'), ((16903, 16964), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length', 'width', 'depth', 'in_channels'], {}), '(batch_size, length, width, depth, in_channels)\n', (16917, 16964), True, 'import numpy as np\n'), ((17910, 17964), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length', 'width', 'in_channels'], {}), '(batch_size, length, width, in_channels)\n', (17924, 17964), True, 'import numpy as np\n'), ((19037, 19098), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length', 'width', 'depth', 'in_channels'], {}), '(batch_size, length, width, depth, in_channels)\n', (19051, 19098), True, 'import numpy as np\n'), ((20025, 20048), 'numpy.random.rand', 'np.random.rand', (['(4)', '(6)', '(8)'], {}), '(4, 6, 8)\n', (20039, 20048), True, 'import numpy as np\n'), ((20282, 20308), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)', '(6)', '(8)'], {}), '(2, 4, 6, 8)\n', (20296, 20308), True, 'import numpy as np\n'), ((20536, 20565), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)', '(6)', '(8)', '(2)'], {}), '(2, 4, 6, 8, 2)\n', (20550, 20565), True, 'import numpy as np\n'), ((23593, 23627), 'numpy.random.rand', 'np.random.rand', (['n_test', '(2 * n_feat)'], {}), '(n_test, 2 * n_feat)\n', (23607, 23627), True, 'import numpy as np\n'), ((23666, 23696), 'numpy.random.rand', 'np.random.rand', (['n_test', 'n_feat'], {}), '(n_test, n_feat)\n', (23680, 23696), True, 'import numpy as np\n'), ((23734, 23764), 'numpy.random.rand', 'np.random.rand', (['n_test', 'n_feat'], {}), '(n_test, n_feat)\n', (23748, 23764), True, 'import numpy as np\n'), ((24359, 
24389), 'numpy.random.rand', 'np.random.rand', (['n_test', 'n_feat'], {}), '(n_test, n_feat)\n', (24373, 24389), True, 'import numpy as np\n'), ((24425, 24458), 'numpy.random.rand', 'np.random.rand', (['n_support', 'n_feat'], {}), '(n_support, n_feat)\n', (24439, 24458), True, 'import numpy as np\n'), ((24977, 25007), 'numpy.random.rand', 'np.random.rand', (['n_test', 'n_feat'], {}), '(n_test, n_feat)\n', (24991, 25007), True, 'import numpy as np\n'), ((25043, 25076), 'numpy.random.rand', 'np.random.rand', (['n_support', 'n_feat'], {}), '(n_support, n_feat)\n', (25057, 25076), True, 'import numpy as np\n'), ((25546, 25584), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'n_features'], {}), '(batch_size, n_features)\n', (25560, 25584), True, 'import numpy as np\n'), ((25901, 25922), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (25915, 25922), True, 'import numpy as np\n'), ((25957, 25978), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (25971, 25978), True, 'import numpy as np\n'), ((26369, 26398), 'numpy.random.rand', 'np.random.rand', (['n_atoms', 'ndim'], {}), '(n_atoms, ndim)\n', (26383, 26398), True, 'import numpy as np\n'), ((26428, 26460), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n_atoms'], {}), '(0, 2, n_atoms)\n', (26445, 26460), True, 'import numpy as np\n'), ((27332, 27353), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (27346, 27353), True, 'import numpy as np\n'), ((27388, 27409), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (27402, 27409), True, 'import numpy as np\n'), ((27938, 27967), 'numpy.random.rand', 'np.random.rand', (['N_atoms', 'ndim'], {}), '(N_atoms, ndim)\n', (27952, 27967), True, 'import numpy as np\n'), ((28366, 28387), 'numpy.random.rand', 'np.random.rand', (['(5)', '(10)'], {}), '(5, 10)\n', (28380, 28387), True, 'import numpy as np\n'), ((29006, 29055), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'max_atoms', 'dimensions'], {}), '(batch_size, max_atoms, dimensions)\n', (29020, 29055), True, 'import numpy as np\n'), ((29699, 29733), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length'], {}), '(batch_size, length)\n', (29713, 29733), True, 'import numpy as np\n'), ((29768, 29802), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length'], {}), '(batch_size, length)\n', (29782, 29802), True, 'import numpy as np\n'), ((30636, 30651), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (30643, 30651), True, 'import numpy as np\n'), ((30686, 30701), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (30693, 30701), True, 'import numpy as np\n'), ((30975, 31009), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length'], {}), '(batch_size, length)\n', (30989, 31009), True, 'import numpy as np\n'), ((31044, 31078), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'length'], {}), '(batch_size, length)\n', (31058, 31078), True, 'import numpy as np\n'), ((31817, 31857), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'max_atoms', '(4)'], {}), '(batch_size, max_atoms, 4)\n', (31831, 31857), True, 'import numpy as np\n'), ((32215, 32252), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 100, 50)'}), '(size=(10, 100, 50))\n', (32232, 32252), True, 'import numpy as np\n'), ((32285, 32326), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 100, 5, 100)'}), '(size=(10, 100, 5, 100))\n', (32302, 32326), True, 'import numpy as np\n'), ((33166, 
33203), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 100, 50)'}), '(size=(10, 100, 50))\n', (33183, 33203), True, 'import numpy as np\n'), ((33236, 33277), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(10, 100, 5, 100)'}), '(size=(10, 100, 5, 100))\n', (33253, 33277), True, 'import numpy as np\n'), ((33978, 34002), 'numpy.random.rand', 'np.random.rand', (['n_logits'], {}), '(n_logits)\n', (33992, 34002), True, 'import numpy as np\n'), ((34037, 34061), 'numpy.random.rand', 'np.random.rand', (['n_labels'], {}), '(n_labels)\n', (34051, 34061), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from pathlib import Path
import pandas as pd
from numpy import around
if __name__ == "__main__":
# Harden's PPG is from 2018-19 season
# Bryant's PPG is from 2005-06 season
# Jordan's PPG is from 1986-87 season
per_game_df = pd.read_csv(Path('../data/compare_players_per_game.csv'))
per_48_df = pd.read_csv(Path('../data/compare_players_per_48.csv'))
per_100_df = pd.read_csv(Path('../data/compare_players_per_100_poss.csv'))
avg_TS_for_2018_19_season = 0.560 # source: https://www.basketball-reference.com/leagues/NBA_2019.html#all_misc_stats
avg_TS_for_2005_06_season = 0.536 # source: https://www.basketball-reference.com/leagues/NBA_2006.html#all_misc_stats
avg_TS_for_1986_87_season = 0.538 # source: https://www.basketball-reference.com/leagues/NBA_1987.html#all_misc_stats
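    # Note (added for clarity; these are the standard Basketball-Reference definitions):
    #   TS%  = PTS / (2 * (FGA + 0.44 * FTA))   -- true shooting percentage
    #   eFG% = (FGM + 0.5 * 3PM) / FGA          -- effective field goal percentage
    # The CSV files already contain these columns, so they are read below rather than recomputed.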
# per game
per_game_harden = per_game_df[per_game_df['Player'] == '<NAME>']
per_game_bryant = per_game_df[per_game_df['Player'] == '<NAME>']
per_game_jordan = per_game_df[per_game_df['Player'] == '<NAME>']
harden_ppg = per_game_harden['PTS'].values[0]
bryant_ppg = per_game_bryant['PTS'].values[0]
jordan_ppg = per_game_jordan['PTS'].values[0]
# shooting stats
harden_efg = per_game_harden['eFG%'].values[0]
bryant_efg = per_game_bryant['eFG%'].values[0]
jordan_efg = per_game_jordan['eFG%'].values[0]
harden_ts = per_game_harden['TS%'].values[0]
bryant_ts = per_game_bryant['TS%'].values[0]
jordan_ts = per_game_jordan['TS%'].values[0]
# number of games
harden_g = per_game_harden['G'].values[0]
bryant_g = per_game_bryant['G'].values[0]
jordan_g = per_game_jordan['G'].values[0]
# minutes per game
harden_mpg = per_game_harden['MP'].values[0]
bryant_mpg = per_game_bryant['MP'].values[0]
jordan_mpg = per_game_jordan['MP'].values[0]
# per 48
per_48_harden = per_48_df[per_48_df['Player'] == '<NAME>']
per_48_bryant = per_48_df[per_48_df['Player'] == '<NAME>']
per_48_jordan = per_48_df[per_48_df['Player'] == '<NAME>']
harden_pp48 = per_48_harden['PTS'].values[0]
bryant_pp48 = per_48_bryant['PTS'].values[0]
jordan_pp48 = per_48_jordan['PTS'].values[0]
# per 100
per_100_harden = per_100_df[per_100_df['Player'] == '<NAME>']
per_100_bryant = per_100_df[per_100_df['Player'] == '<NAME>']
per_100_jordan = per_100_df[per_100_df['Player'] == '<NAME>']
harden_pp100 = per_100_harden['PTS'].values[0]
bryant_pp100 = per_100_bryant['PTS'].values[0]
jordan_pp100 = per_100_jordan['PTS'].values[0]
print('<NAME> in 2018-19: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(harden_g, harden_ppg, harden_efg, harden_ts, harden_mpg))
    print('He was {} more efficient than the average player was in that season'
.format(around(harden_ts - avg_TS_for_2018_19_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(harden_pp48, harden_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('<NAME> in 2005-06: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(bryant_g, bryant_ppg, bryant_efg, bryant_ts, bryant_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(bryant_ts - avg_TS_for_2005_06_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(bryant_pp48, bryant_pp100))
print('\n------------------------------------------------------------------------------------------\n')
print('<NAME> in 1986-87: {} games, {} PPG, {}eFG%, {}TS% in {} minutes per game'
.format(jordan_g, jordan_ppg, jordan_efg, jordan_ts, jordan_mpg))
print('He was {} more efficient than the average player was in that season'
.format(around(jordan_ts - avg_TS_for_1986_87_season, 3)))
print('In the same season, he had {} Points per 48 minutes, and {} Points per 100 possessions'
.format(jordan_pp48, jordan_pp100))
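    # --- Illustrative consistency check (added sketch; not part of the original analysis) ---
    # Per-48 scoring is points scaled to 48 minutes, so it should roughly satisfy
    # PTS per 48 ~= PPG / MPG * 48 for each player.
    print('\nRough per-48 check (PPG / MPG * 48): Harden {}, Bryant {}, Jordan {}'
          .format(around(harden_ppg / harden_mpg * 48, 1),
                  around(bryant_ppg / bryant_mpg * 48, 1),
                  around(jordan_ppg / jordan_mpg * 48, 1)))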
|
[
"numpy.around",
"pathlib.Path"
] |
[((303, 347), 'pathlib.Path', 'Path', (['"""../data/compare_players_per_game.csv"""'], {}), "('../data/compare_players_per_game.csv')\n", (307, 347), False, 'from pathlib import Path\n'), ((377, 419), 'pathlib.Path', 'Path', (['"""../data/compare_players_per_48.csv"""'], {}), "('../data/compare_players_per_48.csv')\n", (381, 419), False, 'from pathlib import Path\n'), ((450, 498), 'pathlib.Path', 'Path', (['"""../data/compare_players_per_100_poss.csv"""'], {}), "('../data/compare_players_per_100_poss.csv')\n", (454, 498), False, 'from pathlib import Path\n'), ((2895, 2943), 'numpy.around', 'around', (['(harden_ts - avg_TS_for_2018_19_season)', '(3)'], {}), '(harden_ts - avg_TS_for_2018_19_season, 3)\n', (2901, 2943), False, 'from numpy import around\n'), ((3492, 3540), 'numpy.around', 'around', (['(bryant_ts - avg_TS_for_2005_06_season)', '(3)'], {}), '(bryant_ts - avg_TS_for_2005_06_season, 3)\n', (3498, 3540), False, 'from numpy import around\n'), ((4089, 4137), 'numpy.around', 'around', (['(jordan_ts - avg_TS_for_1986_87_season)', '(3)'], {}), '(jordan_ts - avg_TS_for_1986_87_season, 3)\n', (4095, 4137), False, 'from numpy import around\n')]
|
# A Rapid Proof of Concept for the eDensiometer
# Copyright 2018, <NAME>. All Rights Reserved. Created with contributions from <NAME>.
# Imports
from PIL import Image
from pprint import pprint
import numpy as np
import time as time_
def millis(): # from https://stackoverflow.com/questions/5998245/get-current-time-in-milliseconds-in-python/6000198#6000198
return int(round(time_.time() * 1000))
start = millis()
# Constants
# BRIGHT_CUTOFF = 175
RED_CUTOFF = 200
GREEN_CUTOFF = 150
BLUE_CUTOFF = 200
# Pull from test.jpg image in local directory
temp = np.asarray(Image.open('test.jpg'))
print(temp.shape)
# Variable Initialization
result = np.zeros((temp.shape[0], temp.shape[1], temp.shape[2]))
temp_bright = np.zeros((temp.shape[0], temp.shape[1]))
count_total = 0
count_open = 0
# Cycle through image
for row in range(0, temp.shape[0]):
for element in range(0, temp.shape[1]):
count_total += 1
temp_bright[row, element] = (int(temp[row][element][0]) + int(temp[row][element][1]) + int(temp[row][element][2]))/3
# bright = temp_bright[row][element] > BRIGHT_CUTOFF
red_enough = temp[row][element][0] > RED_CUTOFF
green_enough = temp[row][element][1] > GREEN_CUTOFF
blue_enough = temp[row][element][2] > BLUE_CUTOFF
if red_enough and green_enough and blue_enough:
# print(temp[row, element])
count_open += 1
result[row, element] = [255, 255, 255]
# Save filtered image as final.jpg
final = Image.fromarray(result.astype('uint8'), 'RGB')
final.save('final.jpg')
# Return/Print Percent Coverage
percent_open = count_open/count_total
percent_cover = 1 - percent_open
end = millis()
print("Percent Open: " + str(percent_open))
print("Percent Cover: " + str(percent_cover))
runtime = end-start
print("Runtime in MS: " + str(runtime))
|
[
"numpy.zeros",
"PIL.Image.open",
"time.time"
] |
[((653, 708), 'numpy.zeros', 'np.zeros', (['(temp.shape[0], temp.shape[1], temp.shape[2])'], {}), '((temp.shape[0], temp.shape[1], temp.shape[2]))\n', (661, 708), True, 'import numpy as np\n'), ((723, 763), 'numpy.zeros', 'np.zeros', (['(temp.shape[0], temp.shape[1])'], {}), '((temp.shape[0], temp.shape[1]))\n', (731, 763), True, 'import numpy as np\n'), ((575, 597), 'PIL.Image.open', 'Image.open', (['"""test.jpg"""'], {}), "('test.jpg')\n", (585, 597), False, 'from PIL import Image\n'), ((380, 392), 'time.time', 'time_.time', ([], {}), '()\n', (390, 392), True, 'import time as time_\n')]
|
import numpy as nm
from sfepy.linalg import dot_sequences
from sfepy.terms.terms import Term, terms
class DivGradTerm(Term):
r"""
Diffusion term.
:Definition:
.. math::
\int_{\Omega} \nu\ \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nu\ \nabla \ul{u} : \nabla \ul{w} \\
\int_{\Omega} \nabla \ul{v} : \nabla \ul{u} \mbox{ , }
\int_{\Omega} \nabla \ul{u} : \nabla \ul{w}
:Arguments 1:
- material : :math:`\nu` (viscosity, optional)
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
:Arguments 2:
- material : :math:`\nu` (viscosity, optional)
- parameter_1 : :math:`\ul{u}`
- parameter_2 : :math:`\ul{w}`
"""
name = 'dw_div_grad'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'parameter_1', 'parameter_2'))
arg_shapes = {'opt_material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D', 'parameter_1' : 'D', 'parameter_2' : 'D'}
modes = ('weak', 'eval')
function = staticmethod(terms.term_ns_asm_div_grad)
def d_div_grad(self, out, grad1, grad2, mat, vg, fmode):
sh = grad1.shape
g1 = grad1.reshape((sh[0], sh[1], sh[2] * sh[3]))
g2 = grad2.reshape((sh[0], sh[1], sh[2] * sh[3]))
aux = mat * dot_sequences(g1[..., None], g2, 'ATB')[..., None]
if fmode == 2:
out[:] = aux
status = 0
else:
status = vg.integrate(out, aux, fmode)
return status
def get_fargs(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if mat is None:
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
mat = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2))
sh = grad.shape
grad = grad.reshape((sh[0], sh[1], sh[2] * sh[3], 1))
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, mat, vg, fmode
elif mode == 'eval':
grad1 = self.get(virtual, 'grad')
grad2 = self.get(state, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad1, grad2, mat, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, mat, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(state)
return (n_el, 1, 1, 1), state.dtype
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.term_ns_asm_div_grad
else:
self.function = self.d_div_grad
class ConvectTerm(Term):
r"""
Nonlinear convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{u} \cdot \nabla) \ul{u}) \cdot \ul{v}
:Arguments:
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_convect'
arg_types = ('virtual', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'state' : 'D'}
function = staticmethod(terms.term_ns_asm_convect)
def get_fargs(self, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
val_qp = self.get(state, 'val')
fmode = diff_var is not None
return grad, val_qp, vg, fmode
class LinearConvectTerm(Term):
r"""
Linearized convective term.
:Definition:
.. math::
\int_{\Omega} ((\ul{b} \cdot \nabla) \ul{u}) \cdot \ul{v}
.. math::
((\ul{b} \cdot \nabla) \ul{u})|_{qp}
:Arguments:
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_lin_convect'
arg_types = ('virtual', 'parameter', 'state')
arg_shapes = {'virtual' : ('D', 'state'), 'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_lin_convect)
def get_fargs(self, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if mode == 'weak':
if diff_var is None:
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return grad, val_qp, vg, fmode
elif mode == 'qp':
grad = self.get(state, 'grad').transpose((0, 1, 3, 2)).copy()
fmode = 2
return grad, val_qp, vg, fmode
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
class StokesTerm(Term):
r"""
Stokes problem coupling term. Corresponds to weak forms of gradient and
divergence terms. Can be evaluated.
:Definition:
.. math::
\int_{\Omega} p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} q\ \nabla \cdot \ul{u}
\mbox{ or }
\int_{\Omega} c\ p\ \nabla \cdot \ul{v} \mbox{ , }
\int_{\Omega} c\ q\ \nabla \cdot \ul{u}
:Arguments 1:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
- state : :math:`p`
:Arguments 2:
- material : :math:`c` (optional)
- state : :math:`\ul{u}`
- virtual : :math:`q`
:Arguments 3:
- material : :math:`c` (optional)
- parameter_v : :math:`\ul{u}`
- parameter_s : :math:`p`
"""
name = 'dw_stokes'
arg_types = (('opt_material', 'virtual', 'state'),
('opt_material', 'state', 'virtual'),
('opt_material', 'parameter_v', 'parameter_s'))
arg_shapes = [{'opt_material' : '1, 1',
'virtual/grad' : ('D', None), 'state/grad' : 1,
'virtual/div' : (1, None), 'state/div' : 'D',
'parameter_v' : 'D', 'parameter_s' : 1},
{'opt_material' : None}]
modes = ('grad', 'div', 'eval')
@staticmethod
def d_eval(out, coef, vec_qp, div, vvg):
out_qp = coef * vec_qp * div
status = vvg.integrate(out, out_qp)
return status
def get_fargs(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
if self.mode == 'grad':
qp_var, qp_name = svar, 'val'
else:
qp_var, qp_name = vvar, 'div'
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
if coef is None:
coef = nm.ones((1, n_qp, 1, 1), dtype=nm.float64)
if mode == 'weak':
vvg, _ = self.get_mapping(vvar)
svg, _ = self.get_mapping(svar)
if diff_var is None:
val_qp = self.get(qp_var, qp_name)
fmode = 0
else:
val_qp = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return coef, val_qp, svg, vvg, fmode
elif mode == 'eval':
vvg, _ = self.get_mapping(vvar)
div = self.get(vvar, 'div')
vec_qp = self.get(svar, 'val')
return coef, vec_qp, div, vvg
else:
raise ValueError('unsupported evaluation mode in %s! (%s)'
% (self.name, mode))
def get_eval_shape(self, coef, vvar, svar,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(vvar)
return (n_el, 1, 1, 1), vvar.dtype
def set_arg_types(self):
self.function = {
'grad' : terms.dw_grad,
'div' : terms.dw_div,
'eval' : self.d_eval,
}[self.mode]
class GradTerm(Term):
r"""
Evaluate gradient of a scalar or vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla p \mbox{ or } \int_{\Omega} \nabla \ul{w}
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} \nabla p /
\int_{T_K} 1 \mbox{ or } \int_{T_K} \nabla \ul{w} /
\int_{T_K} 1
.. math::
(\nabla p)|_{qp} \mbox{ or } \nabla \ul{w}|_{qp}
:Arguments:
- parameter : :math:`p` or :math:`\ul{w}`
"""
name = 'ev_grad'
arg_types = ('parameter',)
arg_shapes = [{'parameter' : 1}, {'parameter' : 'D'}]
@staticmethod
def function(out, grad, vg, fmode):
if fmode == 2:
out[:] = grad
status = 0
else:
status = vg.integrate(out, grad, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
grad = self.get(parameter, 'grad')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return grad, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, dim, n_c), parameter.dtype
class DivTerm(Term):
r"""
Evaluate divergence of a vector field.
Supports 'eval', 'el_avg' and 'qp' evaluation modes.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{u}
.. math::
\mbox{vector for } K \from \Ical_h:
\int_{T_K} \nabla \cdot \ul{u} / \int_{T_K} 1
.. math::
(\nabla \cdot \ul{u})|_{qp}
:Arguments:
- parameter : :math:`\ul{u}`
"""
name = 'ev_div'
arg_types = ('parameter',)
arg_shapes = {'parameter' : 'D'}
@staticmethod
def function(out, div, vg, fmode):
if fmode == 2:
out[:] = div
status = 0
else:
status = vg.integrate(out, div, fmode)
return status
def get_fargs(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(parameter)
div = self.get(parameter, 'div')
fmode = {'eval' : 0, 'el_avg' : 1, 'qp' : 2}.get(mode, 1)
return div, vg, fmode
def get_eval_shape(self, parameter,
mode=None, term_mode=None, diff_var=None, **kwargs):
n_el, n_qp, dim, n_en, n_c = self.get_data_shape(parameter)
if mode != 'qp':
n_qp = 1
return (n_el, n_qp, 1, 1), parameter.dtype
class DivOperatorTerm(Term):
r"""
Weighted divergence term of a test function.
:Definition:
.. math::
\int_{\Omega} \nabla \cdot \ul{v} \mbox { or } \int_{\Omega} c \nabla
\cdot \ul{v}
:Arguments:
- material : :math:`c` (optional)
- virtual : :math:`\ul{v}`
"""
name = 'dw_div'
arg_types = ('opt_material', 'virtual')
arg_shapes = [{'opt_material' : '1, 1', 'virtual' : ('D', None)},
{'opt_material' : None}]
@staticmethod
def function(out, mat, vg):
div_bf = vg.bfg
n_el, n_qp, dim, n_ep = div_bf.shape
div_bf = div_bf.reshape((n_el, n_qp, dim * n_ep, 1))
div_bf = nm.ascontiguousarray(div_bf)
if mat is not None:
status = vg.integrate(out, mat * div_bf)
else:
status = vg.integrate(out, div_bf)
return status
def get_fargs(self, mat, virtual,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(virtual)
return mat, vg
class GradDivStabilizationTerm(Term):
r"""
Grad-div stabilization term ( :math:`\gamma` is a global stabilization
parameter).
:Definition:
.. math::
\gamma \int_{\Omega} (\nabla\cdot\ul{u}) \cdot (\nabla\cdot\ul{v})
:Arguments:
- material : :math:`\gamma`
- virtual : :math:`\ul{v}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_grad_div'
arg_types = ('material', 'virtual', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'state' : 'D'}
function = staticmethod(terms.dw_st_grad_div)
def get_fargs(self, gamma, virtual, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg, _ = self.get_mapping(state)
if diff_var is None:
div = self.get(state, 'div')
fmode = 0
else:
div = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return div, gamma, vg, fmode
from sfepy.terms.terms_diffusion import LaplaceTerm
class PSPGPStabilizationTerm(LaplaceTerm):
r"""
PSPG stabilization term, pressure part ( :math:`\tau` is a local
stabilization parameter), alias to Laplace term dw_laplace.
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ \nabla p \cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- state : :math:`p`
"""
name = 'dw_st_pspg_p'
class PSPGCStabilizationTerm(Term):
r"""
PSPG stabilization term, convective part ( :math:`\tau` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \tau_K\ ((\ul{b} \cdot \nabla) \ul{u})
\cdot \nabla q
:Arguments:
- material : :math:`\tau_K`
- virtual : :math:`q`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_pspg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : (1, None),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_pspg_c)
def get_fargs(self, tau, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
sap, svg = self.get_approximation(virtual)
vap, vvg = self.get_approximation(state)
val_qp = self.get(parameter, 'val')
conn = vap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), tau, svg, vvg, conn, fmode
class SUPGPStabilizationTerm(Term):
r"""
SUPG stabilization term, pressure part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ \nabla p\cdot ((\ul{b} \cdot
\nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`p`
"""
name = 'dw_st_supg_p'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', None),
'parameter' : 'D', 'state' : 1}
function = staticmethod(terms.dw_st_supg_p)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
vvg, _ = self.get_mapping(virtual)
svg, _ = self.get_mapping(state)
val_qp = self.get(parameter, 'val')
if diff_var is None:
grad = self.get(state, 'grad')
fmode = 0
else:
grad = nm.array([0], ndmin=4, dtype=nm.float64)
fmode = 1
return val_qp, grad, delta, vvg, svg, fmode
class SUPGCStabilizationTerm(Term):
r"""
SUPG stabilization term, convective part ( :math:`\delta` is a local
stabilization parameter).
:Definition:
.. math::
\sum_{K \in \Ical_h}\int_{T_K} \delta_K\ ((\ul{b} \cdot \nabla)
\ul{u})\cdot ((\ul{b} \cdot \nabla) \ul{v})
:Arguments:
- material : :math:`\delta_K`
- virtual : :math:`\ul{v}`
- parameter : :math:`\ul{b}`
- state : :math:`\ul{u}`
"""
name = 'dw_st_supg_c'
arg_types = ('material', 'virtual', 'parameter', 'state')
arg_shapes = {'material' : '1, 1', 'virtual' : ('D', 'state'),
'parameter' : 'D', 'state' : 'D'}
function = staticmethod(terms.dw_st_supg_c)
def get_fargs(self, delta, virtual, parameter, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
ap, vg = self.get_approximation(virtual)
val_qp = self.get(parameter, 'val')
conn = ap.get_connectivity(self.region, self.integration)
if diff_var is None:
fmode = 0
else:
fmode = 1
return val_qp, state(), delta, vg, conn, fmode
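# Usage sketch (added for illustration; it follows the pattern of SfePy's declarative
# Navier-Stokes examples -- the material, variable, integral and region names below
# ('fluid', 'v', 'u', 'p', 'q', 'i', 'Omega') are assumptions, not part of this file):
#
# equations = {
#     'balance':
#         """+ dw_div_grad.i.Omega( fluid.viscosity, v, u )
#            + dw_convect.i.Omega( v, u )
#            - dw_stokes.i.Omega( v, p ) = 0""",
#     'incompressibility':
#         """dw_stokes.i.Omega( u, q ) = 0""",
# }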
|
[
"numpy.array",
"sfepy.linalg.dot_sequences",
"numpy.ones",
"numpy.ascontiguousarray"
] |
[((11757, 11785), 'numpy.ascontiguousarray', 'nm.ascontiguousarray', (['div_bf'], {}), '(div_bf)\n', (11777, 11785), True, 'import numpy as nm\n'), ((1821, 1863), 'numpy.ones', 'nm.ones', (['(1, n_qp, 1, 1)'], {'dtype': 'nm.float64'}), '((1, n_qp, 1, 1), dtype=nm.float64)\n', (1828, 1863), True, 'import numpy as nm\n'), ((7094, 7136), 'numpy.ones', 'nm.ones', (['(1, n_qp, 1, 1)'], {'dtype': 'nm.float64'}), '((1, n_qp, 1, 1), dtype=nm.float64)\n', (7101, 7136), True, 'import numpy as nm\n'), ((13021, 13061), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (13029, 13061), True, 'import numpy as nm\n'), ((15893, 15933), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (15901, 15933), True, 'import numpy as nm\n'), ((1341, 1380), 'sfepy.linalg.dot_sequences', 'dot_sequences', (['g1[..., None]', 'g2', '"""ATB"""'], {}), "(g1[..., None], g2, 'ATB')\n", (1354, 1380), False, 'from sfepy.linalg import dot_sequences\n'), ((2166, 2206), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (2174, 2206), True, 'import numpy as nm\n'), ((4832, 4872), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (4840, 4872), True, 'import numpy as nm\n'), ((7408, 7448), 'numpy.array', 'nm.array', (['[0]'], {'ndmin': '(4)', 'dtype': 'nm.float64'}), '([0], ndmin=4, dtype=nm.float64)\n', (7416, 7448), True, 'import numpy as nm\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def plot_loss(model, n_iter):
plt.figure()
plt.plot(model.trainloss, 'b-', model.validloss, 'r-')
plt.xlim(0, n_iter)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.title('learning curve')
plt.legend(['training loss', 'validation loss'])
plt.show()
def plot_F1(model, n_iter):
plt.figure()
plt.plot(model.trainF1, 'b-', model.validF1, 'r-')
plt.xlim(0, n_iter)
plt.xlabel('iteration')
plt.ylabel('F1 score')
plt.title('F1 metric curve')
plt.legend(['training F1', 'validation F1'], loc='lower right')
plt.show()
def confusion_matrix(threshold, y_hat, y_target):
    # Task 2: implement this function. It should return the four values TP, FP, FN, TN.
    # y_hat = (y_hat > threshold).astype(np.int32)  # predictions above the threshold become 1, otherwise 0
    # Hint: compare the values in y_hat and y_target to count True Positives, False Positives, etc.
tmp = np.hstack((y_target, y_hat > threshold))
pass
# return TP, FP, FN, TN
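
# --- Reference sketch (added for illustration; the exercise stub above is left unchanged) ---
# One direct way to count the four confusion-matrix entries from a score vector and
# binary targets; the function name below is illustrative, not part of the assignment.
def confusion_matrix_reference(threshold, y_hat, y_target):
    pred = (y_hat > threshold).astype(np.int32)   # scores above the threshold become 1, otherwise 0
    TP = np.sum((pred == 1) & (y_target == 1))
    FP = np.sum((pred == 1) & (y_target == 0))
    FN = np.sum((pred == 0) & (y_target == 1))
    TN = np.sum((pred == 0) & (y_target == 0))
    return TP, FP, FN, TN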
|
[
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((111, 123), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (121, 123), True, 'import matplotlib.pyplot as plt\n'), ((128, 182), 'matplotlib.pyplot.plot', 'plt.plot', (['model.trainloss', '"""b-"""', 'model.validloss', '"""r-"""'], {}), "(model.trainloss, 'b-', model.validloss, 'r-')\n", (136, 182), True, 'import matplotlib.pyplot as plt\n'), ((187, 206), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'n_iter'], {}), '(0, n_iter)\n', (195, 206), True, 'import matplotlib.pyplot as plt\n'), ((211, 234), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (221, 234), True, 'import matplotlib.pyplot as plt\n'), ((239, 257), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (249, 257), True, 'import matplotlib.pyplot as plt\n'), ((262, 289), 'matplotlib.pyplot.title', 'plt.title', (['"""learning curve"""'], {}), "('learning curve')\n", (271, 289), True, 'import matplotlib.pyplot as plt\n'), ((294, 342), 'matplotlib.pyplot.legend', 'plt.legend', (["['training loss', 'validation loss']"], {}), "(['training loss', 'validation loss'])\n", (304, 342), True, 'import matplotlib.pyplot as plt\n'), ((347, 357), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (355, 357), True, 'import matplotlib.pyplot as plt\n'), ((400, 412), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (410, 412), True, 'import matplotlib.pyplot as plt\n'), ((417, 467), 'matplotlib.pyplot.plot', 'plt.plot', (['model.trainF1', '"""b-"""', 'model.validF1', '"""r-"""'], {}), "(model.trainF1, 'b-', model.validF1, 'r-')\n", (425, 467), True, 'import matplotlib.pyplot as plt\n'), ((472, 491), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'n_iter'], {}), '(0, n_iter)\n', (480, 491), True, 'import matplotlib.pyplot as plt\n'), ((496, 519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration"""'], {}), "('iteration')\n", (506, 519), True, 'import matplotlib.pyplot as plt\n'), ((524, 546), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 score"""'], {}), "('F1 score')\n", (534, 546), True, 'import matplotlib.pyplot as plt\n'), ((551, 579), 'matplotlib.pyplot.title', 'plt.title', (['"""F1 metric curve"""'], {}), "('F1 metric curve')\n", (560, 579), True, 'import matplotlib.pyplot as plt\n'), ((584, 647), 'matplotlib.pyplot.legend', 'plt.legend', (["['training F1', 'validation F1']"], {'loc': '"""lower right"""'}), "(['training F1', 'validation F1'], loc='lower right')\n", (594, 647), True, 'import matplotlib.pyplot as plt\n'), ((652, 662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (660, 662), True, 'import matplotlib.pyplot as plt\n'), ((911, 951), 'numpy.hstack', 'np.hstack', (['(y_target, y_hat > threshold)'], {}), '((y_target, y_hat > threshold))\n', (920, 951), True, 'import numpy as np\n')]
|
import pytheia as pt
import os
import numpy as np
def test_track_set_descriptor_read_write():
recon = pt.sfm.Reconstruction()
view_id1 = recon.AddView("0",0.0)
m_view1 = recon.MutableView(view_id1)
m_view1.IsEstimated = True
view_id2 = recon.AddView("1",1.0)
m_view2 = recon.MutableView(view_id2)
m_view2.IsEstimated = True
t_id = recon.AddTrack()
m_track = recon.MutableTrack(t_id)
m_track.AddView(view_id1)
m_track.AddView(view_id2)
m_track.IsEstimated = True
desc = np.asarray([100,200,300,400])
m_track.SetReferenceDescriptor(desc)
assert (m_track.ReferenceDescriptor() == desc).all()
# read write
pt.io.WriteReconstruction(recon,"test")
recon_loaded = pt.io.ReadReconstruction("test")[1]
s_track = recon_loaded.Track(t_id)
assert (s_track.ReferenceDescriptor() == desc).all()
os.remove("test")
if __name__ == "__main__":
test_track_set_descriptor_read_write()
|
[
"pytheia.io.ReadReconstruction",
"numpy.asarray",
"pytheia.sfm.Reconstruction",
"pytheia.io.WriteReconstruction",
"os.remove"
] |
[((107, 130), 'pytheia.sfm.Reconstruction', 'pt.sfm.Reconstruction', ([], {}), '()\n', (128, 130), True, 'import pytheia as pt\n'), ((523, 555), 'numpy.asarray', 'np.asarray', (['[100, 200, 300, 400]'], {}), '([100, 200, 300, 400])\n', (533, 555), True, 'import numpy as np\n'), ((678, 718), 'pytheia.io.WriteReconstruction', 'pt.io.WriteReconstruction', (['recon', '"""test"""'], {}), "(recon, 'test')\n", (703, 718), True, 'import pytheia as pt\n'), ((875, 892), 'os.remove', 'os.remove', (['"""test"""'], {}), "('test')\n", (884, 892), False, 'import os\n'), ((737, 769), 'pytheia.io.ReadReconstruction', 'pt.io.ReadReconstruction', (['"""test"""'], {}), "('test')\n", (761, 769), True, 'import pytheia as pt\n')]
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>.
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft, Delft, Netherlands
"""
from numpy import sin, cos, pi
from objects.CSCG._3d.exact_solutions.status.incompressible_Navier_Stokes.base import incompressible_NavierStokes_Base
from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField
# noinspection PyAbstractClass
class SinCosRebholz_Conservation(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCosRebholz_Conservation, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def fx(self, t, x, y, z): return 0 * x # can not name it by _fx_
def fy(self, t, x, y, z): return 0 * x # can not name it by _fy_
def fz(self, t, x, y, z): return 0 * x # can not name it by _fz_
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
class SinCosRebholz_Dissipation(incompressible_NavierStokes_Base):
"""
    The sin cos test case for the dissipation, see Section 5.3 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es, nu=1):
super(SinCosRebholz_Dissipation, self).__init__(es, nu)
def u(self, t, x, y, z): return (2 - t) * cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return - 2 * pi * (2 - t) * sin(2 * pi * z)
def u_t(self, t, x, y, z): return - cos(2 * pi * z)
def u_xx(self, t, x, y, z): return 0 * x
def u_yy(self, t, x, y, z): return 0 * y
def u_zz(self, t, x, y, z): return -4 * pi ** 2 * (2 - t) * cos(2 * pi * z)
def v(self, t, x, y, z): return (1 + t) * sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * (1 + t) * cos(2 * pi * z)
def v_t(self, t, x, y, z): return sin(2 * pi * z)
def v_xx(self, t, x, y, z): return 0 * x
def v_yy(self, t, x, y, z): return 0 * x
def v_zz(self, t, x, y, z): return - 4 * pi ** 2 * (1 + t) * sin(2 * pi * z)
def w(self, t, x, y, z): return (1 - t) * sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * (1 - t) * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def w_t(self, t, x, y, z): return - sin(2 * pi * x)
def w_xx(self, t, x, y, z): return - 4 * pi ** 2 * (1 - t) * sin(2 * pi * x)
def w_yy(self, t, x, y, z): return 0 * x
def w_zz(self, t, x, y, z): return 0 * x
def p(self, t, x, y, z): return sin(2 * pi * (x + y + z + t))
def p_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_y(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
class SinCos_Modified_Dissipation(incompressible_NavierStokes_Base):
"""A modified case that the solution along t is not linear."""
def __init__(self, es, nu=1):
super(SinCos_Modified_Dissipation, self).__init__(es, nu)
def u(self, t, x, y, z): return (1 - sin(2*pi*t)) * cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return - 2 * pi * (1 - sin(2*pi*t)) * sin(2 * pi * z)
def u_t(self, t, x, y, z): return - 2*pi*cos(2*pi*t) * cos(2 * pi * z)
def u_xx(self, t, x, y, z): return 0 * x
def u_yy(self, t, x, y, z): return 0 * y
def u_zz(self, t, x, y, z): return -4 * pi ** 2 * (1 - sin(2*pi*t)) * cos(2 * pi * z)
def v(self, t, x, y, z): return (1 + cos(2*pi*t)) * sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * (1 + cos(2*pi*t)) * cos(2 * pi * z)
def v_t(self, t, x, y, z): return -2*pi*sin(2*pi*t) * sin(2 * pi * z)
def v_xx(self, t, x, y, z): return 0 * x
def v_yy(self, t, x, y, z): return 0 * x
def v_zz(self, t, x, y, z): return - 4 * pi ** 2 * (1 + cos(2*pi*t)) * sin(2 * pi * z)
def w(self, t, x, y, z): return (1 - sin(2*pi*t)) * sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * (1 - sin(2*pi*t)) * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
def w_t(self, t, x, y, z): return - 2*pi*cos(2*pi*t) * sin(2 * pi * x)
def w_xx(self, t, x, y, z): return - 4 * pi ** 2 * (1 - sin(2*pi*t)) * sin(2 * pi * x)
def w_yy(self, t, x, y, z): return 0 * x
def w_zz(self, t, x, y, z): return 0 * x
def p(self, t, x, y, z): return sin(2 * pi * (x + y + z + t))
def p_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_y(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
def p_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * (x + y + z + t))
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# varphi(t,x,y,z) = t * sin(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fx(self, t, x, y, z): return 2 * pi * t * cos(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fy(self, t, x, y, z): return 2 * pi * t * sin(2 * pi * x) * cos(2 * pi * y) * sin(2 * pi * z)
def fz(self, t, x, y, z): return 2 * pi * t * sin(2 * pi * x) * sin(2 * pi * y) * cos(2 * pi * z)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force1(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force1, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# varphi(t,x,y,z) = sin(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fx(self, t, x, y, z): return 2 * pi * cos(2 * pi * x) * sin(2 * pi * y) * sin(2 * pi * z)
def fy(self, t, x, y, z): return 2 * pi * sin(2 * pi * x) * cos(2 * pi * y) * sin(2 * pi * z)
def fz(self, t, x, y, z): return 2 * pi * sin(2 * pi * x) * sin(2 * pi * y) * cos(2 * pi * z)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force_POLYNOMIALS(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force_POLYNOMIALS, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# phi(t,x,y,z) = t * (x**3/3 - x**2/2 + y**3/3 - y**2/2 + z**3/3 - z**2/2)
def fx(self, t, x, y, z): return t * x * (x-1)
def fy(self, t, x, y, z): return t * y * (y-1)
def fz(self, t, x, y, z): return t * z * (z-1)
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
# noinspection PyAbstractClass
class SinCos_Conservation_Conservative_Body_Force_CONSTANT(incompressible_NavierStokes_Base):
"""
The sin cos test case for the conservation, see Section 5.2 of paper:
[An Energy- and helicity-conserving finite element scheme for the Navier-Stokes
equations, <NAME>, 2007]
"""
def __init__(self, es):
super(SinCos_Conservation_Conservative_Body_Force_CONSTANT, self).__init__(es, 0)
@property
def valid_time(self):
return 'valid_only_at_its_first_instant'
def u(self, t, x, y, z): return cos(2 * pi * z)
def u_x(self, t, x, y, z): return 0 * x
def u_y(self, t, x, y, z): return 0 * x
def u_z(self, t, x, y, z): return -2 * pi * sin(2 * pi * z)
def v(self, t, x, y, z): return sin(2 * pi * z)
def v_x(self, t, x, y, z): return 0 * x
def v_y(self, t, x, y, z): return 0 * x
def v_z(self, t, x, y, z): return 2 * pi * cos(2 * pi * z)
def w(self, t, x, y, z): return sin(2 * pi * x)
def w_x(self, t, x, y, z): return 2 * pi * cos(2 * pi * x)
def w_y(self, t, x, y, z): return 0 * x
def w_z(self, t, x, y, z): return 0 * x
# phi(t,x,y,z) = x
def fx(self, t, x, y, z): return 1 + 0 * x * y * z
def fy(self, t, x, y, z): return 0 + 0 * x * y * z
def fz(self, t, x, y, z): return 0 + 0 * x * y * z
@property
def body_force(self):
"""This makes body force valid at all time instants."""
if self._bodyForce_ is None:
self._bodyForce_ = _3dCSCG_VectorField(self.mesh, (self.fx, self.fy, self.fz))
return self._bodyForce_
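# Added note (illustrative, not part of the original module): every SinCos velocity field
# above, e.g. (u, v, w) = (cos(2*pi*z), sin(2*pi*z), sin(2*pi*x)), is divergence-free by
# construction: u and v depend only on z (and t) and w only on x, so
# u_x + v_y + w_z = 0 identically, which is consistent with the incompressibility
# constraint of these Navier-Stokes test cases.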
|
[
"numpy.sin",
"objects.CSCG._3d.fields.vector.main._3dCSCG_VectorField",
"numpy.cos"
] |
[((918, 933), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (921, 933), False, 'from numpy import sin, cos, pi\n'), ((1126, 1141), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (1129, 1141), False, 'from numpy import sin, cos, pi\n'), ((1333, 1348), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (1336, 1348), False, 'from numpy import sin, cos, pi\n'), ((3086, 3101), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (3089, 3101), False, 'from numpy import sin, cos, pi\n'), ((3771, 3800), 'numpy.sin', 'sin', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (3774, 3800), False, 'from numpy import sin, cos, pi\n'), ((5833, 5862), 'numpy.sin', 'sin', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (5836, 5862), False, 'from numpy import sin, cos, pi\n'), ((6664, 6679), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (6667, 6679), False, 'from numpy import sin, cos, pi\n'), ((6872, 6887), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (6875, 6887), False, 'from numpy import sin, cos, pi\n'), ((7079, 7094), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (7082, 7094), False, 'from numpy import sin, cos, pi\n'), ((8473, 8488), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (8476, 8488), False, 'from numpy import sin, cos, pi\n'), ((8681, 8696), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (8684, 8696), False, 'from numpy import sin, cos, pi\n'), ((8888, 8903), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (8891, 8903), False, 'from numpy import sin, cos, pi\n'), ((10288, 10303), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (10291, 10303), False, 'from numpy import sin, cos, pi\n'), ((10496, 10511), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (10499, 10511), False, 'from numpy import sin, cos, pi\n'), ((10703, 10718), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (10706, 10718), False, 'from numpy import sin, cos, pi\n'), ((11962, 11977), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (11965, 11977), False, 'from numpy import sin, cos, pi\n'), ((12170, 12185), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (12173, 12185), False, 'from numpy import sin, cos, pi\n'), ((12377, 12392), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (12380, 12392), False, 'from numpy import sin, cos, pi\n'), ((1073, 1088), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (1076, 1088), False, 'from numpy import sin, cos, pi\n'), ((1280, 1295), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (1283, 1295), False, 'from numpy import sin, cos, pi\n'), ((1397, 1412), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (1400, 1412), False, 'from numpy import sin, cos, pi\n'), ((1888, 1947), 'objects.CSCG._3d.fields.vector.main._3dCSCG_VectorField', '_3dCSCG_VectorField', (['self.mesh', '(self.fx, self.fy, self.fz)'], {}), '(self.mesh, (self.fx, self.fy, self.fz))\n', (1907, 1947), False, 'from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField\n'), ((2408, 2423), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (2411, 2423), False, 'from numpy import sin, cos, pi\n'), ((2574, 2589), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (2577, 2589), False, 'from numpy import sin, cos, pi\n'), ((2631, 2646), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', 
(2634, 2646), False, 'from numpy import sin, cos, pi\n'), ((2804, 2819), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (2807, 2819), False, 'from numpy import sin, cos, pi\n'), ((2867, 2882), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (2870, 2882), False, 'from numpy import sin, cos, pi\n'), ((3031, 3046), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (3034, 3046), False, 'from numpy import sin, cos, pi\n'), ((3260, 3275), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (3263, 3275), False, 'from numpy import sin, cos, pi\n'), ((3323, 3338), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (3326, 3338), False, 'from numpy import sin, cos, pi\n'), ((3397, 3412), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (3400, 3412), False, 'from numpy import sin, cos, pi\n'), ((3544, 3559), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (3547, 3559), False, 'from numpy import sin, cos, pi\n'), ((3626, 3641), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (3629, 3641), False, 'from numpy import sin, cos, pi\n'), ((3849, 3878), 'numpy.cos', 'cos', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (3852, 3878), False, 'from numpy import sin, cos, pi\n'), ((3927, 3956), 'numpy.cos', 'cos', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (3930, 3956), False, 'from numpy import sin, cos, pi\n'), ((4005, 4034), 'numpy.cos', 'cos', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (4008, 4034), False, 'from numpy import sin, cos, pi\n'), ((4332, 4347), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (4335, 4347), False, 'from numpy import sin, cos, pi\n'), ((4508, 4523), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (4511, 4523), False, 'from numpy import sin, cos, pi\n'), ((4584, 4599), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (4587, 4599), False, 'from numpy import sin, cos, pi\n'), ((4767, 4782), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (4770, 4782), False, 'from numpy import sin, cos, pi\n'), ((4840, 4855), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (4843, 4855), False, 'from numpy import sin, cos, pi\n'), ((5014, 5029), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (5017, 5029), False, 'from numpy import sin, cos, pi\n'), ((5089, 5104), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (5092, 5104), False, 'from numpy import sin, cos, pi\n'), ((5273, 5288), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (5276, 5288), False, 'from numpy import sin, cos, pi\n'), ((5346, 5361), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (5349, 5361), False, 'from numpy import sin, cos, pi\n'), ((5430, 5445), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (5433, 5445), False, 'from numpy import sin, cos, pi\n'), ((5596, 5611), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (5599, 5611), False, 'from numpy import sin, cos, pi\n'), ((5688, 5703), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (5691, 5703), False, 'from numpy import sin, cos, pi\n'), ((5911, 5940), 'numpy.cos', 'cos', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (5914, 5940), False, 'from numpy import sin, cos, pi\n'), ((5989, 6018), 'numpy.cos', 'cos', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (5992, 6018), False, 'from numpy import 
sin, cos, pi\n'), ((6067, 6096), 'numpy.cos', 'cos', (['(2 * pi * (x + y + z + t))'], {}), '(2 * pi * (x + y + z + t))\n', (6070, 6096), False, 'from numpy import sin, cos, pi\n'), ((6819, 6834), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (6822, 6834), False, 'from numpy import sin, cos, pi\n'), ((7026, 7041), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (7029, 7041), False, 'from numpy import sin, cos, pi\n'), ((7143, 7158), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (7146, 7158), False, 'from numpy import sin, cos, pi\n'), ((7418, 7433), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (7421, 7433), False, 'from numpy import sin, cos, pi\n'), ((7521, 7536), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (7524, 7536), False, 'from numpy import sin, cos, pi\n'), ((7624, 7639), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (7627, 7639), False, 'from numpy import sin, cos, pi\n'), ((7814, 7873), 'objects.CSCG._3d.fields.vector.main._3dCSCG_VectorField', '_3dCSCG_VectorField', (['self.mesh', '(self.fx, self.fy, self.fz)'], {}), '(self.mesh, (self.fx, self.fy, self.fz))\n', (7833, 7873), False, 'from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField\n'), ((8628, 8643), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (8631, 8643), False, 'from numpy import sin, cos, pi\n'), ((8835, 8850), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (8838, 8850), False, 'from numpy import sin, cos, pi\n'), ((8952, 8967), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (8955, 8967), False, 'from numpy import sin, cos, pi\n'), ((9219, 9234), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (9222, 9234), False, 'from numpy import sin, cos, pi\n'), ((9318, 9333), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (9321, 9333), False, 'from numpy import sin, cos, pi\n'), ((9417, 9432), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (9420, 9432), False, 'from numpy import sin, cos, pi\n'), ((9607, 9666), 'objects.CSCG._3d.fields.vector.main._3dCSCG_VectorField', '_3dCSCG_VectorField', (['self.mesh', '(self.fx, self.fy, self.fz)'], {}), '(self.mesh, (self.fx, self.fy, self.fz))\n', (9626, 9666), False, 'from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField\n'), ((10443, 10458), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (10446, 10458), False, 'from numpy import sin, cos, pi\n'), ((10650, 10665), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (10653, 10665), False, 'from numpy import sin, cos, pi\n'), ((10767, 10782), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (10770, 10782), False, 'from numpy import sin, cos, pi\n'), ((11284, 11343), 'objects.CSCG._3d.fields.vector.main._3dCSCG_VectorField', '_3dCSCG_VectorField', (['self.mesh', '(self.fx, self.fy, self.fz)'], {}), '(self.mesh, (self.fx, self.fy, self.fz))\n', (11303, 11343), False, 'from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField\n'), ((12117, 12132), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (12120, 12132), False, 'from numpy import sin, cos, pi\n'), ((12324, 12339), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (12327, 12339), False, 'from numpy import sin, cos, pi\n'), ((12441, 12456), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (12444, 12456), False, 'from numpy import sin, cos, pi\n'), ((12914, 12973), 
'objects.CSCG._3d.fields.vector.main._3dCSCG_VectorField', '_3dCSCG_VectorField', (['self.mesh', '(self.fx, self.fy, self.fz)'], {}), '(self.mesh, (self.fx, self.fy, self.fz))\n', (12933, 12973), False, 'from objects.CSCG._3d.fields.vector.main import _3dCSCG_VectorField\n'), ((4317, 4332), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (4320, 4332), False, 'from numpy import sin, cos, pi\n'), ((4570, 4585), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (4573, 4585), False, 'from numpy import sin, cos, pi\n'), ((4825, 4840), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (4828, 4840), False, 'from numpy import sin, cos, pi\n'), ((5075, 5090), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5078, 5090), False, 'from numpy import sin, cos, pi\n'), ((5331, 5346), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5334, 5346), False, 'from numpy import sin, cos, pi\n'), ((5582, 5597), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5585, 5597), False, 'from numpy import sin, cos, pi\n'), ((7400, 7415), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (7403, 7415), False, 'from numpy import sin, cos, pi\n'), ((7503, 7518), 'numpy.cos', 'cos', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (7506, 7518), False, 'from numpy import sin, cos, pi\n'), ((7606, 7621), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (7609, 7621), False, 'from numpy import sin, cos, pi\n'), ((9201, 9216), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (9204, 9216), False, 'from numpy import sin, cos, pi\n'), ((9300, 9315), 'numpy.cos', 'cos', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (9303, 9315), False, 'from numpy import sin, cos, pi\n'), ((9399, 9414), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (9402, 9414), False, 'from numpy import sin, cos, pi\n'), ((4493, 4508), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (4496, 4508), False, 'from numpy import sin, cos, pi\n'), ((4752, 4767), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (4755, 4767), False, 'from numpy import sin, cos, pi\n'), ((4999, 5014), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5002, 5014), False, 'from numpy import sin, cos, pi\n'), ((5258, 5273), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5261, 5273), False, 'from numpy import sin, cos, pi\n'), ((5415, 5430), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5418, 5430), False, 'from numpy import sin, cos, pi\n'), ((5673, 5688), 'numpy.sin', 'sin', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5676, 5688), False, 'from numpy import sin, cos, pi\n'), ((7382, 7397), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (7385, 7397), False, 'from numpy import sin, cos, pi\n'), ((7485, 7500), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (7488, 7500), False, 'from numpy import sin, cos, pi\n'), ((7588, 7603), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (7591, 7603), False, 'from numpy import sin, cos, pi\n'), ((9183, 9198), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (9186, 9198), False, 'from numpy import sin, cos, pi\n'), ((9282, 9297), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (9285, 9297), False, 'from numpy import sin, cos, pi\n'), ((9381, 9396), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (9384, 9396), False, 'from numpy import sin, cos, pi\n')]
|
import read_data as RD
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
X = RD.read_data()
print('X = ',X.shape)
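# Center the data: subtract the mean image (the average over all image columns) from every image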
X_mean = np.reshape(np.sum(X,1)/X.shape[1],[ X.shape[0],1])
X = X-X_mean
print('X_centered = ',X.shape)
[U,S,V] = np.linalg.svd(X, full_matrices=False)
print('U = ',U.shape)
print('S = ',S.shape)
print('V = ',V.shape)
N = 12  # number of eigen images
Eig_im = U[:,0:N]
plt.figure(figsize=(10,10))
for i in range(0,N):
plt.subplot(int(np.sqrt(N)),int(np.ceil(N/int(np.sqrt(N)))),i+1)
im = np.reshape(Eig_im[:,i],[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('Eigen Image = '+str(i+1))
plt.savefig('Eigen_Images.png')
plt.savefig('Eigen_Images.tif')
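# Project the centered images onto the eigenimage basis to obtain the PCA coefficients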
Y = np.matmul(np.transpose(U),X)
print('Y = ',Y.shape)
plt.figure(figsize=(10,10))
Np = 10  # number of projection coefficients to plot
Ni = 4  # number of images
images = ['a','b','c','d']
for i in range(0,Ni):
plt.plot(np.arange(1,Np+1),Y[0:Np,i],label='Image = '+images[i])
plt.xlabel('Eigenvectors',fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.ylabel('Magnitude of the projection coefficient',fontsize=20)
plt.yticks(weight = 'bold',fontsize=15)
plt.legend(fontsize=20)
plt.savefig('Projection_Coefficients.png')
plt.savefig('Projection_Coefficients.tif')
# Image synthesis
ind = 0  # index of the image to synthesize
m = [1, 5, 10, 15, 20, 30]
plt.figure(figsize=(10,15))
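# Reconstruct the chosen image from its first m[i] eigenimages, then add the mean image back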
for i in range(0,len(m)):
X_hat = np.reshape(np.matmul(U[:,0:m[i]],Y[0:m[i],ind]),[X.shape[0],1])
print(X_hat.shape)
print(X_mean.shape)
X_hat += X_mean
plt.subplot(3,2,i+1)
im = np.reshape(X_hat,[64,64])
plt.imshow(im,cmap=plt.cm.gray, interpolation='none')
plt.title('m = '+str(m[i]),fontsize=20)
plt.xticks(weight = 'bold',fontsize=15)
plt.yticks(weight = 'bold',fontsize=15)
#img_out = Image.fromarray(im.astype(np.uint8))
#img_out.save('Im_reconstruction_'+str(m[i])+'.tif')
plt.savefig('Im_reconstruction.png')
plt.savefig('Im_reconstruction.tif')
|
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"numpy.sqrt",
"matplotlib.pyplot.xlabel",
"numpy.sum",
"matplotlib.pyplot.figure",
"read_data.read_data",
"matplotlib.pyplot.yticks",
"numpy.matmul",
"numpy.linalg.svd",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
] |
[((101, 115), 'read_data.read_data', 'RD.read_data', ([], {}), '()\n', (113, 115), True, 'import read_data as RD\n'), ((253, 290), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {'full_matrices': '(False)'}), '(X, full_matrices=False)\n', (266, 290), True, 'import numpy as np\n'), ((406, 434), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (416, 434), True, 'import matplotlib.pyplot as plt\n'), ((653, 684), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Eigen_Images.png"""'], {}), "('Eigen_Images.png')\n", (664, 684), True, 'import matplotlib.pyplot as plt\n'), ((685, 716), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Eigen_Images.tif"""'], {}), "('Eigen_Images.tif')\n", (696, 716), True, 'import matplotlib.pyplot as plt\n'), ((773, 801), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (783, 801), True, 'import matplotlib.pyplot as plt\n'), ((990, 1029), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Eigenvectors"""'], {'fontsize': '(20)'}), "('Eigenvectors', fontsize=20)\n", (1000, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1029, 1067), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1039, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1135), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude of the projection coefficient"""'], {'fontsize': '(20)'}), "('Magnitude of the projection coefficient', fontsize=20)\n", (1079, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1135, 1173), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1145, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1198), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (1185, 1198), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1241), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Projection_Coefficients.png"""'], {}), "('Projection_Coefficients.png')\n", (1210, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1284), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Projection_Coefficients.tif"""'], {}), "('Projection_Coefficients.tif')\n", (1253, 1284), True, 'import matplotlib.pyplot as plt\n'), ((1371, 1399), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (1381, 1399), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1927), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Im_reconstruction.png"""'], {}), "('Im_reconstruction.png')\n", (1902, 1927), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1964), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Im_reconstruction.tif"""'], {}), "('Im_reconstruction.tif')\n", (1939, 1964), True, 'import matplotlib.pyplot as plt\n'), ((527, 561), 'numpy.reshape', 'np.reshape', (['Eig_im[:, i]', '[64, 64]'], {}), '(Eig_im[:, i], [64, 64])\n', (537, 561), True, 'import numpy as np\n'), ((560, 614), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'cmap': 'plt.cm.gray', 'interpolation': '"""none"""'}), "(im, cmap=plt.cm.gray, interpolation='none')\n", (570, 614), True, 'import matplotlib.pyplot as plt\n'), ((732, 747), 'numpy.transpose', 'np.transpose', (['U'], {}), '(U)\n', (744, 747), True, 'import numpy as np\n'), ((1557, 1581), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(i + 1)'], {}), '(3, 2, i + 1)\n', (1568, 1581), True, 'import 
matplotlib.pyplot as plt\n'), ((1584, 1611), 'numpy.reshape', 'np.reshape', (['X_hat', '[64, 64]'], {}), '(X_hat, [64, 64])\n', (1594, 1611), True, 'import numpy as np\n'), ((1611, 1665), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {'cmap': 'plt.cm.gray', 'interpolation': '"""none"""'}), "(im, cmap=plt.cm.gray, interpolation='none')\n", (1621, 1665), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1745), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1717, 1745), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1786), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'weight': '"""bold"""', 'fontsize': '(15)'}), "(weight='bold', fontsize=15)\n", (1758, 1786), True, 'import matplotlib.pyplot as plt\n'), ((158, 170), 'numpy.sum', 'np.sum', (['X', '(1)'], {}), '(X, 1)\n', (164, 170), True, 'import numpy as np\n'), ((934, 954), 'numpy.arange', 'np.arange', (['(1)', '(Np + 1)'], {}), '(1, Np + 1)\n', (943, 954), True, 'import numpy as np\n'), ((1445, 1484), 'numpy.matmul', 'np.matmul', (['U[:, 0:m[i]]', 'Y[0:m[i], ind]'], {}), '(U[:, 0:m[i]], Y[0:m[i], ind])\n', (1454, 1484), True, 'import numpy as np\n'), ((472, 482), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (479, 482), True, 'import numpy as np\n'), ((502, 512), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (509, 512), True, 'import numpy as np\n')]
|
import math
from numpy import linalg
from scipy import stats
from scipy.spatial import distance
import numpy
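# Each metric compares a distribution p against every column q of the matrix Q.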
def euclidean(p, Q):
return numpy.apply_along_axis(lambda q: linalg.norm(p - q), 0, Q)
def hellinger(p, Q):
factor = 1 / math.sqrt(2)
sqrt_p = numpy.sqrt(p)
return factor * numpy.apply_along_axis(
lambda q: linalg.norm(sqrt_p - numpy.sqrt(q)), 0, Q
)
def jensen_shannon_distance(p, Q):
"""Square root of Jensen-Shannon divergence."""
return numpy.apply_along_axis(lambda q: distance.jensenshannon(p, q), 0, Q)
def k_directed(p, Q):
"""See: <NAME>. "Divergence Measures Based on the Shannon Entropy". 1991."""
return numpy.apply_along_axis(lambda q: stats.entropy(p, (p + q) / 2), 0, Q)
def kullback_leibler(p, Q):
return numpy.apply_along_axis(lambda q: stats.entropy(p, q), 0, Q)
def neyman_chi_square(p, Q):
return numpy.apply_along_axis(lambda q: numpy.sum(numpy.square(p - q) / q), 0, Q)
def pearson_chi_square(p, Q):
return numpy.apply_along_axis(lambda q: numpy.sum(numpy.square(p - q) / p), 0, Q)
def total_variation(p, Q):
return 0.5 * numpy.apply_along_axis(lambda q: linalg.norm(p - q, 1), 0, Q)
|
[
"scipy.stats.entropy",
"numpy.sqrt",
"math.sqrt",
"numpy.square",
"numpy.linalg.norm",
"scipy.spatial.distance.jensenshannon"
] |
[((269, 282), 'numpy.sqrt', 'numpy.sqrt', (['p'], {}), '(p)\n', (279, 282), False, 'import numpy\n'), ((243, 255), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (252, 255), False, 'import math\n'), ((177, 195), 'numpy.linalg.norm', 'linalg.norm', (['(p - q)'], {}), '(p - q)\n', (188, 195), False, 'from numpy import linalg\n'), ((527, 555), 'scipy.spatial.distance.jensenshannon', 'distance.jensenshannon', (['p', 'q'], {}), '(p, q)\n', (549, 555), False, 'from scipy.spatial import distance\n'), ((713, 742), 'scipy.stats.entropy', 'stats.entropy', (['p', '((p + q) / 2)'], {}), '(p, (p + q) / 2)\n', (726, 742), False, 'from scipy import stats\n'), ((824, 843), 'scipy.stats.entropy', 'stats.entropy', (['p', 'q'], {}), '(p, q)\n', (837, 843), False, 'from scipy import stats\n'), ((1165, 1186), 'numpy.linalg.norm', 'linalg.norm', (['(p - q)', '(1)'], {}), '(p - q, 1)\n', (1176, 1186), False, 'from numpy import linalg\n'), ((936, 955), 'numpy.square', 'numpy.square', (['(p - q)'], {}), '(p - q)\n', (948, 955), False, 'import numpy\n'), ((1054, 1073), 'numpy.square', 'numpy.square', (['(p - q)'], {}), '(p - q)\n', (1066, 1073), False, 'import numpy\n'), ((366, 379), 'numpy.sqrt', 'numpy.sqrt', (['q'], {}), '(q)\n', (376, 379), False, 'import numpy\n')]
|
import json
import math
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from pathlib import Path
from typing import List, Optional
import numpy as np
from vad.util.time_utils import (
format_timedelta_to_milliseconds,
format_timedelta_to_timecode,
parse_timecode_to_timedelta,
)
class VoiceActivityVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
class VoiceActivityMillisecondsVersion(Enum):
v01 = "v0.1"
v02 = "v0.2"
v03 = "v0.3"
@dataclass
class Activity:
start: timedelta
end: timedelta
@dataclass
class VoiceActivity:
duration: timedelta
activities: List[Activity]
probs_sample_rate: Optional[int]
probs: Optional[List[float]]
@classmethod
def load(cls, path: Path):
with path.open() as file:
voice_activity_data = json.load(file)
return VoiceActivity.from_json(voice_activity_data)
@classmethod
def from_json(cls, voice_activity_data: dict):
version = voice_activity_data["version"]
if version == VoiceActivityVersion.v01.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityVersion.v02.value:
if voice_activity_data["time_format"] == "timecode":
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(speech_block["start_time"]),
end=parse_timecode_to_timedelta(speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif voice_activity_data["time_format"] == "millisecond":
voice_activity = cls(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
elif version == VoiceActivityVersion.v03.value:
voice_activity = cls(
duration=parse_timecode_to_timedelta(voice_activity_data["duration"]),
activities=[
Activity(
start=parse_timecode_to_timedelta(activity["start"]),
end=parse_timecode_to_timedelta(activity["end"]),
)
for activity in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
def save(self, path: Path, version: VoiceActivityVersion = VoiceActivityVersion.v03):
voice_activity_data = self.to_json(version)
with path.open("w") as file:
json.dump(voice_activity_data, file, ensure_ascii=False, indent=4)
def to_json(self, version: VoiceActivityVersion = VoiceActivityVersion.v03):
if version == VoiceActivityVersion.v01:
voice_activity_formatted = {
"version": VoiceActivityVersion.v01.value,
"duration": format_timedelta_to_timecode(self.duration),
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v02:
voice_activity_formatted = {
"version": VoiceActivityVersion.v02.value,
"duration": format_timedelta_to_timecode(self.duration),
"time_format": "timecode",
"voice_activity": [
{
"start_time": format_timedelta_to_timecode(activity.start),
"end_time": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityVersion.v03:
voice_activity_formatted = {
"version": VoiceActivityVersion.v03.value,
"duration": format_timedelta_to_timecode(self.duration),
"activities": [
{
"start": format_timedelta_to_timecode(activity.start),
"end": format_timedelta_to_timecode(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_formatted
def to_milliseconds(
self, version: VoiceActivityMillisecondsVersion = VoiceActivityMillisecondsVersion.v03
):
if version == VoiceActivityMillisecondsVersion.v02:
voice_activity_milliseconds = {
"version": version.value,
"duration": format_timedelta_to_milliseconds(self.duration),
"time_format": "millisecond",
"voice_activity": [
{
"start_time": format_timedelta_to_milliseconds(activity.start),
"end_time": format_timedelta_to_milliseconds(activity.end),
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
elif version == VoiceActivityMillisecondsVersion.v03:
voice_activity_milliseconds = {
"version": version.value,
"duration": {"total_milliseconds": format_timedelta_to_milliseconds(self.duration)},
"activities": [
{
"start": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.start)
},
"end": {
"total_milliseconds": format_timedelta_to_milliseconds(activity.end)
},
}
for activity in self.activities
],
"probs_sample_rate": self.probs_sample_rate,
"probs": self.probs,
}
else:
raise NotImplementedError
return voice_activity_milliseconds
@classmethod
def from_milliseconds(cls, voice_activity_data: dict):
version = voice_activity_data["version"] # version of milliseconds format
if version == VoiceActivityMillisecondsVersion.v02.value:
voice_activity = VoiceActivity(
duration=timedelta(milliseconds=voice_activity_data["duration"]),
activities=[
Activity(
start=timedelta(milliseconds=speech_block["start_time"]),
end=timedelta(milliseconds=speech_block["end_time"]),
)
for speech_block in voice_activity_data["voice_activity"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
elif version == VoiceActivityMillisecondsVersion.v03.value:
voice_activity = VoiceActivity(
duration=timedelta(
milliseconds=voice_activity_data["duration"]["total_milliseconds"]
),
activities=[
Activity(
start=timedelta(milliseconds=segment["start"]["total_milliseconds"]),
end=timedelta(milliseconds=segment["end"]["total_milliseconds"]),
)
for segment in voice_activity_data["activities"]
],
probs_sample_rate=voice_activity_data.get("probs_sample_rate"),
probs=voice_activity_data.get("probs"),
)
else:
raise NotImplementedError
return voice_activity
def to_labels(self, sample_rate: int) -> np.array:
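        """Convert the activity intervals into a per-sample 0/1 label array at the given sample rate."""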
total_samples = int(self.duration.total_seconds() * sample_rate)
labels = np.zeros(total_samples, dtype=np.long)
for activity in self.activities:
start_sample = int(activity.start.total_seconds() * sample_rate)
end_sample = int(activity.end.total_seconds() * sample_rate)
labels[start_sample:end_sample] = 1
return labels
|
[
"vad.util.time_utils.parse_timecode_to_timedelta",
"vad.util.time_utils.format_timedelta_to_milliseconds",
"numpy.zeros",
"json.load",
"datetime.timedelta",
"json.dump",
"vad.util.time_utils.format_timedelta_to_timecode"
] |
[((10019, 10057), 'numpy.zeros', 'np.zeros', (['total_samples'], {'dtype': 'np.long'}), '(total_samples, dtype=np.long)\n', (10027, 10057), True, 'import numpy as np\n'), ((863, 878), 'json.load', 'json.load', (['file'], {}), '(file)\n', (872, 878), False, 'import json\n'), ((4223, 4289), 'json.dump', 'json.dump', (['voice_activity_data', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(voice_activity_data, file, ensure_ascii=False, indent=4)\n', (4232, 4289), False, 'import json\n'), ((4548, 4591), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['self.duration'], {}), '(self.duration)\n', (4576, 4591), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((6743, 6790), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['self.duration'], {}), '(self.duration)\n', (6775, 6790), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((1170, 1230), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["voice_activity_data['duration']"], {}), "(voice_activity_data['duration'])\n", (1197, 1230), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5198, 5241), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['self.duration'], {}), '(self.duration)\n', (5226, 5241), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((8489, 8544), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "voice_activity_data['duration']"}), "(milliseconds=voice_activity_data['duration'])\n", (8498, 8544), False, 'from datetime import timedelta\n'), ((4689, 4733), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.start'], {}), '(activity.start)\n', (4717, 4733), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((4771, 4813), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.end'], {}), '(activity.end)\n', (4799, 4813), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5891, 5934), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['self.duration'], {}), '(self.duration)\n', (5919, 5934), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((6934, 6982), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['activity.start'], {}), '(activity.start)\n', (6966, 6982), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7020, 7066), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['activity.end'], {}), '(activity.end)\n', (7052, 7066), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7472, 7519), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', 
(['self.duration'], {}), '(self.duration)\n', (7504, 7519), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((9171, 9248), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "voice_activity_data['duration']['total_milliseconds']"}), "(milliseconds=voice_activity_data['duration']['total_milliseconds'])\n", (9180, 9248), False, 'from datetime import timedelta\n'), ((1918, 1978), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["voice_activity_data['duration']"], {}), "(voice_activity_data['duration'])\n", (1945, 1978), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((3415, 3475), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["voice_activity_data['duration']"], {}), "(voice_activity_data['duration'])\n", (3442, 3475), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5382, 5426), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.start'], {}), '(activity.start)\n', (5410, 5426), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((5464, 5506), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.end'], {}), '(activity.end)\n', (5492, 5506), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((1321, 1376), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['start_time']"], {}), "(speech_block['start_time'])\n", (1348, 1376), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((1406, 1459), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['end_time']"], {}), "(speech_block['end_time'])\n", (1433, 1459), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((2655, 2710), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "voice_activity_data['duration']"}), "(milliseconds=voice_activity_data['duration'])\n", (2664, 2710), False, 'from datetime import timedelta\n'), ((6023, 6067), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.start'], {}), '(activity.start)\n', (6051, 6067), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((6100, 6142), 'vad.util.time_utils.format_timedelta_to_timecode', 'format_timedelta_to_timecode', (['activity.end'], {}), '(activity.end)\n', (6128, 6142), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7661, 7709), 'vad.util.time_utils.format_timedelta_to_milliseconds', 'format_timedelta_to_milliseconds', (['activity.start'], {}), '(activity.start)\n', (7693, 7709), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((7820, 7866), 'vad.util.time_utils.format_timedelta_to_milliseconds', 
'format_timedelta_to_milliseconds', (['activity.end'], {}), '(activity.end)\n', (7852, 7866), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((8635, 8685), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['start_time']"}), "(milliseconds=speech_block['start_time'])\n", (8644, 8685), False, 'from datetime import timedelta\n'), ((8715, 8763), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['end_time']"}), "(milliseconds=speech_block['end_time'])\n", (8724, 8763), False, 'from datetime import timedelta\n'), ((9377, 9439), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "segment['start']['total_milliseconds']"}), "(milliseconds=segment['start']['total_milliseconds'])\n", (9386, 9439), False, 'from datetime import timedelta\n'), ((9469, 9529), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "segment['end']['total_milliseconds']"}), "(milliseconds=segment['end']['total_milliseconds'])\n", (9478, 9529), False, 'from datetime import timedelta\n'), ((2081, 2136), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['start_time']"], {}), "(speech_block['start_time'])\n", (2108, 2136), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((2170, 2223), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["speech_block['end_time']"], {}), "(speech_block['end_time'])\n", (2197, 2223), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((3566, 3612), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["activity['start']"], {}), "(activity['start'])\n", (3593, 3612), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((3642, 3686), 'vad.util.time_utils.parse_timecode_to_timedelta', 'parse_timecode_to_timedelta', (["activity['end']"], {}), "(activity['end'])\n", (3669, 3686), False, 'from vad.util.time_utils import format_timedelta_to_milliseconds, format_timedelta_to_timecode, parse_timecode_to_timedelta\n'), ((2813, 2863), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['start_time']"}), "(milliseconds=speech_block['start_time'])\n", (2822, 2863), False, 'from datetime import timedelta\n'), ((2897, 2945), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': "speech_block['end_time']"}), "(milliseconds=speech_block['end_time'])\n", (2906, 2945), False, 'from datetime import timedelta\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import datetime as dt
import numpy as np
from sklearn.linear_model import LinearRegression
from alphamind.data.neutralize import neutralize
def benchmark_neutralize(n_samples: int, n_features: int, n_loops: int) -> None:
print("-" * 60)
print("Starting least square fitting benchmarking")
print("Parameters(n_samples: {0}, n_features: {1}, n_loops: {2})".format(n_samples, n_features,
n_loops))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
start = dt.datetime.now()
for _ in range(n_loops):
calc_res = neutralize(x, y)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
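    # Benchmark: fit OLS without an intercept and take the residuals as the expected neutralized values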
for _ in range(n_loops):
benchmark_model = LinearRegression(fit_intercept=False)
benchmark_model.fit(x, y)
exp_res = y - x @ benchmark_model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
np.testing.assert_array_almost_equal(calc_res, exp_res)
def benchmark_neutralize_with_groups(n_samples: int, n_features: int, n_loops: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting least square fitting with group benchmarking")
print(
"Parameters(n_samples: {0}, n_features: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_features,
n_loops,
n_groups))
y = np.random.randn(n_samples, 5)
x = np.random.randn(n_samples, n_features)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
_ = neutralize(x, y, groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
model = LinearRegression(fit_intercept=False)
for _ in range(n_loops):
for i in range(n_groups):
curr_x = x[groups == i]
curr_y = y[groups == i]
model.fit(curr_x, curr_y)
_ = curr_y - curr_x @ model.coef_.T
benchmark_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_neutralize(3000, 10, 1000)
benchmark_neutralize_with_groups(3000, 10, 1000, 30)
|
[
"numpy.testing.assert_array_almost_equal",
"alphamind.data.neutralize.neutralize",
"datetime.datetime.now",
"numpy.random.randint",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression"
] |
[((591, 620), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(5)'], {}), '(n_samples, 5)\n', (606, 620), True, 'import numpy as np\n'), ((630, 668), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (645, 668), True, 'import numpy as np\n'), ((684, 701), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (699, 701), True, 'import datetime as dt\n'), ((907, 924), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (922, 924), True, 'import datetime as dt\n'), ((1244, 1299), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['calc_res', 'exp_res'], {}), '(calc_res, exp_res)\n', (1280, 1299), True, 'import numpy as np\n'), ((1968, 1997), 'numpy.random.randn', 'np.random.randn', (['n_samples', '(5)'], {}), '(n_samples, 5)\n', (1983, 1997), True, 'import numpy as np\n'), ((2007, 2045), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (2022, 2045), True, 'import numpy as np\n'), ((2060, 2103), 'numpy.random.randint', 'np.random.randint', (['n_groups'], {'size': 'n_samples'}), '(n_groups, size=n_samples)\n', (2077, 2103), True, 'import numpy as np\n'), ((2119, 2136), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2134, 2136), True, 'import datetime as dt\n'), ((2343, 2360), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2358, 2360), True, 'import datetime as dt\n'), ((2376, 2413), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (2392, 2413), False, 'from sklearn.linear_model import LinearRegression\n'), ((752, 768), 'alphamind.data.neutralize.neutralize', 'neutralize', (['x', 'y'], {}), '(x, y)\n', (762, 768), False, 'from alphamind.data.neutralize import neutralize\n'), ((792, 809), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (807, 809), True, 'import datetime as dt\n'), ((982, 1019), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (998, 1019), False, 'from sklearn.linear_model import LinearRegression\n'), ((1134, 1151), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1149, 1151), True, 'import datetime as dt\n'), ((2180, 2204), 'alphamind.data.neutralize.neutralize', 'neutralize', (['x', 'y', 'groups'], {}), '(x, y, groups)\n', (2190, 2204), False, 'from alphamind.data.neutralize import neutralize\n'), ((2228, 2245), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2243, 2245), True, 'import datetime as dt\n'), ((2669, 2686), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2684, 2686), True, 'import datetime as dt\n')]
|
# coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Functions for converting env episode data to tfrecords of transitions."""
import collections
import gin
import numpy as np
from PIL import Image
import six
from six.moves import range
import tensorflow.compat.v1 as tf
_bytes_feature = (
lambda v: tf.train.Feature(bytes_list=tf.train.BytesList(value=v)))
_int64_feature = (
lambda v: tf.train.Feature(int64_list=tf.train.Int64List(value=v)))
_float_feature = (
lambda v: tf.train.Feature(float_list=tf.train.FloatList(value=v)))
_IMAGE_KEY_PREFIX = 'image'
@gin.configurable
def make_fixed_length(
input_list,
fixed_length,
always_include_endpoints=True,
randomized=True):
"""Create a fixed length list by sampling entries from input_list.
Args:
input_list: The original list we sample entries from.
fixed_length: An integer: the desired length of the output list.
always_include_endpoints: If True, always include the first and last entries
of input_list in the output.
randomized: If True, select entries from input_list by random sampling with
replacement. If False, select entries from input_list deterministically.
Returns:
A list of length fixed_length containing sampled entries of input_list.
"""
original_length = len(input_list)
if original_length <= 2:
return None
if not randomized:
indices = np.sort(np.mod(np.arange(fixed_length), original_length))
return [input_list[i] for i in indices]
if always_include_endpoints:
# Always include entries 0 and N-1.
endpoint_indices = np.array([0, original_length - 1])
# The remaining (fixed_length-2) frames are sampled with replacement
# from entries [1, N-1) of input_list.
other_indices = 1 + np.random.choice(
original_length - 2, fixed_length-2, replace=True)
indices = np.concatenate(
(endpoint_indices, other_indices),
axis=0)
else:
indices = np.random.choice(
original_length, fixed_length, replace=True)
indices = np.sort(indices)
return [input_list[i] for i in indices]
@gin.configurable
def episode_to_transitions_reacher(episode_data, is_demo=False):
"""Converts reacher env data to transition examples."""
transitions = []
for i, transition in enumerate(episode_data):
del i
feature_dict = {}
(obs_t, action, reward, obs_tp1, done, debug) = transition
del debug
feature_dict['pose_t'] = _float_feature(obs_t)
feature_dict['pose_tp1'] = _float_feature(obs_tp1)
feature_dict['action'] = _float_feature(action)
feature_dict['reward'] = _float_feature([reward])
feature_dict['done'] = _int64_feature([int(done)])
feature_dict['is_demo'] = _int64_feature([int(is_demo)])
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
transitions.append(example)
return transitions
@gin.configurable
def episode_to_transitions_metareacher(episode_data):
"""Converts metareacher env data to transition examples."""
context_features = {}
feature_lists = collections.defaultdict(list)
context_features['is_demo'] = _int64_feature(
[int(episode_data[0][-1]['is_demo'])])
context_features['target_idx'] = _int64_feature(
[episode_data[0][-1]['target_idx']])
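  # Per-step features go into sequence feature lists; episode-level info stays in the context.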
for i, transition in enumerate(episode_data):
del i
(obs_t, action, reward, obs_tp1, done, debug) = transition
del debug
feature_lists['pose_t'].append(_float_feature(obs_t))
feature_lists['pose_tp1'].append(_float_feature(obs_tp1))
feature_lists['action'].append(_float_feature(action))
feature_lists['reward'].append(_float_feature([reward]))
feature_lists['done'].append(_int64_feature([int(done)]))
tf_feature_lists = {}
for key in feature_lists:
tf_feature_lists[key] = tf.train.FeatureList(feature=feature_lists[key])
return [tf.train.SequenceExample(
context=tf.train.Features(feature=context_features),
feature_lists=tf.train.FeatureLists(feature_list=tf_feature_lists))]
|
[
"tensorflow.compat.v1.train.Features",
"tensorflow.compat.v1.train.FloatList",
"numpy.random.choice",
"numpy.sort",
"numpy.array",
"tensorflow.compat.v1.train.BytesList",
"collections.defaultdict",
"tensorflow.compat.v1.train.FeatureList",
"numpy.concatenate",
"tensorflow.compat.v1.train.Int64List",
"tensorflow.compat.v1.train.FeatureLists",
"numpy.arange"
] |
[((2614, 2630), 'numpy.sort', 'np.sort', (['indices'], {}), '(indices)\n', (2621, 2630), True, 'import numpy as np\n'), ((3634, 3663), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3657, 3663), False, 'import collections\n'), ((2168, 2202), 'numpy.array', 'np.array', (['[0, original_length - 1]'], {}), '([0, original_length - 1])\n', (2176, 2202), True, 'import numpy as np\n'), ((2434, 2491), 'numpy.concatenate', 'np.concatenate', (['(endpoint_indices, other_indices)'], {'axis': '(0)'}), '((endpoint_indices, other_indices), axis=0)\n', (2448, 2491), True, 'import numpy as np\n'), ((2531, 2592), 'numpy.random.choice', 'np.random.choice', (['original_length', 'fixed_length'], {'replace': '(True)'}), '(original_length, fixed_length, replace=True)\n', (2547, 2592), True, 'import numpy as np\n'), ((4371, 4419), 'tensorflow.compat.v1.train.FeatureList', 'tf.train.FeatureList', ([], {'feature': 'feature_lists[key]'}), '(feature=feature_lists[key])\n', (4391, 4419), True, 'import tensorflow.compat.v1 as tf\n'), ((910, 937), 'tensorflow.compat.v1.train.BytesList', 'tf.train.BytesList', ([], {'value': 'v'}), '(value=v)\n', (928, 937), True, 'import tensorflow.compat.v1 as tf\n'), ((1001, 1028), 'tensorflow.compat.v1.train.Int64List', 'tf.train.Int64List', ([], {'value': 'v'}), '(value=v)\n', (1019, 1028), True, 'import tensorflow.compat.v1 as tf\n'), ((1092, 1119), 'tensorflow.compat.v1.train.FloatList', 'tf.train.FloatList', ([], {'value': 'v'}), '(value=v)\n', (1110, 1119), True, 'import tensorflow.compat.v1 as tf\n'), ((2343, 2412), 'numpy.random.choice', 'np.random.choice', (['(original_length - 2)', '(fixed_length - 2)'], {'replace': '(True)'}), '(original_length - 2, fixed_length - 2, replace=True)\n', (2359, 2412), True, 'import numpy as np\n'), ((1987, 2010), 'numpy.arange', 'np.arange', (['fixed_length'], {}), '(fixed_length)\n', (1996, 2010), True, 'import numpy as np\n'), ((3362, 3401), 'tensorflow.compat.v1.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), '(feature=feature_dict)\n', (3379, 3401), True, 'import tensorflow.compat.v1 as tf\n'), ((4471, 4514), 'tensorflow.compat.v1.train.Features', 'tf.train.Features', ([], {'feature': 'context_features'}), '(feature=context_features)\n', (4488, 4514), True, 'import tensorflow.compat.v1 as tf\n'), ((4536, 4588), 'tensorflow.compat.v1.train.FeatureLists', 'tf.train.FeatureLists', ([], {'feature_list': 'tf_feature_lists'}), '(feature_list=tf_feature_lists)\n', (4557, 4588), True, 'import tensorflow.compat.v1 as tf\n')]
|
import base64
import datetime
import io
import json
import os
import requests
from collections import namedtuple
from urllib.parse import urlparse
import faust
import numpy as np
import keras_preprocessing.image as keras_img
from avro import schema
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from biovolume import calc_biovolume
from blob import Blob, BlobConfig
config_path = os.environ.get('IFCB_STREAM_APP_CONFIG', 'config.json')
with open(config_path) as config_file:
config = json.load(config_file)
Stats = namedtuple(
'Stats',
['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob', 'classification_time', 'biovolume', 'carbon', 'hab']
)
ClassifierStats = namedtuple(
'ClassifierStats',
['sample_name', 'prob', 'classifier', 'classification_time']
)
schema_config = {
'url': config['schema.registry.url'],
'ssl.ca.location': None
}
# need to use CachedSchemaRegistryClient to get schema
# - need to copy config because it is consumed when used in CachedSchemaRegistryClient
schema_config_copy = schema_config.copy()
cached_schema_client = CachedSchemaRegistryClient(schema_config)
key_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-key')[1])
value_schema = str(cached_schema_client.get_latest_schema('ifcb-stats-value')[1])
key_schema = avro.loads(key_schema)
value_schema = avro.loads(value_schema)
producer = AvroProducer({
'bootstrap.servers': config['bootstrap.servers'],
'schema.registry.url': config['schema.registry.url']
},
default_key_schema=key_schema,
default_value_schema=value_schema
)
app = faust.App(
config['app_name'],
broker=config['broker'],
topic_partitions=config['topic_partitions'],
store='rocksdb://',
consumer_auto_offset_reset='earliest',
version=1
)
image_topic = app.topic(config['image_topic'])
stats_topic = app.topic(config['stats_topic'])
classifier_stats_table = app.Table('ifcb-classifier-stats', default=ClassifierStats)
diatoms = config['diatoms']
class_names = config['class_names']
hab_species = config['hab_species']
def publish_stats(feature_key, image, classifier_stats, blob_config=BlobConfig()):
"""Calculate biovolume, carbon, hab, and publish to Kafka"""
# calculate biovolume
# - scale biovolume for 3d (from ifcb-analysis)
blob = Blob(image, blob_config)
biovolume = calc_biovolume(blob)
mu = 1/3.4
biovolume = biovolume * mu ** 3
carbon = calc_carbon(classifier_stats[0], biovolume)
hab = classifier_stats[0] in hab_species
time, ifcb_id, roi = feature_key.split('_')
roi = int(roi)
timestamp = int(datetime.datetime.strptime(time[1:], '%Y%m%dT%H%M%S').timestamp())
stats = Stats(
timestamp,
ifcb_id,
roi,
classifier_stats[0],
classifier_stats[2],
classifier_stats[1],
classifier_stats[3],
biovolume,
carbon,
hab
)
# send to topic with Avro schema
producer.poll(0)
producer.produce(
topic=config['stats_topic'],
key={
'pid': f"{time}_{ifcb_id}",
'roi': int(roi)
},
value=stats._asdict()
)
producer.flush()
@app.agent(image_topic)
async def classify(images, url=config['tensorflow_url'], target_size=(224, 224)):
async for image in images:
# decode binary blob to png file then resize and normalize
image_str = base64.b64decode(image['image'])
image_file = io.BytesIO(image_str)
img = keras_img.img_to_array(
keras_img.load_img(image_file, target_size=target_size)
)
img /= 255
# create payload and send to TF RESTful API
headers = {"content-type": "application/json"}
data = json.dumps({'instances': [img.tolist()]})
result = requests.post(url, headers=headers, data=data)
# save the probabilities for each class (1d ndarray)
probs = result.json()['predictions'][0][:]
# feature_key is roi
time = datetime.datetime.fromtimestamp(image['datetime'])
feature_key = f"{time:D%Y%m%dT%H%M%S}_{image['ifcb_id']}_{image['roi']:05}"
print(f'processing {feature_key}')
# update table if current prob is greater than what is already in the table
prob = np.nanmax(probs)
if feature_key not in classifier_stats_table or prob > classifier_stats_table[feature_key].prob:
name = class_names[np.argmax(probs)]
classifier, version = get_classifier(url)
classifier_version = f'{classifier}:{version}'
classifier_stats_table[feature_key] = ClassifierStats(
name,
prob,
classifier_version,
int(datetime.datetime.utcnow().timestamp())
)
# send
publish_stats(feature_key, image_str, classifier_stats_table[feature_key])
def get_classifier(url):
"""Given TF style url, return name and version"""
parse_results = urlparse(url)
_, version, _, name_raw = parse_results.path.split('/')
name = name_raw.split(':')[0]
return (name, version)
def calc_carbon(english_name, scaled_biovolume, diatom_list=diatoms):
"""Given volume in u3/cell return carbon in pg C/cell.
$log_10(C) = log(a) + b \cdot log_10(V)$
"""
if english_name in diatom_list:
carbon = 10**(-0.665 + 0.939*np.log10(scaled_biovolume))
else:
carbon = 10**(-0.993 + 0.881*np.log10(scaled_biovolume))
return carbon
if __name__ == '__main__':
app.main()
|
[
"requests.post",
"numpy.log10",
"confluent_kafka.avro.loads",
"faust.App",
"keras_preprocessing.image.load_img",
"io.BytesIO",
"confluent_kafka.avro.cached_schema_registry_client.CachedSchemaRegistryClient",
"blob.BlobConfig",
"blob.Blob",
"numpy.nanmax",
"collections.namedtuple",
"biovolume.calc_biovolume",
"confluent_kafka.avro.AvroProducer",
"numpy.argmax",
"datetime.datetime.fromtimestamp",
"urllib.parse.urlparse",
"datetime.datetime.utcnow",
"datetime.datetime.strptime",
"os.environ.get",
"base64.b64decode",
"json.load"
] |
[((637, 692), 'os.environ.get', 'os.environ.get', (['"""IFCB_STREAM_APP_CONFIG"""', '"""config.json"""'], {}), "('IFCB_STREAM_APP_CONFIG', 'config.json')\n", (651, 692), False, 'import os\n'), ((777, 911), 'collections.namedtuple', 'namedtuple', (['"""Stats"""', "['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob',\n 'classification_time', 'biovolume', 'carbon', 'hab']"], {}), "('Stats', ['time', 'ifcb_id', 'roi', 'name', 'classifier', 'prob',\n 'classification_time', 'biovolume', 'carbon', 'hab'])\n", (787, 911), False, 'from collections import namedtuple\n'), ((936, 1031), 'collections.namedtuple', 'namedtuple', (['"""ClassifierStats"""', "['sample_name', 'prob', 'classifier', 'classification_time']"], {}), "('ClassifierStats', ['sample_name', 'prob', 'classifier',\n 'classification_time'])\n", (946, 1031), False, 'from collections import namedtuple\n'), ((1336, 1377), 'confluent_kafka.avro.cached_schema_registry_client.CachedSchemaRegistryClient', 'CachedSchemaRegistryClient', (['schema_config'], {}), '(schema_config)\n', (1362, 1377), False, 'from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient\n'), ((1552, 1574), 'confluent_kafka.avro.loads', 'avro.loads', (['key_schema'], {}), '(key_schema)\n', (1562, 1574), False, 'from confluent_kafka import avro\n'), ((1590, 1614), 'confluent_kafka.avro.loads', 'avro.loads', (['value_schema'], {}), '(value_schema)\n', (1600, 1614), False, 'from confluent_kafka import avro\n'), ((1626, 1818), 'confluent_kafka.avro.AvroProducer', 'AvroProducer', (["{'bootstrap.servers': config['bootstrap.servers'], 'schema.registry.url':\n config['schema.registry.url']}"], {'default_key_schema': 'key_schema', 'default_value_schema': 'value_schema'}), "({'bootstrap.servers': config['bootstrap.servers'],\n 'schema.registry.url': config['schema.registry.url']},\n default_key_schema=key_schema, default_value_schema=value_schema)\n", (1638, 1818), False, 'from confluent_kafka.avro import AvroProducer\n'), ((1842, 2020), 'faust.App', 'faust.App', (["config['app_name']"], {'broker': "config['broker']", 'topic_partitions': "config['topic_partitions']", 'store': '"""rocksdb://"""', 'consumer_auto_offset_reset': '"""earliest"""', 'version': '(1)'}), "(config['app_name'], broker=config['broker'], topic_partitions=\n config['topic_partitions'], store='rocksdb://',\n consumer_auto_offset_reset='earliest', version=1)\n", (1851, 2020), False, 'import faust\n'), ((745, 767), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (754, 767), False, 'import json\n'), ((2389, 2401), 'blob.BlobConfig', 'BlobConfig', ([], {}), '()\n', (2399, 2401), False, 'from blob import Blob, BlobConfig\n'), ((2558, 2582), 'blob.Blob', 'Blob', (['image', 'blob_config'], {}), '(image, blob_config)\n', (2562, 2582), False, 'from blob import Blob, BlobConfig\n'), ((2599, 2619), 'biovolume.calc_biovolume', 'calc_biovolume', (['blob'], {}), '(blob)\n', (2613, 2619), False, 'from biovolume import calc_biovolume\n'), ((5241, 5254), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (5249, 5254), False, 'from urllib.parse import urlparse\n'), ((3659, 3691), 'base64.b64decode', 'base64.b64decode', (["image['image']"], {}), "(image['image'])\n", (3675, 3691), False, 'import base64\n'), ((3713, 3734), 'io.BytesIO', 'io.BytesIO', (['image_str'], {}), '(image_str)\n', (3723, 3734), False, 'import io\n'), ((4052, 4098), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'data'}), '(url, headers=headers, data=data)\n', (4065, 
4098), False, 'import requests\n'), ((4257, 4307), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["image['datetime']"], {}), "(image['datetime'])\n", (4288, 4307), False, 'import datetime\n'), ((4536, 4552), 'numpy.nanmax', 'np.nanmax', (['probs'], {}), '(probs)\n', (4545, 4552), True, 'import numpy as np\n'), ((3785, 3840), 'keras_preprocessing.image.load_img', 'keras_img.load_img', (['image_file'], {'target_size': 'target_size'}), '(image_file, target_size=target_size)\n', (3803, 3840), True, 'import keras_preprocessing.image as keras_img\n'), ((2861, 2914), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['time[1:]', '"""%Y%m%dT%H%M%S"""'], {}), "(time[1:], '%Y%m%dT%H%M%S')\n", (2887, 2914), False, 'import datetime\n'), ((4689, 4705), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (4698, 4705), True, 'import numpy as np\n'), ((5635, 5661), 'numpy.log10', 'np.log10', (['scaled_biovolume'], {}), '(scaled_biovolume)\n', (5643, 5661), True, 'import numpy as np\n'), ((5710, 5736), 'numpy.log10', 'np.log10', (['scaled_biovolume'], {}), '(scaled_biovolume)\n', (5718, 5736), True, 'import numpy as np\n'), ((4987, 5013), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5011, 5013), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
import DatasetLoader
import RICNNModel
import tensorflow as tf
import sys
import numpy as np
import regularization as re
import os
import trainLoader
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
TRAIN_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw_100.h5'
TEST_FILENAME = '/media/liuqi/Files/dataset/test_mnist_ricnn_raw.h5'
TRAIN_LABELS = '/media/liuqi/Files/dataset/rotate_100_simple.h5'
TEST_LABELS = '/home/liuqi/Desktop/mnist_rotation_new/mnist_all_rotation_normalized_float_test.amat'
LOADED_SIZE = 28
DESIRED_SIZE = 227
# model constants
NUMBER_OF_CLASSES = 10
NUMBER_OF_FILTERS = 40
NUMBER_OF_FC_FEATURES = 5120
NUMBER_OF_TRANSFORMATIONS = 8
# optimization constants
BATCH_SIZE = 64
TEST_CHUNK_SIZE = 100
ADAM_LEARNING_RATE = 1e-5
PRINTING_INTERVAL = 10
# set seeds
np.random.seed(100)
tf.set_random_seed(100)
x = tf.placeholder(tf.float32, shape=[None,
DESIRED_SIZE,
DESIRED_SIZE,
1,
NUMBER_OF_TRANSFORMATIONS])
y_gt = tf.placeholder(tf.float32, shape=[None, NUMBER_OF_CLASSES])
keep_prob = tf.placeholder(tf.float32)
logits, raw_feature, regularization_loss = RICNNModel.define_model(x,
keep_prob,
NUMBER_OF_CLASSES,
NUMBER_OF_FILTERS,
NUMBER_OF_FC_FEATURES)
with tf.name_scope('loss'):
with tf.name_scope('re_loss'):
re_loss = re.regu_constraint(raw_feature, logits)
with tf.name_scope('sotfmax_loss'):
sotfmax_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_gt))
with tf.name_scope('total_loss'):
total_loss = sotfmax_loss
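        # note: only the softmax loss drives the optimizer below; re_loss is evaluated separately for monitoring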
train_step = tf.train.AdamOptimizer(ADAM_LEARNING_RATE).minimize(total_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.initialize_all_variables())
train_data_loader = trainLoader.DataLoader(TRAIN_FILENAME,
TRAIN_LABELS,
NUMBER_OF_CLASSES,
NUMBER_OF_TRANSFORMATIONS,
LOADED_SIZE,
DESIRED_SIZE)
test_data_loader = DatasetLoader.DataLoader(TEST_FILENAME,
TEST_LABELS,
NUMBER_OF_CLASSES,
NUMBER_OF_TRANSFORMATIONS,
LOADED_SIZE,
DESIRED_SIZE)
test_size = test_data_loader.all()[1].shape[0]
assert test_size % TEST_CHUNK_SIZE == 0
number_of_test_chunks = test_size // TEST_CHUNK_SIZE
while (True):
batch = train_data_loader.next_batch(BATCH_SIZE) # next_batch from the loader
txt_name = "accary_ricnn.txt"
    txt_file = open(txt_name, "a+")
if (train_data_loader.is_new_epoch()):
train_accuracy = session.run(accuracy, feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 1.0})
print_loss = session.run(re_loss,feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 1.0})
print_loss_1 = session.run(sotfmax_loss, feed_dict={x: batch[0],
y_gt: batch[1],
keep_prob: 1.0})
print(print_loss)
print(print_loss_1)
train_context = "epochs:" + str(train_data_loader.get_completed_epochs()) + '\n'
txt_file.write(train_context)
loss_context = "softmax_loss:" + str(print_loss_1) + '\n'
txt_file.write(loss_context)
txt_file.close()
print("completed_epochs %d, training accuracy %g" %
(train_data_loader.get_completed_epochs(), train_accuracy))
sys.stdout.flush()
if (train_data_loader.get_completed_epochs() % PRINTING_INTERVAL == 0):
sum = 0.0
        txt_name = "accary_ricnn.txt"
        txt_file = open(txt_name, "a+")
        for chunk_index in range(number_of_test_chunks):
chunk = test_data_loader.next_batch(TEST_CHUNK_SIZE)
sum += session.run(accuracy, feed_dict={x : chunk[0],
y_gt : chunk[1],
keep_prob : 1.0})
test_accuracy = sum / number_of_test_chunks
new_context = "testing accuracy: " + str(test_accuracy) + '\n'
txt_file.write(new_context)
txt_file.close()
print("testing accuracy %g" % test_accuracy)
sys.stdout.flush()
session.run(train_step, feed_dict={x : batch[0],
y_gt : batch[1],
keep_prob : 0.5})
|
[
"tensorflow.cast",
"sys.stdout.flush",
"tensorflow.initialize_all_variables",
"trainLoader.DataLoader",
"tensorflow.placeholder",
"tensorflow.Session",
"RICNNModel.define_model",
"tensorflow.argmax",
"tensorflow.name_scope",
"numpy.random.seed",
"regularization.regu_constraint",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.train.AdamOptimizer",
"tensorflow.set_random_seed",
"DatasetLoader.DataLoader"
] |
[((801, 820), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (815, 820), True, 'import numpy as np\n'), ((821, 844), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(100)'], {}), '(100)\n', (839, 844), True, 'import tensorflow as tf\n'), ((849, 951), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, DESIRED_SIZE, DESIRED_SIZE, 1, NUMBER_OF_TRANSFORMATIONS]'}), '(tf.float32, shape=[None, DESIRED_SIZE, DESIRED_SIZE, 1,\n NUMBER_OF_TRANSFORMATIONS])\n', (863, 951), True, 'import tensorflow as tf\n'), ((1107, 1166), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, NUMBER_OF_CLASSES]'}), '(tf.float32, shape=[None, NUMBER_OF_CLASSES])\n', (1121, 1166), True, 'import tensorflow as tf\n'), ((1179, 1205), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1193, 1205), True, 'import tensorflow as tf\n'), ((1249, 1351), 'RICNNModel.define_model', 'RICNNModel.define_model', (['x', 'keep_prob', 'NUMBER_OF_CLASSES', 'NUMBER_OF_FILTERS', 'NUMBER_OF_FC_FEATURES'], {}), '(x, keep_prob, NUMBER_OF_CLASSES, NUMBER_OF_FILTERS,\n NUMBER_OF_FC_FEATURES)\n', (1272, 1351), False, 'import RICNNModel\n'), ((2047, 2059), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2057, 2059), True, 'import tensorflow as tf\n'), ((2123, 2252), 'trainLoader.DataLoader', 'trainLoader.DataLoader', (['TRAIN_FILENAME', 'TRAIN_LABELS', 'NUMBER_OF_CLASSES', 'NUMBER_OF_TRANSFORMATIONS', 'LOADED_SIZE', 'DESIRED_SIZE'], {}), '(TRAIN_FILENAME, TRAIN_LABELS, NUMBER_OF_CLASSES,\n NUMBER_OF_TRANSFORMATIONS, LOADED_SIZE, DESIRED_SIZE)\n', (2145, 2252), False, 'import trainLoader\n'), ((2453, 2582), 'DatasetLoader.DataLoader', 'DatasetLoader.DataLoader', (['TEST_FILENAME', 'TEST_LABELS', 'NUMBER_OF_CLASSES', 'NUMBER_OF_TRANSFORMATIONS', 'LOADED_SIZE', 'DESIRED_SIZE'], {}), '(TEST_FILENAME, TEST_LABELS, NUMBER_OF_CLASSES,\n NUMBER_OF_TRANSFORMATIONS, LOADED_SIZE, DESIRED_SIZE)\n', (2477, 2582), False, 'import DatasetLoader\n'), ((1485, 1506), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (1498, 1506), True, 'import tensorflow as tf\n'), ((1928, 1948), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (1937, 1948), True, 'import tensorflow as tf\n'), ((1950, 1968), 'tensorflow.argmax', 'tf.argmax', (['y_gt', '(1)'], {}), '(y_gt, 1)\n', (1959, 1968), True, 'import tensorflow as tf\n'), ((1996, 2035), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2003, 2035), True, 'import tensorflow as tf\n'), ((2072, 2101), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2099, 2101), True, 'import tensorflow as tf\n'), ((1517, 1541), 'tensorflow.name_scope', 'tf.name_scope', (['"""re_loss"""'], {}), "('re_loss')\n", (1530, 1541), True, 'import tensorflow as tf\n'), ((1561, 1600), 'regularization.regu_constraint', 're.regu_constraint', (['raw_feature', 'logits'], {}), '(raw_feature, logits)\n', (1579, 1600), True, 'import regularization as re\n'), ((1610, 1639), 'tensorflow.name_scope', 'tf.name_scope', (['"""sotfmax_loss"""'], {}), "('sotfmax_loss')\n", (1623, 1639), True, 'import tensorflow as tf\n'), ((1757, 1784), 'tensorflow.name_scope', 'tf.name_scope', (['"""total_loss"""'], {}), "('total_loss')\n", (1770, 1784), True, 'import tensorflow as tf\n'), ((1834, 1876), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['ADAM_LEARNING_RATE'], {}), 
'(ADAM_LEARNING_RATE)\n', (1856, 1876), True, 'import tensorflow as tf\n'), ((4127, 4145), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4143, 4145), False, 'import sys\n'), ((1679, 1746), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'y_gt'}), '(logits=logits, labels=y_gt)\n', (1718, 1746), True, 'import tensorflow as tf\n'), ((4855, 4873), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4871, 4873), False, 'import sys\n')]
|
"""
The file defines the evaluate process on target dataset.
@Author: <NAME>
@Github: https://github.com/luyanger1799
@Project: https://github.com/luyanger1799/amazing-semantic-segmentation
"""
from sklearn.metrics import multilabel_confusion_matrix
from amazingutils.helpers import *
from amazingutils.utils import load_image
import numpy as np
import argparse
import sys
import cv2
import os
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', help='The path of the dataset.', type=str, default='CamVid')
parser.add_argument('--crop_height', help='The height to crop the image.', type=int, default=256)
parser.add_argument('--crop_width', help='The width to crop the image.', type=int, default=256)
parser.add_argument('--predictions', help='The path of predicted image.', type=str, required=True)
args = parser.parse_args()
# check related paths
paths = check_related_path(os.getcwd())
# get image and label file names for training and validation
_, _, _, _, _, test_label_names = get_dataset_info(args.dataset)
# get color info
csv_file = os.path.join(args.dataset, 'class_dict.csv')
class_names, _ = get_colored_info(csv_file)
# get the prediction file name list
if not os.path.exists(args.predictions):
raise ValueError('the path of predictions does not exit.')
prediction_names = []
for file in sorted(os.listdir(args.predictions)):
prediction_names.append(os.path.join(args.predictions, file))
# evaluated classes
evaluated_classes = get_evaluated_classes(os.path.join(args.dataset, 'evaluated_classes.txt'))
num_classes = len(class_names)
class_iou = dict()
for name in evaluated_classes:
class_iou[name] = list()
class_idx = dict(zip(class_names, range(num_classes)))
# begin evaluate
assert len(test_label_names) == len(prediction_names)
for i, (name1, name2) in enumerate(zip(test_label_names, prediction_names)):
sys.stdout.write('\rRunning test image %d / %d' % (i + 1, len(test_label_names)))
sys.stdout.flush()
label = np.array(cv2.resize(load_image(name1),
dsize=(args.crop_width, args.crop_height), interpolation=cv2.INTER_NEAREST))
pred = np.array(cv2.resize(load_image(name2),
dsize=(args.crop_width, args.crop_height), interpolation=cv2.INTER_NEAREST))
confusion_matrix = multilabel_confusion_matrix(label.flatten(), pred.flatten(), labels=list(class_idx.values()))
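    # multilabel_confusion_matrix returns one 2x2 matrix [[tn, fp], [fn, tp]] per class, in the order of `labels`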
for eval_cls in evaluated_classes:
eval_idx = class_idx[eval_cls]
(tn, fp), (fn, tp) = confusion_matrix[eval_idx]
if tp + fn > 0:
class_iou[eval_cls].append(tp / (tp + fp + fn))
print('\n****************************************')
print('* The IoU of each class is as follows: *')
print('****************************************')
for eval_cls in evaluated_classes:
class_iou[eval_cls] = np.mean(class_iou[eval_cls])
print('{cls:}: {iou:.4f}'.format(cls=eval_cls, iou=class_iou[eval_cls]))
print('\n**********************************************')
print('* The Mean IoU of all classes is as follows: *')
print('**********************************************')
print('Mean IoU: {mean_iou:.4f}'.format(mean_iou=np.mean(list(class_iou.values()))))
|
[
"os.path.exists",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"argparse.ArgumentTypeError",
"os.getcwd",
"sys.stdout.flush",
"amazingutils.utils.load_image"
] |
[((651, 676), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (674, 676), False, 'import argparse\n'), ((1311, 1355), 'os.path.join', 'os.path.join', (['args.dataset', '"""class_dict.csv"""'], {}), "(args.dataset, 'class_dict.csv')\n", (1323, 1355), False, 'import os\n'), ((1142, 1153), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1151, 1153), False, 'import os\n'), ((1445, 1477), 'os.path.exists', 'os.path.exists', (['args.predictions'], {}), '(args.predictions)\n', (1459, 1477), False, 'import os\n'), ((1584, 1612), 'os.listdir', 'os.listdir', (['args.predictions'], {}), '(args.predictions)\n', (1594, 1612), False, 'import os\n'), ((1744, 1795), 'os.path.join', 'os.path.join', (['args.dataset', '"""evaluated_classes.txt"""'], {}), "(args.dataset, 'evaluated_classes.txt')\n", (1756, 1795), False, 'import os\n'), ((2204, 2222), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2220, 2222), False, 'import sys\n'), ((3093, 3121), 'numpy.mean', 'np.mean', (['class_iou[eval_cls]'], {}), '(class_iou[eval_cls])\n', (3100, 3121), True, 'import numpy as np\n'), ((1643, 1679), 'os.path.join', 'os.path.join', (['args.predictions', 'file'], {}), '(args.predictions, file)\n', (1655, 1679), False, 'import os\n'), ((586, 639), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (612, 639), False, 'import argparse\n'), ((2256, 2273), 'amazingutils.utils.load_image', 'load_image', (['name1'], {}), '(name1)\n', (2266, 2273), False, 'from amazingutils.utils import load_image\n'), ((2415, 2432), 'amazingutils.utils.load_image', 'load_image', (['name2'], {}), '(name2)\n', (2425, 2432), False, 'from amazingutils.utils import load_image\n')]
|
from pso.GPSO import GPSO
import numpy as np
import time
import pandas as pd
np.random.seed(42)
# f1 done
def Sphere(p):
# Sphere函数
out_put = 0
for i in p:
out_put += i ** 2
return out_put
# f2 done
def Sch222(x):
out_put = 0
out_put01 = 1
for i in x:
out_put += abs(i)
out_put01 = abs(i)*out_put01
out_put = out_put01+out_put
return out_put
# f3 done
def Quadric(x):
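    # "Quadric" (Schwefel problem 1.2): sum over i of the squared partial sum x[0] + ... + x[i]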
output = 0
# print(x.shape[0])
for i in range(x.shape[0]):
output += (np.sum(x[0:i+1])) ** 2
# print(np.square(np.sum(x[0:i+1])))
return output
# f4 done
def Schl(x):
# print(np.max(np.abs(x)))
return np.max(np.abs(x))
# f5 done
def Step(x):
output = 0
for i in x:
output += (np.floor(i+0.5))**2
return output
# f6 done
def Noise(x):
output = 0
cnt = 1
for i in x:
output = cnt * (i**4) + output
cnt += 1
output += np.random.rand()
return output
# f7 done
def Rosenbrock(p):
'''
-2.048<=xi<=2.048
    The global optimum lies in a smooth, narrow parabolic valley, which makes it hard
    for an algorithm to identify the search direction and locate the optimum.
    The minimum value 0 is attained at (1, ..., 1).
:param p:
:return:
'''
n_dim = len(p)
res = 0
for i in range(n_dim - 1):
res += 100 * np.square(np.square(p[i]) - p[i + 1]) + np.square(p[i] - 1)
return res
# f8 is problematic, ignore it; this is f8 from APSO
def Schewel(x):
out_put = 0
for i in x:
out_put += -i*np.sin(np.sqrt(abs(i)))
return out_put
# f9 done
def Rastrigin(p):
'''
    A multi-peak function and a typical nonlinear multimodal benchmark.
    -5.12<=xi<=5.12
    Within this range there are about 10n local minima with peaks of irregular height, so the global optimum is hard to find.
has a global minimum at x = 0 where f(x) = 0
'''
return np.sum([np.square(x) - 10 * np.cos(2 * np.pi * x) + 10 for x in p])
# f10
def Ackley(x):
part1 = 0
part2 = 0
for i in x:
part1 += (i**2)
part2 += np.cos(2 * np.pi * i)
left = 20 * np.exp(-0.2 * ((part1 / x.shape[0]) ** .5))
right = np.exp(part2 / x.shape[0])
return -left - right + 20 + np.e
# f11 ok
def Griewank(p):
'''
    There are many local minima, and their number depends on the problem dimension.
    A typical nonlinear multimodal function with a large search space; a complex multimodal problem that is hard for optimization algorithms.
    The global minimum 0 is attained at (0, ..., 0).
-600<=xi<=600
'''
part1 = [np.square(x) / 4000 for x in p]
part2 = [np.cos(x / np.sqrt(i + 1)) for i, x in enumerate(p)]
return np.sum(part1) - np.prod(part2) + 1
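# experiment settings: g = max iterations per PSO run, times = number of independent runs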
g = 10000
times = 30
table = np.zeros((2, 10))
gBest = np.zeros((10, 30))  # best value of each of the 30 runs for each of the 10 functions
for i in range(times):
optimizer = GPSO(func=Sphere, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Sphere:', optimizer.gbest_y)
table[0, 0] += optimizer.gbest_y
table[1, 0] += end - start
gBest[0, i] = optimizer.gbest_y
optimizer = GPSO(func=Sch222, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-10), ub=np.ones(30) * 10,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Sch222:', optimizer.gbest_y)
table[0, 1] += optimizer.gbest_y
table[1, 1] += end - start
gBest[1, i] = optimizer.gbest_y
optimizer = GPSO(func=Quadric, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=100)
start = time.time()
optimizer.run()
end = time.time()
print('Quadric:', optimizer.gbest_y)
table[0, 2] += optimizer.gbest_y
table[1, 2] += end - start
gBest[2, i] = optimizer.gbest_y
optimizer = GPSO(func=Rosenbrock, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-10), ub=np.ones(30) * 10,
w=0.9, c1=2, c2=2, acceptance=100)
start = time.time()
optimizer.run()
end = time.time()
print('Rosenbrock:', optimizer.gbest_y)
table[0, 3] += optimizer.gbest_y
table[1, 3] += end - start
gBest[3, i] = optimizer.gbest_y
optimizer = GPSO(func=Step, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-100), ub=np.ones(30) * 100,
w=0.9, c1=2, c2=2, acceptance=0)
start = time.time()
optimizer.run()
end = time.time()
print('Step:', optimizer.gbest_y)
table[0, 4] += optimizer.gbest_y
table[1, 4] += end - start
gBest[4, i] = optimizer.gbest_y
optimizer = GPSO(func=Noise, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-1.28), ub=np.ones(30) * 1.28,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Noise:', optimizer.gbest_y)
table[0, 5] += optimizer.gbest_y
table[1, 5] += end - start
gBest[5, i] = optimizer.gbest_y
optimizer = GPSO(func=Schewel, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-500), ub=np.ones(30) * 500,
w=0.9, c1=2, c2=2, acceptance=-10000)
start = time.time()
optimizer.run()
end = time.time()
print('Schewel:', optimizer.gbest_y)
table[0, 6] += optimizer.gbest_y
table[1, 6] += end - start
gBest[6, i] = optimizer.gbest_y
optimizer = GPSO(func=Rastrigin, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-5.12), ub=np.ones(30) * 5.12,
w=0.9, c1=2, c2=2, acceptance=50)
start = time.time()
optimizer.run()
end = time.time()
print('Rastrigin:', optimizer.gbest_y)
table[0, 7] += optimizer.gbest_y
table[1, 7] += end - start
gBest[7, i] = optimizer.gbest_y
optimizer = GPSO(func=Ackley, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-32), ub=np.ones(30) * 32,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Ackley:', optimizer.gbest_y)
table[0, 8] += optimizer.gbest_y
table[1, 8] += end - start
gBest[8, i] = optimizer.gbest_y
optimizer = GPSO(func=Griewank, dim=30, pop=20, max_iter=g, lb=np.ones(30) * (-600), ub=np.ones(30) * 600,
w=0.9, c1=2, c2=2, acceptance=0.01)
start = time.time()
optimizer.run()
end = time.time()
print('Griewank:', optimizer.gbest_y)
table[0, 9] += optimizer.gbest_y
table[1, 9] += end - start
gBest[9, i] = optimizer.gbest_y
table = table / times
table = pd.DataFrame(table)
table.columns = ['Sphere', 'Schwefel_P222', 'Quadric', 'Rosenbrock', 'Step', 'Quadric_Noise', 'Schwefel',
'Rastrigin', 'Ackley', 'Griewank']
table.index = ['mean score', 'mean time']
print(table)
print('std of the 10 test functions over 30 runs:', np.std(gBest, axis=1))
print('best of the 10 test functions over 30 runs:', np.min(gBest, axis=1))
|
[
"numpy.abs",
"numpy.prod",
"numpy.sqrt",
"numpy.random.rand",
"numpy.ones",
"numpy.floor",
"numpy.min",
"numpy.square",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.random.seed",
"numpy.cos",
"numpy.std",
"pandas.DataFrame",
"time.time"
] |
[((78, 96), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (92, 96), True, 'import numpy as np\n'), ((2299, 2316), 'numpy.zeros', 'np.zeros', (['(2, 10)'], {}), '((2, 10))\n', (2307, 2316), True, 'import numpy as np\n'), ((2325, 2343), 'numpy.zeros', 'np.zeros', (['(10, 30)'], {}), '((10, 30))\n', (2333, 2343), True, 'import numpy as np\n'), ((6195, 6214), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {}), '(table)\n', (6207, 6214), True, 'import pandas as pd\n'), ((930, 946), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (944, 946), True, 'import numpy as np\n'), ((1883, 1909), 'numpy.exp', 'np.exp', (['(part2 / x.shape[0])'], {}), '(part2 / x.shape[0])\n', (1889, 1909), True, 'import numpy as np\n'), ((2564, 2575), 'time.time', 'time.time', ([], {}), '()\n', (2573, 2575), False, 'import time\n'), ((2606, 2617), 'time.time', 'time.time', ([], {}), '()\n', (2615, 2617), False, 'import time\n'), ((2939, 2950), 'time.time', 'time.time', ([], {}), '()\n', (2948, 2950), False, 'import time\n'), ((2981, 2992), 'time.time', 'time.time', ([], {}), '()\n', (2990, 2992), False, 'import time\n'), ((3316, 3327), 'time.time', 'time.time', ([], {}), '()\n', (3325, 3327), False, 'import time\n'), ((3358, 3369), 'time.time', 'time.time', ([], {}), '()\n', (3367, 3369), False, 'import time\n'), ((3695, 3706), 'time.time', 'time.time', ([], {}), '()\n', (3704, 3706), False, 'import time\n'), ((3737, 3748), 'time.time', 'time.time', ([], {}), '()\n', (3746, 3748), False, 'import time\n'), ((4071, 4082), 'time.time', 'time.time', ([], {}), '()\n', (4080, 4082), False, 'import time\n'), ((4113, 4124), 'time.time', 'time.time', ([], {}), '()\n', (4122, 4124), False, 'import time\n'), ((4447, 4458), 'time.time', 'time.time', ([], {}), '()\n', (4456, 4458), False, 'import time\n'), ((4489, 4500), 'time.time', 'time.time', ([], {}), '()\n', (4498, 4500), False, 'import time\n'), ((4826, 4837), 'time.time', 'time.time', ([], {}), '()\n', (4835, 4837), False, 'import time\n'), ((4868, 4879), 'time.time', 'time.time', ([], {}), '()\n', (4877, 4879), False, 'import time\n'), ((5207, 5218), 'time.time', 'time.time', ([], {}), '()\n', (5216, 5218), False, 'import time\n'), ((5249, 5260), 'time.time', 'time.time', ([], {}), '()\n', (5258, 5260), False, 'import time\n'), ((5585, 5596), 'time.time', 'time.time', ([], {}), '()\n', (5594, 5596), False, 'import time\n'), ((5627, 5638), 'time.time', 'time.time', ([], {}), '()\n', (5636, 5638), False, 'import time\n'), ((5964, 5975), 'time.time', 'time.time', ([], {}), '()\n', (5973, 5975), False, 'import time\n'), ((6006, 6017), 'time.time', 'time.time', ([], {}), '()\n', (6015, 6017), False, 'import time\n'), ((6467, 6488), 'numpy.std', 'np.std', (['gBest'], {'axis': '(1)'}), '(gBest, axis=1)\n', (6473, 6488), True, 'import numpy as np\n'), ((6516, 6537), 'numpy.min', 'np.min', (['gBest'], {'axis': '(1)'}), '(gBest, axis=1)\n', (6522, 6537), True, 'import numpy as np\n'), ((673, 682), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (679, 682), True, 'import numpy as np\n'), ((1789, 1810), 'numpy.cos', 'np.cos', (['(2 * np.pi * i)'], {}), '(2 * np.pi * i)\n', (1795, 1810), True, 'import numpy as np\n'), ((1827, 1869), 'numpy.exp', 'np.exp', (['(-0.2 * (part1 / x.shape[0]) ** 0.5)'], {}), '(-0.2 * (part1 / x.shape[0]) ** 0.5)\n', (1833, 1869), True, 'import numpy as np\n'), ((516, 534), 'numpy.sum', 'np.sum', (['x[0:i + 1]'], {}), '(x[0:i + 1])\n', (522, 534), True, 'import numpy as np\n'), ((756, 773), 'numpy.floor', 'np.floor', (['(i + 0.5)'], 
{}), '(i + 0.5)\n', (764, 773), True, 'import numpy as np\n'), ((1255, 1274), 'numpy.square', 'np.square', (['(p[i] - 1)'], {}), '(p[i] - 1)\n', (1264, 1274), True, 'import numpy as np\n'), ((2124, 2136), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (2133, 2136), True, 'import numpy as np\n'), ((2233, 2246), 'numpy.sum', 'np.sum', (['part1'], {}), '(part1)\n', (2239, 2246), True, 'import numpy as np\n'), ((2249, 2263), 'numpy.prod', 'np.prod', (['part2'], {}), '(part2)\n', (2256, 2263), True, 'import numpy as np\n'), ((2180, 2194), 'numpy.sqrt', 'np.sqrt', (['(i + 1)'], {}), '(i + 1)\n', (2187, 2194), True, 'import numpy as np\n'), ((2451, 2462), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2458, 2462), True, 'import numpy as np\n'), ((2476, 2487), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2483, 2487), True, 'import numpy as np\n'), ((2828, 2839), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2835, 2839), True, 'import numpy as np\n'), ((2852, 2863), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (2859, 2863), True, 'import numpy as np\n'), ((3204, 3215), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3211, 3215), True, 'import numpy as np\n'), ((3229, 3240), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3236, 3240), True, 'import numpy as np\n'), ((3585, 3596), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3592, 3596), True, 'import numpy as np\n'), ((3609, 3620), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3616, 3620), True, 'import numpy as np\n'), ((3961, 3972), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3968, 3972), True, 'import numpy as np\n'), ((3986, 3997), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3993, 3997), True, 'import numpy as np\n'), ((4332, 4343), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4339, 4343), True, 'import numpy as np\n'), ((4358, 4369), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4365, 4369), True, 'import numpy as np\n'), ((4711, 4722), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4718, 4722), True, 'import numpy as np\n'), ((4736, 4747), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (4743, 4747), True, 'import numpy as np\n'), ((5094, 5105), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5101, 5105), True, 'import numpy as np\n'), ((5120, 5131), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5127, 5131), True, 'import numpy as np\n'), ((5474, 5485), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5481, 5485), True, 'import numpy as np\n'), ((5498, 5509), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5505, 5509), True, 'import numpy as np\n'), ((5851, 5862), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5858, 5862), True, 'import numpy as np\n'), ((5876, 5887), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (5883, 5887), True, 'import numpy as np\n'), ((1622, 1634), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (1631, 1634), True, 'import numpy as np\n'), ((1225, 1240), 'numpy.square', 'np.square', (['p[i]'], {}), '(p[i])\n', (1234, 1240), True, 'import numpy as np\n'), ((1642, 1663), 'numpy.cos', 'np.cos', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1648, 1663), True, 'import numpy as np\n')]
|
import sys
import pygame as pg
import numpy as np
import random
import time
pic = np.zeros(shape=(128,64))
width = 128
height = 64
refresh_rate = 60
interval = 1 / refresh_rate
bootrom_file = "bootrom0"
rom_file = "rom"
# rom_file = "hello_world"
debug = False
pg.display.init()
display = pg.display.set_mode((width*4, height*4), flags=0, depth=8)
screen = pg.Surface((width, height), flags=0, depth=8)
pg.transform.scale(screen, (width*4, height*4), display)
def screen_update(silent=True):
pg.transform.scale(screen, (width*4, height*4), display)
pg.display.flip()
if not silent:
print("Screen Update")
def screen_clear():
screen.fill((0,0,0))
#screen_update()
def screen_draw_line(x, y, pixels):
# print("----------DRAW----------")
# print("x:",x)
# print("y:",y)
# print("pix:",bin(pixels))
j = 0b10000000
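    # scan the 8 bits of the pixel byte from MSB (leftmost pixel) to LSB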
for i in range(8):
x_pos = x + i
y_pos = y
if x_pos >= 0 and x_pos < width:
if y_pos >= 0 and y_pos < height:
if pixels & j:
pg.draw.rect(screen, 255, pg.Rect(x_pos,y_pos,1,1))
else:
pg.draw.rect(screen, 0, pg.Rect(x_pos,y_pos,1,1))
j = j >> 1
#screen_update()
screen_clear()
# screen_draw_line(0,0,0b10101011)
# input()
class memByte:
def __init__(self):
self.value = 0x00000000
def write(self, value):
self.value = value & 0xff
def readUpper(self):
return (self.value & 0b11110000) >> 4
def readLower(self):
return self.value & 0b1111
class Flags:
def __init__(self):
self.z = 0
self.n = 0
self.h = 0
self.c = 0
def setZero(self):
self.z = 1
def clearZero(self):
self.z = 0
def setNeg(self):
self.n = 1
def clearNeg(self):
self.n = 0
def setHalf(self):
self.h = 1
def clearHalf(self):
self.h = 0
def setCarry(self):
self.c = 1
def clearCarry(self):
self.c = 0
def clearFlags(self):
self.z = 0
self.n = 0
self.h = 0
self.c = 0
class reg:
def __init__(self):
self.value = 0b00000000
self.value = random.randint(0,255)
def send(self):
sys.stdout.write(chr(self.value))
sys.stdout.flush()
class Dreg:
def __init__(self, r1, r2):
self.r1 = r1
self.r2 = r2
def getvalue(self):
self.value = (self.r1.value << 8) + self.r2.value
def setvalue(self):
self.r1.value = self.value >> 8
self.r2.value = self.value & 0xff
class regPC:
def __init__(self):
self.value = 0x0
def inc(self, length=1):
self.value += length
self.value = self.value & 0xffff
def jump(self, address):
self.value = address & 0xffff
class regSP:
def __init__(self):
self.value = 0xfffe
def inc(self):
self.value += 2
self.value = self.value & 0xffff
def dec(self):
self.value -= 2
def setvalue(self):
#print("SPSET:",hex(self.value))
pass # JUST TO MAKE LDX SIMPLER
ONE_REG = reg()
ONE_REG.value = 1
FL = Flags()
halt = False
A = reg()
B = reg()
C = reg()
D = reg()
E = reg()
H = reg()
L = reg()
BC = Dreg(B, C)
DE = Dreg(D, E)
HL = Dreg(H, L)
#E.value = 0x1 # Randomness loop
PC = regPC()
SP = regSP()
memory = []
jumped = False
print("RESERVING MEMORY...")
for i in range(0x10000):
memory.append(memByte())
print("MEMORY RESERVED.")
print("LOADING MEMORY...")
f = open(bootrom_file, "rb")
rom_data = f.read()
f.close()
for i in range(len(rom_data)):
memory[i+0x0].value = rom_data[i]
f = open(rom_file, "rb")
rom_data = f.read()
f.close()
for i in range(len(rom_data)):
memory[i+0x597].value = rom_data[i]
print("MEMORY LOADED.")
def LDI(R, mem=False):
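    # load the immediate byte that follows the opcode into R (or into memory at the address in R when mem=True)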
PC.inc()
if not mem:
R.value = memory[PC.value].value
else:
R.getvalue()
memory[R.value].value = memory[PC.value].value
def LDX(R):
PC.inc()
low = memory[PC.value].value
PC.inc()
R.value = low + (memory[PC.value].value << 8)
R.setvalue()
def PUSH_R(R, mem=False):
if not mem:
memory[SP.value].value = R.value
else:
R.getvalue()
memory[SP.value].value = memory[R.value].value
SP.dec()
def PUSH_RR(RR):
RR.getvalue()
memory[SP.value].value = RR.value & 0xff
memory[SP.value + 1].value = RR.value >> 8
SP.dec()
def POP_R(R, mem=False):
SP.inc()
if not mem:
#print(hex(SP.value))
R.value = memory[SP.value].value
else:
R.getvalue()
memory[R.value].value = memory[SP.value].value
def POP_RR(RR):
SP.inc()
RR.value = memory[SP.value].value + (memory[SP.value + 1].value << 8)
RR.setvalue()
MOV_REGISTERS = [B, C, D, E, H, L, HL, A]
MOVB_OPCODES = [0x09, 0x19, 0x29, 0x39, 0x49, 0x59, 0x69, 0x79]
MOVC_OPCODES = [0x99, 0x99, 0xA9, 0xB9, 0xC9, 0xD9, 0xE9, 0xF9]
MOVD_OPCODES = [0x0A, 0x1A, 0x2A, 0x3A, 0x4A, 0x5A, 0x6A, 0x7A]
MOVE_OPCODES = [0x8A, 0x9A, 0xAA, 0xBA, 0xCA, 0xDA, 0xEA, 0xFA]
MOVH_OPCODES = [0x0B, 0x1B, 0x2B, 0x3B, 0x4B, 0x5B, 0x6B, 0x7B]
MOVL_OPCODES = [0x8B, 0x9B, 0xAB, 0xBB, 0xCB, 0xDB, 0xEB, 0xFB]
MOVMHL_OPCODES = [0x0C, 0x1C, 0x2C, 0x3C, 0x4C, 0x5C, 0x6C, 0x7C]
MOVA_OPCODES = [0x8C, 0x9C, 0xAC, 0xBC, 0xCC, 0xDC, 0xEC, 0xFC]
def MOV(R1, R2index, mem=False):
R2 = MOV_REGISTERS[R2index]
if not mem:
if R2index == 6:
R2.getvalue()
R1.value = memory[R2.value].value
else:
R1.value = R2.value
else:
memory[R1.value].value = R2.value
R1.setvalue()
def MOV_RR(RR1, RR2):
RR2.getvalue()
RR1.value = RR2.value
RR1.setvalue()
def ADD_8(value1, value2):
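    # 8-bit addition: updates the Z/N/H/C flags and returns the sum masked to one byte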
nib = (value1 & 0xf) + (value2 & 0xf)
value = value1 + value2
FL.clearFlags()
if value & 0xff == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
if nib & 0xf0:
FL.setHalf()
if value >> 8:
FL.setCarry()
return value & 0xff
def ADD_R(R, mem=False):
if not mem:
value = ADD_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = ADD_8(A.value, memory[R.value].value)
memory[R.value].value = value
def ADD_16(value1, value2):
nib = (value1 & 0xf) + (value2 & 0xf)
value = value1 + value2
FL.clearFlags()
if value & 0xffff == 0:
FL.setZero()
if value & 0b1000000000000000:
FL.setNeg()
if nib & 0xf0:
FL.setHalf()
if value >> 16:
FL.setCarry()
return value & 0xffff
def ADDX_RR(RR):
RR.getvalue()
value = ADD_16(A.value, RR.value)
RR.value = value
RR.setvalue()
def SUB_8(value1, value2):
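    # 8-bit subtraction value1 - value2 (wrapping below zero); updates the Z/N/H/C flags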
value = value1 - value2
if value < 0:
value += 0x100
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
if (value1 & 0xf) <= (value2 & 0xf):
FL.setHalf()
if value1 <= value2:
FL.setCarry()
return value & 0xff
def SUB_R(R, compare_only, mem=False):
if not mem:
value = SUB_8(R.value, A.value)
if not compare_only:
R.value = value
else:
R.getvalue()
value = SUB_8(memory[R.value].value, A.value)
if not compare_only:
memory[R.value].value = value
def INC(R, mem=False):
if not mem:
value = ADD_8(ONE_REG.value, R.value)
R.value = value
else:
R.getvalue()
value = ADD_8(ONE_REG.value, memory[R.value].value)
memory[R.value].value = value
def DEC(R, mem=False):
if not mem:
value = SUB_8(R.value, ONE_REG.value)
R.value = value
else:
R.getvalue()
value = SUB_8(memory[R.value].value, ONE_REG.value)
memory[R.value].value = value
def AND_8(value1, value2):
value = value1 & value2
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
return value & 0xff
def AND_R(R, mem=False):
if not mem:
value = AND_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = AND_8(A.value, memory[R.value].value)
memory[R.value].value = value
def OR_8(value1, value2):
value = value1 | value2
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
return value & 0xff
def OR_R(R, mem=False):
if not mem:
value = OR_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = OR_8(A.value, memory[R.value].value)
memory[R.value].value = value
def XOR_8(value1, value2):
value = value1 ^ value2
FL.clearFlags()
if value == 0:
FL.setZero()
if value & 0b10000000:
FL.setNeg()
return value & 0xff
def XOR_R(R, mem=False):
if not mem:
value = XOR_8(A.value, R.value)
R.value = value
else:
R.getvalue()
value = XOR_8(A.value, memory[R.value].value)
memory[R.value].value = value
def CMPS(R, mem=False):
if not mem:
Rval = R.value
if Rval & 0b10000000:
Rval = - ((0x100 - Rval) & 0xff)
Aval = A.value
if Aval & 0b10000000:
Aval = - ((0x100 - Aval) & 0xff)
FL.clearFlags()
if Rval == Aval:
FL.setZero()
elif Rval < Aval:
FL.setNeg()
else:
R.getvalue()
Rval = memory[R.value].value
if Rval & 0b10000000:
Rval = - ((0x100 - Rval) & 0xff)
Aval = A.value
if Aval & 0b10000000:
Aval = - ((0x100 - Aval) & 0xff)
FL.clearFlags()
if Rval == Aval:
FL.setZero()
elif Rval < Aval:
FL.setNeg()
def JUMP():
PC.inc()
low = memory[PC.value].value
PC.inc()
high = memory[PC.value].value
global jumped
jumped = True
PC.value = (high << 8) + low
print("JUMP:",hex((high << 8) + low))
def REL_JUMP():
PC.inc()
value = memory[PC.value].value
if value & 0b10000000:
value = - ((0x100 - value) & 0xff)
# ACCORDING TO DOCUMENTATION RELATIVE JUMPS USE THE +2 PC INC
PC.value += value
screen_update()
last_update = time.time()
while not halt:
b_up = memory[PC.value].readUpper()
b_down = memory[PC.value].readLower()
b_val = memory[PC.value].value
jumped = False
if time.time() > last_update + interval:
screen_update()
last_update = time.time()
# Handle pygame events
for event in pg.event.get():
# print("EVENT:",event.type)
# input()
pass
if debug:
pass#input()
if debug or False:
print(hex(PC.value), hex(b_val))
# if b_val in [0x86, 0x96, 0xA6, 0xB6, 0xC6, 0xD6, 0xE6, 0xF6]:
# print("CMP R")
# input()
# if b_val == 0xF7:
# print("CMPI")
# input()
# HCF (HALT)
if b_val == 0x6C:
halt = True
# LDI R, xx
if b_val == 0x20:
LDI(B)
elif b_val == 0x30:
LDI(C)
elif b_val == 0x40:
LDI(D)
elif b_val == 0x50:
LDI(E)
elif b_val == 0x60:
LDI(H)
elif b_val == 0x70:
LDI(L)
elif b_val == 0x80:
LDI(HL, mem=True)
elif b_val == 0x90:
LDI(A)
# LDX RR, xxyy
elif b_val == 0x21:
LDX(BC)
elif b_val == 0x31:
LDX(DE)
elif b_val == 0x41:
LDX(HL)
elif b_val == 0x22:
LDX(SP)
# PUSH R
elif b_val == 0x81:
PUSH_R(B)
elif b_val == 0x91:
PUSH_R(C)
elif b_val == 0xA1:
PUSH_R(D)
elif b_val == 0xB1:
PUSH_R(E)
elif b_val == 0xC1:
PUSH_R(H)
elif b_val == 0xD1:
PUSH_R(L)
elif b_val == 0xC0:
PUSH_R(HL, mem=True)
elif b_val == 0xD0:
PUSH_R(A)
# PUSH RR
elif b_val == 0x51:
PUSH_RR(BC)
elif b_val == 0x61:
PUSH_RR(DE)
elif b_val == 0x71:
PUSH_RR(HL)
# POP R
elif b_val == 0x82:
POP_R(B)
elif b_val == 0x92:
POP_R(C)
elif b_val == 0xA2:
POP_R(D)
elif b_val == 0xB2:
POP_R(E)
elif b_val == 0xC2:
POP_R(H)
elif b_val == 0xD2:
POP_R(L)
elif b_val == 0xC3:
POP_R(HL, mem=True)
elif b_val == 0xD3:
POP_R(A)
# POP RR
elif b_val == 0x52:
POP_RR(BC)
elif b_val == 0x62:
POP_RR(DE)
elif b_val == 0x72:
POP_RR(HL)
# MOV R1, R2
elif b_val in MOVB_OPCODES:
MOV(B, MOVB_OPCODES.index(b_val))
elif b_val in MOVC_OPCODES:
MOV(C, MOVC_OPCODES.index(b_val))
elif b_val in MOVD_OPCODES:
MOV(D, MOVD_OPCODES.index(b_val))
elif b_val in MOVE_OPCODES:
MOV(E, MOVE_OPCODES.index(b_val))
elif b_val in MOVH_OPCODES:
MOV(H, MOVH_OPCODES.index(b_val))
elif b_val in MOVL_OPCODES:
MOV(L, MOVL_OPCODES.index(b_val))
elif b_val in MOVMHL_OPCODES:
MOV(HL, MOVMHL_OPCODES.index(b_val), mem=True)
elif b_val in MOVA_OPCODES:
MOV(A, MOVA_OPCODES.index(b_val))
# MOV RR1, RR2
elif b_val == 0xED:
MOV_RR(HL, BC)
elif b_val == 0xFD:
MOV_RR(HL, DE)
# CLRFLAG
elif b_val == 0x08:
FL.clearFlags()
# SETFLAG f, x
elif b_val == 0x18:
FL.setZero()
elif b_val == 0x28:
FL.clearZero()
elif b_val == 0x38:
FL.setNeg()
elif b_val == 0x48:
FL.clearNeg()
elif b_val == 0x58:
FL.setHalf()
elif b_val == 0x68:
FL.clearHalf()
elif b_val == 0x78:
FL.setCarry()
elif b_val == 0x88:
FL.clearCarry()
# ADD R
elif b_val == 0x04:
ADD_R(B)
elif b_val == 0x14:
ADD_R(C)
elif b_val == 0x24:
ADD_R(D)
elif b_val == 0x34:
ADD_R(E)
elif b_val == 0x44:
ADD_R(H)
elif b_val == 0x54:
ADD_R(L)
elif b_val == 0x64:
ADD_R(HL, mem=True)
elif b_val == 0x74:
ADD_R(A)
# ADDI xx
elif b_val == 0xA7:
PC.inc()
value = ADD_8(A.value, memory[PC.value].value)
A.value = value
# ADDX RR
elif b_val == 0x83:
ADDX_RR(BC)
elif b_val == 0x93:
ADDX_RR(DE)
elif b_val == 0xA3:
ADDX_RR(HL)
# SUB R | CMP R
elif b_val == 0x84 or b_val == 0x86:
SUB_R(B, b_val == 0x86)
elif b_val == 0x94 or b_val == 0x96:
SUB_R(C, b_val == 0x96)
elif b_val == 0xA4 or b_val == 0xA6:
SUB_R(D, b_val == 0xA6)
elif b_val == 0xB4 or b_val == 0xB6:
SUB_R(E, b_val == 0xB6)
elif b_val == 0xC4 or b_val == 0xC6:
SUB_R(H, b_val == 0xC6)
elif b_val == 0xD4 or b_val == 0xD6:
SUB_R(L, b_val == 0xD6)
elif b_val == 0xE4 or b_val == 0xE6:
SUB_R(HL, b_val == 0xE6, mem=True)
elif b_val == 0xF4 or b_val == 0xF6:
SUB_R(A, b_val == 0xF6)
# SUBI xx | CMPI xx
elif b_val == 0xB7 or b_val == 0xF7:
PC.inc()
value = SUB_8(A.value, memory[PC.value].value)
if b_val == 0xB7: # SUBI xx
A.value = value
# INC R
elif b_val == 0x03:
INC(B)
elif b_val == 0x13:
INC(C)
elif b_val == 0x23:
INC(D)
elif b_val == 0x33:
INC(E)
elif b_val == 0x43:
INC(H)
elif b_val == 0x53:
INC(L)
elif b_val == 0x63:
INC(HL, mem=True)
elif b_val == 0x73:
INC(A)
# INX RR
elif b_val == 0xA8:
BC.getvalue()
BC.value += 1
BC.value & 0xffff
BC.setvalue()
elif b_val == 0xB8:
DE.getvalue()
DE.value += 1
DE.value & 0xffff
DE.setvalue()
elif b_val == 0xC8:
HL.getvalue()
HL.value += 1
HL.value & 0xffff
HL.setvalue()
# DEC R
elif b_val == 0x07:
DEC(B)
elif b_val == 0x17:
DEC(C)
elif b_val == 0x27:
DEC(D)
elif b_val == 0x37:
DEC(E)
elif b_val == 0x47:
DEC(H)
elif b_val == 0x57:
DEC(L)
elif b_val == 0x67:
DEC(HL, mem=True)
elif b_val == 0x77:
DEC(A)
# AND R
elif b_val == 0x05:
AND_R(B)
elif b_val == 0x15:
AND_R(C)
elif b_val == 0x25:
AND_R(D)
elif b_val == 0x35:
AND_R(E)
elif b_val == 0x45:
AND_R(H)
elif b_val == 0x55:
AND_R(L)
elif b_val == 0x65:
AND_R(HL, mem=True)
elif b_val == 0x75:
AND_R(A)
# ANDI xx
elif b_val == 0xC7:
PC.inc()
value = AND_8(memory[PC.value].value, A.value)
A.value = value
# OR R
elif b_val == 0x85:
OR_R(B)
elif b_val == 0x95:
OR_R(C)
elif b_val == 0xA5:
OR_R(D)
elif b_val == 0xB5:
OR_R(E)
elif b_val == 0xC5:
OR_R(H)
elif b_val == 0xD5:
OR_R(L)
elif b_val == 0xE5:
OR_R(HL, mem=True)
elif b_val == 0xF5:
OR_R(A)
# ORI xx
elif b_val == 0xD7:
PC.inc()
value = OR_8(memory[PC.value].value, A.value)
A.value = value
# XOR R
elif b_val == 0x06:
XOR_R(B)
elif b_val == 0x16:
XOR_R(C)
elif b_val == 0x26:
XOR_R(D)
elif b_val == 0x36:
XOR_R(E)
elif b_val == 0x46:
XOR_R(H)
elif b_val == 0x56:
XOR_R(L)
elif b_val == 0x66:
XOR_R(HL, mem=True)
elif b_val == 0x76:
XOR_R(A)
# XORI xx
elif b_val == 0xE7:
PC.inc()
value = XOR_8(memory[PC.value].value, A.value)
A.value = value
# CMPS R
elif b_val == 0x0D:
CMPS(B)
elif b_val == 0x1D:
CMPS(C)
elif b_val == 0x2D:
CMPS(D)
elif b_val == 0x3D:
CMPS(E)
elif b_val == 0x4D:
CMPS(H)
elif b_val == 0x5D:
CMPS(L)
elif b_val == 0x6D:
CMPS(HL, mem=True)
elif b_val == 0x7D:
CMPS(A)
# SIN
elif b_val == 0xE0:
A.value = ord(sys.stdin.buffer.read(1)) & 0xff
pass
# SOUT
elif b_val == 0xE1:
print(chr(A.value),end="",flush=True)
if A.value == 7:
print("[BELL]")
pass
# CLRSCR
elif b_val == 0xF0:
screen_clear()
# DRAW
elif b_val == 0xF1:
x = C.value
if x & 0b10000000:
x = - ((0x100 - x) & 0xff)
y = B.value
if y & 0b10000000:
y = - ((0x100 - y) & 0xff)
screen_draw_line(x, y, A.value & 0xff)
# JMP xxyy
elif b_val == 0x0F:
JUMP()
# JMPcc xxyy
elif b_val == 0x1F:
if FL.z:
JUMP()
else:
PC.inc(2)
elif b_val == 0x2F:
if not FL.z:
JUMP()
else:
PC.inc(2)
elif b_val == 0x3F:
if FL.n:
JUMP()
else:
PC.inc(2)
elif b_val == 0x4F:
if not FL.n:
JUMP()
else:
PC.inc(2)
    elif b_val == 0x5F:
        if FL.h:
            JUMP()
        else:
            PC.inc(2)
elif b_val == 0x6F:
if not FL.h:
JUMP()
else:
PC.inc(2)
elif b_val == 0x7F:
if FL.c:
JUMP()
else:
PC.inc(2)
elif b_val == 0x8F:
if not FL.c:
JUMP()
else:
PC.inc(2)
# JMP xx
elif b_val == 0x9F:
REL_JUMP()
# JMPcc xx
elif b_val == 0xAF:
if FL.z:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xBF:
if not FL.z:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xCF:
if FL.n:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xDF:
if not FL.n:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xEF:
if FL.h:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xFF:
if not FL.h:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xEE:
if FL.c:
REL_JUMP()
else:
PC.inc()
elif b_val == 0xFE:
if not FL.c:
REL_JUMP()
else:
PC.inc()
# CALL xxyy
elif b_val == 0x1E:
memory[SP.value].value = (PC.value+3) & 0xff
memory[SP.value + 1].value = (PC.value+3) >> 8
SP.dec()
JUMP()
# RET
elif b_val == 0x0E:
SP.inc()
PC.value = memory[SP.value].value + (memory[SP.value + 1].value << 8)
jumped = True
# NOP
elif b_val == 0x00:
pass
    else:
        print("UNKNOWN:",hex(b_val),"@",hex(PC.value))
if debug:
BC.getvalue()
DE.getvalue()
HL.getvalue()
print("A:",hex(A.value),"B:",hex(B.value),"C:",hex(C.value),"D:",hex(D.value),"E:",hex(E.value),"H:",hex(H.value),
"L:",hex(L.value),"BC:",hex(BC.value),"DE:",hex(DE.value),"HL:",hex(HL.value),"PC:",hex(PC.value),"SP:",hex(SP.value))
if not jumped:
PC.inc()
else:
pass
#print("JUMPED")
|
[
"pygame.display.init",
"pygame.Surface",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.Rect",
"numpy.zeros",
"sys.stdin.buffer.read",
"sys.stdout.flush",
"time.time",
"random.randint",
"pygame.transform.scale"
] |
[((83, 108), 'numpy.zeros', 'np.zeros', ([], {'shape': '(128, 64)'}), '(shape=(128, 64))\n', (91, 108), True, 'import numpy as np\n'), ((264, 281), 'pygame.display.init', 'pg.display.init', ([], {}), '()\n', (279, 281), True, 'import pygame as pg\n'), ((292, 354), 'pygame.display.set_mode', 'pg.display.set_mode', (['(width * 4, height * 4)'], {'flags': '(0)', 'depth': '(8)'}), '((width * 4, height * 4), flags=0, depth=8)\n', (311, 354), True, 'import pygame as pg\n'), ((360, 405), 'pygame.Surface', 'pg.Surface', (['(width, height)'], {'flags': '(0)', 'depth': '(8)'}), '((width, height), flags=0, depth=8)\n', (370, 405), True, 'import pygame as pg\n'), ((406, 466), 'pygame.transform.scale', 'pg.transform.scale', (['screen', '(width * 4, height * 4)', 'display'], {}), '(screen, (width * 4, height * 4), display)\n', (424, 466), True, 'import pygame as pg\n'), ((10272, 10283), 'time.time', 'time.time', ([], {}), '()\n', (10281, 10283), False, 'import time\n'), ((500, 560), 'pygame.transform.scale', 'pg.transform.scale', (['screen', '(width * 4, height * 4)', 'display'], {}), '(screen, (width * 4, height * 4), display)\n', (518, 560), True, 'import pygame as pg\n'), ((561, 578), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (576, 578), True, 'import pygame as pg\n'), ((10587, 10601), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (10599, 10601), True, 'import pygame as pg\n'), ((2225, 2247), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2239, 2247), False, 'import random\n'), ((2317, 2335), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2333, 2335), False, 'import sys\n'), ((10446, 10457), 'time.time', 'time.time', ([], {}), '()\n', (10455, 10457), False, 'import time\n'), ((10530, 10541), 'time.time', 'time.time', ([], {}), '()\n', (10539, 10541), False, 'import time\n'), ((1091, 1118), 'pygame.Rect', 'pg.Rect', (['x_pos', 'y_pos', '(1)', '(1)'], {}), '(x_pos, y_pos, 1, 1)\n', (1098, 1118), True, 'import pygame as pg\n'), ((1183, 1210), 'pygame.Rect', 'pg.Rect', (['x_pos', 'y_pos', '(1)', '(1)'], {}), '(x_pos, y_pos, 1, 1)\n', (1190, 1210), True, 'import pygame as pg\n'), ((18032, 18056), 'sys.stdin.buffer.read', 'sys.stdin.buffer.read', (['(1)'], {}), '(1)\n', (18053, 18056), False, 'import sys\n')]
|
#Import modules
import os
import pandas as pd
import numpy as np
from pandas import DatetimeIndex
import dask
import scipy
import time
import glob
import torch
import torch.nn as nn
from live_plotter import live_plotter
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from functools import partial
from abc import ABCMeta, abstractmethod
import plottingTools
import pytorchModel
import loadData
class pytorchFwdModel(pytorchModel.pytorchModel) :
#######################################################################################################
#Construction functions
#######################################################################################################
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestPyTorchFwdModel"):
super().__init__(learningRate, hyperParameters, nbUnitsPerLayer, nbFactors,
modelName = modelName)
def buildModel(self):
self.fe = pytorchModel.Functional_encoder(self.nbFactors + 1) #Neural network architecture
return
#######################################################################################################
#Evaluation functions
#######################################################################################################
def evalBatch(self, batch, code):
batchLogMoneyness = self.getLogMoneyness(batch)
scaledMoneyness = (batchLogMoneyness.values - self.MeanLogMoneyness) / self.StdLogMoneyness
logMoneynessTensor = torch.Tensor(np.expand_dims(scaledMoneyness, 1)).float() #Log moneyness
# for j in np.random.choice(len(test[k]), 10):
# filt = test[k].nBizDays >= 10
batchLogMat = self.getLogMaturities(batch)
scaledMat = (batchLogMat.values - self.MeanLogMaturity) / self.StdLogMaturity
logMaturity = torch.tensor( np.expand_dims(scaledMat, 1) , requires_grad=True).float()
scaledFwd = (batch[2].values - self.MeanFwd) / self.StdFwd
fwdTensor = torch.tensor( np.expand_dims(scaledFwd, 1) ).float()
codeTensor = code.repeat(batch[0].shape[0], 1).float()
refVol = torch.tensor(batch[0].values)
inputTensor = torch.cat((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)
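        # network input: scaled log-moneyness, log-maturity and forward, concatenated with the latent factor code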
outputTensor = self.fe( inputTensor )[:, 0]
loss = torch.mean( (outputTensor - refVol)[~torch.isnan(outputTensor)] ** 2 )#torch.nanmean( (outputTensor - refVol) ** 2 )
return inputTensor, outputTensor, loss, logMaturity, codeTensor, logMoneynessTensor
def commonEvalSingleDayWithoutCalibration(self,
initialValueForFactors,
dataSetList,
computeSensi = False):
#Rebuild tensor graph
self.restoringGraph()
#Build tensor for reconstruction
nbObs = 1 if initialValueForFactors.ndim == 1 else initialValueForFactors.shape[0]
nbPoints = dataSetList[1].shape[0] if dataSetList[1].ndim == 1 else dataSetList[1].shape[1]
nbFactors = self.nbFactors
reshapedValueForFactors = np.reshape([initialValueForFactors],
(nbObs,nbFactors))
self.code = pytorchModel.Code(nbObs, self.nbFactors, initialValue = reshapedValueForFactors) #Latent variables
        codeTensor = self.code.code[0, :].repeat(nbPoints, 1) #assumes a single observation; the original indexed an undefined k
batchLogMoneyness = self.getLogMoneyness(dataSetList)
scaledMoneyness = (batchLogMoneyness.values - self.MeanLogMoneyness) / self.StdLogMoneyness
        logMoneynessTensor = torch.Tensor(np.expand_dims(scaledMoneyness, 1)).float() #Log moneyness (scaledMoneyness is already a numpy array)
scaledFwd = (dataSetList[2].values - self.MeanFwd) / self.StdFwd
fwdTensor = torch.tensor( np.expand_dims(scaledFwd, 1) ).float()
# for j in np.random.choice(len(test[k]), 10):
# filt = test[k].nBizDays >= 10
batchLogMat = self.getLogMaturities(dataSetList)
scaledMat = (batchLogMat.values - self.MeanLogMaturity) / self.StdLogMaturity
logMaturity = torch.tensor( np.expand_dims(scaledMat, 1) ).float()
inputTensor = torch.cat((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)
outputTensor = self.fe( inputTensor )[:, 0]
self.restoreWeights()
#Build tensor for reconstruction
# print("nbPoints : ", nbPoints)
# print("initialValueForFactors : ", initialValueForFactors)
# print("inputFeatures : ", inputFeatures)
# print("outputFeatures : ", outputFeatures)
# print("outputTensor : ", self.outputTensor)
        reconstructedSurface = outputTensor.detach().numpy().reshape(dataSetList[0].shape)
        inputTensor = torch.cat((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1) #rebuild the input (the original used an undefined strikes tensor)
#if computeSensi :
# inputTensor.requires_grad = True
outputTensor = self.fe( inputTensor )[:, 0]
reshapedJacobian = None
if computeSensi :
reshapedJacobian = np.ones((nbObs, nbPoints, nbFactors)) if initialValueForFactors.ndim != 1 else np.ones((nbPoints, nbFactors))
#for p in range(nbPoints) :
# output.backward()
# jacobian = input.grad.data
# reshapedJacobian = tf.reshape(jacobian, shape = [nbObs, nbPoints, nbFactors])
# if self.verbose :
# print(reshapedJacobian)
calibratedSurfaces = outputTensor
factorSensi = None
if initialValueForFactors.ndim == 1 :
calibratedSurfaces = np.reshape(reconstructedSurface, (nbPoints))
if reshapedJacobian is not None :
factorSensi = np.reshape(reshapedJacobian, (nbPoints, nbFactors))
elif initialValueForFactors.ndim == 2 :
calibratedSurfaces = np.reshape(reconstructedSurface, (nbObs,nbPoints))
if reshapedJacobian is not None :
factorSensi = np.reshape(reshapedJacobian, (nbObs, nbPoints, nbFactors))
return calibratedSurfaces, factorSensi
|
[
"pytorchModel.Functional_encoder",
"numpy.reshape",
"numpy.ones",
"torch.tensor",
"pytorchModel.Code",
"numpy.expand_dims",
"torch.isnan",
"torch.cat"
] |
[((1146, 1197), 'pytorchModel.Functional_encoder', 'pytorchModel.Functional_encoder', (['(self.nbFactors + 1)'], {}), '(self.nbFactors + 1)\n', (1177, 1197), False, 'import pytorchModel\n'), ((2399, 2428), 'torch.tensor', 'torch.tensor', (['batch[0].values'], {}), '(batch[0].values)\n', (2411, 2428), False, 'import torch\n'), ((2462, 2536), 'torch.cat', 'torch.cat', (['(logMoneynessTensor, logMaturity, fwdTensor, codeTensor)'], {'dim': '(1)'}), '((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)\n', (2471, 2536), False, 'import torch\n'), ((3487, 3543), 'numpy.reshape', 'np.reshape', (['[initialValueForFactors]', '(nbObs, nbFactors)'], {}), '([initialValueForFactors], (nbObs, nbFactors))\n', (3497, 3543), True, 'import numpy as np\n'), ((3630, 3708), 'pytorchModel.Code', 'pytorchModel.Code', (['nbObs', 'self.nbFactors'], {'initialValue': 'reshapedValueForFactors'}), '(nbObs, self.nbFactors, initialValue=reshapedValueForFactors)\n', (3647, 3708), False, 'import pytorchModel\n'), ((4601, 4675), 'torch.cat', 'torch.cat', (['(logMoneynessTensor, logMaturity, fwdTensor, codeTensor)'], {'dim': '(1)'}), '((logMoneynessTensor, logMaturity, fwdTensor, codeTensor), dim=1)\n', (4610, 4675), False, 'import torch\n'), ((5214, 5266), 'torch.cat', 'torch.cat', (['(strikes, logMaturity, codeTensor)'], {'dim': '(1)'}), '((strikes, logMaturity, codeTensor), dim=1)\n', (5223, 5266), False, 'import torch\n'), ((6094, 6136), 'numpy.reshape', 'np.reshape', (['reconstructedSurface', 'nbPoints'], {}), '(reconstructedSurface, nbPoints)\n', (6104, 6136), True, 'import numpy as np\n'), ((5507, 5544), 'numpy.ones', 'np.ones', (['(nbObs, nbPoints, nbFactors)'], {}), '((nbObs, nbPoints, nbFactors))\n', (5514, 5544), True, 'import numpy as np\n'), ((5586, 5616), 'numpy.ones', 'np.ones', (['(nbPoints, nbFactors)'], {}), '((nbPoints, nbFactors))\n', (5593, 5616), True, 'import numpy as np\n'), ((6217, 6268), 'numpy.reshape', 'np.reshape', (['reshapedJacobian', '(nbPoints, nbFactors)'], {}), '(reshapedJacobian, (nbPoints, nbFactors))\n', (6227, 6268), True, 'import numpy as np\n'), ((6352, 6403), 'numpy.reshape', 'np.reshape', (['reconstructedSurface', '(nbObs, nbPoints)'], {}), '(reconstructedSurface, (nbObs, nbPoints))\n', (6362, 6403), True, 'import numpy as np\n'), ((1750, 1784), 'numpy.expand_dims', 'np.expand_dims', (['scaledMoneyness', '(1)'], {}), '(scaledMoneyness, 1)\n', (1764, 1784), True, 'import numpy as np\n'), ((2093, 2121), 'numpy.expand_dims', 'np.expand_dims', (['scaledMat', '(1)'], {}), '(scaledMat, 1)\n', (2107, 2121), True, 'import numpy as np\n'), ((2267, 2295), 'numpy.expand_dims', 'np.expand_dims', (['scaledFwd', '(1)'], {}), '(scaledFwd, 1)\n', (2281, 2295), True, 'import numpy as np\n'), ((4009, 4050), 'numpy.expand_dims', 'np.expand_dims', (['scaledMoneyness.values', '(1)'], {}), '(scaledMoneyness.values, 1)\n', (4023, 4050), True, 'import numpy as np\n'), ((4194, 4222), 'numpy.expand_dims', 'np.expand_dims', (['scaledFwd', '(1)'], {}), '(scaledFwd, 1)\n', (4208, 4222), True, 'import numpy as np\n'), ((4528, 4556), 'numpy.expand_dims', 'np.expand_dims', (['scaledMat', '(1)'], {}), '(scaledMat, 1)\n', (4542, 4556), True, 'import numpy as np\n'), ((6481, 6539), 'numpy.reshape', 'np.reshape', (['reshapedJacobian', '(nbObs, nbPoints, nbFactors)'], {}), '(reshapedJacobian, (nbObs, nbPoints, nbFactors))\n', (6491, 6539), True, 'import numpy as np\n'), ((2653, 2678), 'torch.isnan', 'torch.isnan', (['outputTensor'], {}), '(outputTensor)\n', (2664, 2678), False, 'import torch\n')]
|
import pytest
import numpy as np
import itertools
from numpy.testing import assert_allclose
from keras_contrib.utils.test_utils import layer_test, keras_test
from keras.utils.conv_utils import conv_input_length
from keras import backend as K
from keras_contrib import backend as KC
from keras_contrib.layers import convolutional, pooling
from keras.models import Sequential
# TensorFlow does not support full convolution.
if K.backend() == 'theano':
_convolution_border_modes = ['valid', 'same']
else:
_convolution_border_modes = ['valid', 'same']
@keras_test
def test_cosineconvolution_2d():
num_samples = 2
num_filter = 2
stack_size = 3
num_row = 10
num_col = 6
if K.backend() == 'theano':
data_format = 'channels_first'
elif K.backend() == 'tensorflow':
data_format = 'channels_last'
for border_mode in _convolution_border_modes:
for subsample in [(1, 1), (2, 2)]:
for use_bias_mode in [True, False]:
if border_mode == 'same' and subsample != (1, 1):
continue
layer_test(convolutional.CosineConvolution2D,
kwargs={'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format},
input_shape=(num_samples, num_row, num_col, stack_size))
layer_test(convolutional.CosineConvolution2D,
kwargs={'filters': num_filter,
'kernel_size': (3, 3),
'padding': border_mode,
'strides': subsample,
'use_bias': use_bias_mode,
'data_format': data_format,
'kernel_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2'},
input_shape=(num_samples, num_row, num_col, stack_size))
if data_format == 'channels_first':
X = np.random.randn(1, 3, 5, 5)
input_dim = (3, 5, 5)
W0 = X[:, :, ::-1, ::-1]
elif data_format == 'channels_last':
X = np.random.randn(1, 5, 5, 3)
input_dim = (5, 5, 3)
W0 = X[0, :, :, :, None]
model = Sequential()
model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=True,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = W0
W[1] = np.asarray([1.])
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
model = Sequential()
model.add(convolutional.CosineConvolution2D(1, (5, 5), use_bias=False,
input_shape=input_dim,
data_format=data_format))
model.compile(loss='mse', optimizer='rmsprop')
W = model.get_weights()
W[0] = -2 * W0
model.set_weights(W)
out = model.predict(X)
assert_allclose(out, -np.ones((1, 1, 1, 1), dtype=K.floatx()), atol=1e-5)
@keras_test
def test_sub_pixel_upscaling():
num_samples = 2
num_row = 16
num_col = 16
input_dtype = K.floatx()
for scale_factor in [2, 3, 4]:
input_data = np.random.random((num_samples, 4 * (scale_factor ** 2), num_row, num_col))
input_data = input_data.astype(input_dtype)
if K.image_data_format() == 'channels_last':
input_data = input_data.transpose((0, 2, 3, 1))
input_tensor = K.variable(input_data)
expected_output = K.eval(KC.depth_to_space(input_tensor,
scale=scale_factor))
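        # the layer's output must match a direct depth_to_space rearrangement of the input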
layer_test(convolutional.SubPixelUpscaling,
kwargs={'scale_factor': scale_factor},
input_data=input_data,
expected_output=expected_output,
expected_output_dtype=K.floatx())
if __name__ == '__main__':
pytest.main([__file__])
|
[
"keras.backend.image_data_format",
"numpy.random.random",
"numpy.asarray",
"keras_contrib.utils.test_utils.layer_test",
"keras.backend.floatx",
"pytest.main",
"keras.models.Sequential",
"keras_contrib.layers.convolutional.CosineConvolution2D",
"keras_contrib.backend.depth_to_space",
"keras.backend.variable",
"keras.backend.backend",
"numpy.random.randn"
] |
[((427, 438), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (436, 438), True, 'from keras import backend as K\n'), ((2581, 2593), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2591, 2593), False, 'from keras.models import Sequential\n'), ((2917, 2934), 'numpy.asarray', 'np.asarray', (['[1.0]'], {}), '([1.0])\n', (2927, 2934), True, 'import numpy as np\n'), ((3076, 3088), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3086, 3088), False, 'from keras.models import Sequential\n'), ((3655, 3665), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3663, 3665), True, 'from keras import backend as K\n'), ((4439, 4462), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (4450, 4462), False, 'import pytest\n'), ((704, 715), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (713, 715), True, 'from keras import backend as K\n'), ((2333, 2360), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(5)', '(5)'], {}), '(1, 3, 5, 5)\n', (2348, 2360), True, 'import numpy as np\n'), ((2608, 2720), 'keras_contrib.layers.convolutional.CosineConvolution2D', 'convolutional.CosineConvolution2D', (['(1)', '(5, 5)'], {'use_bias': '(True)', 'input_shape': 'input_dim', 'data_format': 'data_format'}), '(1, (5, 5), use_bias=True, input_shape=\n input_dim, data_format=data_format)\n', (2641, 2720), False, 'from keras_contrib.layers import convolutional, pooling\n'), ((3103, 3216), 'keras_contrib.layers.convolutional.CosineConvolution2D', 'convolutional.CosineConvolution2D', (['(1)', '(5, 5)'], {'use_bias': '(False)', 'input_shape': 'input_dim', 'data_format': 'data_format'}), '(1, (5, 5), use_bias=False, input_shape=\n input_dim, data_format=data_format)\n', (3136, 3216), False, 'from keras_contrib.layers import convolutional, pooling\n'), ((3723, 3795), 'numpy.random.random', 'np.random.random', (['(num_samples, 4 * scale_factor ** 2, num_row, num_col)'], {}), '((num_samples, 4 * scale_factor ** 2, num_row, num_col))\n', (3739, 3795), True, 'import numpy as np\n'), ((3988, 4010), 'keras.backend.variable', 'K.variable', (['input_data'], {}), '(input_data)\n', (3998, 4010), True, 'from keras import backend as K\n'), ((777, 788), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (786, 788), True, 'from keras import backend as K\n'), ((2477, 2504), 'numpy.random.randn', 'np.random.randn', (['(1)', '(5)', '(5)', '(3)'], {}), '(1, 5, 5, 3)\n', (2492, 2504), True, 'import numpy as np\n'), ((3862, 3883), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (3881, 3883), True, 'from keras import backend as K\n'), ((4044, 4095), 'keras_contrib.backend.depth_to_space', 'KC.depth_to_space', (['input_tensor'], {'scale': 'scale_factor'}), '(input_tensor, scale=scale_factor)\n', (4061, 4095), True, 'from keras_contrib import backend as KC\n'), ((1098, 1369), 'keras_contrib.utils.test_utils.layer_test', 'layer_test', (['convolutional.CosineConvolution2D'], {'kwargs': "{'filters': num_filter, 'kernel_size': (3, 3), 'padding': border_mode,\n 'strides': subsample, 'use_bias': use_bias_mode, 'data_format': data_format\n }", 'input_shape': '(num_samples, num_row, num_col, stack_size)'}), "(convolutional.CosineConvolution2D, kwargs={'filters': num_filter,\n 'kernel_size': (3, 3), 'padding': border_mode, 'strides': subsample,\n 'use_bias': use_bias_mode, 'data_format': data_format}, input_shape=(\n num_samples, num_row, num_col, stack_size))\n", (1108, 1369), False, 'from keras_contrib.utils.test_utils import layer_test, keras_test\n'), 
((1603, 1965), 'keras_contrib.utils.test_utils.layer_test', 'layer_test', (['convolutional.CosineConvolution2D'], {'kwargs': "{'filters': num_filter, 'kernel_size': (3, 3), 'padding': border_mode,\n 'strides': subsample, 'use_bias': use_bias_mode, 'data_format':\n data_format, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2',\n 'activity_regularizer': 'l2'}", 'input_shape': '(num_samples, num_row, num_col, stack_size)'}), "(convolutional.CosineConvolution2D, kwargs={'filters': num_filter,\n 'kernel_size': (3, 3), 'padding': border_mode, 'strides': subsample,\n 'use_bias': use_bias_mode, 'data_format': data_format,\n 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2',\n 'activity_regularizer': 'l2'}, input_shape=(num_samples, num_row,\n num_col, stack_size))\n", (1613, 1965), False, 'from keras_contrib.utils.test_utils import layer_test, keras_test\n'), ((3039, 3049), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3047, 3049), True, 'from keras import backend as K\n'), ((4394, 4404), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (4402, 4404), True, 'from keras import backend as K\n'), ((3513, 3523), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3521, 3523), True, 'from keras import backend as K\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
def getTransformMatrix(origin, destination):
x = np.zeros(origin.shape[0] + 1) # insert [0]-element for better indexing -> x[1] = first element
x[1:] = origin[:,0]
y = np.copy(x)
y[1:] = origin[:,1]
x_ = np.copy(x)
x_[1:] = destination[:,0]
y_ = np.copy(x)
y_[1:] = destination[:,1]
a11 = (y[1] * (x_[2] - x_[3]) + y[2] * (x_[3] - x_[1]) + y[3] * (x_[1] - x_[2]))
a12 = (x[1] * (x_[3] - x_[2]) + x[2] * (x_[1] - x_[3]) + x[3] * (x_[2] - x_[1]))
a21 = (y[1] * (y_[2] - y_[3]) + y[2] * (y_[3] - y_[1]) + y[3] * (y_[1] - y_[2]))
a22 = (x[1] * (y_[3] - y_[2]) + x[2] * (y_[1] - y_[3]) + x[3] * (y_[2] - y_[1]))
a13 = (x[1] * (y[3]*x_[2] - y[2]*x_[3]) + x[2] * (y[1]*x_[3] - y[3]*x_[1]) + x[3] * (y[2]*x_[1] - y[1]*x_[2]))
a23 = (x[1] * (y[3]*y_[2] - y[2]*y_[3]) + x[2] * (y[1]*y_[3] - y[3]*y_[1]) + x[3] * (y[2]*y_[1] - y[1]*y_[2]))
d = x[1]*(y[3] - y[2]) + x[2]*(y[1] - y[3]) + x[3]*(y[2] - y[1])
return 1/d * np.array([[a11, a12, a13], [a21, a22, a23], [0, 0, 1]])
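
# Optional sanity check (illustrative helper, not called by main): cv2.getAffineTransform
# solves the same three-point problem, so its 2x3 result should match the first two rows
# of the 3x3 matrix returned above, up to floating point error, whenever the three
# points are not collinear.
def checkTransformMatrixAgainstOpenCV(origin, destination):
    m_own = getTransformMatrix(origin, destination)
    m_cv = cv2.getAffineTransform(origin.astype(np.float32),
                                   destination.astype(np.float32))
    return np.allclose(m_own[:2, :], m_cv)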
def transformImage(image, M):
warpedImage = np.zeros(image.shape, dtype=np.int32)
for y, row in enumerate(image):
for x, value in enumerate(row):
newX, newY, _ = np.dot(M, np.array([x,y,1]))
cond1 = newY < warpedImage.shape[0] and newX < warpedImage.shape[1]
cond2 = newY > 0 and newX > 0
if cond1 and cond2:
warpedImage[int(newY)][int(newX)] = value
return warpedImage
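
# Note: transformImage maps each source pixel forward and truncates the target
# coordinates, so the warped image can contain holes (pixels that no source pixel
# landed on). interpolateMissingPixels below fills those holes with the per-channel
# mean of the non-zero neighbours in a 3x3 window.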
def interpolateMissingPixels(image):
#interpImage = np.zeros(image.shape, dtype=np.int32)
interpImage = np.array(image)
for y in range(1, len(image) - 1):
row = interpImage[y]
for x in range(1, len(row) - 1):
            if not row[x].any(): # empty pixel (all channels are zero)
windowPixels = interpImage[y-1:y+2, x-1:x+2] # [rgb], [rgb], [rgb]
# if windowPixels.sum() == 0:
# continue
newPixel = np.array([0,0,0])
for channel in range(3): # interpolate rgb
channelValues = windowPixels[:, :, channel]
temp = channelValues != 0
meancount = temp.sum()
newPixel[channel] = channelValues.sum() / meancount if meancount != 0 else 0
interpImage[y][x] = newPixel
return interpImage
def main():
origin = np.array([[50, 50], [50, 100], [100, 50]])
destination = np.array([[50, 100], [100, 250], [150, 50]])
m = getTransformMatrix(origin, destination)
image = plt.imread("scarlet.jpg")[100:400, 100:400]
warpedImage = transformImage(image, m)
interpImage = interpolateMissingPixels(warpedImage)
fig, ax = plt.subplots(1,3)
ax[0].imshow(image)
ax[1].imshow(warpedImage)
ax[2].imshow(interpImage)
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.copy",
"matplotlib.pyplot.imread",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((129, 158), 'numpy.zeros', 'np.zeros', (['(origin.shape[0] + 1)'], {}), '(origin.shape[0] + 1)\n', (137, 158), True, 'import numpy as np\n'), ((256, 266), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (263, 266), True, 'import numpy as np\n'), ((301, 311), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (308, 311), True, 'import numpy as np\n'), ((351, 361), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (358, 361), True, 'import numpy as np\n'), ((1278, 1315), 'numpy.zeros', 'np.zeros', (['image.shape'], {'dtype': 'np.int32'}), '(image.shape, dtype=np.int32)\n', (1286, 1315), True, 'import numpy as np\n'), ((1797, 1812), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1805, 1812), True, 'import numpy as np\n'), ((2584, 2626), 'numpy.array', 'np.array', (['[[50, 50], [50, 100], [100, 50]]'], {}), '([[50, 50], [50, 100], [100, 50]])\n', (2592, 2626), True, 'import numpy as np\n'), ((2646, 2690), 'numpy.array', 'np.array', (['[[50, 100], [100, 250], [150, 50]]'], {}), '([[50, 100], [100, 250], [150, 50]])\n', (2654, 2690), True, 'import numpy as np\n'), ((2913, 2931), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (2925, 2931), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3027, 3029), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1228), 'numpy.array', 'np.array', (['[[a11, a12, a13], [a21, a22, a23], [0, 0, 1]]'], {}), '([[a11, a12, a13], [a21, a22, a23], [0, 0, 1]])\n', (1181, 1228), True, 'import numpy as np\n'), ((2754, 2779), 'matplotlib.pyplot.imread', 'plt.imread', (['"""scarlet.jpg"""'], {}), "('scarlet.jpg')\n", (2764, 2779), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1449), 'numpy.array', 'np.array', (['[x, y, 1]'], {}), '([x, y, 1])\n', (1438, 1449), True, 'import numpy as np\n'), ((2158, 2177), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2166, 2177), True, 'import numpy as np\n')]
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import onnxruntime as ort
import onnx
from collections import OrderedDict
import tempfile
import os
import shutil
def convert_to_onnx_and_check(
job_func,
print_outlier=False,
explicit_init=True,
external_data=False,
ort_optimize=True,
opset=None,
):
check_point = flow.train.CheckPoint()
if explicit_init:
# it is a trick to keep check_point.save() from hanging when there is no variable
@flow.global_function(flow.FunctionConfig())
def add_var():
return flow.get_variable(
name="trick",
shape=(1,),
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
check_point.init()
flow_weight_dir = tempfile.TemporaryDirectory()
check_point.save(flow_weight_dir.name)
# TODO(daquexian): a more elegant way?
while not os.path.exists(os.path.join(flow_weight_dir.name, "snapshot_done")):
pass
onnx_model_dir = tempfile.TemporaryDirectory()
onnx_model_path = os.path.join(onnx_model_dir.name, "model.onnx")
flow.onnx.export(
job_func,
flow_weight_dir.name,
onnx_model_path,
opset=opset,
external_data=external_data,
)
flow_weight_dir.cleanup()
ort_sess_opt = ort.SessionOptions()
ort_sess_opt.graph_optimization_level = (
ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
if ort_optimize
else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
)
sess = ort.InferenceSession(onnx_model_path, sess_options=ort_sess_opt)
onnx_model_dir.cleanup()
assert len(sess.get_outputs()) == 1
assert len(sess.get_inputs()) <= 1
ipt_dict = OrderedDict()
for ipt in sess.get_inputs():
ipt_data = np.random.uniform(low=-10, high=10, size=ipt.shape).astype(
np.float32
)
ipt_dict[ipt.name] = ipt_data
onnx_res = sess.run([], ipt_dict)[0]
oneflow_res = job_func(*ipt_dict.values()).get().numpy()
rtol, atol = 1e-2, 1e-5
if print_outlier:
a = onnx_res.flatten()
b = oneflow_res.flatten()
for i in range(len(a)):
if np.abs(a[i] - b[i]) > atol + rtol * np.abs(b[i]):
print("a[{}]={}, b[{}]={}".format(i, a[i], i, b[i]))
assert np.allclose(onnx_res, oneflow_res, rtol=rtol, atol=atol)
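

# --- Usage sketch (illustrative; not part of the checked-in tests) ---
# convert_to_onnx_and_check expects a OneFlow job function. The job below only
# returns a variable, so the exported ONNX graph has no runtime inputs and the
# check reduces to comparing the variable value through both runtimes.
if __name__ == "__main__":
    @flow.global_function(flow.FunctionConfig())
    def variable_job():
        return flow.get_variable(
            name="example_var",
            shape=(2, 3),
            dtype=flow.float,
            initializer=flow.random_uniform_initializer(),
        )

    convert_to_onnx_and_check(variable_job)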
|
[
"tempfile.TemporaryDirectory",
"collections.OrderedDict",
"numpy.allclose",
"onnxruntime.SessionOptions",
"oneflow.FunctionConfig",
"numpy.abs",
"os.path.join",
"onnxruntime.InferenceSession",
"oneflow.random_uniform_initializer",
"oneflow.train.CheckPoint",
"numpy.random.uniform",
"oneflow.onnx.export"
] |
[((927, 950), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (948, 950), True, 'import oneflow as flow\n'), ((1396, 1425), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1423, 1425), False, 'import tempfile\n'), ((1629, 1658), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1656, 1658), False, 'import tempfile\n'), ((1681, 1728), 'os.path.join', 'os.path.join', (['onnx_model_dir.name', '"""model.onnx"""'], {}), "(onnx_model_dir.name, 'model.onnx')\n", (1693, 1728), False, 'import os\n'), ((1733, 1845), 'oneflow.onnx.export', 'flow.onnx.export', (['job_func', 'flow_weight_dir.name', 'onnx_model_path'], {'opset': 'opset', 'external_data': 'external_data'}), '(job_func, flow_weight_dir.name, onnx_model_path, opset=\n opset, external_data=external_data)\n', (1749, 1845), True, 'import oneflow as flow\n'), ((1937, 1957), 'onnxruntime.SessionOptions', 'ort.SessionOptions', ([], {}), '()\n', (1955, 1957), True, 'import onnxruntime as ort\n'), ((2156, 2220), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['onnx_model_path'], {'sess_options': 'ort_sess_opt'}), '(onnx_model_path, sess_options=ort_sess_opt)\n', (2176, 2220), True, 'import onnxruntime as ort\n'), ((2344, 2357), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2355, 2357), False, 'from collections import OrderedDict\n'), ((2937, 2993), 'numpy.allclose', 'np.allclose', (['onnx_res', 'oneflow_res'], {'rtol': 'rtol', 'atol': 'atol'}), '(onnx_res, oneflow_res, rtol=rtol, atol=atol)\n', (2948, 2993), True, 'import numpy as np\n'), ((1093, 1114), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1112, 1114), True, 'import oneflow as flow\n'), ((1541, 1592), 'os.path.join', 'os.path.join', (['flow_weight_dir.name', '"""snapshot_done"""'], {}), "(flow_weight_dir.name, 'snapshot_done')\n", (1553, 1592), False, 'import os\n'), ((2411, 2462), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10)', 'high': '(10)', 'size': 'ipt.shape'}), '(low=-10, high=10, size=ipt.shape)\n', (2428, 2462), True, 'import numpy as np\n'), ((2807, 2826), 'numpy.abs', 'np.abs', (['(a[i] - b[i])'], {}), '(a[i] - b[i])\n', (2813, 2826), True, 'import numpy as np\n'), ((1297, 1330), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (1328, 1330), True, 'import oneflow as flow\n'), ((2843, 2855), 'numpy.abs', 'np.abs', (['b[i]'], {}), '(b[i])\n', (2849, 2855), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright © 2018 PyHelp Project Contributors
# https://github.com/jnsebgosselin/pyhelp
#
# This file is part of PyHelp.
# Licensed under the terms of the GNU General Public License.
# ---- Standard Library Imports
import os
import os.path as osp
# ---- Third Party imports
import numpy as np
import geopandas as gpd
import netCDF4
import pandas as pd
# ---- Local Libraries Imports
from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs
from pyhelp.processing import run_help_allcells
from pyhelp.utils import savedata_to_hdf5
from pyhelp.weather_reader import (
save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP,
read_cweeds_file, join_daily_cweeds_wy2_and_wy3)
FNAME_CONN_TABLES = 'connect_table.npy'
class HELPManager(object):
def __init__(self, workdir, year_range, path_togrid=None):
super(HELPManager, self).__init__()
self.year_range = year_range
self.set_workdir(workdir)
self._setup_connect_tables()
if path_togrid is not None:
self.load_grid(path_togrid)
else:
self.grid = None
@property
def cellnames(self):
if self.grid is not None:
return self.grid['cid'].tolist()
else:
return []
@property
def inputdir(self):
"""
Return the path to the folder where the HELP input files are going to
be saved in the working directory. This folder is created in case it
doesn't already exist in the file system.
"""
inputdir = osp.join(self.workdir, 'help_input_files')
if not osp.exists(inputdir):
os.makedirs(inputdir)
return inputdir
@property
def workdir(self):
"""Return the path to the current working directory."""
return os.getcwd()
def set_workdir(self, dirname):
"""Set the working directory of the manager."""
if not osp.exists(dirname):
os.makedirs(dirname)
os.chdir(dirname)
# ---- Connect tables
@property
def path_connect_tables(self):
return osp.join(self.inputdir, FNAME_CONN_TABLES)
def _setup_connect_tables(self):
"""Setup the connect tables dictionary."""
if osp.exists(self.path_connect_tables):
self.connect_tables = np.load(self.path_connect_tables).item()
else:
self.connect_tables = {}
def _save_connect_tables(self):
"""Save the connect tables dictionary to a numpy binary file."""
np.save(self.path_connect_tables, self.connect_tables)
# ---- HELP grid
def load_grid(self, path_togrid):
"""
Load the grid that contains the infos required to evaluate regional
groundwater recharge with HELP.
"""
self.grid = load_grid_from_csv(path_togrid)
return self.grid
# ---- Input files creation
def generate_d13_from_cweeds(self, d13fname, fpath_cweed2, fpath_cweed3,
cellnames=None):
"""
Generate the HELP D13 input file for solar radiation from wy2 and
wy3 CWEEDS files at a given location.
"""
d13fpath = osp.join(self.inputdir, d13fname)
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
print('Reading CWEEDS files...', end=' ')
daily_wy2 = read_cweeds_file(fpath_cweed2, format_to_daily=True)
daily_wy3 = read_cweeds_file(fpath_cweed3, format_to_daily=True)
wy23_df = join_daily_cweeds_wy2_and_wy3(daily_wy2, daily_wy3)
indexes = np.where((wy23_df['Years'] >= self.year_range[0]) &
(wy23_df['Years'] <= self.year_range[1]))[0]
print('done')
print('Generating HELP D13 file for solar radiation...', end=' ')
save_solrad_to_HELP(d13fpath,
wy23_df['Years'][indexes],
wy23_df['Irradiance'][indexes],
'CAN_QC_MONTREAL-INTL-A_7025251',
wy23_df['Latitude'])
print('done')
if self.year_range[1] > np.max(wy23_df['Years']):
print("Warning: there is no solar radiation data after year %d."
% np.max(wy23_df['Years']))
if self.year_range[0] < np.min(wy23_df['Years']):
print("Warning: there is no solar radiation data before year %d."
% np.min(wy23_df['Years']))
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
d13_connect_table = {cid: d13fpath for cid in cellnames}
self.connect_tables['D13'] = d13_connect_table
self._save_connect_tables()
print("done")
def generate_d10d11_input_files(self, cellnames=None, sf_edepth=1,
sf_ulai=1):
"""Prepare the D10 and D11 input datafiles for each cell."""
d10d11_inputdir = osp.join(self.inputdir, 'd10d11_input_files')
if not osp.exists(d10d11_inputdir):
os.makedirs(d10d11_inputdir)
# Only keep the cells that are going to be run in HELP because we
# don't need the D10 or D11 input files for those that aren't.
cellnames = self.get_run_cellnames(cellnames)
d10data, d11data = format_d10d11_inputs(self.grid, cellnames,
sf_edepth, sf_ulai)
# Write the D10 and D11 input files.
d10_conn_tbl, d11_conn_tbl = write_d10d11_allcells(
d10d11_inputdir, d10data, d11data)
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D10'] = d10_conn_tbl
self.connect_tables['D11'] = d11_conn_tbl
self._save_connect_tables()
print("done")
def generate_d4d7_from_MDELCC_grid(self, path_netcdf_dir, cellnames=None):
"""
Prepare the D4 and D7 input datafiles for each cell from the
interpolated grid of the MDDELCC.
"""
d4d7_inputdir = osp.join(self.inputdir, 'd4d7_input_files')
if not osp.exists(d4d7_inputdir):
os.makedirs(d4d7_inputdir)
cellnames = self.get_run_cellnames(cellnames)
N = len(cellnames)
# Get the latitudes and longitudes of the resulting cells.
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
# Generate the connectivity table between the HELP grid and the
# MDDELCC interpolated daily weather grid.
print('Generating the connectivity table for each cell...', end=' ')
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
d4_conn_tbl = {}
d7_conn_tbl = {}
data = []
for i, cellname in enumerate(cellnames):
lat_idx, lon_idx = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
d4fname = osp.join(
d4d7_inputdir, '%03d_%03d.D4' % (lat_idx, lon_idx))
d7fname = osp.join(
d4d7_inputdir, '%03d_%03d.D7' % (lat_idx, lon_idx))
d4_conn_tbl[cellnames[i]] = d4fname
d7_conn_tbl[cellnames[i]] = d7fname
data.append([lat_idx, lon_idx, d4fname, d7fname])
print('done')
# Fetch the daily weather data from the netCDF files.
data = np.unique(data, axis=0)
lat_indx = data[:, 0].astype(int)
lon_idx = data[:, 1].astype(int)
years = range(self.year_range[0], self.year_range[1]+1)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_idx, years)
# Convert and save the weather data to D4 and D7 HELP input files.
N = len(data)
for i in range(N):
print(("\rGenerating HELP D4 and D7 files for location " +
"%d of %d (%0.1f%%)...") % (i+1, N, (i+1)/N * 100), end=' ')
lat = meteo_manager.lat[lat_indx[i]]
lon = meteo_manager.lon[lon_idx[i]]
d4fname, d7fname = data[i, 2], data[i, 3]
city = 'Meteo Grid at lat/lon %0.1f ; %0.1f' % (lat, lon)
# Fill -999 with 0 in daily precip.
precip_i = precip[:, i]
precip_i[precip_i == -999] = 0
# Fill -999 with linear interpolation in daily air temp.
tasavg_i = tasavg[:, i]
time_ = np.arange(len(tasavg_i))
indx = np.where(tasavg_i != -999)[0]
tasavg_i = np.interp(time_, time_[indx], tasavg_i[indx])
if not osp.exists(d4fname):
save_precip_to_HELP(d4fname, years, precip_i, city)
if not osp.exists(d7fname):
save_airtemp_to_HELP(d7fname, years, tasavg_i, city)
print('done')
# Update the connection table.
print("\rUpdating the connection table...", end=' ')
self.connect_tables['D4'] = d4_conn_tbl
self.connect_tables['D7'] = d7_conn_tbl
self._save_connect_tables()
print('done')
def run_help_for(self, path_outfile=None, cellnames=None, tfsoil=0):
"""
Run help for the cells listed in cellnames and save the result in
an hdf5 file.
"""
        # Convert from Celsius to Fahrenheit
tfsoil = (tfsoil * 1.8) + 32
tempdir = osp.join(self.inputdir, ".temp")
if not osp.exists(tempdir):
os.makedirs(tempdir)
run_cellnames = self.get_run_cellnames(cellnames)
cellparams = {}
for cellname in run_cellnames:
fpath_d4 = self.connect_tables['D4'][cellname]
fpath_d7 = self.connect_tables['D7'][cellname]
fpath_d13 = self.connect_tables['D13'][cellname]
fpath_d10 = self.connect_tables['D10'][cellname]
fpath_d11 = self.connect_tables['D11'][cellname]
fpath_out = osp.abspath(osp.join(tempdir, str(cellname) + '.OUT'))
daily_out = 0
monthly_out = 1
yearly_out = 0
summary_out = 0
unit_system = 2 # IP if 1 else SI
simu_nyear = self.year_range[1] - self.year_range[0] + 1
cellparams[cellname] = (fpath_d4, fpath_d7, fpath_d13, fpath_d11,
fpath_d10, fpath_out, daily_out,
monthly_out, yearly_out, summary_out,
unit_system, simu_nyear, tfsoil)
output = run_help_allcells(cellparams)
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
def calc_surf_water_cells(self, evp_surf, path_netcdf_dir,
path_outfile=None, cellnames=None):
cellnames = self.get_water_cellnames(cellnames)
lat_dd, lon_dd = self.get_latlon_for_cellnames(cellnames)
meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
N = len(cellnames)
lat_indx = np.empty(N).astype(int)
lon_indx = np.empty(N).astype(int)
for i, cellname in enumerate(cellnames):
lat_indx[i], lon_indx[i] = meteo_manager.get_idx_from_latlon(
lat_dd[i], lon_dd[i])
year_range = np.arange(
self.year_range[0], self.year_range[1] + 1).astype(int)
tasavg, precip, years = meteo_manager.get_data_from_idx(
lat_indx, lon_indx, year_range)
# Fill -999 with 0 in daily precip.
precip[precip == -999] = 0
nyr = len(year_range)
output = {}
for i, cellname in enumerate(cellnames):
data = {}
data['years'] = year_range
data['rain'] = np.zeros(nyr)
data['evapo'] = np.zeros(nyr) + evp_surf
data['runoff'] = np.zeros(nyr)
for k, year in enumerate(year_range):
indx = np.where(years == year)[0]
data['rain'][k] = np.sum(precip[indx, i])
data['runoff'][k] = data['rain'][k] - evp_surf
output[cellname] = data
if path_outfile:
savedata_to_hdf5(output, path_outfile)
return output
# # For cells for which the context is 2, convert recharge and deep
# # subrunoff into superfical subrunoff.
# cellnames_con_2 = cellnames[self.grid[fcon] == 2].tolist()
# for cellname in cellnames_con_2:
# output[cellname]['subrun1'] += output[cellname]['subrun2']
# output[cellname]['subrun1'] += output[cellname]['recharge']
# output[cellname]['subrun2'][:] = 0
# output[cellname]['recharge'][:] = 0
# # For cells for which the context is 3, convert recharge into
# # deep runoff.
# cellnames_con_3 = cellnames[self.grid[fcon] == 3].tolist()
# for cellname in cellnames_con_3:
# output[cellname]['subrun2'] += output[cellname]['recharge']
# output[cellname]['recharge'][:] = 0
# # Comput water budget for cells for which the context is 0.
# cellnames_con_2 = cellnames[self.grid[fcon] == 0].tolist()
# # meteo_manager = NetCDFMeteoManager(path_netcdf_dir)
# # for cellname in cellnames_run0:
# Save the result to an hdf5 file.
# ---- Utilities
def get_water_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are considered
to be in a surface water area.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells for which context is 0.
cellnames = self.grid['cid'][cellnames][self.grid['context'] == 0]
return cellnames.tolist()
def get_run_cellnames(self, cellnames):
"""
Take a list of cellnames and return only those that are in the grid
and for which HELP can be run.
"""
if cellnames is None:
cellnames = self.cellnames
else:
# Keep only the cells that are in the grid.
cellnames = self.grid['cid'][self.grid['cid'].isin(cellnames)]
# Only keep the cells that are going to be run in HELP because we
# don't need the D4 or D7 input files for those that aren't.
cellnames = self.grid['cid'][cellnames][self.grid['run'] == 1].tolist()
return cellnames
def get_latlon_for_cellnames(self, cells):
"""
Return a numpy array with latitudes and longitudes of the provided
cells cid. Latitude and longitude for cids that are missing from
the grid are set to nan.
"""
lat = np.array(self.grid['lat_dd'].reindex(cells).tolist())
lon = np.array(self.grid['lon_dd'].reindex(cells).tolist())
return lat, lon
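
# Typical HELPManager workflow (illustrative; the file names and paths below are
# placeholders, not files shipped with the package):
#   manager = HELPManager('workdir', year_range=(2000, 2010), path_togrid='grid.csv')
#   manager.generate_d13_from_cweeds('solrad.D13', 'station.WY2', 'station.WY3')
#   manager.generate_d4d7_from_MDELCC_grid('path/to/netcdf_dir')
#   manager.generate_d10d11_input_files(sf_edepth=1, sf_ulai=1)
#   manager.run_help_for(path_outfile='help_results.out')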
class NetCDFMeteoManager(object):
def __init__(self, dirpath_netcdf):
super(NetCDFMeteoManager, self).__init__()
self.dirpath_netcdf = dirpath_netcdf
self.lat = []
self.lon = []
self.setup_ncfile_list()
self.setup_latlon_grid()
def setup_ncfile_list(self):
"""Read all the available netCDF files in dirpath_netcdf."""
self.ncfilelist = []
for file in os.listdir(self.dirpath_netcdf):
if file.endswith('.nc'):
self.ncfilelist.append(osp.join(self.dirpath_netcdf, file))
def setup_latlon_grid(self):
if self.ncfilelist:
netcdf_dset = netCDF4.Dataset(self.ncfilelist[0], 'r+')
self.lat = np.array(netcdf_dset['lat'])
self.lon = np.array(netcdf_dset['lon'])
netcdf_dset.close()
def get_idx_from_latlon(self, latitudes, longitudes, unique=False):
"""
Get the i and j indexes of the grid meshes from a list of latitude
and longitude coordinates. If unique is True, only the unique pairs of
i and j indexes will be returned.
"""
try:
lat_idx = [np.argmin(np.abs(self.lat - lat)) for lat in latitudes]
lon_idx = [np.argmin(np.abs(self.lon - lon)) for lon in longitudes]
if unique:
ijdx = np.vstack({(i, j) for i, j in zip(lat_idx, lon_idx)})
lat_idx = ijdx[:, 0].tolist()
lon_idx = ijdx[:, 1].tolist()
except TypeError:
lat_idx = np.argmin(np.abs(self.lat - latitudes))
lon_idx = np.argmin(np.abs(self.lon - longitudes))
return lat_idx, lon_idx
def get_data_from_latlon(self, latitudes, longitudes, years):
"""
        Return the daily average air temperature and daily precipitation at the
        grid nodes closest to the given latitudes and longitudes.
"""
lat_idx, lon_idx = self.get_idx_from_latlon(latitudes, longitudes)
return self.get_data_from_idx(lat_idx, lon_idx, years)
def get_data_from_idx(self, lat_idx, lon_idx, years):
try:
len(lat_idx)
except TypeError:
lat_idx, lon_idx = [lat_idx], [lon_idx]
tasmax_stacks = []
tasmin_stacks = []
precip_stacks = []
years_stack = []
for year in years:
print('\rFetching daily weather data for year %d...' % year,
end=' ')
filename = osp.join(self.dirpath_netcdf, 'GCQ_v2_%d.nc' % year)
netcdf_dset = netCDF4.Dataset(filename, 'r+')
tasmax_stacks.append(
np.array(netcdf_dset['tasmax'])[:, lat_idx, lon_idx])
tasmin_stacks.append(
np.array(netcdf_dset['tasmin'])[:, lat_idx, lon_idx])
precip_stacks.append(
np.array(netcdf_dset['pr'])[:, lat_idx, lon_idx])
years_stack.append(
np.zeros(len(precip_stacks[-1][:])).astype(int) + year)
netcdf_dset.close()
print('done')
tasmax = np.vstack(tasmax_stacks)
tasmin = np.vstack(tasmin_stacks)
precip = np.vstack(precip_stacks)
years = np.hstack(years_stack)
return (tasmax + tasmin)/2, precip, years
def load_grid_from_csv(path_togrid):
"""
Load the csv that contains the infos required to evaluate regional
groundwater recharge with HELP.
"""
print('Reading HELP grid from csv...', end=' ')
grid = pd.read_csv(path_togrid)
print('done')
fname = osp.basename(path_togrid)
req_keys = ['cid', 'lat_dd', 'lon_dd', 'run']
for key in req_keys:
if key not in grid.keys():
raise KeyError("No attribute '%s' found in %s" % (key, fname))
# Make sure that cid is a str.
grid['cid'] = np.array(grid['cid']).astype(str)
# Set 'cid' as the index of the dataframe.
grid.set_index(['cid'], drop=False, inplace=True)
return grid
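

if __name__ == '__main__':
    # Minimal, self-contained example of the grid format expected by
    # load_grid_from_csv (illustrative values only; real grids also carry the
    # columns used elsewhere in this module, e.g. 'context').
    import tempfile
    demo_grid = pd.DataFrame({'cid': ['cell_001', 'cell_002'],
                              'lat_dd': [45.50, 45.55],
                              'lon_dd': [-73.55, -73.60],
                              'run': [1, 0]})
    demo_dir = tempfile.mkdtemp()
    demo_path = osp.join(demo_dir, 'demo_grid.csv')
    demo_grid.to_csv(demo_path, index=False)
    print(load_grid_from_csv(demo_path))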
|
[
"pyhelp.weather_reader.save_airtemp_to_HELP",
"pandas.read_csv",
"numpy.hstack",
"numpy.array",
"pyhelp.processing.run_help_allcells",
"numpy.save",
"numpy.arange",
"os.path.exists",
"os.listdir",
"numpy.where",
"netCDF4.Dataset",
"pyhelp.weather_reader.read_cweeds_file",
"numpy.max",
"numpy.empty",
"numpy.vstack",
"numpy.min",
"pyhelp.weather_reader.save_solrad_to_HELP",
"pyhelp.preprocessing.write_d10d11_allcells",
"numpy.abs",
"pyhelp.weather_reader.save_precip_to_HELP",
"pyhelp.weather_reader.join_daily_cweeds_wy2_and_wy3",
"numpy.interp",
"numpy.unique",
"os.makedirs",
"pyhelp.preprocessing.format_d10d11_inputs",
"os.path.join",
"os.getcwd",
"os.chdir",
"numpy.sum",
"numpy.zeros",
"os.path.basename",
"numpy.load",
"pyhelp.utils.savedata_to_hdf5"
] |
[((18515, 18539), 'pandas.read_csv', 'pd.read_csv', (['path_togrid'], {}), '(path_togrid)\n', (18526, 18539), True, 'import pandas as pd\n'), ((18571, 18596), 'os.path.basename', 'osp.basename', (['path_togrid'], {}), '(path_togrid)\n', (18583, 18596), True, 'import os.path as osp\n'), ((1587, 1629), 'os.path.join', 'osp.join', (['self.workdir', '"""help_input_files"""'], {}), "(self.workdir, 'help_input_files')\n", (1595, 1629), True, 'import os.path as osp\n'), ((1842, 1853), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1851, 1853), False, 'import os\n'), ((2024, 2041), 'os.chdir', 'os.chdir', (['dirname'], {}), '(dirname)\n', (2032, 2041), False, 'import os\n'), ((2134, 2176), 'os.path.join', 'osp.join', (['self.inputdir', 'FNAME_CONN_TABLES'], {}), '(self.inputdir, FNAME_CONN_TABLES)\n', (2142, 2176), True, 'import os.path as osp\n'), ((2277, 2313), 'os.path.exists', 'osp.exists', (['self.path_connect_tables'], {}), '(self.path_connect_tables)\n', (2287, 2313), True, 'import os.path as osp\n'), ((2559, 2613), 'numpy.save', 'np.save', (['self.path_connect_tables', 'self.connect_tables'], {}), '(self.path_connect_tables, self.connect_tables)\n', (2566, 2613), True, 'import numpy as np\n'), ((3216, 3249), 'os.path.join', 'osp.join', (['self.inputdir', 'd13fname'], {}), '(self.inputdir, d13fname)\n', (3224, 3249), True, 'import os.path as osp\n'), ((3535, 3587), 'pyhelp.weather_reader.read_cweeds_file', 'read_cweeds_file', (['fpath_cweed2'], {'format_to_daily': '(True)'}), '(fpath_cweed2, format_to_daily=True)\n', (3551, 3587), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((3608, 3660), 'pyhelp.weather_reader.read_cweeds_file', 'read_cweeds_file', (['fpath_cweed3'], {'format_to_daily': '(True)'}), '(fpath_cweed3, format_to_daily=True)\n', (3624, 3660), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((3679, 3730), 'pyhelp.weather_reader.join_daily_cweeds_wy2_and_wy3', 'join_daily_cweeds_wy2_and_wy3', (['daily_wy2', 'daily_wy3'], {}), '(daily_wy2, daily_wy3)\n', (3708, 3730), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((3979, 4132), 'pyhelp.weather_reader.save_solrad_to_HELP', 'save_solrad_to_HELP', (['d13fpath', "wy23_df['Years'][indexes]", "wy23_df['Irradiance'][indexes]", '"""CAN_QC_MONTREAL-INTL-A_7025251"""', "wy23_df['Latitude']"], {}), "(d13fpath, wy23_df['Years'][indexes], wy23_df[\n 'Irradiance'][indexes], 'CAN_QC_MONTREAL-INTL-A_7025251', wy23_df[\n 'Latitude'])\n", (3998, 4132), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((5115, 5160), 'os.path.join', 'osp.join', (['self.inputdir', '"""d10d11_input_files"""'], {}), "(self.inputdir, 'd10d11_input_files')\n", (5123, 5160), True, 'import os.path as osp\n'), ((5474, 5536), 'pyhelp.preprocessing.format_d10d11_inputs', 'format_d10d11_inputs', (['self.grid', 'cellnames', 'sf_edepth', 'sf_ulai'], {}), '(self.grid, cellnames, sf_edepth, sf_ulai)\n', (5494, 5536), False, 'from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs\n'), ((5668, 5724), 'pyhelp.preprocessing.write_d10d11_allcells', 'write_d10d11_allcells', (['d10d11_inputdir', 'd10data', 'd11data'], {}), 
'(d10d11_inputdir, d10data, d11data)\n', (5689, 5724), False, 'from pyhelp.preprocessing import write_d10d11_allcells, format_d10d11_inputs\n'), ((6236, 6279), 'os.path.join', 'osp.join', (['self.inputdir', '"""d4d7_input_files"""'], {}), "(self.inputdir, 'd4d7_input_files')\n", (6244, 6279), True, 'import os.path as osp\n'), ((7524, 7547), 'numpy.unique', 'np.unique', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7533, 7547), True, 'import numpy as np\n'), ((9483, 9515), 'os.path.join', 'osp.join', (['self.inputdir', '""".temp"""'], {}), "(self.inputdir, '.temp')\n", (9491, 9515), True, 'import os.path as osp\n'), ((10623, 10652), 'pyhelp.processing.run_help_allcells', 'run_help_allcells', (['cellparams'], {}), '(cellparams)\n', (10640, 10652), False, 'from pyhelp.processing import run_help_allcells\n'), ((15478, 15509), 'os.listdir', 'os.listdir', (['self.dirpath_netcdf'], {}), '(self.dirpath_netcdf)\n', (15488, 15509), False, 'import os\n'), ((18091, 18115), 'numpy.vstack', 'np.vstack', (['tasmax_stacks'], {}), '(tasmax_stacks)\n', (18100, 18115), True, 'import numpy as np\n'), ((18133, 18157), 'numpy.vstack', 'np.vstack', (['tasmin_stacks'], {}), '(tasmin_stacks)\n', (18142, 18157), True, 'import numpy as np\n'), ((18175, 18199), 'numpy.vstack', 'np.vstack', (['precip_stacks'], {}), '(precip_stacks)\n', (18184, 18199), True, 'import numpy as np\n'), ((18216, 18238), 'numpy.hstack', 'np.hstack', (['years_stack'], {}), '(years_stack)\n', (18225, 18238), True, 'import numpy as np\n'), ((1645, 1665), 'os.path.exists', 'osp.exists', (['inputdir'], {}), '(inputdir)\n', (1655, 1665), True, 'import os.path as osp\n'), ((1679, 1700), 'os.makedirs', 'os.makedirs', (['inputdir'], {}), '(inputdir)\n', (1690, 1700), False, 'import os\n'), ((1962, 1981), 'os.path.exists', 'osp.exists', (['dirname'], {}), '(dirname)\n', (1972, 1981), True, 'import os.path as osp\n'), ((1995, 2015), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (2006, 2015), False, 'import os\n'), ((3750, 3847), 'numpy.where', 'np.where', (["((wy23_df['Years'] >= self.year_range[0]) & (wy23_df['Years'] <= self.\n year_range[1]))"], {}), "((wy23_df['Years'] >= self.year_range[0]) & (wy23_df['Years'] <=\n self.year_range[1]))\n", (3758, 3847), True, 'import numpy as np\n'), ((4290, 4314), 'numpy.max', 'np.max', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4296, 4314), True, 'import numpy as np\n'), ((4471, 4495), 'numpy.min', 'np.min', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4477, 4495), True, 'import numpy as np\n'), ((5176, 5203), 'os.path.exists', 'osp.exists', (['d10d11_inputdir'], {}), '(d10d11_inputdir)\n', (5186, 5203), True, 'import os.path as osp\n'), ((5217, 5245), 'os.makedirs', 'os.makedirs', (['d10d11_inputdir'], {}), '(d10d11_inputdir)\n', (5228, 5245), False, 'import os\n'), ((6295, 6320), 'os.path.exists', 'osp.exists', (['d4d7_inputdir'], {}), '(d4d7_inputdir)\n', (6305, 6320), True, 'import os.path as osp\n'), ((6334, 6360), 'os.makedirs', 'os.makedirs', (['d4d7_inputdir'], {}), '(d4d7_inputdir)\n', (6345, 6360), False, 'import os\n'), ((7086, 7146), 'os.path.join', 'osp.join', (['d4d7_inputdir', "('%03d_%03d.D4' % (lat_idx, lon_idx))"], {}), "(d4d7_inputdir, '%03d_%03d.D4' % (lat_idx, lon_idx))\n", (7094, 7146), True, 'import os.path as osp\n'), ((7186, 7246), 'os.path.join', 'osp.join', (['d4d7_inputdir', "('%03d_%03d.D7' % (lat_idx, lon_idx))"], {}), "(d4d7_inputdir, '%03d_%03d.D7' % (lat_idx, lon_idx))\n", (7194, 7246), True, 'import os.path as osp\n'), ((8646, 8691), 
'numpy.interp', 'np.interp', (['time_', 'time_[indx]', 'tasavg_i[indx]'], {}), '(time_, time_[indx], tasavg_i[indx])\n', (8655, 8691), True, 'import numpy as np\n'), ((9531, 9550), 'os.path.exists', 'osp.exists', (['tempdir'], {}), '(tempdir)\n', (9541, 9550), True, 'import os.path as osp\n'), ((9564, 9584), 'os.makedirs', 'os.makedirs', (['tempdir'], {}), '(tempdir)\n', (9575, 9584), False, 'import os\n'), ((10691, 10729), 'pyhelp.utils.savedata_to_hdf5', 'savedata_to_hdf5', (['output', 'path_outfile'], {}), '(output, path_outfile)\n', (10707, 10729), False, 'from pyhelp.utils import savedata_to_hdf5\n'), ((11819, 11832), 'numpy.zeros', 'np.zeros', (['nyr'], {}), '(nyr)\n', (11827, 11832), True, 'import numpy as np\n'), ((11915, 11928), 'numpy.zeros', 'np.zeros', (['nyr'], {}), '(nyr)\n', (11923, 11928), True, 'import numpy as np\n'), ((12224, 12262), 'pyhelp.utils.savedata_to_hdf5', 'savedata_to_hdf5', (['output', 'path_outfile'], {}), '(output, path_outfile)\n', (12240, 12262), False, 'from pyhelp.utils import savedata_to_hdf5\n'), ((15712, 15753), 'netCDF4.Dataset', 'netCDF4.Dataset', (['self.ncfilelist[0]', '"""r+"""'], {}), "(self.ncfilelist[0], 'r+')\n", (15727, 15753), False, 'import netCDF4\n'), ((15777, 15805), 'numpy.array', 'np.array', (["netcdf_dset['lat']"], {}), "(netcdf_dset['lat'])\n", (15785, 15805), True, 'import numpy as np\n'), ((15829, 15857), 'numpy.array', 'np.array', (["netcdf_dset['lon']"], {}), "(netcdf_dset['lon'])\n", (15837, 15857), True, 'import numpy as np\n'), ((17494, 17546), 'os.path.join', 'osp.join', (['self.dirpath_netcdf', "('GCQ_v2_%d.nc' % year)"], {}), "(self.dirpath_netcdf, 'GCQ_v2_%d.nc' % year)\n", (17502, 17546), True, 'import os.path as osp\n'), ((17573, 17604), 'netCDF4.Dataset', 'netCDF4.Dataset', (['filename', '"""r+"""'], {}), "(filename, 'r+')\n", (17588, 17604), False, 'import netCDF4\n'), ((18836, 18857), 'numpy.array', 'np.array', (["grid['cid']"], {}), "(grid['cid'])\n", (18844, 18857), True, 'import numpy as np\n'), ((8593, 8619), 'numpy.where', 'np.where', (['(tasavg_i != -999)'], {}), '(tasavg_i != -999)\n', (8601, 8619), True, 'import numpy as np\n'), ((8712, 8731), 'os.path.exists', 'osp.exists', (['d4fname'], {}), '(d4fname)\n', (8722, 8731), True, 'import os.path as osp\n'), ((8749, 8800), 'pyhelp.weather_reader.save_precip_to_HELP', 'save_precip_to_HELP', (['d4fname', 'years', 'precip_i', 'city'], {}), '(d4fname, years, precip_i, city)\n', (8768, 8800), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((8820, 8839), 'os.path.exists', 'osp.exists', (['d7fname'], {}), '(d7fname)\n', (8830, 8839), True, 'import os.path as osp\n'), ((8857, 8909), 'pyhelp.weather_reader.save_airtemp_to_HELP', 'save_airtemp_to_HELP', (['d7fname', 'years', 'tasavg_i', 'city'], {}), '(d7fname, years, tasavg_i, city)\n', (8877, 8909), False, 'from pyhelp.weather_reader import save_precip_to_HELP, save_airtemp_to_HELP, save_solrad_to_HELP, read_cweeds_file, join_daily_cweeds_wy2_and_wy3\n'), ((11113, 11124), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (11121, 11124), True, 'import numpy as np\n'), ((11156, 11167), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (11164, 11167), True, 'import numpy as np\n'), ((11363, 11416), 'numpy.arange', 'np.arange', (['self.year_range[0]', '(self.year_range[1] + 1)'], {}), '(self.year_range[0], self.year_range[1] + 1)\n', (11372, 11416), True, 'import numpy as np\n'), ((11861, 11874), 'numpy.zeros', 
'np.zeros', (['nyr'], {}), '(nyr)\n', (11869, 11874), True, 'import numpy as np\n'), ((12063, 12086), 'numpy.sum', 'np.sum', (['precip[indx, i]'], {}), '(precip[indx, i])\n', (12069, 12086), True, 'import numpy as np\n'), ((2349, 2382), 'numpy.load', 'np.load', (['self.path_connect_tables'], {}), '(self.path_connect_tables)\n', (2356, 2382), True, 'import numpy as np\n'), ((4413, 4437), 'numpy.max', 'np.max', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4419, 4437), True, 'import numpy as np\n'), ((4595, 4619), 'numpy.min', 'np.min', (["wy23_df['Years']"], {}), "(wy23_df['Years'])\n", (4601, 4619), True, 'import numpy as np\n'), ((12002, 12025), 'numpy.where', 'np.where', (['(years == year)'], {}), '(years == year)\n', (12010, 12025), True, 'import numpy as np\n'), ((15587, 15622), 'os.path.join', 'osp.join', (['self.dirpath_netcdf', 'file'], {}), '(self.dirpath_netcdf, file)\n', (15595, 15622), True, 'import os.path as osp\n'), ((16229, 16251), 'numpy.abs', 'np.abs', (['(self.lat - lat)'], {}), '(self.lat - lat)\n', (16235, 16251), True, 'import numpy as np\n'), ((16308, 16330), 'numpy.abs', 'np.abs', (['(self.lon - lon)'], {}), '(self.lon - lon)\n', (16314, 16330), True, 'import numpy as np\n'), ((16605, 16633), 'numpy.abs', 'np.abs', (['(self.lat - latitudes)'], {}), '(self.lat - latitudes)\n', (16611, 16633), True, 'import numpy as np\n'), ((16667, 16696), 'numpy.abs', 'np.abs', (['(self.lon - longitudes)'], {}), '(self.lon - longitudes)\n', (16673, 16696), True, 'import numpy as np\n'), ((17656, 17687), 'numpy.array', 'np.array', (["netcdf_dset['tasmax']"], {}), "(netcdf_dset['tasmax'])\n", (17664, 17687), True, 'import numpy as np\n'), ((17760, 17791), 'numpy.array', 'np.array', (["netcdf_dset['tasmin']"], {}), "(netcdf_dset['tasmin'])\n", (17768, 17791), True, 'import numpy as np\n'), ((17864, 17891), 'numpy.array', 'np.array', (["netcdf_dset['pr']"], {}), "(netcdf_dset['pr'])\n", (17872, 17891), True, 'import numpy as np\n')]
|
import ctypes as ct
import time
import copy
import numpy as np
import sharpy.aero.utils.mapping as mapping
import sharpy.utils.cout_utils as cout
import sharpy.utils.solver_interface as solver_interface
import sharpy.utils.controller_interface as controller_interface
from sharpy.utils.solver_interface import solver, BaseSolver
import sharpy.utils.settings as settings
import sharpy.utils.algebra as algebra
import sharpy.structure.utils.xbeamlib as xbeam
import sharpy.utils.exceptions as exc
@solver
class DynamicCoupled(BaseSolver):
"""
The ``DynamicCoupled`` solver couples the aerodynamic and structural solvers of choice to march forward in time
the aeroelastic system's solution.
Using the ``DynamicCoupled`` solver requires that an instance of the ``StaticCoupled`` solver is called in the
SHARPy solution ``flow`` when defining the problem case.
"""
solver_id = 'DynamicCoupled'
solver_classification = 'Coupled'
settings_types = dict()
settings_default = dict()
settings_description = dict()
settings_types['print_info'] = 'bool'
settings_default['print_info'] = True
settings_description['print_info'] = 'Write status to screen'
settings_types['structural_solver'] = 'str'
settings_default['structural_solver'] = None
settings_description['structural_solver'] = 'Structural solver to use in the coupled simulation'
settings_types['structural_solver_settings'] = 'dict'
settings_default['structural_solver_settings'] = None
settings_description['structural_solver_settings'] = 'Dictionary of settings for the structural solver'
settings_types['aero_solver'] = 'str'
settings_default['aero_solver'] = None
settings_description['aero_solver'] = 'Aerodynamic solver to use in the coupled simulation'
settings_types['aero_solver_settings'] = 'dict'
settings_default['aero_solver_settings'] = None
settings_description['aero_solver_settings'] = 'Dictionary of settings for the aerodynamic solver'
settings_types['n_time_steps'] = 'int'
settings_default['n_time_steps'] = None
settings_description['n_time_steps'] = 'Number of time steps for the simulation'
settings_types['dt'] = 'float'
settings_default['dt'] = None
settings_description['dt'] = 'Time step'
settings_types['fsi_substeps'] = 'int'
settings_default['fsi_substeps'] = 70
settings_description['fsi_substeps'] = 'Max iterations in the FSI loop'
settings_types['fsi_tolerance'] = 'float'
settings_default['fsi_tolerance'] = 1e-5
settings_description['fsi_tolerance'] = 'Convergence threshold for the FSI loop'
settings_types['structural_substeps'] = 'int'
settings_default['structural_substeps'] = 0 # 0 is normal coupled sim.
settings_description['structural_substeps'] = 'Number of extra structural time steps per aero time step. 0 is a fully coupled simulation.'
settings_types['relaxation_factor'] = 'float'
settings_default['relaxation_factor'] = 0.2
settings_description['relaxation_factor'] = 'Relaxation parameter in the FSI iteration. 0 is no relaxation and -> 1 is very relaxed'
settings_types['final_relaxation_factor'] = 'float'
settings_default['final_relaxation_factor'] = 0.0
settings_description['final_relaxation_factor'] = 'Relaxation factor reached in ``relaxation_steps`` with ``dynamic_relaxation`` on'
settings_types['minimum_steps'] = 'int'
settings_default['minimum_steps'] = 3
settings_description['minimum_steps'] = 'Number of minimum FSI iterations before convergence'
settings_types['relaxation_steps'] = 'int'
settings_default['relaxation_steps'] = 100
settings_description['relaxation_steps'] = 'Length of the relaxation factor ramp between ``relaxation_factor`` and ``final_relaxation_factor`` with ``dynamic_relaxation`` on'
settings_types['dynamic_relaxation'] = 'bool'
settings_default['dynamic_relaxation'] = False
settings_description['dynamic_relaxation'] = 'Controls if relaxation factor is modified during the FSI iteration process'
settings_types['postprocessors'] = 'list(str)'
settings_default['postprocessors'] = list()
settings_description['postprocessors'] = 'List of the postprocessors to run at the end of every time step'
settings_types['postprocessors_settings'] = 'dict'
settings_default['postprocessors_settings'] = dict()
    settings_description['postprocessors_settings'] = 'Dictionary with the applicable settings for every ``postprocessor``. Every ``postprocessor`` needs its entry, even if empty'
settings_types['controller_id'] = 'dict'
settings_default['controller_id'] = dict()
settings_description['controller_id'] = 'Dictionary of id of every controller (key) and its type (value)'
settings_types['controller_settings'] = 'dict'
settings_default['controller_settings'] = dict()
settings_description['controller_settings'] = 'Dictionary with settings (value) of every controller id (key)'
settings_types['cleanup_previous_solution'] = 'bool'
settings_default['cleanup_previous_solution'] = False
settings_description['cleanup_previous_solution'] = 'Controls if previous ``timestep_info`` arrays are reset before running the solver'
settings_types['include_unsteady_force_contribution'] = 'bool'
settings_default['include_unsteady_force_contribution'] = False
settings_description['include_unsteady_force_contribution'] = 'If on, added mass contribution is added to the forces. This depends on the time derivative of the bound circulation. Check ``filter_gamma_dot`` in the aero solver'
settings_types['steps_without_unsteady_force'] = 'int'
settings_default['steps_without_unsteady_force'] = 0
settings_description['steps_without_unsteady_force'] = 'Number of initial timesteps that don\'t include unsteady forces contributions. This avoids oscillations due to no perfectly trimmed initial conditions'
settings_types['pseudosteps_ramp_unsteady_force'] = 'int'
settings_default['pseudosteps_ramp_unsteady_force'] = 0
settings_description['pseudosteps_ramp_unsteady_force'] = 'Length of the ramp with which unsteady force contribution is introduced every time step during the FSI iteration process'
settings_table = settings.SettingsTable()
__doc__ += settings_table.generate(settings_types, settings_default, settings_description)
def __init__(self):
self.data = None
self.settings = None
self.structural_solver = None
self.aero_solver = None
self.print_info = False
self.res = 0.0
self.res_dqdt = 0.0
self.res_dqddt = 0.0
self.previous_force = None
self.dt = 0.
self.substep_dt = 0.
self.initial_n_substeps = None
self.predictor = False
self.residual_table = None
self.postprocessors = dict()
self.with_postprocessors = False
self.controllers = None
self.time_aero = 0.
self.time_struc = 0.
def get_g(self):
"""
Getter for ``g``, the gravity value
"""
return self.structural_solver.settings['gravity'].value
def set_g(self, new_g):
"""
Setter for ``g``, the gravity value
"""
self.structural_solver.settings['gravity'] = ct.c_double(new_g)
def get_rho(self):
"""
Getter for ``rho``, the density value
"""
return self.aero_solver.settings['rho'].value
def set_rho(self, new_rho):
"""
Setter for ``rho``, the density value
"""
self.aero_solver.settings['rho'] = ct.c_double(new_rho)
def initialise(self, data, custom_settings=None):
"""
Controls the initialisation process of the solver, including processing
the settings and initialising the aero and structural solvers, postprocessors
and controllers.
"""
self.data = data
if custom_settings is None:
self.settings = data.settings[self.solver_id]
else:
self.settings = custom_settings
settings.to_custom_types(self.settings,
self.settings_types,
self.settings_default)
self.original_settings = copy.deepcopy(self.settings)
self.dt = self.settings['dt']
self.substep_dt = (
self.dt.value/(self.settings['structural_substeps'].value + 1))
self.initial_n_substeps = self.settings['structural_substeps'].value
self.print_info = self.settings['print_info']
if self.settings['cleanup_previous_solution']:
# if there's data in timestep_info[>0], copy the last one to
# timestep_info[0] and remove the rest
self.cleanup_timestep_info()
self.structural_solver = solver_interface.initialise_solver(
self.settings['structural_solver'])
self.structural_solver.initialise(
self.data, self.settings['structural_solver_settings'])
self.aero_solver = solver_interface.initialise_solver(
self.settings['aero_solver'])
self.aero_solver.initialise(self.structural_solver.data,
self.settings['aero_solver_settings'])
self.data = self.aero_solver.data
# initialise postprocessors
self.postprocessors = dict()
if self.settings['postprocessors']:
self.with_postprocessors = True
for postproc in self.settings['postprocessors']:
self.postprocessors[postproc] = solver_interface.initialise_solver(
postproc)
self.postprocessors[postproc].initialise(
self.data, self.settings['postprocessors_settings'][postproc])
# initialise controllers
self.controllers = dict()
self.with_controllers = False
if self.settings['controller_id']:
self.with_controllers = True
for controller_id, controller_type in self.settings['controller_id'].items():
self.controllers[controller_id] = (
controller_interface.initialise_controller(controller_type))
self.controllers[controller_id].initialise(
self.settings['controller_settings'][controller_id],
controller_id)
# print information header
if self.print_info:
self.residual_table = cout.TablePrinter(8, 12, ['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e'])
self.residual_table.field_length[0] = 5
self.residual_table.field_length[1] = 6
self.residual_table.field_length[2] = 4
self.residual_table.print_header(['ts', 't', 'iter', 'struc ratio', 'iter time', 'residual vel',
'FoR_vel(x)', 'FoR_vel(z)'])
def cleanup_timestep_info(self):
if max(len(self.data.aero.timestep_info), len(self.data.structure.timestep_info)) > 1:
# copy last info to first
self.data.aero.timestep_info[0] = self.data.aero.timestep_info[-1]
self.data.structure.timestep_info[0] = self.data.structure.timestep_info[-1]
# delete all the rest
while len(self.data.aero.timestep_info) - 1:
del self.data.aero.timestep_info[-1]
while len(self.data.structure.timestep_info) - 1:
del self.data.structure.timestep_info[-1]
self.data.ts = 0
def process_controller_output(self, controlled_state):
"""
        This function modifies the solver properties and parameters as
        requested from the controller.
        This keeps the main loop much cleaner, while allowing for flexibility.
Please, if you add options in here, always code the possibility of
that specific option not being there without the code complaining to
the user.
        If it is possible, use the same Key for the new setting as for the
setting in the solver. For example, if you want to modify the
`structural_substeps` variable in settings, use that Key in the
`info` dictionary.
As a convention: a value of None returns the value to the initial
one specified in settings, while the key not being in the dict
is ignored, so if any change was made before, it will stay there.
"""
try:
info = controlled_state['info']
except KeyError:
return controlled_state['structural'], controlled_state['aero']
# general copy-if-exists, restore if == None
for info_k, info_v in info.items():
if info_k in self.settings:
if info_v is not None:
self.settings[info_k] = info_v
else:
self.settings[info_k] = self.original_settings[info_k]
# specifics of every option
for info_k, info_v in info.items():
if info_k in self.settings:
if info_k == 'structural_substeps':
if info_v is not None:
self.substep_dt = (
self.settings['dt'].value/(
self.settings['structural_substeps'].value + 1))
if info_k == 'structural_solver':
if info_v is not None:
self.structural_solver = solver_interface.initialise_solver(
info['structural_solver'])
self.structural_solver.initialise(
self.data, self.settings['structural_solver_settings'])
return controlled_state['structural'], controlled_state['aero']
def run(self):
"""
Run the time stepping procedure with controllers and postprocessors
included.
"""
# dynamic simulations start at tstep == 1, 0 is reserved for the initial state
for self.data.ts in range(
len(self.data.structure.timestep_info),
self.settings['n_time_steps'].value + len(self.data.structure.timestep_info)):
initial_time = time.perf_counter()
structural_kstep = self.data.structure.timestep_info[-1].copy()
aero_kstep = self.data.aero.timestep_info[-1].copy()
# Add the controller here
if self.with_controllers:
state = {'structural': structural_kstep,
'aero': aero_kstep}
for k, v in self.controllers.items():
state = v.control(self.data, state)
# this takes care of the changes in options for the solver
structural_kstep, aero_kstep = self.process_controller_output(
state)
self.time_aero = 0.0
self.time_struc = 0.0
# Copy the controlled states so that the interpolation does not
# destroy the previous information
controlled_structural_kstep = structural_kstep.copy()
controlled_aero_kstep = aero_kstep.copy()
k = 0
for k in range(self.settings['fsi_substeps'].value + 1):
if (k == self.settings['fsi_substeps'].value and
self.settings['fsi_substeps']):
cout.cout_wrap('The FSI solver did not converge!!!')
break
# generate new grid (already rotated)
aero_kstep = controlled_aero_kstep.copy()
self.aero_solver.update_custom_grid(
structural_kstep,
aero_kstep)
# compute unsteady contribution
force_coeff = 0.0
unsteady_contribution = False
if self.settings['include_unsteady_force_contribution'].value:
if self.data.ts > self.settings['steps_without_unsteady_force'].value:
unsteady_contribution = True
if k < self.settings['pseudosteps_ramp_unsteady_force'].value:
force_coeff = k/self.settings['pseudosteps_ramp_unsteady_force'].value
else:
force_coeff = 1.
# run the solver
ini_time_aero = time.perf_counter()
self.data = self.aero_solver.run(aero_kstep,
structural_kstep,
convect_wake=True,
unsteady_contribution=unsteady_contribution)
self.time_aero += time.perf_counter() - ini_time_aero
previous_kstep = structural_kstep.copy()
structural_kstep = controlled_structural_kstep.copy()
                # move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(structural_kstep,
aero_kstep)
self.map_forces(aero_kstep,
structural_kstep,
force_coeff)
# relaxation
relax_factor = self.relaxation_factor(k)
relax(self.data.structure,
structural_kstep,
previous_kstep,
relax_factor)
# check if nan anywhere.
# if yes, raise exception
if np.isnan(structural_kstep.steady_applied_forces).any():
raise exc.NotConvergedSolver('NaN found in steady_applied_forces!')
if np.isnan(structural_kstep.unsteady_applied_forces).any():
raise exc.NotConvergedSolver('NaN found in unsteady_applied_forces!')
copy_structural_kstep = structural_kstep.copy()
ini_time_struc = time.perf_counter()
for i_substep in range(
self.settings['structural_substeps'].value + 1):
# run structural solver
coeff = ((i_substep + 1)/
(self.settings['structural_substeps'].value + 1))
structural_kstep = self.interpolate_timesteps(
step0=self.data.structure.timestep_info[-1],
step1=copy_structural_kstep,
out_step=structural_kstep,
coeff=coeff)
self.data = self.structural_solver.run(
structural_step=structural_kstep,
dt=self.substep_dt)
self.time_struc += time.perf_counter() - ini_time_struc
# check convergence
if self.convergence(k,
structural_kstep,
previous_kstep):
# move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(
structural_kstep,
aero_kstep)
break
            # move the aerodynamic surface according to the structural one
self.aero_solver.update_custom_grid(structural_kstep, aero_kstep)
self.aero_solver.add_step()
self.data.aero.timestep_info[-1] = aero_kstep.copy()
self.structural_solver.add_step()
self.data.structure.timestep_info[-1] = structural_kstep.copy()
final_time = time.perf_counter()
if self.print_info:
self.residual_table.print_line([self.data.ts,
self.data.ts*self.dt.value,
k,
self.time_struc/(self.time_aero + self.time_struc),
final_time - initial_time,
np.log10(self.res_dqdt),
structural_kstep.for_vel[0],
structural_kstep.for_vel[2],
np.sum(structural_kstep.steady_applied_forces[:, 0]),
np.sum(structural_kstep.steady_applied_forces[:, 2])])
self.structural_solver.extract_resultants()
# run postprocessors
if self.with_postprocessors:
for postproc in self.postprocessors:
self.data = self.postprocessors[postproc].run(online=True)
if self.print_info:
cout.cout_wrap('...Finished', 1)
return self.data
def convergence(self, k, tstep, previous_tstep):
r"""
Check convergence in the FSI loop.
Convergence is determined as:
        .. math:: \epsilon_q^k = \frac{|| q^k - q^{k - 1} ||}{|| q^0 ||}
        .. math:: \epsilon_{\dot{q}}^k = \frac{|| \dot{q}^k - \dot{q}^{k - 1} ||}{|| \dot{q}^0 ||}
        The FSI iteration has converged if :math:`\epsilon_q^k < \mathrm{FSI\ tolerance}` and :math:`\epsilon_{\dot{q}}^k < \mathrm{FSI\ tolerance}`
"""
# check for non-convergence
if not all(np.isfinite(tstep.q)):
import pdb
pdb.set_trace()
raise Exception(
'***Not converged! There is a NaN value in the forces!')
if not k:
# save the value of the vectors for normalising later
self.base_q = np.linalg.norm(tstep.q.copy())
self.base_dqdt = np.linalg.norm(tstep.dqdt.copy())
if self.base_dqdt == 0:
self.base_dqdt = 1.
return False
# relative residuals
self.res = (np.linalg.norm(tstep.q-
previous_tstep.q)/
self.base_q)
self.res_dqdt = (np.linalg.norm(tstep.dqdt-
previous_tstep.dqdt)/
self.base_dqdt)
# we don't want this to converge before introducing the gamma_dot forces!
if self.settings['include_unsteady_force_contribution'].value:
if k < self.settings['pseudosteps_ramp_unsteady_force'].value:
return False
# convergence
if k > self.settings['minimum_steps'].value - 1:
if self.res < self.settings['fsi_tolerance'].value:
if self.res_dqdt < self.settings['fsi_tolerance'].value:
return True
return False
def map_forces(self, aero_kstep, structural_kstep, unsteady_forces_coeff=1.0):
# set all forces to 0
structural_kstep.steady_applied_forces.fill(0.0)
structural_kstep.unsteady_applied_forces.fill(0.0)
# aero forces to structural forces
struct_forces = mapping.aero2struct_force_mapping(
aero_kstep.forces,
self.data.aero.struct2aero_mapping,
aero_kstep.zeta,
structural_kstep.pos,
structural_kstep.psi,
self.data.structure.node_master_elem,
self.data.structure.connectivities,
structural_kstep.cag(),
self.data.aero.aero_dict)
dynamic_struct_forces = unsteady_forces_coeff*mapping.aero2struct_force_mapping(
aero_kstep.dynamic_forces,
self.data.aero.struct2aero_mapping,
aero_kstep.zeta,
structural_kstep.pos,
structural_kstep.psi,
self.data.structure.node_master_elem,
self.data.structure.connectivities,
structural_kstep.cag(),
self.data.aero.aero_dict)
# prescribed forces + aero forces
try:
structural_kstep.steady_applied_forces = (
(struct_forces + self.data.structure.ini_info.steady_applied_forces).
astype(dtype=ct.c_double, order='F', copy=True))
structural_kstep.unsteady_applied_forces = (
(dynamic_struct_forces + self.data.structure.dynamic_input[max(self.data.ts - 1, 0)]['dynamic_forces']).
astype(dtype=ct.c_double, order='F', copy=True))
except KeyError:
structural_kstep.steady_applied_forces = (
(struct_forces + self.data.structure.ini_info.steady_applied_forces).
astype(dtype=ct.c_double, order='F', copy=True))
structural_kstep.unsteady_applied_forces = dynamic_struct_forces
def relaxation_factor(self, k):
initial = self.settings['relaxation_factor'].value
if not self.settings['dynamic_relaxation'].value:
return initial
final = self.settings['final_relaxation_factor'].value
if k >= self.settings['relaxation_steps'].value:
return final
value = initial + (final - initial)/self.settings['relaxation_steps'].value*k
return value
@staticmethod
def interpolate_timesteps(step0, step1, out_step, coeff):
"""
Performs a linear interpolation between step0 and step1 based on coeff
        in [0, 1]: coeff = 0 means out_step == step0, and coeff = 1 means out_step == step1.
Quantities interpolated:
* `steady_applied_forces`
* `unsteady_applied_forces`
* `velocity` input in Lagrange constraints
"""
if not 0.0 <= coeff <= 1.0:
return out_step
# forces
out_step.steady_applied_forces[:] = (
(1.0 - coeff)*step0.steady_applied_forces +
(coeff)*(step1.steady_applied_forces))
out_step.unsteady_applied_forces[:] = (
(1.0 - coeff)*step0.unsteady_applied_forces +
(coeff)*(step1.unsteady_applied_forces))
# multibody if necessary
if out_step.mb_dict is not None:
for key in step1.mb_dict.keys():
if 'constraint_' in key:
try:
out_step.mb_dict[key]['velocity'][:] = (
(1.0 - coeff)*step0.mb_dict[key]['velocity'] +
(coeff)*step1.mb_dict[key]['velocity'])
except KeyError:
pass
return out_step
def relax(beam, timestep, previous_timestep, coeff):
timestep.steady_applied_forces[:] = ((1.0 - coeff)*timestep.steady_applied_forces +
coeff*previous_timestep.steady_applied_forces)
timestep.unsteady_applied_forces[:] = ((1.0 - coeff)*timestep.unsteady_applied_forces +
coeff*previous_timestep.unsteady_applied_forces)
def normalise_quaternion(tstep):
tstep.dqdt[-4:] = algebra.unit_vector(tstep.dqdt[-4:])
tstep.quat = tstep.dqdt[-4:].astype(dtype=ct.c_double, order='F', copy=True)
|
[
"sharpy.utils.algebra.unit_vector",
"sharpy.utils.cout_utils.TablePrinter",
"sharpy.utils.cout_utils.cout_wrap",
"numpy.log10",
"sharpy.utils.exceptions.NotConvergedSolver",
"sharpy.utils.controller_interface.initialise_controller",
"sharpy.utils.settings.SettingsTable",
"time.perf_counter",
"numpy.linalg.norm",
"numpy.sum",
"numpy.isfinite",
"ctypes.c_double",
"pdb.set_trace",
"copy.deepcopy",
"numpy.isnan",
"sharpy.utils.solver_interface.initialise_solver",
"sharpy.utils.settings.to_custom_types"
] |
[((6294, 6318), 'sharpy.utils.settings.SettingsTable', 'settings.SettingsTable', ([], {}), '()\n', (6316, 6318), True, 'import sharpy.utils.settings as settings\n'), ((26759, 26795), 'sharpy.utils.algebra.unit_vector', 'algebra.unit_vector', (['tstep.dqdt[-4:]'], {}), '(tstep.dqdt[-4:])\n', (26778, 26795), True, 'import sharpy.utils.algebra as algebra\n'), ((7341, 7359), 'ctypes.c_double', 'ct.c_double', (['new_g'], {}), '(new_g)\n', (7352, 7359), True, 'import ctypes as ct\n'), ((7654, 7674), 'ctypes.c_double', 'ct.c_double', (['new_rho'], {}), '(new_rho)\n', (7665, 7674), True, 'import ctypes as ct\n'), ((8130, 8218), 'sharpy.utils.settings.to_custom_types', 'settings.to_custom_types', (['self.settings', 'self.settings_types', 'self.settings_default'], {}), '(self.settings, self.settings_types, self.\n settings_default)\n', (8154, 8218), True, 'import sharpy.utils.settings as settings\n'), ((8314, 8342), 'copy.deepcopy', 'copy.deepcopy', (['self.settings'], {}), '(self.settings)\n', (8327, 8342), False, 'import copy\n'), ((8872, 8942), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (["self.settings['structural_solver']"], {}), "(self.settings['structural_solver'])\n", (8906, 8942), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((9094, 9158), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (["self.settings['aero_solver']"], {}), "(self.settings['aero_solver'])\n", (9128, 9158), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((9617, 9661), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (['postproc'], {}), '(postproc)\n', (9651, 9661), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((10152, 10211), 'sharpy.utils.controller_interface.initialise_controller', 'controller_interface.initialise_controller', (['controller_type'], {}), '(controller_type)\n', (10194, 10211), True, 'import sharpy.utils.controller_interface as controller_interface\n'), ((10475, 10541), 'sharpy.utils.cout_utils.TablePrinter', 'cout.TablePrinter', (['(8)', '(12)', "['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e']"], {}), "(8, 12, ['g', 'f', 'g', 'f', 'f', 'f', 'e', 'e'])\n", (10492, 10541), True, 'import sharpy.utils.cout_utils as cout\n'), ((14188, 14207), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14205, 14207), False, 'import time\n'), ((19636, 19655), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19653, 19655), False, 'import time\n'), ((20788, 20820), 'sharpy.utils.cout_utils.cout_wrap', 'cout.cout_wrap', (['"""...Finished"""', '(1)'], {}), "('...Finished', 1)\n", (20802, 20820), True, 'import sharpy.utils.cout_utils as cout\n'), ((21410, 21425), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (21423, 21425), False, 'import pdb\n'), ((21880, 21922), 'numpy.linalg.norm', 'np.linalg.norm', (['(tstep.q - previous_tstep.q)'], {}), '(tstep.q - previous_tstep.q)\n', (21894, 21922), True, 'import numpy as np\n'), ((22016, 22064), 'numpy.linalg.norm', 'np.linalg.norm', (['(tstep.dqdt - previous_tstep.dqdt)'], {}), '(tstep.dqdt - previous_tstep.dqdt)\n', (22030, 22064), True, 'import numpy as np\n'), ((16366, 16385), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (16383, 16385), False, 'import time\n'), ((17984, 18003), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18001, 18003), False, 'import time\n'), ((21352, 21372), 'numpy.isfinite', 'np.isfinite', (['tstep.q'], {}), 
'(tstep.q)\n', (21363, 21372), True, 'import numpy as np\n'), ((15372, 15424), 'sharpy.utils.cout_utils.cout_wrap', 'cout.cout_wrap', (['"""The FSI solver did not converge!!!"""'], {}), "('The FSI solver did not converge!!!')\n", (15386, 15424), True, 'import sharpy.utils.cout_utils as cout\n'), ((16710, 16729), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (16727, 16729), False, 'import time\n'), ((17657, 17718), 'sharpy.utils.exceptions.NotConvergedSolver', 'exc.NotConvergedSolver', (['"""NaN found in steady_applied_forces!"""'], {}), "('NaN found in steady_applied_forces!')\n", (17679, 17718), True, 'import sharpy.utils.exceptions as exc\n'), ((17822, 17885), 'sharpy.utils.exceptions.NotConvergedSolver', 'exc.NotConvergedSolver', (['"""NaN found in unsteady_applied_forces!"""'], {}), "('NaN found in unsteady_applied_forces!')\n", (17844, 17885), True, 'import sharpy.utils.exceptions as exc\n'), ((18763, 18782), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18780, 18782), False, 'import time\n'), ((13442, 13503), 'sharpy.utils.solver_interface.initialise_solver', 'solver_interface.initialise_solver', (["info['structural_solver']"], {}), "(info['structural_solver'])\n", (13476, 13503), True, 'import sharpy.utils.solver_interface as solver_interface\n'), ((17575, 17623), 'numpy.isnan', 'np.isnan', (['structural_kstep.steady_applied_forces'], {}), '(structural_kstep.steady_applied_forces)\n', (17583, 17623), True, 'import numpy as np\n'), ((17738, 17788), 'numpy.isnan', 'np.isnan', (['structural_kstep.unsteady_applied_forces'], {}), '(structural_kstep.unsteady_applied_forces)\n', (17746, 17788), True, 'import numpy as np\n'), ((20101, 20124), 'numpy.log10', 'np.log10', (['self.res_dqdt'], {}), '(self.res_dqdt)\n', (20109, 20124), True, 'import numpy as np\n'), ((20328, 20380), 'numpy.sum', 'np.sum', (['structural_kstep.steady_applied_forces[:, 0]'], {}), '(structural_kstep.steady_applied_forces[:, 0])\n', (20334, 20380), True, 'import numpy as np\n'), ((20430, 20482), 'numpy.sum', 'np.sum', (['structural_kstep.steady_applied_forces[:, 2]'], {}), '(structural_kstep.steady_applied_forces[:, 2])\n', (20436, 20482), True, 'import numpy as np\n')]
|
# coding: utf-8
"""
Test Pyleecan optimization module using the Zitzler–Deb–Thiele function No. 3 (ZDT3)
"""
import pytest
from ....definitions import PACKAGE_NAME
from ....Tests.Validation.Machine.SCIM_001 import SCIM_001
from ....Classes.InputCurrent import InputCurrent
from ....Classes.MagFEMM import MagFEMM
from ....Classes.Simu1 import Simu1
from ....Classes.Output import Output
from ....Classes.OptiDesignVar import OptiDesignVar
from ....Classes.OptiObjFunc import OptiObjFunc
from ....Classes.OptiConstraint import OptiConstraint
from ....Classes.OptiProblem import OptiProblem
from ....Classes.ImportMatrixVal import ImportMatrixVal
from ....Classes.ImportGenVectLin import ImportGenVectLin
from ....Classes.OptiGenAlgNsga2Deap import OptiGenAlgNsga2Deap
import matplotlib.pyplot as plt
import matplotlib.image as img
import numpy as np
import random
@pytest.mark.validation
@pytest.mark.optimization
def test_zdt3():
# ### Defining reference Output
# Definition of the enforced output of the electrical module
Nt = 2
Nr = ImportMatrixVal(value=np.ones(Nt) * 3000)
Is = ImportMatrixVal(
value=np.array(
[
[6.97244193e-06, 2.25353053e02, -2.25353060e02],
[-2.60215295e02, 1.30107654e02, 1.30107642e02],
# [-6.97244208e-06, -2.25353053e02, 2.25353060e02],
# [2.60215295e02, -1.30107654e02, -1.30107642e02],
]
)
)
Ir = ImportMatrixVal(value=np.zeros(30))
time = ImportGenVectLin(start=0, stop=0.015, num=Nt, endpoint=True)
angle = ImportGenVectLin(
start=0, stop=2 * np.pi, num=64, endpoint=False
) # num=1024
# Definition of the simulation
simu = Simu1(name="Test_machine", machine=SCIM_001)
simu.input = InputCurrent(
Is=Is,
Ir=Ir, # zero current for the rotor
Nr=Nr,
angle_rotor=None, # Will be computed
time=time,
angle=angle,
angle_rotor_initial=0.5216 + np.pi,
)
# Definition of the magnetic simulation
simu.mag = MagFEMM(
is_stator_linear_BH=2,
is_rotor_linear_BH=2,
is_symmetry_a=True,
is_antiper_a=False,
)
simu.mag.Kmesh_fineness = 0.01
# simu.mag.Kgeo_fineness=0.02
simu.mag.sym_a = 4
simu.struct = None
output = Output(simu=simu)
# ### Design variable
my_vars = {}
for i in range(30):
my_vars["var_" + str(i)] = OptiDesignVar(
name="output.simu.input.Ir.value[" + str(i) + "]",
type_var="interval",
space=[0, 1],
function=lambda space: np.random.uniform(*space),
)
# ### Objectives
objs = {
"obj1": OptiObjFunc(
description="Maximization of the torque average",
func=lambda output: output.mag.Tem_av,
),
"obj2": OptiObjFunc(
description="Minimization of the torque ripple",
func=lambda output: output.mag.Tem_rip,
),
}
# ### Evaluation
def evaluate(output):
x = output.simu.input.Ir.value
f1 = lambda x: x[0]
g = lambda x: 1 + (9 / 29) * np.sum(x[1:])
h = lambda f1, g: 1 - np.sqrt(f1 / g) - (f1 / g) * np.sin(10 * np.pi * f1)
output.mag.Tem_av = f1(x)
output.mag.Tem_rip = g(x) * h(f1(x), g(x))
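    # The evaluation above is the standard ZDT3 form (assuming 30 variables
    # in [0, 1]): f1(x) = x_1, g(x) = 1 + 9/29 * sum(x_2..x_30) and
    # f2(x) = g * (1 - sqrt(f1/g) - (f1/g) * sin(10*pi*f1)), with 'Tem_av'
    # standing in for f1 and 'Tem_rip' for f2.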
# ### Defining the problem
my_prob = OptiProblem(
output=output, design_var=my_vars, obj_func=objs, eval_func=evaluate
)
solver = OptiGenAlgNsga2Deap(problem=my_prob, size_pop=40, nb_gen=100, p_mutate=0.5)
res = solver.solve()
def plot_pareto(self):
"""Plot every fitness values with the pareto front for 2 fitness
Parameters
----------
self : OutputMultiOpti
"""
        # TODO Add a feature to return the design_variables of each individual from the Pareto front
# Get fitness and ngen
is_valid = np.array(self.is_valid)
fitness = np.array(self.fitness)
ngen = np.array(self.ngen)
# Keep only valid values
indx = np.where(is_valid)[0]
fitness = fitness[indx]
ngen = ngen[indx]
# Get pareto front
pareto = list(np.unique(fitness, axis=0))
# Get dominated values
to_remove = []
N = len(pareto)
for i in range(N):
for j in range(N):
if all(pareto[j] <= pareto[i]) and any(pareto[j] < pareto[i]):
to_remove.append(pareto[i])
break
# Remove dominated values
for i in to_remove:
for l in range(len(pareto)):
if all(i == pareto[l]):
pareto.pop(l)
break
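        # (a point j dominates i when it is <= in every objective and strictly
        #  < in at least one; the dominated points collected above were removed here)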
pareto = np.array(pareto)
fig, axs = plt.subplots(1, 2, figsize=(16, 6))
# Plot Pareto front
axs[0].scatter(
pareto[:, 0],
pareto[:, 1],
facecolors="b",
edgecolors="b",
s=0.8,
label="Pareto Front",
)
axs[0].autoscale()
axs[0].legend()
axs[0].set_title("Pyleecan results")
axs[0].set_xlabel(r"$f_1(x)$")
axs[0].set_ylabel(r"$f_2(x)$")
try:
img_to_find = img.imread(
"pyleecan\\Tests\\Validation\\Optimization\\zdt3.jpg", format="jpg"
)
axs[1].imshow(img_to_find, aspect="auto")
axs[1].axis("off")
axs[1].set_title("Pareto front of the problem")
except (TypeError, ValueError):
print("Pillow is needed to import jpg files")
return fig
fig = plot_pareto(res)
fig.savefig(PACKAGE_NAME + "/Tests/Results/Validation/test_zdt3.png")
|
[
"numpy.sqrt",
"numpy.unique",
"numpy.ones",
"numpy.where",
"matplotlib.image.imread",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.uniform",
"numpy.sin",
"matplotlib.pyplot.subplots"
] |
[((3954, 3977), 'numpy.array', 'np.array', (['self.is_valid'], {}), '(self.is_valid)\n', (3962, 3977), True, 'import numpy as np\n'), ((3996, 4018), 'numpy.array', 'np.array', (['self.fitness'], {}), '(self.fitness)\n', (4004, 4018), True, 'import numpy as np\n'), ((4034, 4053), 'numpy.array', 'np.array', (['self.ngen'], {}), '(self.ngen)\n', (4042, 4053), True, 'import numpy as np\n'), ((4774, 4790), 'numpy.array', 'np.array', (['pareto'], {}), '(pareto)\n', (4782, 4790), True, 'import numpy as np\n'), ((4811, 4846), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 6)'}), '(1, 2, figsize=(16, 6))\n', (4823, 4846), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1224), 'numpy.array', 'np.array', (['[[6.97244193e-06, 225.353053, -225.35306], [-260.215295, 130.107654, \n 130.107642]]'], {}), '([[6.97244193e-06, 225.353053, -225.35306], [-260.215295, \n 130.107654, 130.107642]])\n', (1136, 1224), True, 'import numpy as np\n'), ((1501, 1513), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (1509, 1513), True, 'import numpy as np\n'), ((4103, 4121), 'numpy.where', 'np.where', (['is_valid'], {}), '(is_valid)\n', (4111, 4121), True, 'import numpy as np\n'), ((4234, 4260), 'numpy.unique', 'np.unique', (['fitness'], {'axis': '(0)'}), '(fitness, axis=0)\n', (4243, 4260), True, 'import numpy as np\n'), ((5284, 5363), 'matplotlib.image.imread', 'img.imread', (['"""pyleecan\\\\Tests\\\\Validation\\\\Optimization\\\\zdt3.jpg"""'], {'format': '"""jpg"""'}), "('pyleecan\\\\Tests\\\\Validation\\\\Optimization\\\\zdt3.jpg', format='jpg')\n", (5294, 5363), True, 'import matplotlib.image as img\n'), ((1068, 1079), 'numpy.ones', 'np.ones', (['Nt'], {}), '(Nt)\n', (1075, 1079), True, 'import numpy as np\n'), ((2641, 2666), 'numpy.random.uniform', 'np.random.uniform', (['*space'], {}), '(*space)\n', (2658, 2666), True, 'import numpy as np\n'), ((3177, 3190), 'numpy.sum', 'np.sum', (['x[1:]'], {}), '(x[1:])\n', (3183, 3190), True, 'import numpy as np\n'), ((3221, 3236), 'numpy.sqrt', 'np.sqrt', (['(f1 / g)'], {}), '(f1 / g)\n', (3228, 3236), True, 'import numpy as np\n'), ((3250, 3273), 'numpy.sin', 'np.sin', (['(10 * np.pi * f1)'], {}), '(10 * np.pi * f1)\n', (3256, 3273), True, 'import numpy as np\n')]
|
from builder.laikago_task_bullet import LaikagoTaskBullet
from builder.laikago_task import InitPose
import math
import numpy as np
ABDUCTION_P_GAIN = 220.0
ABDUCTION_D_GAIN = 0.3
HIP_P_GAIN = 220.0
HIP_D_GAIN = 2.0
KNEE_P_GAIN = 220.0
KNEE_D_GAIN = 2.0
class LaikagoStandImitationBulletBase(LaikagoTaskBullet):
def __init__(self,
reward_mode='without_shaping',
run_mode='train'):
super(LaikagoStandImitationBulletBase, self).__init__(run_mode=run_mode,
reward_mode=reward_mode,
init_pose=InitPose.LIE)
self.imitation_action = np.array([-10, 30, -75,
10, 30, -75,
-10, 50, -75,
10, 50, -75]) * np.pi / 180
self._kp = [ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN,
ABDUCTION_P_GAIN, HIP_P_GAIN, KNEE_P_GAIN]
self._kd = [ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN,
ABDUCTION_D_GAIN, HIP_D_GAIN, KNEE_D_GAIN]
self._torque_limits = np.ones(12) * 40
class LaikagoStandImitationBullet0(LaikagoStandImitationBulletBase):
def __init__(self, run_mode='train', reward_mode='with_shaping',):
super(LaikagoStandImitationBullet0, self).__init__(run_mode=run_mode,
reward_mode=reward_mode)
@property
def is_healthy(self):
return not (self.done_r_bullet(threshold=30) or
self.done_p_bullet(threshold=30) or
self.done_y_bullet(threshold=30) or
self.done_height_bullet(threshold=0.25) or
self.done_region_bullet(threshold=3) or
self.done_toe_contact_long(threshold=30) or
self.done_toe_distance(threshold=0.2))
def cal_phi_function(self):
pos = np.array(self._env.get_history_angle()[0])
vel = np.array(self._env.get_history_velocity()[0])
target_pos = self.imitation_action
target_vel = np.zeros(12)
motor_torques = -1 * (self._kp * (pos - target_pos)) - self._kd * (vel - target_vel)
return 10 / np.sum(np.abs(motor_torques))
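        # Shaping sketch: the PD torques above, tau = -kp*(q - q_ref) - kd*(qd - qd_ref),
        # shrink as the robot approaches the imitation pose, so phi = 10 / sum(|tau|)
        # grows the closer the stance matches the reference joint angles.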
def update_reward(self):
if self.is_healthy:
self.add_reward(1, 1)
|
[
"numpy.array",
"numpy.zeros",
"numpy.abs",
"numpy.ones"
] |
[((2373, 2385), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2381, 2385), True, 'import numpy as np\n'), ((1383, 1394), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (1390, 1394), True, 'import numpy as np\n'), ((707, 771), 'numpy.array', 'np.array', (['[-10, 30, -75, 10, 30, -75, -10, 50, -75, 10, 50, -75]'], {}), '([-10, 30, -75, 10, 30, -75, -10, 50, -75, 10, 50, -75])\n', (715, 771), True, 'import numpy as np\n'), ((2506, 2527), 'numpy.abs', 'np.abs', (['motor_torques'], {}), '(motor_torques)\n', (2512, 2527), True, 'import numpy as np\n')]
|
import unittest
from datetime import date
from irLib.marketConvention.dayCount import ACT_ACT
from irLib.marketConvention.compounding import annually_k_Spot
from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve
import numpy as np
alias_disC = 'disC'
alias_forC = 'forC'
referenceDate = date(2020, 6, 26)
dayCount = ACT_ACT()
compounding = annually_k_Spot()
allowExtrapolation = False
# set synthetic data
timeIndex = [1, 2, 3, 4, 5]
flatR = 0.03
dF = ((flatR + 1) ** -np.arange(1, 6)).tolist()
forwardRates = (flatR * np.ones(5)).tolist()
spots = (flatR * np.ones(5)).tolist()
yearFrac = np.arange(1, 6).tolist()
par = (flatR * np.ones(5)).tolist()
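# Flat-curve relations assumed by the synthetic data above (annual
# compounding): dF_t = (1 + r)^-t and spot_t = forward_t = par_t = r for all t.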
t = date(2021, 6, 30)  # note: date(2021, 6, 26) would trigger an extrapolation warning
t1 = date(2022, 6, 26)
t2 = date(2023, 6, 26)
class testYieldCurveGetRate(unittest.TestCase):
def testDiscountCurve(self):
disC = discountCurve(alias_disC, referenceDate,
dayCount, compounding, allowExtrapolation)
disC.values = dF
disC.timeIndex = timeIndex
        self.assertAlmostEqual(disC.getRate(t1, t2), (1 + flatR) ** -1)  # assertAlmostEqual rounds to 7 decimal places by default
def testForwardCurve(self):
forwardC = forwardCurve(alias_forC, referenceDate,
dayCount, compounding, allowExtrapolation)
forwardC.values = forwardRates
forwardC.timeIndex = timeIndex
self.assertAlmostEqual(forwardC.getRate(t, t1, t2), flatR)
def testSpot2Df(self):
self.assertCountEqual(np.round(yieldCurve.spot2Df(
spots, yearFrac, compounding), 8), np.round(dF, 8))
self.assertCountEqual(np.round(yieldCurve.spot2Df(
dF, yearFrac, compounding, reverse=True), 8), np.round(spots, 8))
def testDf2Forward(self):
self.assertCountEqual(np.round(yieldCurve.dF2Forward(
dF, yearFrac), 8), np.round(forwardRates, 8))
def testForward2Spot(self):
self.assertCountEqual(np.round(yieldCurve.forward2Spot(
forwardRates, yearFrac, compounding), 8), np.round(spots, 8))
def testPar2Df(self):
self.assertCountEqual(
np.round(yieldCurve.par2Df(par, yearFrac), 8), np.round(dF, 8))
self.assertCountEqual(np.round(yieldCurve.par2Df(
dF, yearFrac, reverse=True), 8), np.round(par, 8))
|
[
"numpy.ones",
"numpy.arange",
"numpy.round",
"irLib.helpers.yieldCurve.yieldCurve.dF2Forward",
"irLib.marketConvention.dayCount.ACT_ACT",
"irLib.helpers.yieldCurve.discountCurve",
"irLib.helpers.yieldCurve.forwardCurve",
"datetime.date",
"irLib.helpers.yieldCurve.yieldCurve.spot2Df",
"irLib.helpers.yieldCurve.yieldCurve.par2Df",
"irLib.helpers.yieldCurve.yieldCurve.forward2Spot",
"irLib.marketConvention.compounding.annually_k_Spot"
] |
[((311, 328), 'datetime.date', 'date', (['(2020)', '(6)', '(26)'], {}), '(2020, 6, 26)\n', (315, 328), False, 'from datetime import date\n'), ((340, 349), 'irLib.marketConvention.dayCount.ACT_ACT', 'ACT_ACT', ([], {}), '()\n', (347, 349), False, 'from irLib.marketConvention.dayCount import ACT_ACT\n'), ((364, 381), 'irLib.marketConvention.compounding.annually_k_Spot', 'annually_k_Spot', ([], {}), '()\n', (379, 381), False, 'from irLib.marketConvention.compounding import annually_k_Spot\n'), ((680, 697), 'datetime.date', 'date', (['(2021)', '(6)', '(30)'], {}), '(2021, 6, 30)\n', (684, 697), False, 'from datetime import date\n'), ((766, 783), 'datetime.date', 'date', (['(2022)', '(6)', '(26)'], {}), '(2022, 6, 26)\n', (770, 783), False, 'from datetime import date\n'), ((789, 806), 'datetime.date', 'date', (['(2023)', '(6)', '(26)'], {}), '(2023, 6, 26)\n', (793, 806), False, 'from datetime import date\n'), ((614, 629), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (623, 629), True, 'import numpy as np\n'), ((904, 991), 'irLib.helpers.yieldCurve.discountCurve', 'discountCurve', (['alias_disC', 'referenceDate', 'dayCount', 'compounding', 'allowExtrapolation'], {}), '(alias_disC, referenceDate, dayCount, compounding,\n allowExtrapolation)\n', (917, 991), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((1241, 1327), 'irLib.helpers.yieldCurve.forwardCurve', 'forwardCurve', (['alias_forC', 'referenceDate', 'dayCount', 'compounding', 'allowExtrapolation'], {}), '(alias_forC, referenceDate, dayCount, compounding,\n allowExtrapolation)\n', (1253, 1327), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((544, 554), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (551, 554), True, 'import numpy as np\n'), ((582, 592), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (589, 592), True, 'import numpy as np\n'), ((654, 664), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (661, 664), True, 'import numpy as np\n'), ((1635, 1650), 'numpy.round', 'np.round', (['dF', '(8)'], {}), '(dF, 8)\n', (1643, 1650), True, 'import numpy as np\n'), ((1769, 1787), 'numpy.round', 'np.round', (['spots', '(8)'], {}), '(spots, 8)\n', (1777, 1787), True, 'import numpy as np\n'), ((1913, 1938), 'numpy.round', 'np.round', (['forwardRates', '(8)'], {}), '(forwardRates, 8)\n', (1921, 1938), True, 'import numpy as np\n'), ((2091, 2109), 'numpy.round', 'np.round', (['spots', '(8)'], {}), '(spots, 8)\n', (2099, 2109), True, 'import numpy as np\n'), ((2228, 2243), 'numpy.round', 'np.round', (['dF', '(8)'], {}), '(dF, 8)\n', (2236, 2243), True, 'import numpy as np\n'), ((2348, 2364), 'numpy.round', 'np.round', (['par', '(8)'], {}), '(par, 8)\n', (2356, 2364), True, 'import numpy as np\n'), ((494, 509), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (503, 509), True, 'import numpy as np\n'), ((1568, 1616), 'irLib.helpers.yieldCurve.yieldCurve.spot2Df', 'yieldCurve.spot2Df', (['spots', 'yearFrac', 'compounding'], {}), '(spots, yearFrac, compounding)\n', (1586, 1616), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((1691, 1750), 'irLib.helpers.yieldCurve.yieldCurve.spot2Df', 'yieldCurve.spot2Df', (['dF', 'yearFrac', 'compounding'], {'reverse': '(True)'}), '(dF, yearFrac, compounding, reverse=True)\n', (1709, 1750), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((1859, 1894), 'irLib.helpers.yieldCurve.yieldCurve.dF2Forward', 
'yieldCurve.dF2Forward', (['dF', 'yearFrac'], {}), '(dF, yearFrac)\n', (1880, 1894), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((2012, 2072), 'irLib.helpers.yieldCurve.yieldCurve.forward2Spot', 'yieldCurve.forward2Spot', (['forwardRates', 'yearFrac', 'compounding'], {}), '(forwardRates, yearFrac, compounding)\n', (2035, 2072), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((2190, 2222), 'irLib.helpers.yieldCurve.yieldCurve.par2Df', 'yieldCurve.par2Df', (['par', 'yearFrac'], {}), '(par, yearFrac)\n', (2207, 2222), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n'), ((2284, 2329), 'irLib.helpers.yieldCurve.yieldCurve.par2Df', 'yieldCurve.par2Df', (['dF', 'yearFrac'], {'reverse': '(True)'}), '(dF, yearFrac, reverse=True)\n', (2301, 2329), False, 'from irLib.helpers.yieldCurve import yieldCurve, discountCurve, forwardCurve\n')]
|
__all__ = [
"Dataset",
"forgiving_true",
"load_config",
"log",
"make_tdtax_taxonomy",
"plot_gaia_density",
"plot_gaia_hr",
"plot_light_curve_data",
"plot_periods",
]
from astropy.io import fits
import datetime
import json
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pathlib
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tqdm.auto import tqdm
from typing import Mapping, Optional, Union
import yaml
def load_config(config_path: Union[str, pathlib.Path]):
"""
Load config and secrets
"""
with open(config_path) as config_yaml:
config = yaml.load(config_yaml, Loader=yaml.FullLoader)
return config
def time_stamp():
"""
:return: UTC time as a formatted string
"""
return datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S")
def log(message: str):
print(f"{time_stamp()}: {message}")
def forgiving_true(expression):
return True if expression in ("t", "True", "true", "1", 1, True) else False
def make_tdtax_taxonomy(taxonomy: Mapping):
"""Recursively convert taxonomy definition from config["taxonomy"]
into tdtax-parsable dictionary
:param taxonomy: config["taxonomy"] section
:return:
"""
tdtax_taxonomy = dict()
if taxonomy["class"] not in ("tds", "phenomenological", "ontological"):
tdtax_taxonomy["name"] = f"{taxonomy['class']}: {taxonomy['name']}"
else:
tdtax_taxonomy["name"] = taxonomy["name"]
if "subclasses" in taxonomy:
tdtax_taxonomy["children"] = []
for cls in taxonomy["subclasses"]:
tdtax_taxonomy["children"].append(make_tdtax_taxonomy(cls))
return tdtax_taxonomy
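# Illustrative (hypothetical) conversion performed by make_tdtax_taxonomy:
#   {"class": "vnv", "name": "variable", "subclasses": [...]}
#   -> {"name": "vnv: variable", "children": [...]}
# while the top-level classes ("tds", "phenomenological", "ontological")
# keep their plain "name".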
def plot_light_curve_data(
light_curve_data: pd.DataFrame,
period: Optional[float] = None,
title: Optional[str] = None,
save: Optional[str] = None,
):
"""Plot and save to file light curve data
:param light_curve_data:
:param period: float [days] if set, a phase-folded light curve will be displayed
:param title: plot title
:param save: path to save the plot
:return:
"""
plt.close("all")
# Official start of ZTF MSIP survey, March 17, 2018
jd_start = 2458194.5
colors = {
1: "#28a745",
2: "#dc3545",
3: "#00415a",
"default": "#f3dc11",
}
mask_good_data = light_curve_data["catflags"] == 0
df = light_curve_data.loc[mask_good_data]
if period is not None:
fig = plt.figure(figsize=(16, 9), dpi=200)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
else:
fig = plt.figure(figsize=(16, 5), dpi=200)
ax1 = fig.add_subplot(111)
if title is not None:
fig.suptitle(title, fontsize=24)
# plot different ZTF bands/filters
for band in df["filter"].unique():
mask_filter = df["filter"] == band
ax1.errorbar(
df.loc[mask_filter, "hjd"] - jd_start,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
if period is not None:
for n in [0, -1]:
ax2.errorbar(
(df.loc[mask_filter, "hjd"] - jd_start) / period % 1 + n,
df.loc[mask_filter, "mag"],
df.loc[mask_filter, "magerr"],
marker=".",
color=colors[band],
lw=0,
)
# invert y axes since we are displaying magnitudes
ax1.invert_yaxis()
if period is not None:
ax2.invert_yaxis()
ax1.set_xlabel("Time")
ax1.grid(lw=0.3)
if period is not None:
ax2.set_xlabel(f"phase [period={period:4.4g} days]")
ax2.set_xlim(-1, 1)
ax2.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_periods(
features: pd.DataFrame,
limits: Optional[list] = None,
loglimits: Optional[bool] = False,
number_of_bins: Optional[int] = 20,
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot a histogram of periods for the sample"""
    # use LaTeX rendering for the axis labels
plt.rc("text", usetex=True)
# make figure
fig, ax = plt.subplots(figsize=(6, 6))
if title is not None:
fig.suptitle(title, fontsize=24)
if limits is not None:
if loglimits:
edges = np.logspace(
np.log10(limits[0]), np.log10(limits[1]), number_of_bins
)
else:
edges = np.linspace(limits[0], limits[1], number_of_bins)
else:
if loglimits:
edges = np.linspace(
np.log10(0.9 * np.min(features["period"])),
np.log10(1.1 * np.max(features["period"])),
number_of_bins,
)
else:
edges = np.linspace(
0.9 * np.min(features["period"]),
1.1 * np.max(features["period"]),
number_of_bins,
)
hist, bin_edges = np.histogram(features["period"], bins=edges)
hist = hist / np.sum(hist)
bins = (bin_edges[1:] + bin_edges[:-1]) / 2.0
ax.plot(bins, hist, linestyle="-", drawstyle="steps")
ax.set_xlabel("Period [day]")
ax.set_ylabel("Probability Density Function")
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if loglimits:
ax.set_xscale("log")
ax.set_xlim([0.9 * bins[0], 1.1 * bins[-1]])
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_hr(
gaia_data: pd.DataFrame,
path_gaia_hr_histogram: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the Gaia HR diagram with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
# plot the H-R diagram for 1 M stars within 200 pc from the Sun
plt.rc("text", usetex=True)
# load background histogram
histogram = np.loadtxt(path_gaia_hr_histogram)
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
x_edges = np.arange(-0.681896, 5.04454978, 0.02848978)
y_edges = np.arange(-2.90934, 16.5665952, 0.0968952)
ax.pcolormesh(x_edges, y_edges, histogram.T, antialiased=False)
ax.set_xlim(x_edges[0], x_edges[-1])
ax.set_ylim(y_edges[0], y_edges[-1])
ax.invert_yaxis()
ax.set_xlabel(r"$G_{BP} - G_{RP}$")
ax.set_ylabel(r"$M_G$")
# plot sample data
ax.errorbar(
gaia_data["BP-RP"],
gaia_data["M"],
gaia_data["M"] - gaia_data["Ml"],
marker=".",
color="#e68a00",
alpha=0.75,
ls="",
lw=0.5,
)
# display grid behind all other elements on the plot
ax.set_axisbelow(True)
ax.grid(lw=0.3)
if save is not None:
fig.tight_layout()
plt.savefig(save)
def plot_gaia_density(
positions: pd.DataFrame,
path_gaia_density: Union[str, pathlib.Path],
title: Optional[str] = None,
save: Optional[Union[str, pathlib.Path]] = None,
):
"""Plot the RA/DEC Gaia density plot with a sample of objects over-plotted
source: https://vlas.dev/post/gaia-dr2-hrd/
"""
    # use LaTeX rendering for the axis labels
plt.rc("text", usetex=True)
# load the data
hdulist = fits.open(path_gaia_density)
hist = hdulist[1].data["srcdens"][np.argsort(hdulist[1].data["hpx8"])]
# make figure
fig, ax = plt.subplots(figsize=(6, 6), dpi=200)
if title is not None:
fig.suptitle(title, fontsize=24)
# background setup
coordsys = ["C", "C"]
nest = True
# colormap
cm = plt.cm.get_cmap("viridis") # colorscale
cm.set_under("w")
cm.set_bad("w")
# plot the data in healpy
norm = "log"
hp.mollview(
hist,
norm=norm,
unit="Stars per sq. arcmin.",
cbar=False,
nest=nest,
title="",
coord=coordsys,
notext=True,
cmap=cm,
flip="astro",
nlocs=4,
min=0.1,
max=300,
)
ax = plt.gca()
image = ax.get_images()[0]
cbar = fig.colorbar(
image,
ax=ax,
ticks=[0.1, 1, 10, 100],
fraction=0.15,
pad=0.05,
location="bottom",
)
cbar.set_label("Stars per sq. arcmin.", size=12)
cbar.ax.tick_params(labelsize=12)
ax.tick_params(axis="both", which="major", labelsize=24)
# borders
lw = 3
pi = np.pi
dtor = pi / 180.0
theta = np.arange(0, 181) * dtor
hp.projplot(theta, theta * 0 - pi, "-k", lw=lw, direct=True)
hp.projplot(theta, theta * 0 + 0.9999 * pi, "-k", lw=lw, direct=True)
phi = np.arange(-180, 180) * dtor
hp.projplot(phi * 0 + 1.0e-10, phi, "-k", lw=lw, direct=True)
hp.projplot(phi * 0 + pi - 1.0e-10, phi, "-k", lw=lw, direct=True)
# ZTF
theta = np.arange(0.0, 360, 0.036)
phi = -30.0 * np.ones_like(theta)
hp.projplot(theta, phi, "k--", coord=["C"], lonlat=True, lw=2)
hp.projtext(170.0, -24.0, r"ZTF Limit", lonlat=True)
theta = np.arange(0.0, 360, 0.036)
# galaxy
for gallat in [15, 0, -15]:
phi = gallat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["G"], lonlat=True, lw=2)
# ecliptic
for ecllat in [0, -30, 30]:
phi = ecllat * np.ones_like(theta)
hp.projplot(theta, phi, "w-", coord=["E"], lonlat=True, lw=2, ls=":")
# graticule
hp.graticule(ls="-", alpha=0.1, lw=0.5)
# labels
for lat in [60, 30, 0, -30, -60]:
hp.projtext(360.0, lat, str(lat), lonlat=True)
for lon in [0, 60, 120, 240, 300]:
hp.projtext(lon, 0.0, str(lon), lonlat=True)
# NWES
plt.text(0.0, 0.5, r"E", ha="right", transform=ax.transAxes, weight="bold")
plt.text(1.0, 0.5, r"W", ha="left", transform=ax.transAxes, weight="bold")
plt.text(
0.5,
0.992,
r"N",
va="bottom",
ha="center",
transform=ax.transAxes,
weight="bold",
)
plt.text(
0.5, 0.0, r"S", va="top", ha="center", transform=ax.transAxes, weight="bold"
)
color = "k"
lw = 10
alpha = 0.75
for pos in positions:
hp.projplot(
pos[0],
pos[1],
color=color,
markersize=5,
marker="o",
coord=coordsys,
lonlat=True,
lw=lw,
alpha=alpha,
zorder=10,
)
if save is not None:
fig.tight_layout()
plt.savefig(save)
""" Datasets """
class Dataset(object):
def __init__(
self,
tag: str,
path_dataset: str,
features: tuple,
verbose: bool = False,
**kwargs,
):
"""Load csv file with the dataset containing both data and labels
As of 20210317, it is produced by labels*.ipynb - this will likely change in a future PR
:param tag:
:param path_dataset:
:param features:
:param verbose:
"""
self.verbose = verbose
self.tag = tag
self.features = features
self.target = None
if self.verbose:
log(f"Loading {path_dataset}...")
nrows = kwargs.get("nrows", None)
self.df_ds = pd.read_csv(path_dataset, nrows=nrows)
if self.verbose:
log(self.df_ds[list(features)].describe())
self.df_ds = self.df_ds.replace([np.inf, -np.inf, np.nan], 0.0)
dmdt = []
if self.verbose:
print("Moving dmdt's to a dedicated numpy array...")
iterator = tqdm(self.df_ds.itertuples(), total=len(self.df_ds))
else:
iterator = self.df_ds.itertuples()
for i in iterator:
data = np.array(json.loads(self.df_ds["dmdt"][i.Index]))
if len(data.shape) == 0:
dmdt.append(np.zeros((26, 26)))
else:
dmdt.append(data)
self.dmdt = np.array(dmdt)
self.dmdt = np.expand_dims(self.dmdt, axis=-1)
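        # self.dmdt now has shape (n_samples, 26, 26, 1): the channel axis was
        # added above and malformed entries were replaced with zero images
        # (presumably for use as image-like input to the neural network).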
# drop in df_ds:
self.df_ds.drop(columns="dmdt")
@staticmethod
def threshold(a, t: float = 0.5):
b = np.zeros_like(a)
b[np.array(a) > t] = 1
return b
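        # e.g. threshold([0.25, 0.5, 0.75, 1.0], t=0.5) -> array([0., 0., 1., 1.])
        # (strictly greater than t maps to 1, everything else stays 0)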
def make(
self,
target_label: str = "variable",
threshold: float = 0.5,
balance: Optional[float] = None,
weight_per_class: bool = True,
scale_features: str = "min_max",
test_size: float = 0.1,
val_size: float = 0.1,
random_state: int = 42,
feature_stats: Optional[dict] = None,
batch_size: int = 256,
shuffle_buffer_size: int = 256,
epochs: int = 300,
**kwargs,
):
"""Make datasets for target_label
:param target_label: corresponds to training.classes.<label> in config
:param threshold: our labels are floats [0, 0.25, 0.5, 0.75, 1]
        :param balance: balance ratio for the prevalent class; if None, use all available data
:param weight_per_class:
:param scale_features: min_max | median_std
:param test_size:
:param val_size:
:param random_state: set this for reproducibility
:param feature_stats: feature_stats to use to standardize features.
if None, stats are computed from the data, taking balance into account
:param batch_size
:param shuffle_buffer_size
:param epochs
:return:
"""
# Note: Dataset.from_tensor_slices method requires the target variable to be of the int type.
# TODO: see what to do about it when trying label smoothing in the future.
target = np.asarray(
list(map(int, self.threshold(self.df_ds[target_label].values, t=threshold)))
)
self.target = np.expand_dims(target, axis=1)
neg, pos = np.bincount(target.flatten())
total = neg + pos
if self.verbose:
log(
f"Examples:\n Total: {total}\n Positive: {pos} ({100 * pos / total:.2f}% of total)\n"
)
w_pos = np.rint(self.df_ds[target_label].values) == 1
index_pos = self.df_ds.loc[w_pos].index
if target_label == "variable":
# 'variable' is a special case: there is an explicit 'non-variable' label:
w_neg = (
np.asarray(
list(
map(
int,
self.threshold(
self.df_ds["non-variable"].values, t=threshold
),
)
)
)
== 1
)
else:
w_neg = ~w_pos
index_neg = self.df_ds.loc[w_neg].index
# balance positive and negative examples?
index_dropped = None
if balance:
underrepresented = min(np.sum(w_pos), np.sum(w_neg))
overrepresented = max(np.sum(w_pos), np.sum(w_neg))
sample_size = int(min(overrepresented, underrepresented * balance))
if neg > pos:
index_neg = (
self.df_ds.loc[w_neg].sample(n=sample_size, random_state=1).index
)
index_dropped = self.df_ds.loc[
list(set(self.df_ds.loc[w_neg].index) - set(index_neg))
].index
else:
index_pos = (
self.df_ds.loc[w_pos].sample(n=sample_size, random_state=1).index
)
index_dropped = self.df_ds.loc[
list(set(self.df_ds.loc[w_pos].index) - set(index_pos))
].index
if self.verbose:
log(
"Number of examples to use in training:"
f"\n Positive: {len(index_pos)}\n Negative: {len(index_neg)}\n"
)
ds_indexes = index_pos.to_list() + index_neg.to_list()
# Train/validation/test split (we will use an 81% / 9% / 10% data split by default):
train_indexes, test_indexes = train_test_split(
ds_indexes, shuffle=True, test_size=test_size, random_state=random_state
)
train_indexes, val_indexes = train_test_split(
train_indexes, shuffle=True, test_size=val_size, random_state=random_state
)
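        # With the defaults test_size=0.1 and val_size=0.1 the two splits above
        # give 0.9 * 0.9 = 81% train, 9% validation and 10% test, matching the
        # comment preceding them.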
# Normalize features (dmdt's are already L2-normalized) (?using only the training samples?).
# Obviously, the same norms will have to be applied at the testing and serving stages.
# load/compute feature norms:
if feature_stats is None:
feature_stats = {
feature: {
"min": np.min(self.df_ds.loc[ds_indexes, feature]),
"max": np.max(self.df_ds.loc[ds_indexes, feature]),
"median": np.median(self.df_ds.loc[ds_indexes, feature]),
"mean": np.mean(self.df_ds.loc[ds_indexes, feature]),
"std": np.std(self.df_ds.loc[ds_indexes, feature]),
}
for feature in self.features
}
if self.verbose:
print("Computed feature stats:\n", feature_stats)
# scale features
for feature in self.features:
stats = feature_stats.get(feature)
if (stats is not None) and (stats["std"] != 0):
if scale_features == "median_std":
self.df_ds[feature] = (
self.df_ds[feature] - stats["median"]
) / stats["std"]
elif scale_features == "min_max":
self.df_ds[feature] = (self.df_ds[feature] - stats["min"]) / (
stats["max"] - stats["min"]
)
# norms = {
# feature: np.linalg.norm(self.df_ds.loc[ds_indexes, feature])
# for feature in self.features
# }
# for feature, norm in norms.items():
# if np.isnan(norm) or norm == 0.0:
# norms[feature] = 1.0
# if self.verbose:
# print('Computed feature norms:\n', norms)
#
# for feature, norm in norms.items():
# self.df_ds[feature] /= norm
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[train_indexes, self.features].values,
"dmdt": self.dmdt[train_indexes],
},
target[train_indexes],
)
)
val_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[val_indexes, self.features].values,
"dmdt": self.dmdt[val_indexes],
},
target[val_indexes],
)
)
test_dataset = tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[test_indexes, self.features].values,
"dmdt": self.dmdt[test_indexes],
},
target[test_indexes],
)
)
dropped_samples = (
tf.data.Dataset.from_tensor_slices(
(
{
"features": self.df_ds.loc[index_dropped, self.features].values,
"dmdt": self.dmdt[index_dropped],
},
target[index_dropped],
)
)
if balance
else None
)
# Shuffle and batch the datasets:
train_dataset = (
train_dataset.shuffle(shuffle_buffer_size).batch(batch_size).repeat(epochs)
)
val_dataset = val_dataset.batch(batch_size).repeat(epochs)
test_dataset = test_dataset.batch(batch_size)
dropped_samples = dropped_samples.batch(batch_size) if balance else None
datasets = {
"train": train_dataset,
"val": val_dataset,
"test": test_dataset,
"dropped_samples": dropped_samples,
}
indexes = {
"train": np.array(train_indexes),
"val": np.array(val_indexes),
"test": np.array(test_indexes),
"dropped_samples": np.array(index_dropped.to_list())
if index_dropped is not None
else None,
}
# How many steps per epoch?
steps_per_epoch_train = len(train_indexes) // batch_size - 1
steps_per_epoch_val = len(val_indexes) // batch_size - 1
steps_per_epoch_test = len(test_indexes) // batch_size - 1
steps_per_epoch = {
"train": steps_per_epoch_train,
"val": steps_per_epoch_val,
"test": steps_per_epoch_test,
}
if self.verbose:
print(f"Steps per epoch: {steps_per_epoch}")
# Weight training data depending on the number of samples?
# Very useful for imbalanced classification, especially in the cases with a small number of examples.
if weight_per_class:
# weight data class depending on number of examples?
# num_training_examples_per_class = np.array([len(target) - np.sum(target), np.sum(target)])
num_training_examples_per_class = np.array([len(index_neg), len(index_pos)])
assert (
0 not in num_training_examples_per_class
), "found class without any examples!"
# fewer examples -- larger weight
weights = (1 / num_training_examples_per_class) / np.linalg.norm(
(1 / num_training_examples_per_class)
)
normalized_weight = weights / np.max(weights)
class_weight = {i: w for i, w in enumerate(normalized_weight)}
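            # Worked example (assumption: 900 negative / 100 positive samples):
            # 1/n = [1/900, 1/100]; dividing by its norm and then by the max
            # yields class_weight ~ {0: 0.11, 1: 1.0}, i.e. the rarer class
            # gets the larger weight.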
else:
# working with binary classifiers only
class_weight = {i: 1 for i in range(2)}
return datasets, indexes, steps_per_epoch, class_weight
|
[
"numpy.log10",
"pandas.read_csv",
"healpy.mollview",
"yaml.load",
"numpy.argsort",
"numpy.array",
"astropy.io.fits.open",
"numpy.linalg.norm",
"numpy.arange",
"numpy.mean",
"numpy.histogram",
"healpy.projplot",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.rint",
"numpy.min",
"healpy.graticule",
"healpy.projtext",
"json.loads",
"matplotlib.pyplot.savefig",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.gca",
"numpy.std",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.text",
"numpy.ones_like",
"numpy.median",
"datetime.datetime.utcnow",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.expand_dims",
"numpy.loadtxt",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((2182, 2198), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2191, 2198), True, 'import matplotlib.pyplot as plt\n'), ((4337, 4364), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (4343, 4364), True, 'import matplotlib.pyplot as plt\n'), ((4398, 4426), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (4410, 4426), True, 'import matplotlib.pyplot as plt\n'), ((5194, 5238), 'numpy.histogram', 'np.histogram', (["features['period']"], {'bins': 'edges'}), "(features['period'], bins=edges)\n", (5206, 5238), True, 'import numpy as np\n'), ((6135, 6162), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (6141, 6162), True, 'import matplotlib.pyplot as plt\n'), ((6212, 6246), 'numpy.loadtxt', 'np.loadtxt', (['path_gaia_hr_histogram'], {}), '(path_gaia_hr_histogram)\n', (6222, 6246), True, 'import numpy as np\n'), ((6280, 6317), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)', 'dpi': '(200)'}), '(figsize=(6, 6), dpi=200)\n', (6292, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6444), 'numpy.arange', 'np.arange', (['(-0.681896)', '(5.04454978)', '(0.02848978)'], {}), '(-0.681896, 5.04454978, 0.02848978)\n', (6409, 6444), True, 'import numpy as np\n'), ((6459, 6501), 'numpy.arange', 'np.arange', (['(-2.90934)', '(16.5665952)', '(0.0968952)'], {}), '(-2.90934, 16.5665952, 0.0968952)\n', (6468, 6501), True, 'import numpy as np\n'), ((7566, 7593), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (7572, 7593), True, 'import matplotlib.pyplot as plt\n'), ((7629, 7657), 'astropy.io.fits.open', 'fits.open', (['path_gaia_density'], {}), '(path_gaia_density)\n', (7638, 7657), False, 'from astropy.io import fits\n'), ((7766, 7803), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 6)', 'dpi': '(200)'}), '(figsize=(6, 6), dpi=200)\n', (7778, 7803), True, 'import matplotlib.pyplot as plt\n'), ((7962, 7988), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (7977, 7988), True, 'import matplotlib.pyplot as plt\n'), ((8097, 8276), 'healpy.mollview', 'hp.mollview', (['hist'], {'norm': 'norm', 'unit': '"""Stars per sq. arcmin."""', 'cbar': '(False)', 'nest': 'nest', 'title': '""""""', 'coord': 'coordsys', 'notext': '(True)', 'cmap': 'cm', 'flip': '"""astro"""', 'nlocs': '(4)', 'min': '(0.1)', 'max': '(300)'}), "(hist, norm=norm, unit='Stars per sq. 
arcmin.', cbar=False, nest\n =nest, title='', coord=coordsys, notext=True, cmap=cm, flip='astro',\n nlocs=4, min=0.1, max=300)\n", (8108, 8276), True, 'import healpy as hp\n'), ((8388, 8397), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8395, 8397), True, 'import matplotlib.pyplot as plt\n'), ((8848, 8908), 'healpy.projplot', 'hp.projplot', (['theta', '(theta * 0 - pi)', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(theta, theta * 0 - pi, '-k', lw=lw, direct=True)\n", (8859, 8908), True, 'import healpy as hp\n'), ((8913, 8982), 'healpy.projplot', 'hp.projplot', (['theta', '(theta * 0 + 0.9999 * pi)', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(theta, theta * 0 + 0.9999 * pi, '-k', lw=lw, direct=True)\n", (8924, 8982), True, 'import healpy as hp\n'), ((9025, 9084), 'healpy.projplot', 'hp.projplot', (['(phi * 0 + 1e-10)', 'phi', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(phi * 0 + 1e-10, phi, '-k', lw=lw, direct=True)\n", (9036, 9084), True, 'import healpy as hp\n'), ((9091, 9155), 'healpy.projplot', 'hp.projplot', (['(phi * 0 + pi - 1e-10)', 'phi', '"""-k"""'], {'lw': 'lw', 'direct': '(True)'}), "(phi * 0 + pi - 1e-10, phi, '-k', lw=lw, direct=True)\n", (9102, 9155), True, 'import healpy as hp\n'), ((9181, 9207), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(0.036)'], {}), '(0.0, 360, 0.036)\n', (9190, 9207), True, 'import numpy as np\n'), ((9250, 9312), 'healpy.projplot', 'hp.projplot', (['theta', 'phi', '"""k--"""'], {'coord': "['C']", 'lonlat': '(True)', 'lw': '(2)'}), "(theta, phi, 'k--', coord=['C'], lonlat=True, lw=2)\n", (9261, 9312), True, 'import healpy as hp\n'), ((9317, 9368), 'healpy.projtext', 'hp.projtext', (['(170.0)', '(-24.0)', '"""ZTF Limit"""'], {'lonlat': '(True)'}), "(170.0, -24.0, 'ZTF Limit', lonlat=True)\n", (9328, 9368), True, 'import healpy as hp\n'), ((9383, 9409), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(0.036)'], {}), '(0.0, 360, 0.036)\n', (9392, 9409), True, 'import numpy as np\n'), ((9759, 9798), 'healpy.graticule', 'hp.graticule', ([], {'ls': '"""-"""', 'alpha': '(0.1)', 'lw': '(0.5)'}), "(ls='-', alpha=0.1, lw=0.5)\n", (9771, 9798), True, 'import healpy as hp\n'), ((10014, 10088), 'matplotlib.pyplot.text', 'plt.text', (['(0.0)', '(0.5)', '"""E"""'], {'ha': '"""right"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(0.0, 0.5, 'E', ha='right', transform=ax.transAxes, weight='bold')\n", (10022, 10088), True, 'import matplotlib.pyplot as plt\n'), ((10094, 10167), 'matplotlib.pyplot.text', 'plt.text', (['(1.0)', '(0.5)', '"""W"""'], {'ha': '"""left"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(1.0, 0.5, 'W', ha='left', transform=ax.transAxes, weight='bold')\n", (10102, 10167), True, 'import matplotlib.pyplot as plt\n'), ((10173, 10267), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.992)', '"""N"""'], {'va': '"""bottom"""', 'ha': '"""center"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(0.5, 0.992, 'N', va='bottom', ha='center', transform=ax.transAxes,\n weight='bold')\n", (10181, 10267), True, 'import matplotlib.pyplot as plt\n'), ((10332, 10421), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(0.0)', '"""S"""'], {'va': '"""top"""', 'ha': '"""center"""', 'transform': 'ax.transAxes', 'weight': '"""bold"""'}), "(0.5, 0.0, 'S', va='top', ha='center', transform=ax.transAxes,\n weight='bold')\n", (10340, 10421), True, 'import matplotlib.pyplot as plt\n'), ((687, 733), 'yaml.load', 'yaml.load', (['config_yaml'], {'Loader': 'yaml.FullLoader'}), '(config_yaml, 
Loader=yaml.FullLoader)\n', (696, 733), False, 'import yaml\n'), ((2543, 2579), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)', 'dpi': '(200)'}), '(figsize=(16, 9), dpi=200)\n', (2553, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 5)', 'dpi': '(200)'}), '(figsize=(16, 5), dpi=200)\n', (2684, 2710), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3960), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (3954, 3960), True, 'import matplotlib.pyplot as plt\n'), ((5257, 5269), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (5263, 5269), True, 'import numpy as np\n'), ((5725, 5742), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (5736, 5742), True, 'import matplotlib.pyplot as plt\n'), ((7147, 7164), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (7158, 7164), True, 'import matplotlib.pyplot as plt\n'), ((7696, 7731), 'numpy.argsort', 'np.argsort', (["hdulist[1].data['hpx8']"], {}), "(hdulist[1].data['hpx8'])\n", (7706, 7731), True, 'import numpy as np\n'), ((8819, 8836), 'numpy.arange', 'np.arange', (['(0)', '(181)'], {}), '(0, 181)\n', (8828, 8836), True, 'import numpy as np\n'), ((8993, 9013), 'numpy.arange', 'np.arange', (['(-180)', '(180)'], {}), '(-180, 180)\n', (9002, 9013), True, 'import numpy as np\n'), ((9226, 9245), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (9238, 9245), True, 'import numpy as np\n'), ((9507, 9568), 'healpy.projplot', 'hp.projplot', (['theta', 'phi', '"""w-"""'], {'coord': "['G']", 'lonlat': '(True)', 'lw': '(2)'}), "(theta, phi, 'w-', coord=['G'], lonlat=True, lw=2)\n", (9518, 9568), True, 'import healpy as hp\n'), ((9668, 9737), 'healpy.projplot', 'hp.projplot', (['theta', 'phi', '"""w-"""'], {'coord': "['E']", 'lonlat': '(True)', 'lw': '(2)', 'ls': '""":"""'}), "(theta, phi, 'w-', coord=['E'], lonlat=True, lw=2, ls=':')\n", (9679, 9737), True, 'import healpy as hp\n'), ((10514, 10645), 'healpy.projplot', 'hp.projplot', (['pos[0]', 'pos[1]'], {'color': 'color', 'markersize': '(5)', 'marker': '"""o"""', 'coord': 'coordsys', 'lonlat': '(True)', 'lw': 'lw', 'alpha': 'alpha', 'zorder': '(10)'}), "(pos[0], pos[1], color=color, markersize=5, marker='o', coord=\n coordsys, lonlat=True, lw=lw, alpha=alpha, zorder=10)\n", (10525, 10645), True, 'import healpy as hp\n'), ((10833, 10850), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save'], {}), '(save)\n', (10844, 10850), True, 'import matplotlib.pyplot as plt\n'), ((11586, 11624), 'pandas.read_csv', 'pd.read_csv', (['path_dataset'], {'nrows': 'nrows'}), '(path_dataset, nrows=nrows)\n', (11597, 11624), True, 'import pandas as pd\n'), ((12278, 12292), 'numpy.array', 'np.array', (['dmdt'], {}), '(dmdt)\n', (12286, 12292), True, 'import numpy as np\n'), ((12313, 12347), 'numpy.expand_dims', 'np.expand_dims', (['self.dmdt'], {'axis': '(-1)'}), '(self.dmdt, axis=-1)\n', (12327, 12347), True, 'import numpy as np\n'), ((12483, 12499), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (12496, 12499), True, 'import numpy as np\n'), ((14145, 14175), 'numpy.expand_dims', 'np.expand_dims', (['target'], {'axis': '(1)'}), '(target, axis=1)\n', (14159, 14175), True, 'import numpy as np\n'), ((16440, 16534), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ds_indexes'], {'shuffle': '(True)', 'test_size': 'test_size', 'random_state': 'random_state'}), '(ds_indexes, shuffle=True, test_size=test_size,\n 
random_state=random_state)\n', (16456, 16534), False, 'from sklearn.model_selection import train_test_split\n'), ((16590, 16686), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_indexes'], {'shuffle': '(True)', 'test_size': 'val_size', 'random_state': 'random_state'}), '(train_indexes, shuffle=True, test_size=val_size,\n random_state=random_state)\n', (16606, 16686), False, 'from sklearn.model_selection import train_test_split\n'), ((18633, 18802), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[train_indexes, self.features].values, 'dmdt':\n self.dmdt[train_indexes]}, target[train_indexes])"], {}), "(({'features': self.df_ds.loc[\n train_indexes, self.features].values, 'dmdt': self.dmdt[train_indexes]},\n target[train_indexes]))\n", (18667, 18802), True, 'import tensorflow as tf\n'), ((18944, 19107), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[val_indexes, self.features].values, 'dmdt':\n self.dmdt[val_indexes]}, target[val_indexes])"], {}), "(({'features': self.df_ds.loc[val_indexes,\n self.features].values, 'dmdt': self.dmdt[val_indexes]}, target[\n val_indexes]))\n", (18978, 19107), True, 'import tensorflow as tf\n'), ((19250, 19416), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[test_indexes, self.features].values, 'dmdt':\n self.dmdt[test_indexes]}, target[test_indexes])"], {}), "(({'features': self.df_ds.loc[\n test_indexes, self.features].values, 'dmdt': self.dmdt[test_indexes]},\n target[test_indexes]))\n", (19284, 19416), True, 'import tensorflow as tf\n'), ((845, 871), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (869, 871), False, 'import datetime\n'), ((4698, 4747), 'numpy.linspace', 'np.linspace', (['limits[0]', 'limits[1]', 'number_of_bins'], {}), '(limits[0], limits[1], number_of_bins)\n', (4709, 4747), True, 'import numpy as np\n'), ((9479, 9498), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (9491, 9498), True, 'import numpy as np\n'), ((9640, 9659), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (9652, 9659), True, 'import numpy as np\n'), ((14429, 14469), 'numpy.rint', 'np.rint', (['self.df_ds[target_label].values'], {}), '(self.df_ds[target_label].values)\n', (14436, 14469), True, 'import numpy as np\n'), ((19576, 19745), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["({'features': self.df_ds.loc[index_dropped, self.features].values, 'dmdt':\n self.dmdt[index_dropped]}, target[index_dropped])"], {}), "(({'features': self.df_ds.loc[\n index_dropped, self.features].values, 'dmdt': self.dmdt[index_dropped]},\n target[index_dropped]))\n", (19610, 19745), True, 'import tensorflow as tf\n'), ((20547, 20570), 'numpy.array', 'np.array', (['train_indexes'], {}), '(train_indexes)\n', (20555, 20570), True, 'import numpy as np\n'), ((20591, 20612), 'numpy.array', 'np.array', (['val_indexes'], {}), '(val_indexes)\n', (20599, 20612), True, 'import numpy as np\n'), ((20634, 20656), 'numpy.array', 'np.array', (['test_indexes'], {}), '(test_indexes)\n', (20642, 20656), True, 'import numpy as np\n'), ((4593, 4612), 'numpy.log10', 'np.log10', (['limits[0]'], {}), '(limits[0])\n', (4601, 4612), True, 'import numpy as np\n'), ((4614, 4633), 'numpy.log10', 'np.log10', (['limits[1]'], {}), '(limits[1])\n', (4622, 4633), True, 'import numpy as np\n'), 
((12079, 12118), 'json.loads', 'json.loads', (["self.df_ds['dmdt'][i.Index]"], {}), "(self.df_ds['dmdt'][i.Index])\n", (12089, 12118), False, 'import json\n'), ((12510, 12521), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (12518, 12521), True, 'import numpy as np\n'), ((15266, 15279), 'numpy.sum', 'np.sum', (['w_pos'], {}), '(w_pos)\n', (15272, 15279), True, 'import numpy as np\n'), ((15281, 15294), 'numpy.sum', 'np.sum', (['w_neg'], {}), '(w_neg)\n', (15287, 15294), True, 'import numpy as np\n'), ((15330, 15343), 'numpy.sum', 'np.sum', (['w_pos'], {}), '(w_pos)\n', (15336, 15343), True, 'import numpy as np\n'), ((15345, 15358), 'numpy.sum', 'np.sum', (['w_neg'], {}), '(w_neg)\n', (15351, 15358), True, 'import numpy as np\n'), ((21989, 22040), 'numpy.linalg.norm', 'np.linalg.norm', (['(1 / num_training_examples_per_class)'], {}), '(1 / num_training_examples_per_class)\n', (22003, 22040), True, 'import numpy as np\n'), ((22115, 22130), 'numpy.max', 'np.max', (['weights'], {}), '(weights)\n', (22121, 22130), True, 'import numpy as np\n'), ((5048, 5074), 'numpy.min', 'np.min', (["features['period']"], {}), "(features['period'])\n", (5054, 5074), True, 'import numpy as np\n'), ((5098, 5124), 'numpy.max', 'np.max', (["features['period']"], {}), "(features['period'])\n", (5104, 5124), True, 'import numpy as np\n'), ((12185, 12203), 'numpy.zeros', 'np.zeros', (['(26, 26)'], {}), '((26, 26))\n', (12193, 12203), True, 'import numpy as np\n'), ((17059, 17102), 'numpy.min', 'np.min', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17065, 17102), True, 'import numpy as np\n'), ((17131, 17174), 'numpy.max', 'np.max', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17137, 17174), True, 'import numpy as np\n'), ((17206, 17252), 'numpy.median', 'np.median', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17215, 17252), True, 'import numpy as np\n'), ((17282, 17326), 'numpy.mean', 'np.mean', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17289, 17326), True, 'import numpy as np\n'), ((17355, 17398), 'numpy.std', 'np.std', (['self.df_ds.loc[ds_indexes, feature]'], {}), '(self.df_ds.loc[ds_indexes, feature])\n', (17361, 17398), True, 'import numpy as np\n'), ((4844, 4870), 'numpy.min', 'np.min', (["features['period']"], {}), "(features['period'])\n", (4850, 4870), True, 'import numpy as np\n'), ((4904, 4930), 'numpy.max', 'np.max', (["features['period']"], {}), "(features['period'])\n", (4910, 4930), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from anti_instagram.AntiInstagram import AntiInstagram
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D, FSMState)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.timekeeper import TimeKeeper
import cv2
import rospy
import threading
import time
from line_detector.line_detector_plot import color_segment, drawLines
import numpy as np
class LineDetectorNode(object):
def __init__(self):
self.node_name = rospy.get_name()
# Thread lock
self.thread_lock = threading.Lock()
# Constructor of line detector
self.bridge = CvBridge()
self.active = True
self.stats = Stats()
        # Only be verbose every intermittent_interval (100) cycles
self.intermittent_interval = 100
self.intermittent_counter = 0
# color correction
self.ai = AntiInstagram()
# these will be added if it becomes verbose
self.pub_edge = None
self.pub_colorSegment = None
self.detector = None
self.verbose = None
self.updateParams(None)
# Publishers
self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
# Subscribers
self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
# FSM
self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
self.sub_fsm_mode = rospy.Subscriber("~fsm_mode", FSMState, self.cbMode, queue_size=1)
rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))
rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
def updateParams(self, _event):
old_verbose = self.verbose
self.verbose = rospy.get_param('~verbose', True)
# self.loginfo('verbose = %r' % self.verbose)
if self.verbose != old_verbose:
self.loginfo('Verbose is now %r' % self.verbose)
self.image_size = rospy.get_param('~img_size')
self.top_cutoff = rospy.get_param('~top_cutoff')
if self.detector is None:
c = rospy.get_param('~detector')
assert isinstance(c, list) and len(c) == 2, c
# if str(self.detector_config) != str(c):
self.loginfo('new detector config: %s' % str(c))
self.detector = instantiate(c[0], c[1])
# self.detector_config = c
if self.verbose and self.pub_edge is None:
self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)
#FSM
def cbSwitch(self, switch_msg):
self.active = switch_msg.data
#FSM
def cbMode(self, mode_msg):
self.fsm_state = mode_msg.state # String of current FSM state
def cbImage(self, image_msg):
self.stats.received()
if not self.active:
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
        # Returns right away
def cbTransform(self, transform_msg):
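        # The transform message packs six values: the first three are used as
        # per-channel shifts and the last three as per-channel scales for the
        # AntiInstagram color correction applied in processImage_.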
self.ai.shift = transform_msg.s[0:3]
self.ai.scale = transform_msg.s[3:6]
self.loginfo("AntiInstagram transform received")
def loginfo(self, s):
rospy.loginfo('[%s] %s' % (self.node_name, s))
def intermittent_log_now(self):
return self.intermittent_counter % self.intermittent_interval == 1
def intermittent_log(self, s):
if not self.intermittent_log_now():
return
self.loginfo('%3d:%s' % (self.intermittent_counter, s))
def processImage(self, image_msg):
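        # Non-blocking lock: frames that arrive while a previous frame is still
        # being processed are counted as skipped and dropped rather than queued.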
if not self.thread_lock.acquire(False):
self.stats.skipped()
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
self.stats.processed()
if self.intermittent_log_now():
self.intermittent_log(self.stats.info())
self.stats.reset()
tk = TimeKeeper(image_msg)
self.intermittent_counter += 1
# Decode from compressed image with OpenCV
try:
image_cv = image_cv_from_jpg(image_msg.data)
except ValueError as e:
self.loginfo('Could not decode image: %s' % e)
return
tk.completed('decoded')
# Resize and crop image
hei_original, wid_original = image_cv.shape[0:2]
if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
# image_cv = cv2.GaussianBlur(image_cv, (5,5), 2)
image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
interpolation=cv2.INTER_NEAREST)
image_cv = image_cv[self.top_cutoff:,:,:]
tk.completed('resized')
# apply color correction: AntiInstagram
image_cv_corr = self.ai.applyTransform(image_cv)
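        # convertScaleAbs converts the corrected image back to 8-bit,
        # saturating any out-of-range values produced by the shift/scale above.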
image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
tk.completed('corrected')
# Set the image to be detected
self.detector.setImage(image_cv_corr)
# Detect lines and normals
white = self.detector.detectLines('white')
yellow = self.detector.detectLines('yellow')
red = self.detector.detectLines('red')
tk.completed('detected')
# SegmentList constructor
segmentList = SegmentList()
segmentList.header.stamp = image_msg.header.stamp
# Convert to normalized pixel coordinates, and add segments to segmentList
arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
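        # Illustrative example only (the real values come from the ~img_size and
        # ~top_cutoff parameters): with a 160x120 working image and top_cutoff=40,
        # a detected endpoint at cropped pixel (x=80, y=20) normalizes to
        # ((80 + 0) / 160, (20 + 40) / 120) = (0.5, 0.5).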
if len(white.lines) > 0:
lines_normalized_white = ((white.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, white.normals, Segment.WHITE))
if len(yellow.lines) > 0:
lines_normalized_yellow = ((yellow.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, yellow.normals, Segment.YELLOW))
if len(red.lines) > 0:
lines_normalized_red = ((red.lines + arr_cutoff) * arr_ratio)
segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, red.normals, Segment.RED))
self.intermittent_log('# segments: white %3d yellow %3d red %3d' % (len(white.lines),
len(yellow.lines), len(red.lines)))
tk.completed('prepared')
# Publish segmentList
self.pub_lines.publish(segmentList)
tk.completed('--pub_lines--')
# VISUALIZATION only below
if self.verbose:
# Draw lines and normals
image_with_lines = np.copy(image_cv_corr)
drawLines(image_with_lines, white.lines, (0, 0, 0))
drawLines(image_with_lines, yellow.lines, (255, 0, 0))
drawLines(image_with_lines, red.lines, (0, 255, 0))
tk.completed('drawn')
# Publish the frame with lines
image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
tk.completed('pub_image')
# if self.verbose:
colorSegment = color_segment(white.area, red.area, yellow.area)
edge_msg_out = self.bridge.cv2_to_imgmsg(self.detector.edges, "mono8")
colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
self.pub_edge.publish(edge_msg_out)
self.pub_colorSegment.publish(colorSegment_msg_out)
tk.completed('pub_edge/pub_segment')
self.intermittent_log(tk.getall())
def onShutdown(self):
self.loginfo("Shutdown.")
def toSegmentMsg(self, lines, normals, color):
segmentMsgList = []
for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
segment = Segment()
segment.color = color
segment.pixels_normalized[0].x = x1
segment.pixels_normalized[0].y = y1
segment.pixels_normalized[1].x = x2
segment.pixels_normalized[1].y = y2
segment.normal.x = norm_x
segment.normal.y = norm_y
segmentMsgList.append(segment)
return segmentMsgList
class Stats():
def __init__(self):
self.nresets = 0
self.reset()
def reset(self):
self.nresets += 1
self.t0 = time.time()
self.nreceived = 0
self.nskipped = 0
self.nprocessed = 0
def received(self):
if self.nreceived == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node received first image.')
self.nreceived += 1
def skipped(self):
self.nskipped += 1
def processed(self):
if self.nprocessed == 0 and self.nresets == 1:
rospy.loginfo('line_detector_node processing first image.')
self.nprocessed += 1
def info(self):
delta = time.time() - self.t0
if self.nreceived:
skipped_perc = (100.0 * self.nskipped / self.nreceived)
else:
skipped_perc = 0
def fps(x):
return '%.1f fps' % (x / delta)
m = ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)' %
(delta, self.nreceived, fps(self.nreceived),
self.nprocessed, fps(self.nprocessed),
self.nskipped, fps(self.nskipped), skipped_perc))
return m
if __name__ == '__main__':
rospy.init_node('line_detector',anonymous=False)
line_detector_node = LineDetectorNode()
rospy.on_shutdown(line_detector_node.onShutdown)
rospy.spin()
|
[
"duckietown_utils.jpg.image_cv_from_jpg",
"cv2.convertScaleAbs",
"line_detector.line_detector_plot.drawLines",
"numpy.hstack",
"rospy.init_node",
"duckietown_msgs.msg.Segment",
"numpy.array",
"line_detector.timekeeper.TimeKeeper",
"duckietown_msgs.msg.SegmentList",
"anti_instagram.AntiInstagram.AntiInstagram",
"threading.Lock",
"rospy.Duration.from_sec",
"cv_bridge.CvBridge",
"rospy.spin",
"rospy.Subscriber",
"rospy.get_param",
"duckietown_utils.instantiate_utils.instantiate",
"rospy.get_name",
"cv2.resize",
"rospy.Publisher",
"time.time",
"rospy.loginfo",
"rospy.on_shutdown",
"numpy.copy",
"line_detector.line_detector_plot.color_segment",
"threading.Thread"
] |
[((10551, 10600), 'rospy.init_node', 'rospy.init_node', (['"""line_detector"""'], {'anonymous': '(False)'}), "('line_detector', anonymous=False)\n", (10566, 10600), False, 'import rospy\n'), ((10648, 10696), 'rospy.on_shutdown', 'rospy.on_shutdown', (['line_detector_node.onShutdown'], {}), '(line_detector_node.onShutdown)\n', (10665, 10696), False, 'import rospy\n'), ((10701, 10713), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (10711, 10713), False, 'import rospy\n'), ((752, 768), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (766, 768), False, 'import rospy\n'), ((820, 836), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (834, 836), False, 'import threading\n'), ((907, 917), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (915, 917), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1144, 1159), 'anti_instagram.AntiInstagram.AntiInstagram', 'AntiInstagram', ([], {}), '()\n', (1157, 1159), False, 'from anti_instagram.AntiInstagram import AntiInstagram\n'), ((1428, 1487), 'rospy.Publisher', 'rospy.Publisher', (['"""~segment_list"""', 'SegmentList'], {'queue_size': '(1)'}), "('~segment_list', SegmentList, queue_size=1)\n", (1443, 1487), False, 'import rospy\n'), ((1513, 1570), 'rospy.Publisher', 'rospy.Publisher', (['"""~image_with_lines"""', 'Image'], {'queue_size': '(1)'}), "('~image_with_lines', Image, queue_size=1)\n", (1528, 1570), False, 'import rospy\n'), ((1626, 1697), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~image"""', 'CompressedImage', 'self.cbImage'], {'queue_size': '(1)'}), "('~image', CompressedImage, self.cbImage, queue_size=1)\n", (1642, 1697), False, 'import rospy\n'), ((1727, 1817), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~transform"""', 'AntiInstagramTransform', 'self.cbTransform'], {'queue_size': '(1)'}), "('~transform', AntiInstagramTransform, self.cbTransform,\n queue_size=1)\n", (1743, 1817), False, 'import rospy\n'), ((1854, 1923), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~switch"""', 'BoolStamped', 'self.cbSwitch'], {'queue_size': '(1)'}), "('~switch', BoolStamped, self.cbSwitch, queue_size=1)\n", (1870, 1923), False, 'import rospy\n'), ((1952, 2018), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~fsm_mode"""', 'FSMState', 'self.cbMode'], {'queue_size': '(1)'}), "('~fsm_mode', FSMState, self.cbMode, queue_size=1)\n", (1968, 2018), False, 'import rospy\n'), ((2028, 2115), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] Initialized (verbose = %s).' % (self.node_name, self.verbose))"], {}), "('[%s] Initialized (verbose = %s).' 
% (self.node_name, self.\n verbose))\n", (2041, 2115), False, 'import rospy\n'), ((2276, 2309), 'rospy.get_param', 'rospy.get_param', (['"""~verbose"""', '(True)'], {}), "('~verbose', True)\n", (2291, 2309), False, 'import rospy\n'), ((2492, 2520), 'rospy.get_param', 'rospy.get_param', (['"""~img_size"""'], {}), "('~img_size')\n", (2507, 2520), False, 'import rospy\n'), ((2547, 2577), 'rospy.get_param', 'rospy.get_param', (['"""~top_cutoff"""'], {}), "('~top_cutoff')\n", (2562, 2577), False, 'import rospy\n'), ((3535, 3596), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.processImage', 'args': '(image_msg,)'}), '(target=self.processImage, args=(image_msg,))\n', (3551, 3596), False, 'import threading\n'), ((3904, 3950), 'rospy.loginfo', 'rospy.loginfo', (["('[%s] %s' % (self.node_name, s))"], {}), "('[%s] %s' % (self.node_name, s))\n", (3917, 3950), False, 'import rospy\n'), ((4789, 4810), 'line_detector.timekeeper.TimeKeeper', 'TimeKeeper', (['image_msg'], {}), '(image_msg)\n', (4799, 4810), False, 'from line_detector.timekeeper import TimeKeeper\n'), ((5729, 5763), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['image_cv_corr'], {}), '(image_cv_corr)\n', (5748, 5763), False, 'import cv2\n'), ((6169, 6182), 'duckietown_msgs.msg.SegmentList', 'SegmentList', ([], {}), '()\n', (6180, 6182), False, 'from duckietown_msgs.msg import AntiInstagramTransform, BoolStamped, Segment, SegmentList, Vector2D, FSMState\n'), ((6354, 6404), 'numpy.array', 'np.array', (['(0, self.top_cutoff, 0, self.top_cutoff)'], {}), '((0, self.top_cutoff, 0, self.top_cutoff))\n', (6362, 6404), True, 'import numpy as np\n'), ((6425, 6544), 'numpy.array', 'np.array', (['(1.0 / self.image_size[1], 1.0 / self.image_size[0], 1.0 / self.image_size[\n 1], 1.0 / self.image_size[0])'], {}), '((1.0 / self.image_size[1], 1.0 / self.image_size[0], 1.0 / self.\n image_size[1], 1.0 / self.image_size[0]))\n', (6433, 6544), True, 'import numpy as np\n'), ((8856, 8883), 'numpy.hstack', 'np.hstack', (['(lines, normals)'], {}), '((lines, normals))\n', (8865, 8883), True, 'import numpy as np\n'), ((9457, 9468), 'time.time', 'time.time', ([], {}), '()\n', (9466, 9468), False, 'import time\n'), ((2131, 2159), 'rospy.Duration.from_sec', 'rospy.Duration.from_sec', (['(2.0)'], {}), '(2.0)\n', (2154, 2159), False, 'import rospy\n'), ((2629, 2657), 'rospy.get_param', 'rospy.get_param', (['"""~detector"""'], {}), "('~detector')\n", (2644, 2657), False, 'import rospy\n'), ((2865, 2888), 'duckietown_utils.instantiate_utils.instantiate', 'instantiate', (['c[0]', 'c[1]'], {}), '(c[0], c[1])\n', (2876, 2888), False, 'from duckietown_utils.instantiate_utils import instantiate\n'), ((3008, 3053), 'rospy.Publisher', 'rospy.Publisher', (['"""~edge"""', 'Image'], {'queue_size': '(1)'}), "('~edge', Image, queue_size=1)\n", (3023, 3053), False, 'import rospy\n'), ((3090, 3143), 'rospy.Publisher', 'rospy.Publisher', (['"""~colorSegment"""', 'Image'], {'queue_size': '(1)'}), "('~colorSegment', Image, queue_size=1)\n", (3105, 3143), False, 'import rospy\n'), ((4947, 4980), 'duckietown_utils.jpg.image_cv_from_jpg', 'image_cv_from_jpg', (['image_msg.data'], {}), '(image_msg.data)\n', (4964, 4980), False, 'from duckietown_utils.jpg import image_cv_from_jpg\n'), ((5385, 5484), 'cv2.resize', 'cv2.resize', (['image_cv', '(self.image_size[1], self.image_size[0])'], {'interpolation': 'cv2.INTER_NEAREST'}), '(image_cv, (self.image_size[1], self.image_size[0]),\n interpolation=cv2.INTER_NEAREST)\n', (5395, 5484), False, 'import cv2\n'), ((7643, 7665), 
'numpy.copy', 'np.copy', (['image_cv_corr'], {}), '(image_cv_corr)\n', (7650, 7665), True, 'import numpy as np\n'), ((7678, 7729), 'line_detector.line_detector_plot.drawLines', 'drawLines', (['image_with_lines', 'white.lines', '(0, 0, 0)'], {}), '(image_with_lines, white.lines, (0, 0, 0))\n', (7687, 7729), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((7742, 7796), 'line_detector.line_detector_plot.drawLines', 'drawLines', (['image_with_lines', 'yellow.lines', '(255, 0, 0)'], {}), '(image_with_lines, yellow.lines, (255, 0, 0))\n', (7751, 7796), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((7809, 7860), 'line_detector.line_detector_plot.drawLines', 'drawLines', (['image_with_lines', 'red.lines', '(0, 255, 0)'], {}), '(image_with_lines, red.lines, (0, 255, 0))\n', (7818, 7860), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((8228, 8276), 'line_detector.line_detector_plot.color_segment', 'color_segment', (['white.area', 'red.area', 'yellow.area'], {}), '(white.area, red.area, yellow.area)\n', (8241, 8276), False, 'from line_detector.line_detector_plot import color_segment, drawLines\n'), ((8906, 8915), 'duckietown_msgs.msg.Segment', 'Segment', ([], {}), '()\n', (8913, 8915), False, 'from duckietown_msgs.msg import AntiInstagramTransform, BoolStamped, Segment, SegmentList, Vector2D, FSMState\n'), ((9641, 9698), 'rospy.loginfo', 'rospy.loginfo', (['"""line_detector_node received first image."""'], {}), "('line_detector_node received first image.')\n", (9654, 9698), False, 'import rospy\n'), ((9871, 9930), 'rospy.loginfo', 'rospy.loginfo', (['"""line_detector_node processing first image."""'], {}), "('line_detector_node processing first image.')\n", (9884, 9930), False, 'import rospy\n'), ((9998, 10009), 'time.time', 'time.time', ([], {}), '()\n', (10007, 10009), False, 'import time\n')]
|
"""Copy number detection with CNVkit with specific support for targeted sequencing.
http://cnvkit.readthedocs.org
"""
import copy
import math
import operator
import os
import sys
import tempfile
import subprocess
import pybedtools
import numpy as np
import toolz as tz
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.multi import run_multicore, zeromq_aware_logging
from bcbio.distributed.transaction import file_transaction
from bcbio.heterogeneity import chromhacks
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import bedutils, effects, ploidy, population, vcfutils
from bcbio.structural import annotate, shared, plot
def run(items, background=None):
"""Detect copy number variations from batched set of samples using CNVkit.
"""
if not background: background = []
return _cnvkit_by_type(items, background)
def _sv_workdir(data):
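    # Per-sample CNVkit working directory, e.g.
    # <work>/structural/<sample_name>/cnvkit (path layout shown for illustration).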
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "cnvkit"))
def _cnvkit_by_type(items, background):
"""Dispatch to specific CNVkit functionality based on input type.
"""
if len(items + background) == 1:
return _run_cnvkit_single(items[0])
elif vcfutils.get_paired_phenotype(items[0]):
return _run_cnvkit_cancer(items, background)
else:
return _run_cnvkit_population(items, background)
def _associate_cnvkit_out(ckouts, items, is_somatic=False):
"""Associate cnvkit output with individual items.
"""
assert len(ckouts) == len(items)
out = []
for ckout, data in zip(ckouts, items):
ckout = copy.deepcopy(ckout)
ckout["variantcaller"] = "cnvkit"
if utils.file_exists(ckout["cns"]) and _cna_has_values(ckout["cns"]):
ckout = _add_seg_to_output(ckout, data)
ckout = _add_gainloss_to_output(ckout, data)
ckout = _add_segmetrics_to_output(ckout, data)
ckout = _add_variantcalls_to_output(ckout, data, is_somatic)
# ckout = _add_coverage_bedgraph_to_output(ckout, data)
ckout = _add_cnr_bedgraph_and_bed_to_output(ckout, data)
if "svplots" in dd.get_tools_on(data):
ckout = _add_plots_to_output(ckout, data)
if "sv" not in data:
data["sv"] = []
data["sv"].append(ckout)
out.append(data)
return out
def _run_cnvkit_single(data, background=None):
"""Process a single input file with BAM or uniform background.
"""
if not background:
background = []
ckouts = _run_cnvkit_shared([data], background)
if not ckouts:
return [data]
else:
assert len(ckouts) == 1
return _associate_cnvkit_out(ckouts, [data])
def _run_cnvkit_cancer(items, background):
"""Run CNVkit on a tumor/normal pair.
"""
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
normal_data = [x for x in items if dd.get_sample_name(x) != paired.tumor_name]
tumor_ready, normal_ready = _match_batches(paired.tumor_data, normal_data[0] if normal_data else None)
ckouts = _run_cnvkit_shared([tumor_ready], [normal_ready] if normal_ready else [])
if not ckouts:
return items
assert len(ckouts) == 1
tumor_data = _associate_cnvkit_out(ckouts, [paired.tumor_data], is_somatic=True)
return tumor_data + normal_data
def _match_batches(tumor, normal):
"""Fix batch names for shared tumor/normals to ensure matching
"""
def _get_batch(x):
b = dd.get_batch(x)
return [b] if not isinstance(b, (list, tuple)) else b
if normal:
tumor = copy.deepcopy(tumor)
normal = copy.deepcopy(normal)
cur_batch = list(set(_get_batch(tumor)) & set(_get_batch(normal)))
assert len(cur_batch) == 1, "No batch overlap: %s and %s" % (_get_batch(tumor), _get_batch(normal))
cur_batch = cur_batch[0]
tumor["metadata"]["batch"] = cur_batch
normal["metadata"]["batch"] = cur_batch
return tumor, normal
def _run_cnvkit_population(items, background):
"""Run CNVkit on a population of samples.
Tries to calculate background based on case/controls, otherwise
uses samples from the same batch as background.
"""
if background and len(background) > 0:
inputs = items
else:
inputs, background = shared.find_case_control(items)
# if we have case/control organized background or a single sample
if len(inputs) == 1 or len(background) > 0:
ckouts = _run_cnvkit_shared(inputs, background)
return _associate_cnvkit_out(ckouts, inputs) + background
# otherwise run each sample with the others in the batch as background
else:
out = []
for cur_input in items:
background = [d for d in items if dd.get_sample_name(d) != dd.get_sample_name(cur_input)]
ckouts = _run_cnvkit_shared([cur_input], background)
out.extend(_associate_cnvkit_out(ckouts, [cur_input]))
return out
def _get_cmd(script_name="cnvkit.py"):
return os.path.join(os.path.dirname(os.path.realpath(sys.executable)), script_name)
def _prep_cmd(cmd, tx_out_file):
"""Wrap CNVkit commands ensuring we use local temporary directories.
"""
cmd = " ".join(cmd) if isinstance(cmd, (list, tuple)) else cmd
return "export TMPDIR=%s && %s" % (os.path.dirname(tx_out_file), cmd)
def _bam_to_outbase(bam_file, work_dir, data):
"""Convert an input BAM file into CNVkit expected output.
Handles previous non-batch cases to avoid re-calculating,
returning both new and old values:
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
out_base = os.path.splitext(os.path.basename(bam_file))[0].split(".")[0]
base = os.path.join(work_dir, out_base)
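    # Returns (batch-suffixed base, old-style base), e.g.
    # ("<raw_dir>/sample1-batch1", "<raw_dir>/sample1"); names are illustrative.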
return "%s-%s" % (base, batch), base
def _run_cnvkit_shared(inputs, backgrounds):
"""Shared functionality to run CNVkit, parallelizing over multiple BAM files.
"""
work_dir = _sv_workdir(inputs[0])
raw_work_dir = utils.safe_makedir(os.path.join(work_dir, "raw"))
background_name = dd.get_sample_name(backgrounds[0]) if backgrounds else "flat"
    background_cnn = os.path.join(raw_work_dir, "%s_background.cnn" % background_name)
ckouts = []
for cur_input in inputs:
cur_raw_work_dir = utils.safe_makedir(os.path.join(_sv_workdir(cur_input), "raw"))
out_base, out_base_old = _bam_to_outbase(dd.get_align_bam(cur_input), cur_raw_work_dir, cur_input)
if utils.file_exists(out_base_old + ".cns"):
out_base = out_base_old
ckouts.append({"cnr": "%s.cnr" % out_base,
"cns": "%s.cns" % out_base,
"back_cnn": background_cnn})
if not utils.file_exists(ckouts[0]["cns"]):
cov_interval = dd.get_coverage_interval(inputs[0])
raw_target_bed, access_bed = _get_target_access_files(cov_interval, inputs[0], work_dir)
# bail out if we ended up with no regions
if not utils.file_exists(raw_target_bed):
            return []
raw_target_bed = annotate.add_genes(raw_target_bed, inputs[0])
parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
target_bed, antitarget_bed = _cnvkit_targets(raw_target_bed, access_bed, cov_interval,
raw_work_dir, inputs[0])
samples_to_run = zip(["background"] * len(backgrounds), backgrounds) + \
zip(["evaluate"] * len(inputs), inputs)
raw_coverage_cnns = [_cnvkit_coverage(cdata, bed, itype) for itype, cdata in samples_to_run
for bed in [target_bed, antitarget_bed]]
coverage_cnns = reduce(operator.add,
[_cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, inputs + backgrounds)
for cnns in tz.groupby("bam", raw_coverage_cnns).values()])
background_cnn = _cnvkit_background(_select_background_cnns(coverage_cnns),
background_cnn, target_bed, antitarget_bed, inputs[0])
fixed_cnrs = run_multicore(_cnvkit_fix,
[(cnns, background_cnn, inputs + backgrounds) for cnns in
tz.groupby("bam", [x for x in coverage_cnns
if x["itype"] == "evaluate"]).values()],
inputs[0]["config"], parallel)
[_cnvkit_segment(cnr, cov_interval, data) for cnr, data in fixed_cnrs]
return ckouts
def _cna_has_values(fname):
with open(fname) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return True
return False
def _cnvkit_segment(cnr_file, cov_interval, data):
"""Perform segmentation and copy number calling on normalized inputs
"""
out_file = "%s.cns" % os.path.splitext(cnr_file)[0]
if not utils.file_uptodate(out_file, cnr_file):
with file_transaction(data, out_file) as tx_out_file:
if not _cna_has_values(cnr_file):
with open(tx_out_file, "w") as out_handle:
out_handle.write("chromosome\tstart\tend\tgene\tlog2\tprobes\tCN1\tCN2\tbaf\tweight\n")
else:
cmd = [_get_cmd(), "segment", "-p", str(dd.get_cores(data)),
"-o", tx_out_file, cnr_file]
small_vrn_files = _compatible_small_variants(data)
if len(small_vrn_files) > 0 and _cna_has_values(cnr_file) and cov_interval != "genome":
cmd += ["-v", small_vrn_files[0]]
if cov_interval == "genome":
cmd += ["--threshold", "0.00001"]
# preferentially use conda installed Rscript
export_cmd = ("%s && export TMPDIR=%s && "
% (utils.get_R_exports(), os.path.dirname(tx_out_file)))
do.run(export_cmd + " ".join(cmd), "CNVkit segment")
return out_file
def _cnvkit_metrics(cnns, target_bed, antitarget_bed, cov_interval, items):
"""Estimate noise of a sample using a flat background.
Only used for panel/targeted data due to memory issues with whole genome
samples.
"""
if cov_interval == "genome":
return cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
background_file = "%s-flatbackground.cnn" % utils.splitext_plus(target_cnn)[0]
background_file = _cnvkit_background([], background_file, target_bed, antitarget_bed, items[0])
cnr_file, data = _cnvkit_fix_base(cnns, background_file, items, "-flatbackground")
cns_file = _cnvkit_segment(cnr_file, cov_interval, data)
metrics_file = "%s-metrics.txt" % utils.splitext_plus(target_cnn)[0]
if not utils.file_exists(metrics_file):
with file_transaction(data, metrics_file) as tx_metrics_file:
cmd = [_get_cmd(), "metrics", "-o", tx_metrics_file, "-s", cns_file, "--", cnr_file]
do.run(_prep_cmd(cmd, tx_metrics_file), "CNVkit metrics")
metrics = _read_metrics_file(metrics_file)
out = []
for cnn in cnns:
cnn["metrics"] = metrics
out.append(cnn)
return out
def _read_metrics_file(in_file):
with open(in_file) as in_handle:
header = in_handle.next().strip().split("\t")[1:]
vals = map(float, in_handle.next().strip().split("\t")[1:])
return dict(zip(header, vals))
@utils.map_wrap
@zeromq_aware_logging
def _cnvkit_fix(cnns, background_cnn, items):
"""Normalize samples, correcting sources of bias.
"""
return [_cnvkit_fix_base(cnns, background_cnn, items)]
def _cnvkit_fix_base(cnns, background_cnn, items, ext=""):
assert len(cnns) == 2, "Expected target and antitarget CNNs: %s" % cnns
target_cnn = [x["file"] for x in cnns if x["cnntype"] == "target"][0]
antitarget_cnn = [x["file"] for x in cnns if x["cnntype"] == "antitarget"][0]
data = [x for x in items if dd.get_sample_name(x) == cnns[0]["sample"]][0]
common_prefix = os.path.commonprefix([target_cnn, antitarget_cnn])
if common_prefix.endswith("."):
common_prefix = common_prefix[:-1]
out_file = "%s%s.cnr" % (common_prefix, ext)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "fix", "-o", tx_out_file, target_cnn, antitarget_cnn, background_cnn]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit fix")
return out_file, data
def _select_background_cnns(cnns):
"""Select cnns to use for background calculations.
    Uses background samples in the cohort, removing CNNs with high
    on-target variability. Uses (number of segments * biweight midvariance) as the
    variability metric, with higher numbers being less reliable.
"""
min_for_variability_analysis = 20
pct_keep = 0.10
b_cnns = [x for x in cnns if x["itype"] == "background" and x.get("metrics")]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
if len(b_cnns) >= min_for_variability_analysis:
b_cnns_w_metrics = []
for b_cnn in b_cnns:
unreliability = b_cnn["metrics"]["segments"] * b_cnn["metrics"]["bivar"]
b_cnns_w_metrics.append((unreliability, b_cnn))
b_cnns_w_metrics.sort()
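        # Keep the least variable pct_keep fraction, rounded up to whole
        # target/antitarget pairs. Illustration: 30 background CNNs (15 samples)
        # gives ceil(0.10 * 30 / 2) * 2 = 4 CNNs, i.e. the two quietest samples.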
to_keep = int(math.ceil(pct_keep * len(b_cnns) / 2.0) * 2)
b_cnns = [x[1] for x in b_cnns_w_metrics][:to_keep]
assert len(b_cnns) % 2 == 0, "Expect even set of target/antitarget cnns for background"
return [x["file"] for x in b_cnns]
def _cnvkit_background(background_cnns, out_file, target_bed, antitarget_bed, data):
"""Calculate background reference, handling flat case with no normal sample.
"""
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "reference", "-f", dd.get_ref_file(data), "-o", tx_out_file]
if len(background_cnns) == 0:
cmd += ["-t", target_bed, "-a", antitarget_bed]
else:
cmd += background_cnns
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit background")
return out_file
def _cnvkit_coverage(data, bed_file, input_type):
"""Calculate coverage in a BED file for CNVkit.
"""
bam_file = dd.get_align_bam(data)
work_dir = utils.safe_makedir(os.path.join(_sv_workdir(data), "raw"))
exts = {".target.bed": ("target", "targetcoverage.cnn"),
".antitarget.bed": ("antitarget", "antitargetcoverage.cnn")}
cnntype = None
for orig, (cur_cnntype, ext) in exts.items():
if bed_file.endswith(orig):
cnntype = cur_cnntype
break
if cnntype is None:
assert bed_file.endswith(".bed"), "Unexpected BED file extension for coverage %s" % bed_file
cnntype = ""
base, base_old = _bam_to_outbase(bam_file, work_dir, data)
out_file = "%s.%s" % (base, ext)
out_file_old = "%s.%s" % (base_old, ext)
# back compatible with previous runs to avoid re-calculating
if utils.file_exists(out_file_old):
out_file = out_file_old
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "coverage", "-p", str(dd.get_cores(data)), bam_file, bed_file, "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage")
return {"itype": input_type, "file": out_file, "bam": bam_file, "cnntype": cnntype,
"sample": dd.get_sample_name(data)}
def _cnvkit_targets(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Create target and antitarget regions from target and access files.
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
basename = os.path.splitext(os.path.basename(raw_target_bed))[0]
target_bed = os.path.join(work_dir, "%s-%s.target.bed" % (basename, batch))
# back compatible with previous runs to avoid re-calculating
target_bed_old = os.path.join(work_dir, "%s.target.bed" % basename)
if utils.file_exists(target_bed_old):
target_bed = target_bed_old
if not utils.file_exists(target_bed):
with file_transaction(data, target_bed) as tx_out_file:
cmd = [_get_cmd(), "target", raw_target_bed, "--split", "-o", tx_out_file]
bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
if bin_estimates.get("target"):
cmd += ["--avg-size", str(bin_estimates["target"])]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit target")
antitarget_bed = os.path.join(work_dir, "%s-%s.antitarget.bed" % (basename, batch))
antitarget_bed_old = os.path.join(work_dir, "%s.antitarget.bed" % basename)
# back compatible with previous runs to avoid re-calculating
if os.path.exists(antitarget_bed_old):
antitarget_bed = antitarget_bed_old
if not os.path.exists(antitarget_bed):
with file_transaction(data, antitarget_bed) as tx_out_file:
cmd = [_get_cmd(), "antitarget", "-g", access_bed, target_bed, "-o", tx_out_file]
bin_estimates = _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data)
if bin_estimates.get("antitarget"):
cmd += ["--avg-size", str(bin_estimates["antitarget"])]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit antitarget")
return target_bed, antitarget_bed
def _cnvkit_coverage_bin_estimate(raw_target_bed, access_bed, cov_interval, work_dir, data):
"""Estimate good coverage bin sizes for target regions based on coverage.
"""
batch = dd.get_batch(data) or dd.get_sample_name(data)
out_file = os.path.join(work_dir, "%s-%s-bin_estimate.txt" % (
os.path.splitext(os.path.basename(raw_target_bed))[0], batch))
method_map = {"genome": "wgs", "regional": "hybrid", "amplicon": "amplicon"}
if not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd("coverage_bin_size.py"), dd.get_align_bam(data),
"-m", method_map[cov_interval], "-t", raw_target_bed,
"-g", access_bed]
cmd = " ".join(cmd) + " > " + tx_out_file
try:
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit coverage bin estimation", log_error=False)
except subprocess.CalledProcessError:
logger.info("Bin size estimate failed, using default values")
with open(tx_out_file, "w") as out_handle:
out_handle.write("Bin size estimate failed, using default values")
avg_bin_sizes = {}
estimate_map = {"On-target": "target", "Off-target": "antitarget",
"Genome": "target", "Targets (sampling)": "target"}
range_map = {("genome", "target"): (500, 1000),
("regional", "target"): (50, 267), ("regional", "antitarget"): (20000, 200000),
("amplicon", "target"): (50, 267)}
with open(out_file) as in_handle:
for line in in_handle:
if line.startswith(tuple(estimate_map.keys())):
name, depth, bin_size = line.strip().split("\t")
name = estimate_map[name.replace(":", "").strip()]
try:
bin_size = int(bin_size)
except ValueError:
bin_size = None
if bin_size and bin_size > 0:
cur_min, cur_max = range_map[(cov_interval, name)]
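                    # Clamp the estimate into the allowed (min, max) bin size range
                    # for this analysis type and target/antitarget combination.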
avg_bin_sizes[name] = max(min(bin_size, cur_max), cur_min)
return avg_bin_sizes
def _get_target_access_files(cov_interval, data, work_dir):
"""Retrieve target and access files based on the type of data to process.
    Picks targets, anti-targets and access files based on the analysis type:
http://cnvkit.readthedocs.org/en/latest/nonhybrid.html
"""
base_regions = shared.get_base_cnv_regions(data, work_dir)
target_bed = bedutils.sort_merge(base_regions, data, out_dir=work_dir)
if cov_interval == "amplicon":
return target_bed, target_bed
elif cov_interval == "genome":
return target_bed, target_bed
else:
access_file = _create_access_file(dd.get_ref_file(data), _sv_workdir(data), data)
return target_bed, access_file
def _add_seg_to_output(out, data):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg", "-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out
def _add_cnr_bedgraph_and_bed_to_output(out, data):
cnr_file = out["cnr"]
bedgraph_file = cnr_file + ".bedgraph"
if not utils.file_exists(bedgraph_file):
with file_transaction(data, bedgraph_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bedgraph format")
out["cnr_bedgraph"] = bedgraph_file
bed_file = cnr_file + ".bed"
if not utils.file_exists(bed_file):
with file_transaction(data, bed_file) as tx_out_file:
cmd = "sed 1d {cnr_file} | cut -f1,2,3,4,5 > {tx_out_file}"
do.run(cmd.format(**locals()), "Converting cnr to bed format")
out["cnr_bed"] = bed_file
return out
def _compatible_small_variants(data):
"""Retrieve small variant (SNP, indel) VCFs compatible with CNVkit.
"""
supported = set(["vardict", "freebayes", "gatk-haplotype", "mutect2", "vardict"])
out = []
for v in data.get("variants", []):
vrn_file = v.get("vrn_file")
if vrn_file and v.get("variantcaller") in supported:
base, ext = utils.splitext_plus(os.path.basename(vrn_file))
if vcfutils.get_paired_phenotype(data):
out.append(vrn_file)
else:
sample_vrn_file = os.path.join(dd.get_work_dir(data), v["variantcaller"],
"%s-%s%s" % (base, dd.get_sample_name(data), ext))
sample_vrn_file = vcfutils.select_sample(vrn_file, dd.get_sample_name(data), sample_vrn_file,
data["config"])
out.append(sample_vrn_file)
return out
def _add_variantcalls_to_output(out, data, is_somatic=False):
"""Call ploidy and convert into VCF and BED representations.
"""
call_file = "%s-call%s" % os.path.splitext(out["cns"])
gender = population.get_gender(data)
if not utils.file_exists(call_file):
with file_transaction(data, call_file) as tx_call_file:
filters = ["--filter", "cn"]
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "call"] + \
filters + \
["--ploidy", str(ploidy.get_ploidy([data])),
"-o", tx_call_file, out["cns"]]
small_vrn_files = _compatible_small_variants(data)
if len(small_vrn_files) > 0 and _cna_has_values(out["cns"]):
cmd += ["-v", small_vrn_files[0]]
if not is_somatic:
cmd += ["-m", "clonal"]
if gender and gender.lower() != "unknown":
cmd += ["--gender", gender]
if gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit call ploidy")
calls = {}
for outformat in ["bed", "vcf"]:
out_file = "%s.%s" % (os.path.splitext(call_file)[0], outformat)
calls[outformat] = out_file
if not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
outformat, "--sample-id", dd.get_sample_name(data),
"--ploidy", str(ploidy.get_ploidy([data])),
"-o", tx_out_file, call_file]
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(cmd, "CNVkit export %s" % outformat)
out["call_file"] = call_file
out["vrn_bed"] = annotate.add_genes(calls["bed"], data)
effects_vcf, _ = effects.add_to_vcf(calls["vcf"], data, "snpeff")
out["vrn_file"] = effects_vcf or calls["vcf"]
return out
def _add_segmetrics_to_output(out, data):
"""Add metrics for measuring reliability of CNV estimates.
"""
out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "segmetrics",
"--ci", "--pi",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
# Use less fine grained bootstrapping intervals for whole genome runs
if dd.get_coverage_interval(data) == "genome":
cmd += ["--alpha", "0.1", "--bootstrap", "50"]
else:
cmd += ["--alpha", "0.01", "--bootstrap", "500"]
do.run(cmd, "CNVkit segmetrics")
out["segmetrics"] = out_file
return out
def _add_gainloss_to_output(out, data):
"""Add gainloss based on genes, helpful for identifying changes in smaller genes.
"""
out_file = "%s-gainloss.txt" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "gainloss",
"-s", out["cns"], "-o", tx_out_file, out["cnr"]]
do.run(cmd, "CNVkit gainloss")
out["gainloss"] = out_file
return out
def _add_coverage_bedgraph_to_output(out, data):
"""Add BedGraph representation of coverage to the output
"""
out_file = "%s.coverage.bedgraph" % os.path.splitext(out["cns"])[0]
if utils.file_exists(out_file):
out["bedgraph"] = out_file
return out
bam_file = dd.get_align_bam(data)
bedtools = config_utils.get_program("bedtools", data["config"])
samtools = config_utils.get_program("samtools", data["config"])
cns_file = out["cns"]
bed_file = tempfile.NamedTemporaryFile(suffix=".bed", delete=False).name
with file_transaction(data, out_file) as tx_out_file:
cmd = ("sed 1d {cns_file} | cut -f1,2,3 > {bed_file}; "
"{samtools} view -b -L {bed_file} {bam_file} | "
"{bedtools} genomecov -bg -ibam - -g {bed_file} >"
"{tx_out_file}").format(**locals())
do.run(cmd, "CNVkit bedGraph conversion")
os.remove(bed_file)
out["bedgraph"] = out_file
return out
def _add_plots_to_output(out, data):
"""Add CNVkit plots summarizing called copy number values.
"""
out["plot"] = {}
diagram_plot = _add_diagram_plot(out, data)
if diagram_plot:
out["plot"]["diagram"] = diagram_plot
scatter = _add_scatter_plot(out, data)
if scatter:
out["plot"]["scatter"] = scatter
scatter_global = _add_global_scatter_plot(out, data)
if scatter_global:
out["plot"]["scatter_global"] = scatter_global
return out
def _get_larger_chroms(ref_file):
"""Retrieve larger chromosomes, avoiding the smaller ones for plotting.
"""
from scipy.cluster.vq import kmeans, vq
all_sizes = []
for c in ref.file_contigs(ref_file):
all_sizes.append(float(c.size))
all_sizes.sort()
# separate out smaller chromosomes and haplotypes with kmeans
centroids, _ = kmeans(np.array(all_sizes), 2)
idx, _ = vq(np.array(all_sizes), centroids)
little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
little_sizes = [x[1] for x in little_sizes]
# create one more cluster with the smaller, removing the haplotypes
centroids2, _ = kmeans(np.array(little_sizes), 2)
idx2, _ = vq(np.array(little_sizes), centroids2)
little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
little_sizes2 = [x[1] for x in little_sizes2]
# get any chromosomes not in haplotype/random bin
thresh = max(little_sizes2)
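    # Contigs larger than the biggest member of the small/haplotype cluster are
    # treated as primary chromosomes and kept for plotting.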
larger_chroms = []
for c in ref.file_contigs(ref_file):
if c.size > thresh:
larger_chroms.append(c.name)
return larger_chroms
def _remove_haplotype_chroms(in_file, data):
"""Remove shorter haplotype chromosomes from cns/cnr files for plotting.
"""
larger_chroms = set(_get_larger_chroms(dd.get_ref_file(data)))
out_file = "%s-chromfilter%s" % utils.splitext_plus(in_file)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("chromosome") or line.split()[0] in larger_chroms:
out_handle.write(line)
return out_file
def _add_global_scatter_plot(out, data):
out_file = "%s-scatter_global.pdf" % os.path.splitext(out["cnr"])[0]
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, cnr]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit global scatter plot")
return out_file
def _add_scatter_plot(out, data):
out_file = "%s-scatter.pdf" % os.path.splitext(out["cnr"])[0]
priority_bed = dd.get_svprioritize(data)
if not priority_bed:
return None
priority_bed = plot._prioritize_plot_regions(pybedtools.BedTool(priority_bed), data, os.path.dirname(out_file))
if utils.file_exists(out_file):
return out_file
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "scatter", "-s", cns, "-o", tx_out_file, "-l",
priority_bed, cnr]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit scatter plot")
return out_file
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True
def _add_diagram_plot(out, data):
out_file = "%s-diagram.pdf" % os.path.splitext(out["cnr"])[0]
cnr = _remove_haplotype_chroms(out["cnr"], data)
cns = _remove_haplotype_chroms(out["cns"], data)
if _cnx_is_empty(cnr) or _cnx_is_empty(cns):
return None
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "diagram", "-s", cns,
"-o", tx_out_file, cnr]
gender = population.get_gender(data)
if gender and gender.lower() == "male":
cmd += ["--male-reference"]
do.run(_prep_cmd(cmd, tx_out_file), "CNVkit diagram plot")
return out_file
def _create_access_file(ref_file, out_dir, data):
"""Create genome access file for CNVlib to define available genomic regions.
XXX Can move to installation/upgrade process if too slow here.
"""
out_file = os.path.join(out_dir, "%s-access.bed" % os.path.splitext(os.path.basename(ref_file))[0])
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "access",
ref_file, "-s", "10000", "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "Create CNVkit access file")
return out_file
# ## Theta support
def export_theta(ckout, data):
"""Provide updated set of data with export information for TheTA2 input.
"""
cns_file = chromhacks.bed_to_standardonly(ckout["cns"], data, headers="chromosome")
cnr_file = chromhacks.bed_to_standardonly(ckout["cnr"], data, headers="chromosome")
out_file = "%s-theta.input" % utils.splitext_plus(cns_file)[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [_get_cmd(), "export", "theta", cns_file, cnr_file, "-o", tx_out_file]
do.run(_prep_cmd(cmd, tx_out_file), "Export CNVkit calls as inputs for TheTA2")
ckout["theta_input"] = out_file
return ckout
|
[
"toolz.groupby",
"bcbio.structural.annotate.add_genes",
"bcbio.variation.bedutils.sort_merge",
"numpy.array",
"pybedtools.BedTool",
"bcbio.pipeline.datadict.get_cores",
"copy.deepcopy",
"bcbio.variation.vcfutils.get_paired_bams",
"bcbio.pipeline.datadict.get_align_bam",
"bcbio.pipeline.datadict.get_svprioritize",
"os.remove",
"os.path.exists",
"bcbio.pipeline.datadict.get_work_dir",
"bcbio.variation.effects.add_to_vcf",
"bcbio.structural.shared.get_base_cnv_regions",
"bcbio.pipeline.config_utils.get_program",
"bcbio.pipeline.datadict.get_coverage_interval",
"bcbio.utils.splitext_plus",
"bcbio.variation.population.get_gender",
"bcbio.variation.vcfutils.get_paired_phenotype",
"tempfile.NamedTemporaryFile",
"bcbio.pipeline.datadict.get_ref_file",
"bcbio.heterogeneity.chromhacks.bed_to_standardonly",
"bcbio.utils.file_exists",
"bcbio.provenance.do.run",
"os.path.splitext",
"bcbio.pipeline.datadict.get_sample_name",
"os.path.dirname",
"bcbio.distributed.transaction.file_transaction",
"bcbio.pipeline.datadict.get_batch",
"bcbio.pipeline.datadict.get_tools_on",
"bcbio.utils.file_uptodate",
"os.path.join",
"bcbio.utils.get_R_exports",
"os.path.realpath",
"bcbio.structural.shared.find_case_control",
"os.path.commonprefix",
"os.path.basename",
"bcbio.variation.ploidy.get_ploidy",
"bcbio.log.logger.info",
"bcbio.bam.ref.file_contigs"
] |
[((2981, 3045), 'bcbio.variation.vcfutils.get_paired_bams', 'vcfutils.get_paired_bams', (["[x['align_bam'] for x in items]", 'items'], {}), "([x['align_bam'] for x in items], items)\n", (3005, 3045), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((5903, 5935), 'os.path.join', 'os.path.join', (['work_dir', 'out_base'], {}), '(work_dir, out_base)\n', (5915, 5935), False, 'import os\n'), ((6325, 6390), 'os.path.join', 'os.path.join', (['raw_work_dir', "('%s_background.cnn' % background_name)"], {}), "(raw_work_dir, '%s_background.cnn' % background_name)\n", (6337, 6390), False, 'import os\n'), ((12266, 12316), 'os.path.commonprefix', 'os.path.commonprefix', (['[target_cnn, antitarget_cnn]'], {}), '([target_cnn, antitarget_cnn])\n', (12286, 12316), False, 'import os\n'), ((14584, 14606), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['data'], {}), '(data)\n', (14600, 14606), True, 'from bcbio.pipeline import datadict as dd\n'), ((15335, 15366), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file_old'], {}), '(out_file_old)\n', (15352, 15366), False, 'from bcbio import utils\n'), ((16125, 16187), 'os.path.join', 'os.path.join', (['work_dir', "('%s-%s.target.bed' % (basename, batch))"], {}), "(work_dir, '%s-%s.target.bed' % (basename, batch))\n", (16137, 16187), False, 'import os\n'), ((16274, 16324), 'os.path.join', 'os.path.join', (['work_dir', "('%s.target.bed' % basename)"], {}), "(work_dir, '%s.target.bed' % basename)\n", (16286, 16324), False, 'import os\n'), ((16332, 16365), 'bcbio.utils.file_exists', 'utils.file_exists', (['target_bed_old'], {}), '(target_bed_old)\n', (16349, 16365), False, 'from bcbio import utils\n'), ((16910, 16976), 'os.path.join', 'os.path.join', (['work_dir', "('%s-%s.antitarget.bed' % (basename, batch))"], {}), "(work_dir, '%s-%s.antitarget.bed' % (basename, batch))\n", (16922, 16976), False, 'import os\n'), ((17002, 17056), 'os.path.join', 'os.path.join', (['work_dir', "('%s.antitarget.bed' % basename)"], {}), "(work_dir, '%s.antitarget.bed' % basename)\n", (17014, 17056), False, 'import os\n'), ((17129, 17163), 'os.path.exists', 'os.path.exists', (['antitarget_bed_old'], {}), '(antitarget_bed_old)\n', (17143, 17163), False, 'import os\n'), ((20232, 20275), 'bcbio.structural.shared.get_base_cnv_regions', 'shared.get_base_cnv_regions', (['data', 'work_dir'], {}), '(data, work_dir)\n', (20259, 20275), False, 'from bcbio.structural import annotate, shared, plot\n'), ((20293, 20350), 'bcbio.variation.bedutils.sort_merge', 'bedutils.sort_merge', (['base_regions', 'data'], {'out_dir': 'work_dir'}), '(base_regions, data, out_dir=work_dir)\n', (20312, 20350), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((23071, 23098), 'bcbio.variation.population.get_gender', 'population.get_gender', (['data'], {}), '(data)\n', (23092, 23098), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((24759, 24797), 'bcbio.structural.annotate.add_genes', 'annotate.add_genes', (["calls['bed']", 'data'], {}), "(calls['bed'], data)\n", (24777, 24797), False, 'from bcbio.structural import annotate, shared, plot\n'), ((24819, 24867), 'bcbio.variation.effects.add_to_vcf', 'effects.add_to_vcf', (["calls['vcf']", 'data', '"""snpeff"""'], {}), "(calls['vcf'], data, 'snpeff')\n", (24837, 24867), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((26544, 26571), 'bcbio.utils.file_exists', 'utils.file_exists', 
(['out_file'], {}), '(out_file)\n', (26561, 26571), False, 'from bcbio import utils\n'), ((26642, 26664), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['data'], {}), '(data)\n', (26658, 26664), True, 'from bcbio.pipeline import datadict as dd\n'), ((26680, 26732), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""bedtools"""', "data['config']"], {}), "('bedtools', data['config'])\n", (26704, 26732), False, 'from bcbio.pipeline import config_utils\n'), ((26748, 26800), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""samtools"""', "data['config']"], {}), "('samtools', data['config'])\n", (26772, 26800), False, 'from bcbio.pipeline import config_utils\n'), ((28021, 28047), 'bcbio.bam.ref.file_contigs', 'ref.file_contigs', (['ref_file'], {}), '(ref_file)\n', (28037, 28047), False, 'from bcbio.bam import ref\n'), ((28844, 28870), 'bcbio.bam.ref.file_contigs', 'ref.file_contigs', (['ref_file'], {}), '(ref_file)\n', (28860, 28870), False, 'from bcbio.bam import ref\n'), ((29765, 29792), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (29782, 29792), False, 'from bcbio import utils\n'), ((30269, 30294), 'bcbio.pipeline.datadict.get_svprioritize', 'dd.get_svprioritize', (['data'], {}), '(data)\n', (30288, 30294), True, 'from bcbio.pipeline import datadict as dd\n'), ((30463, 30490), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (30480, 30490), False, 'from bcbio import utils\n'), ((32602, 32674), 'bcbio.heterogeneity.chromhacks.bed_to_standardonly', 'chromhacks.bed_to_standardonly', (["ckout['cns']", 'data'], {'headers': '"""chromosome"""'}), "(ckout['cns'], data, headers='chromosome')\n", (32632, 32674), False, 'from bcbio.heterogeneity import chromhacks\n'), ((32690, 32762), 'bcbio.heterogeneity.chromhacks.bed_to_standardonly', 'chromhacks.bed_to_standardonly', (["ckout['cnr']", 'data'], {'headers': '"""chromosome"""'}), "(ckout['cnr'], data, headers='chromosome')\n", (32720, 32762), False, 'from bcbio.heterogeneity import chromhacks\n'), ((1365, 1404), 'bcbio.variation.vcfutils.get_paired_phenotype', 'vcfutils.get_paired_phenotype', (['items[0]'], {}), '(items[0])\n', (1394, 1404), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((1758, 1778), 'copy.deepcopy', 'copy.deepcopy', (['ckout'], {}), '(ckout)\n', (1771, 1778), False, 'import copy\n'), ((3659, 3674), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['x'], {}), '(x)\n', (3671, 3674), True, 'from bcbio.pipeline import datadict as dd\n'), ((3768, 3788), 'copy.deepcopy', 'copy.deepcopy', (['tumor'], {}), '(tumor)\n', (3781, 3788), False, 'import copy\n'), ((3806, 3827), 'copy.deepcopy', 'copy.deepcopy', (['normal'], {}), '(normal)\n', (3819, 3827), False, 'import copy\n'), ((4492, 4523), 'bcbio.structural.shared.find_case_control', 'shared.find_case_control', (['items'], {}), '(items)\n', (4516, 4523), False, 'from bcbio.structural import annotate, shared, plot\n'), ((5768, 5786), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['data'], {}), '(data)\n', (5780, 5786), True, 'from bcbio.pipeline import datadict as dd\n'), ((5790, 5814), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (5808, 5814), True, 'from bcbio.pipeline import datadict as dd\n'), ((6189, 6218), 'os.path.join', 'os.path.join', (['work_dir', '"""raw"""'], {}), "(work_dir, 'raw')\n", (6201, 6218), False, 'import os\n'), ((6242, 6276), 
'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['backgrounds[0]'], {}), '(backgrounds[0])\n', (6260, 6276), True, 'from bcbio.pipeline import datadict as dd\n'), ((6647, 6687), 'bcbio.utils.file_exists', 'utils.file_exists', (["(out_base_old + '.cns')"], {}), "(out_base_old + '.cns')\n", (6664, 6687), False, 'from bcbio import utils\n'), ((6890, 6925), 'bcbio.utils.file_exists', 'utils.file_exists', (["ckouts[0]['cns']"], {}), "(ckouts[0]['cns'])\n", (6907, 6925), False, 'from bcbio import utils\n'), ((6950, 6985), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['inputs[0]'], {}), '(inputs[0])\n', (6974, 6985), True, 'from bcbio.pipeline import datadict as dd\n'), ((7230, 7275), 'bcbio.structural.annotate.add_genes', 'annotate.add_genes', (['raw_target_bed', 'inputs[0]'], {}), '(raw_target_bed, inputs[0])\n', (7248, 7275), False, 'from bcbio.structural import annotate, shared, plot\n'), ((9154, 9193), 'bcbio.utils.file_uptodate', 'utils.file_uptodate', (['out_file', 'cnr_file'], {}), '(out_file, cnr_file)\n', (9173, 9193), False, 'from bcbio import utils\n'), ((11014, 11045), 'bcbio.utils.file_exists', 'utils.file_exists', (['metrics_file'], {}), '(metrics_file)\n', (11031, 11045), False, 'from bcbio import utils\n'), ((12456, 12483), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (12473, 12483), False, 'from bcbio import utils\n'), ((14023, 14050), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (14040, 14050), False, 'from bcbio import utils\n'), ((15411, 15438), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (15428, 15438), False, 'from bcbio import utils\n'), ((15792, 15816), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (15810, 15816), True, 'from bcbio.pipeline import datadict as dd\n'), ((15992, 16010), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['data'], {}), '(data)\n', (16004, 16010), True, 'from bcbio.pipeline import datadict as dd\n'), ((16014, 16038), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (16032, 16038), True, 'from bcbio.pipeline import datadict as dd\n'), ((16414, 16443), 'bcbio.utils.file_exists', 'utils.file_exists', (['target_bed'], {}), '(target_bed)\n', (16431, 16443), False, 'from bcbio import utils\n'), ((17220, 17250), 'os.path.exists', 'os.path.exists', (['antitarget_bed'], {}), '(antitarget_bed)\n', (17234, 17250), False, 'import os\n'), ((17949, 17967), 'bcbio.pipeline.datadict.get_batch', 'dd.get_batch', (['data'], {}), '(data)\n', (17961, 17967), True, 'from bcbio.pipeline import datadict as dd\n'), ((17971, 17995), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (17989, 17995), True, 'from bcbio.pipeline import datadict as dd\n'), ((18226, 18250), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (18240, 18250), False, 'import os\n'), ((20824, 20851), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (20841, 20851), False, 'from bcbio import utils\n'), ((21280, 21312), 'bcbio.utils.file_exists', 'utils.file_exists', (['bedgraph_file'], {}), '(bedgraph_file)\n', (21297, 21312), False, 'from bcbio import utils\n'), ((21616, 21643), 'bcbio.utils.file_exists', 'utils.file_exists', (['bed_file'], {}), '(bed_file)\n', (21633, 21643), False, 'from bcbio import utils\n'), ((23029, 23057), 
'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (23045, 23057), False, 'import os\n'), ((23110, 23138), 'bcbio.utils.file_exists', 'utils.file_exists', (['call_file'], {}), '(call_file)\n', (23127, 23138), False, 'from bcbio import utils\n'), ((25127, 25154), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (25144, 25154), False, 'from bcbio import utils\n'), ((26007, 26034), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (26024, 26034), False, 'from bcbio import utils\n'), ((26842, 26898), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".bed"""', 'delete': '(False)'}), "(suffix='.bed', delete=False)\n", (26869, 26898), False, 'import tempfile\n'), ((26913, 26945), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (26929, 26945), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((27215, 27256), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit bedGraph conversion"""'], {}), "(cmd, 'CNVkit bedGraph conversion')\n", (27221, 27256), False, 'from bcbio.provenance import do\n'), ((27265, 27284), 'os.remove', 'os.remove', (['bed_file'], {}), '(bed_file)\n', (27274, 27284), False, 'import os\n'), ((28202, 28221), 'numpy.array', 'np.array', (['all_sizes'], {}), '(all_sizes)\n', (28210, 28221), True, 'import numpy as np\n'), ((28242, 28261), 'numpy.array', 'np.array', (['all_sizes'], {}), '(all_sizes)\n', (28250, 28261), True, 'import numpy as np\n'), ((28504, 28526), 'numpy.array', 'np.array', (['little_sizes'], {}), '(little_sizes)\n', (28512, 28526), True, 'import numpy as np\n'), ((28548, 28570), 'numpy.array', 'np.array', (['little_sizes'], {}), '(little_sizes)\n', (28556, 28570), True, 'import numpy as np\n'), ((29200, 29228), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['in_file'], {}), '(in_file)\n', (29219, 29228), False, 'from bcbio import utils\n'), ((29240, 29267), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (29257, 29267), False, 'from bcbio import utils\n'), ((29933, 29965), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (29949, 29965), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((30389, 30421), 'pybedtools.BedTool', 'pybedtools.BedTool', (['priority_bed'], {}), '(priority_bed)\n', (30407, 30421), False, 'import pybedtools\n'), ((30429, 30454), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (30444, 30454), False, 'import os\n'), ((30631, 30663), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (30647, 30663), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((31412, 31439), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (31429, 31439), False, 'from bcbio import utils\n'), ((32158, 32185), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (32175, 32185), False, 'from bcbio import utils\n'), ((32841, 32868), 'bcbio.utils.file_exists', 'utils.file_exists', (['out_file'], {}), '(out_file)\n', (32858, 32868), False, 'from bcbio import utils\n'), ((1119, 1143), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (1137, 1143), True, 'from bcbio.pipeline import datadict as dd\n'), 
((1832, 1863), 'bcbio.utils.file_exists', 'utils.file_exists', (["ckout['cns']"], {}), "(ckout['cns'])\n", (1849, 1863), False, 'from bcbio import utils\n'), ((5232, 5264), 'os.path.realpath', 'os.path.realpath', (['sys.executable'], {}), '(sys.executable)\n', (5248, 5264), False, 'import os\n'), ((5501, 5529), 'os.path.dirname', 'os.path.dirname', (['tx_out_file'], {}), '(tx_out_file)\n', (5516, 5529), False, 'import os\n'), ((6578, 6605), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['cur_input'], {}), '(cur_input)\n', (6594, 6605), True, 'from bcbio.pipeline import datadict as dd\n'), ((7148, 7181), 'bcbio.utils.file_exists', 'utils.file_exists', (['raw_target_bed'], {}), '(raw_target_bed)\n', (7165, 7181), False, 'from bcbio import utils\n'), ((7322, 7345), 'bcbio.pipeline.datadict.get_cores', 'dd.get_cores', (['inputs[0]'], {}), '(inputs[0])\n', (7334, 7345), True, 'from bcbio.pipeline import datadict as dd\n'), ((9113, 9139), 'os.path.splitext', 'os.path.splitext', (['cnr_file'], {}), '(cnr_file)\n', (9129, 9139), False, 'import os\n'), ((9208, 9240), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (9224, 9240), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((10647, 10678), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['target_cnn'], {}), '(target_cnn)\n', (10666, 10678), False, 'from bcbio import utils\n'), ((10968, 10999), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['target_cnn'], {}), '(target_cnn)\n', (10987, 10999), False, 'from bcbio import utils\n'), ((11060, 11096), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'metrics_file'], {}), '(data, metrics_file)\n', (11076, 11096), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((12498, 12530), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (12514, 12530), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((14065, 14097), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (14081, 14097), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((15453, 15485), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (15469, 15485), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((16071, 16103), 'os.path.basename', 'os.path.basename', (['raw_target_bed'], {}), '(raw_target_bed)\n', (16087, 16103), False, 'import os\n'), ((16458, 16492), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'target_bed'], {}), '(data, target_bed)\n', (16474, 16492), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((17265, 17303), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'antitarget_bed'], {}), '(data, antitarget_bed)\n', (17281, 17303), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((18265, 18297), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (18281, 18297), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((20781, 20809), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (20797, 20809), False, 'import os\n'), ((20866, 20898), 
'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (20882, 20898), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((21073, 21105), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit export seg"""'], {}), "(cmd, 'CNVkit export seg')\n", (21079, 21105), False, 'from bcbio.provenance import do\n'), ((21327, 21364), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'bedgraph_file'], {}), '(data, bedgraph_file)\n', (21343, 21364), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((21658, 21690), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'bed_file'], {}), '(data, bed_file)\n', (21674, 21690), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((22341, 22376), 'bcbio.variation.vcfutils.get_paired_phenotype', 'vcfutils.get_paired_phenotype', (['data'], {}), '(data)\n', (22370, 22376), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((23153, 23186), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'call_file'], {}), '(data, call_file)\n', (23169, 23186), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((23951, 23984), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit call ploidy"""'], {}), "(cmd, 'CNVkit call ploidy')\n", (23957, 23984), False, 'from bcbio.provenance import do\n'), ((24161, 24185), 'os.path.exists', 'os.path.exists', (['out_file'], {}), '(out_file)\n', (24175, 24185), False, 'import os\n'), ((25084, 25112), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (25100, 25112), False, 'import os\n'), ((25169, 25201), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (25185, 25201), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((25713, 25745), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit segmetrics"""'], {}), "(cmd, 'CNVkit segmetrics')\n", (25719, 25745), False, 'from bcbio.provenance import do\n'), ((25964, 25992), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (25980, 25992), False, 'import os\n'), ((26049, 26081), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (26065, 26081), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((26269, 26299), 'bcbio.provenance.do.run', 'do.run', (['cmd', '"""CNVkit gainloss"""'], {}), "(cmd, 'CNVkit gainloss')\n", (26275, 26299), False, 'from bcbio.provenance import do\n'), ((26505, 26533), 'os.path.splitext', 'os.path.splitext', (["out['cns']"], {}), "(out['cns'])\n", (26521, 26533), False, 'import os\n'), ((29140, 29161), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (29155, 29161), True, 'from bcbio.pipeline import datadict as dd\n'), ((29282, 29314), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (29298, 29314), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((29726, 29754), 'os.path.splitext', 'os.path.splitext', (["out['cnr']"], {}), "(out['cnr'])\n", (29742, 29754), False, 'import os\n'), ((30218, 30246), 'os.path.splitext', 'os.path.splitext', (["out['cnr']"], {}), "(out['cnr'])\n", (30234, 30246), False, 'import os\n'), ((31194, 31222), 
'os.path.splitext', 'os.path.splitext', (["out['cnr']"], {}), "(out['cnr'])\n", (31210, 31222), False, 'import os\n'), ((31454, 31486), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (31470, 31486), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((31620, 31647), 'bcbio.variation.population.get_gender', 'population.get_gender', (['data'], {}), '(data)\n', (31641, 31647), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((32200, 32232), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (32216, 32232), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((32797, 32826), 'bcbio.utils.splitext_plus', 'utils.splitext_plus', (['cns_file'], {}), '(cns_file)\n', (32816, 32826), False, 'from bcbio import utils\n'), ((32883, 32915), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (32899, 32915), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((2305, 2326), 'bcbio.pipeline.datadict.get_tools_on', 'dd.get_tools_on', (['data'], {}), '(data)\n', (2320, 2326), True, 'from bcbio.pipeline import datadict as dd\n'), ((3085, 3106), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['x'], {}), '(x)\n', (3103, 3106), True, 'from bcbio.pipeline import datadict as dd\n'), ((14164, 14185), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (14179, 14185), True, 'from bcbio.pipeline import datadict as dd\n'), ((18367, 18389), 'bcbio.pipeline.datadict.get_align_bam', 'dd.get_align_bam', (['data'], {}), '(data)\n', (18383, 18389), True, 'from bcbio.pipeline import datadict as dd\n'), ((20549, 20570), 'bcbio.pipeline.datadict.get_ref_file', 'dd.get_ref_file', (['data'], {}), '(data)\n', (20564, 20570), True, 'from bcbio.pipeline import datadict as dd\n'), ((22298, 22324), 'os.path.basename', 'os.path.basename', (['vrn_file'], {}), '(vrn_file)\n', (22314, 22324), False, 'import os\n'), ((24204, 24236), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['data', 'out_file'], {}), '(data, out_file)\n', (24220, 24236), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((24661, 24704), 'bcbio.provenance.do.run', 'do.run', (['cmd', "('CNVkit export %s' % outformat)"], {}), "(cmd, 'CNVkit export %s' % outformat)\n", (24667, 24704), False, 'from bcbio.provenance import do\n'), ((25511, 25541), 'bcbio.pipeline.datadict.get_coverage_interval', 'dd.get_coverage_interval', (['data'], {}), '(data)\n', (25535, 25541), True, 'from bcbio.pipeline import datadict as dd\n'), ((12199, 12220), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['x'], {}), '(x)\n', (12217, 12220), True, 'from bcbio.pipeline import datadict as dd\n'), ((15555, 15573), 'bcbio.pipeline.datadict.get_cores', 'dd.get_cores', (['data'], {}), '(data)\n', (15567, 15573), True, 'from bcbio.pipeline import datadict as dd\n'), ((18741, 18802), 'bcbio.log.logger.info', 'logger.info', (['"""Bin size estimate failed, using default values"""'], {}), "('Bin size estimate failed, using default values')\n", (18752, 18802), False, 'from bcbio.log import logger\n'), ((20947, 20978), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (20962, 20978), False, 'import os\n'), ((22480, 22501), 
'bcbio.pipeline.datadict.get_work_dir', 'dd.get_work_dir', (['data'], {}), '(data)\n', (22495, 22501), True, 'from bcbio.pipeline import datadict as dd\n'), ((22688, 22712), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (22706, 22712), True, 'from bcbio.pipeline import datadict as dd\n'), ((24067, 24094), 'os.path.splitext', 'os.path.splitext', (['call_file'], {}), '(call_file)\n', (24083, 24094), False, 'import os\n'), ((24395, 24419), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (24413, 24419), True, 'from bcbio.pipeline import datadict as dd\n'), ((25250, 25281), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (25265, 25281), False, 'import os\n'), ((26130, 26161), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (26145, 26161), False, 'import os\n'), ((32115, 32141), 'os.path.basename', 'os.path.basename', (['ref_file'], {}), '(ref_file)\n', (32131, 32141), False, 'import os\n'), ((4945, 4966), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['d'], {}), '(d)\n', (4963, 4966), True, 'from bcbio.pipeline import datadict as dd\n'), ((4970, 4999), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['cur_input'], {}), '(cur_input)\n', (4988, 4999), True, 'from bcbio.pipeline import datadict as dd\n'), ((5847, 5873), 'os.path.basename', 'os.path.basename', (['bam_file'], {}), '(bam_file)\n', (5863, 5873), False, 'import os\n'), ((9544, 9562), 'bcbio.pipeline.datadict.get_cores', 'dd.get_cores', (['data'], {}), '(data)\n', (9556, 9562), True, 'from bcbio.pipeline import datadict as dd\n'), ((10094, 10115), 'bcbio.utils.get_R_exports', 'utils.get_R_exports', ([], {}), '()\n', (10113, 10115), False, 'from bcbio import utils\n'), ((10117, 10145), 'os.path.dirname', 'os.path.dirname', (['tx_out_file'], {}), '(tx_out_file)\n', (10132, 10145), False, 'import os\n'), ((18088, 18120), 'os.path.basename', 'os.path.basename', (['raw_target_bed'], {}), '(raw_target_bed)\n', (18104, 18120), False, 'import os\n'), ((23402, 23427), 'bcbio.variation.ploidy.get_ploidy', 'ploidy.get_ploidy', (['[data]'], {}), '([data])\n', (23419, 23427), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((24289, 24320), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (24304, 24320), False, 'import os\n'), ((24460, 24485), 'bcbio.variation.ploidy.get_ploidy', 'ploidy.get_ploidy', (['[data]'], {}), '([data])\n', (24477, 24485), False, 'from bcbio.variation import bedutils, effects, ploidy, population, vcfutils\n'), ((8064, 8100), 'toolz.groupby', 'tz.groupby', (['"""bam"""', 'raw_coverage_cnns'], {}), "('bam', raw_coverage_cnns)\n", (8074, 8100), True, 'import toolz as tz\n'), ((8472, 8545), 'toolz.groupby', 'tz.groupby', (['"""bam"""', "[x for x in coverage_cnns if x['itype'] == 'evaluate']"], {}), "('bam', [x for x in coverage_cnns if x['itype'] == 'evaluate'])\n", (8482, 8545), True, 'import toolz as tz\n'), ((22589, 22613), 'bcbio.pipeline.datadict.get_sample_name', 'dd.get_sample_name', (['data'], {}), '(data)\n', (22607, 22613), True, 'from bcbio.pipeline import datadict as dd\n'), ((23277, 23308), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (23292, 23308), False, 'import os\n')]
|
import tensorflow as tf
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
import numpy as np
import h5py
model = InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None)
graph = tf.get_default_graph()
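# The default graph is captured here and re-entered in predict_nparray() below;
# this is the usual TF1.x/Keras workaround so model.predict() keeps working when
# it is called from a different thread (e.g. a web-server worker) than the one
# that loaded the model.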
def pil2array(pillow_img):
return np.array(pillow_img.getdata(), np.float32).reshape(pillow_img.size[1], pillow_img.size[0], 3)
def predict_pil(pillow_img):
img_array = pil2array(pillow_img)
return predict_nparray(img_array)
def predict_nparray(img_as_array):
global graph
img_batch_as_array = np.expand_dims(img_as_array, axis=0)
img_batch_as_array = preprocess_input(img_batch_as_array)
with graph.as_default():
preds = model.predict(img_batch_as_array)
decoded_preds = decode_predictions(preds, top=3)[0]
    predictions = [{'label': label, 'descr': description, 'prob': probability} for label, description, probability in decoded_preds]
return predictions
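
# Example call (a rough sketch; 'cat.jpg' is a placeholder path and 299x299 is
# the default input size expected by InceptionV3 with include_top=True):
#   img = image.load_img('cat.jpg', target_size=(299, 299))
#   print(predict_pil(img))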
|
[
"keras.applications.inception_v3.preprocess_input",
"keras.applications.inception_v3.decode_predictions",
"numpy.expand_dims",
"keras.applications.inception_v3.InceptionV3",
"tensorflow.get_default_graph"
] |
[((197, 287), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'include_top': '(True)', 'weights': '"""imagenet"""', 'input_tensor': 'None', 'input_shape': 'None'}), "(include_top=True, weights='imagenet', input_tensor=None,\n input_shape=None)\n", (208, 287), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n'), ((292, 314), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (312, 314), True, 'import tensorflow as tf\n'), ((633, 669), 'numpy.expand_dims', 'np.expand_dims', (['img_as_array'], {'axis': '(0)'}), '(img_as_array, axis=0)\n', (647, 669), True, 'import numpy as np\n'), ((695, 731), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['img_batch_as_array'], {}), '(img_batch_as_array)\n', (711, 731), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n'), ((833, 865), 'keras.applications.inception_v3.decode_predictions', 'decode_predictions', (['preds'], {'top': '(3)'}), '(preds, top=3)\n', (851, 865), False, 'from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions\n')]
|
import numpy as np
from typing import Any, Iterable, Tuple
from .ext import EnvSpec
from .parallel import ParallelEnv
from ..prelude import Action, Array, State
from ..utils.rms import RunningMeanStd
class ParallelEnvWrapper(ParallelEnv[Action, State]):
def __init__(self, penv: ParallelEnv) -> None:
self.penv = penv
def close(self) -> None:
self.penv.close()
def reset(self) -> Array[State]:
return self.penv.reset()
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[State], Array[float], Array[bool], Array[Any]]:
return self.penv.step(actions)
def seed(self, seeds: Iterable[int]) -> None:
self.penv.seed(seeds)
@property
def num_envs(self) -> int:
return self.penv.num_envs
@property
def spec(self) -> EnvSpec:
return self.penv.spec
def extract(self, states: Iterable[State]) -> Array:
return self.penv.extract(states)
class FrameStackParallel(ParallelEnvWrapper):
"""Parallel version of atari_wrappers.FrameStack
"""
def __init__(self, penv: ParallelEnv, nstack: int = 4, dtype: type = np.float32) -> None:
super().__init__(penv)
idx = 0
shape = self.penv.state_dim
for dim in shape:
if dim == 1:
idx += 1
else:
break
self.shape = (nstack, *self.penv.state_dim[idx:])
self.obs = np.zeros((self.num_envs, *self.shape), dtype=dtype)
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array, Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
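        # Roll the frame axis by one so the oldest frame moves to the last slot,
        # which is then overwritten with the newest observation below.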
self.obs = np.roll(self.obs, shift=-1, axis=1)
for i, _ in filter(lambda t: t[1], enumerate(done)):
self.obs[i] = 0.0
self.obs[:, -1] = self.extract(state).squeeze()
return (self.obs, reward, done, info)
def reset(self) -> Array[State]:
self.obs.fill(0)
state = self.penv.reset()
self.obs[:, -1] = self.extract(state).squeeze()
return self.obs
@property
def state_dim(self) -> Tuple[int, ...]:
return self.shape
class NormalizeObs(ParallelEnvWrapper[Action, Array[float]]):
def __init__(self, penv: ParallelEnv, obs_clip: float = 10.) -> None:
super().__init__(penv)
self.obs_clip = obs_clip
self._rms = RunningMeanStd(shape=self.state_dim)
self.training_mode = True
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[Array[float]], Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
return self._filter_obs(state), reward, done, info
def _filter_obs(self, obs: Array[Array[float]]) -> Array[Array[float]]:
if self.training_mode:
self._rms.update(obs) # type: ignore
obs = np.clip((obs - self._rms.mean) / self._rms.std(), -self.obs_clip, self.obs_clip)
return obs
def reset(self) -> Array[Array[float]]:
obs = self.penv.reset()
return self._filter_obs(obs)
class NormalizeReward(ParallelEnvWrapper[Action, State]):
def __init__(self, penv: ParallelEnv, reward_clip: float = 10., gamma: float = 0.99) -> None:
super().__init__(penv)
self.reward_clip = reward_clip
self.gamma = gamma
self._rms = RunningMeanStd(shape=())
self.ret = np.zeros(self.num_envs)
def step(
self,
actions: Iterable[Action]
) -> Tuple[Array[State], Array[float], Array[bool], Array[Any]]:
state, reward, done, info = self.penv.step(actions)
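        # Keep a per-env running discounted return and scale rewards by its
        # standard deviation (as in VecNormalize-style wrappers); the return is
        # zeroed below for envs whose episode just ended.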
self.ret = self.ret * self.gamma + reward
self._rms.update(self.ret)
reward = np.clip(reward / self._rms.std(), -self.reward_clip, self.reward_clip)
self.ret[done] = 0.0
return state, reward, done, info
def reset(self) -> Array[State]:
self.ret = np.zeros(self.num_envs)
return self.penv.reset()
|
[
"numpy.zeros",
"numpy.roll"
] |
[((1453, 1504), 'numpy.zeros', 'np.zeros', (['(self.num_envs, *self.shape)'], {'dtype': 'dtype'}), '((self.num_envs, *self.shape), dtype=dtype)\n', (1461, 1504), True, 'import numpy as np\n'), ((1717, 1752), 'numpy.roll', 'np.roll', (['self.obs'], {'shift': '(-1)', 'axis': '(1)'}), '(self.obs, shift=-1, axis=1)\n', (1724, 1752), True, 'import numpy as np\n'), ((3472, 3495), 'numpy.zeros', 'np.zeros', (['self.num_envs'], {}), '(self.num_envs)\n', (3480, 3495), True, 'import numpy as np\n'), ((3996, 4019), 'numpy.zeros', 'np.zeros', (['self.num_envs'], {}), '(self.num_envs)\n', (4004, 4019), True, 'import numpy as np\n')]
|
import imutils
import cv2
import numpy as np
import math
from math import sqrt
def find_robot_orientation(image):
robot = {}
robot['angle'] = []
robot['direction'] = []
robotLower = (139, 227, 196)
robotUpper = (255, 255, 255)
distances = []
# img = cv2.imread('all_color_terrain_with_robot.png')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, robotLower, robotUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
# find contours in thresholded image, then grab the largest
# one
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
c = max(cnts, key=cv2.contourArea)
M = cv2.moments(c)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# determine the most extreme points along the contour
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
print(extBot, extLeft, extRight, extTop, (cx, cy))
    # Handle duplicate extreme points: the marker is a triangle, so the same
    # vertex can be reported as two extreme points (e.g. both bottom and right),
    # which would skew the farthest-point test below; collapse such duplicates to the centroid.
if abs(extLeft[0] - extRight[0]) < 10 and abs(extLeft[1] - extRight[1]) < 10:
extRight = (cx, cy)
if abs(extLeft[0] - extTop[0]) < 10 and abs(extLeft[1] - extTop[1]) < 10:
extTop = (cx, cy)
if abs(extLeft[0] - extBot[0]) < 10 and abs(extLeft[1] - extBot[1]) < 10:
extBot = (cx, cy)
if abs(extBot[0] - extRight[0]) < 10 and abs(extBot[1] - extRight[1]) < 10:
extRight = (cx, cy)
if abs(extTop[0] - extRight[0]) < 10 and abs(extTop[1] - extRight[1]) < 10:
extRight = (cx, cy)
# draw the outline of the object, then draw each of the
# extreme points, where the left-most is red, right-most
# is green, top-most is blue, and bottom-most is teal
cv2.drawContours(image, [c], -1, (0, 255, 255), 2)
cv2.circle(image, (cx, cy), 7, (255, 0, 255), -1)
cv2.circle(image, extLeft, 6, (0, 0, 255), -1)
cv2.circle(image, extRight, 6, (0, 255, 0), -1)
cv2.circle(image, extTop, 6, (255, 0, 0), -1)
cv2.circle(image, extBot, 6, (255, 255, 0), -1)
# create list of extreme points
extreme_points = (extLeft, extRight, extTop, extBot)
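    # For each extreme point, measure its combined distance to the four extreme
    # points; the vertex farthest from the others is the apex of the isosceles
    # triangle marker and points in the robot's direction of motion.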
for i in range(0, len(extreme_points)):
dist = sqrt((extreme_points[i][0] - extLeft[0]) ** 2 +
(extreme_points[i][1] - extLeft[1]) ** 2 +
(extreme_points[i][0] - extRight[0]) ** 2 +
(extreme_points[i][1] - extRight[1]) ** 2 +
(extreme_points[i][0] - extBot[0]) ** 2 +
(extreme_points[i][1] - extBot[1]) ** 2 +
(extreme_points[i][0] - extTop[0]) ** 2 +
(extreme_points[i][1] - extTop[1]) ** 2)
distances += [dist]
    # argmax picks the vertex farthest from the other extreme points (the apex)
    index_max = np.argmax(distances)
    print(distances)
    top_triangle = extreme_points[index_max]
    print(top_triangle)
center = (cx, cy)
# Create vector containing the top of the isosceles triangle
# and the center of the contour that was found
centerline_points = [center, top_triangle]
# draw a line through the triangle in the direction of the robot motion
rows, cols = image.shape[:2]
[vx, vy, x, y] = cv2.fitLine(np.float32(centerline_points), cv2.DIST_L2, 0, 0.01, 0.01)
lefty = int((-x * vy / vx) + y)
righty = int(((cols - x) * vy / vx) + y)
cv2.line(image, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)
# find the angle of the robot
rad = math.atan2(vx, vy)
angle = math.degrees(rad)
'''
# fix the angle such that the tip pointing up is 0deg,
# movement to the right of that is +deg
# movement to the left is -deg
# angle measurements are from -180:180
'''
if top_triangle[0] < center[0]:
angle = -angle
if top_triangle[0] > center[0]:
angle = 180 - angle
angle = round(angle)
print(angle)
cv2.putText(image, str(angle), (int(cx) - 50, int(cy) - 50), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 2,
cv2.LINE_AA)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
return angle, center
'''
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', img)
cv2.destroyAllWindows()
'''
|
[
"cv2.drawContours",
"cv2.inRange",
"cv2.erode",
"cv2.line",
"math.degrees",
"numpy.argmax",
"imutils.is_cv2",
"cv2.imshow",
"math.sqrt",
"cv2.circle",
"math.atan2",
"cv2.cvtColor",
"cv2.moments",
"cv2.dilate",
"cv2.waitKey",
"numpy.float32"
] |
[((336, 374), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (348, 374), False, 'import cv2\n'), ((386, 426), 'cv2.inRange', 'cv2.inRange', (['hsv', 'robotLower', 'robotUpper'], {}), '(hsv, robotLower, robotUpper)\n', (397, 426), False, 'import cv2\n'), ((438, 473), 'cv2.erode', 'cv2.erode', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (447, 473), False, 'import cv2\n'), ((485, 521), 'cv2.dilate', 'cv2.dilate', (['mask', 'None'], {'iterations': '(2)'}), '(mask, None, iterations=2)\n', (495, 521), False, 'import cv2\n'), ((899, 913), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (910, 913), False, 'import cv2\n'), ((2154, 2204), 'cv2.drawContours', 'cv2.drawContours', (['image', '[c]', '(-1)', '(0, 255, 255)', '(2)'], {}), '(image, [c], -1, (0, 255, 255), 2)\n', (2170, 2204), False, 'import cv2\n'), ((2209, 2258), 'cv2.circle', 'cv2.circle', (['image', '(cx, cy)', '(7)', '(255, 0, 255)', '(-1)'], {}), '(image, (cx, cy), 7, (255, 0, 255), -1)\n', (2219, 2258), False, 'import cv2\n'), ((2263, 2309), 'cv2.circle', 'cv2.circle', (['image', 'extLeft', '(6)', '(0, 0, 255)', '(-1)'], {}), '(image, extLeft, 6, (0, 0, 255), -1)\n', (2273, 2309), False, 'import cv2\n'), ((2314, 2361), 'cv2.circle', 'cv2.circle', (['image', 'extRight', '(6)', '(0, 255, 0)', '(-1)'], {}), '(image, extRight, 6, (0, 255, 0), -1)\n', (2324, 2361), False, 'import cv2\n'), ((2366, 2411), 'cv2.circle', 'cv2.circle', (['image', 'extTop', '(6)', '(255, 0, 0)', '(-1)'], {}), '(image, extTop, 6, (255, 0, 0), -1)\n', (2376, 2411), False, 'import cv2\n'), ((2416, 2463), 'cv2.circle', 'cv2.circle', (['image', 'extBot', '(6)', '(255, 255, 0)', '(-1)'], {}), '(image, extBot, 6, (255, 255, 0), -1)\n', (2426, 2463), False, 'import cv2\n'), ((3148, 3168), 'numpy.argmax', 'np.argmax', (['distances'], {}), '(distances)\n', (3157, 3168), True, 'import numpy as np\n'), ((3732, 3795), 'cv2.line', 'cv2.line', (['image', '(cols - 1, righty)', '(0, lefty)', '(0, 255, 0)', '(2)'], {}), '(image, (cols - 1, righty), (0, lefty), (0, 255, 0), 2)\n', (3740, 3795), False, 'import cv2\n'), ((3840, 3858), 'math.atan2', 'math.atan2', (['vx', 'vy'], {}), '(vx, vy)\n', (3850, 3858), False, 'import math\n'), ((3871, 3888), 'math.degrees', 'math.degrees', (['rad'], {}), '(rad)\n', (3883, 3888), False, 'import math\n'), ((4428, 4454), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'image'], {}), "('Image', image)\n", (4438, 4454), False, 'import cv2\n'), ((4459, 4473), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4470, 4473), False, 'import cv2\n'), ((822, 838), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (836, 838), False, 'import imutils\n'), ((2617, 2985), 'math.sqrt', 'sqrt', (['((extreme_points[i][0] - extLeft[0]) ** 2 + (extreme_points[i][1] - extLeft\n [1]) ** 2 + (extreme_points[i][0] - extRight[0]) ** 2 + (extreme_points\n [i][1] - extRight[1]) ** 2 + (extreme_points[i][0] - extBot[0]) ** 2 + \n (extreme_points[i][1] - extBot[1]) ** 2 + (extreme_points[i][0] -\n extTop[0]) ** 2 + (extreme_points[i][1] - extTop[1]) ** 2)'], {}), '((extreme_points[i][0] - extLeft[0]) ** 2 + (extreme_points[i][1] -\n extLeft[1]) ** 2 + (extreme_points[i][0] - extRight[0]) ** 2 + (\n extreme_points[i][1] - extRight[1]) ** 2 + (extreme_points[i][0] -\n extBot[0]) ** 2 + (extreme_points[i][1] - extBot[1]) ** 2 + (\n extreme_points[i][0] - extTop[0]) ** 2 + (extreme_points[i][1] - extTop\n [1]) ** 2)\n', (2621, 2985), False, 'from math import sqrt\n'), ((3588, 3617), 
'numpy.float32', 'np.float32', (['centerline_points'], {}), '(centerline_points)\n', (3598, 3617), True, 'import numpy as np\n')]
|
import numpy as np
def load_mnist():
# the data, shuffled and split between train and test sets
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.concatenate((x_train, x_test))
y = np.concatenate((y_train, y_test))
x = x.reshape(-1, 28, 28, 1).astype('float32')
x = x/255.
print('MNIST:', x.shape)
return x, y
def load_usps(data_path='./data/usps'):
import os
if not os.path.exists(data_path+'/usps_train.jf'):
if not os.path.exists(data_path+'/usps_train.jf.gz'):
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
os.system('gunzip %s/usps_train.jf.gz' % data_path)
os.system('gunzip %s/usps_test.jf.gz' % data_path)
with open(data_path + '/usps_train.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_train, labels_train = data[:, 1:], data[:, 0]
with open(data_path + '/usps_test.jf') as f:
data = f.readlines()
data = data[1:-1]
data = [list(map(float, line.split())) for line in data]
data = np.array(data)
data_test, labels_test = data[:, 1:], data[:, 0]
x = np.concatenate((data_train, data_test)).astype('float32')
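    # The .jf pixel values appear to span [0, 2]; halving maps the images to [0, 1].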
x /= 2.0
x = x.reshape([-1, 16, 16, 1])
y = np.concatenate((labels_train, labels_test))
print('USPS samples', x.shape)
return x, y
|
[
"os.path.exists",
"keras.datasets.mnist.load_data",
"numpy.array",
"numpy.concatenate",
"os.system"
] |
[((182, 199), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (197, 199), False, 'from keras.datasets import mnist\n'), ((209, 242), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test)'], {}), '((x_train, x_test))\n', (223, 242), True, 'import numpy as np\n'), ((251, 284), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test)'], {}), '((y_train, y_test))\n', (265, 284), True, 'import numpy as np\n'), ((1089, 1103), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1097, 1103), True, 'import numpy as np\n'), ((1332, 1346), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1340, 1346), True, 'import numpy as np\n'), ((1523, 1566), 'numpy.concatenate', 'np.concatenate', (['(labels_train, labels_test)'], {}), '((labels_train, labels_test))\n', (1537, 1566), True, 'import numpy as np\n'), ((463, 507), 'os.path.exists', 'os.path.exists', (["(data_path + '/usps_train.jf')"], {}), "(data_path + '/usps_train.jf')\n", (477, 507), False, 'import os\n'), ((804, 855), 'os.system', 'os.system', (["('gunzip %s/usps_train.jf.gz' % data_path)"], {}), "('gunzip %s/usps_train.jf.gz' % data_path)\n", (813, 855), False, 'import os\n'), ((864, 914), 'os.system', 'os.system', (["('gunzip %s/usps_test.jf.gz' % data_path)"], {}), "('gunzip %s/usps_test.jf.gz' % data_path)\n", (873, 914), False, 'import os\n'), ((522, 569), 'os.path.exists', 'os.path.exists', (["(data_path + '/usps_train.jf.gz')"], {}), "(data_path + '/usps_train.jf.gz')\n", (536, 569), False, 'import os\n'), ((581, 692), 'os.system', 'os.system', (["('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s'\n % data_path)"], {}), "(\n 'wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s'\n % data_path)\n", (590, 692), False, 'import os\n'), ((695, 805), 'os.system', 'os.system', (["('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s'\n % data_path)"], {}), "(\n 'wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s'\n % data_path)\n", (704, 805), False, 'import os\n'), ((1409, 1448), 'numpy.concatenate', 'np.concatenate', (['(data_train, data_test)'], {}), '((data_train, data_test))\n', (1423, 1448), True, 'import numpy as np\n')]
|
"""
This module defines a class called "balto_gui" that can be used to
create a graphical user interface (GUI) for downloading data from
OpenDAP servers from and into a Jupyter notebook. If used with Binder,
this GUI runs in a browser window and does not require the user to
install anything on their computer. However, this module should be
included in the same directory as the Jupyter notebook.
"""
#------------------------------------------------------------------------
#
# Copyright (C) 2020. <NAME>
#
#------------------------------------------------------------------------
from ipyleaflet import Map, basemaps, FullScreenControl
from ipyleaflet import MeasureControl, Rectangle
## from ipyleaflet import ScaleControl # (doesn't work)
from traitlets import Tuple
## import ipyleaflet as ipyl
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, HTML
## from IPython.core.display import display
## from IPython.lib.display import display
import pydap.client # (for open_url, etc.)
import requests # (used by get_filenames() )
import json
import datetime # (used by get_duration() )
import copy
import numpy as np
import balto_plot as bp
#------------------------------------------------------------------------
#
# class balto_gui
# __init__()
# pix_str()
# show_gui()
# make_acc_gui()
# make_tab_gui()
# make_data_panel()
# reset_data_panel()
# make_map_panel()
# make_dates_panel()
# make_download_panel()
# make_prefs_panel()
# #--------------------------
# get_map_bounds()
# replace_map_bounds()
# replace_map_bounds2()
# update_map_bounds()
# zoom_out_to_new_bounds()
# --------------------------
# get_url_dir_filenames()
# update_filename_list()
# get_opendap_file_url()
# open_dataset()
# update_data_panel()
# --------------------------
# update_var_info()
# get_all_var_shortnames()
# get_all_var_longnames()
# get_all_var_units()
# --------------------------
# get_var_shortname()
# get_var_longname()
# get_var_units()
# get_var_shape()
# get_var_dimensions()
# get_var_dtype()
# get_var_attributes()
# get_var_time_attributes()
# -------------------------------
# update_datetime_panel()
# get_years_from_time_since()
# clear_datetime_notes()
# append_datetime_notes()
# list_to_string()
# -------------------------------
# pad_with_zeros()
# get_actual_time_units()
# get_time_delta_str()
# get_datetime_obj_from_str()
# get_datetime_obj_from_one_str()
# get_start_datetime_obj()
# get_end_datetime_obj()
# get_dt_from_datetime_str()
# split_datetime_str()
# split_date_str()
# split_time_str()
# get_datetime_from_time_since()
# get_time_since_from_datetime()
# get_month_difference()
# -------------------------------
# get_new_time_index_range()
# get_new_lat_index_range()
# get_new_lon_index_range()
# -------------------------------
# get_duration() ## not used yet
# ----------------------------
# get_download_format()
# clear_download_log()
# append_download_log()
# print_user_choices()
# download_data()
# show_grid()
# -------------------------------
# get_opendap_package() # (in prefs panel)
# ----------------------------
# get_abbreviated_var_name()
# get_possible_svo_names()
#
#------------------------------
# Example GES DISC opendap URL
#------------------------------
# https://gpm1.gesdisc.eosdis.nasa.gov/opendap/GPM_L3/GPM_3IMERGHHE.05/2014/091/
# 3B-HHR-E.MS.MRG.3IMERG.20140401-S000000-E002959.0000.V05B.HDF5.nc
# ?HQprecipitation[1999:2200][919:1049],lon[1999:2200],lat[919:1049]
#------------------------------------------------------------------------
class balto_gui:
#--------------------------------------------------------------------
def __init__(self):
self.version = '0.5'
self.user_var = None
self.default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.timeout_secs = 60 # (seconds)
#----------------------------------------------------------
# "full_box_width" = (label_width + widget_width)
# gui_width = left_label_width + mid_width + button_width
# The 2nd, label + widget box, is referred to as "next".
# (2 * half_widget_width) + left_label + next_label = 540
#----------------------------------------------------------
self.gui_width = 680
self.left_label_width = 120
self.next_label_width = 50
self.all_label_width = 170
self.full_box_width = 540
self.widget_width = (self.full_box_width - self.left_label_width)
# self.half_widget_width = (self.full_box_width - self.all_label_width)/2
# self.half_widget_width = 183
self.left_widget_width = 230
self.next_widget_width = 136
self.left_box_width = (self.left_label_width + self.left_widget_width)
self.next_box_width = (self.next_label_width + self.next_widget_width)
self.button_width = 70 # big enough for "Reset"
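        # With the defaults above: widget_width = 540 - 120 = 420,
        # left_box_width = 120 + 230 = 350, next_box_width = 50 + 136 = 186.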
#-----------------------------------------------------
self.map_width = (self.gui_width - 40)
self.map_height = 230 # was 250
self.map_center_init = (20.0, 0)
self.add_fullscreen_control = True
self.add_scale_control = False # (doesn't work)
self.add_measure_control = True
#-----------------------------------------------------
self.gui_width_px = self.pix_str( self.gui_width )
self.map_width_px = self.pix_str( self.map_width )
self.map_height_px = self.pix_str( self.map_height )
#-----------------------------------------------------
self.date_width_px = '240px'
self.time_width_px = '180px'
self.hint_width_px = '120px'
#---------------------------------------------------
self.log_box_width_px = self.pix_str( self.full_box_width )
self.log_box_height_px = '200px'
#---------------------------------------------------
# These styles are used to control width of labels
# self.init_label_style is the initial default.
#---------------------------------------------------
llw_px = self.pix_str( self.left_label_width )
nlw_px = self.pix_str( self.next_label_width )
self.init_label_style = {'description_width': 'initial'}
self.left_label_style = {'description_width': llw_px}
self.next_label_style = {'description_width': nlw_px}
self.date_style = {'description_width': '70px'}
self.time_style = {'description_width': '70px'}
# __init__()
#--------------------------------------------------------------------
def pix_str(self, num):
return str(num) + 'px'
#--------------------------------------------------------------------
def show_gui(self, ACC_STYLE=False, SHOW_MAP=True):
#------------------------------------------------------
        # A problem with ipyleaflet (the library used for the map
        # panel) can prevent any part of the GUI from being displayed.
# The SHOW_MAP flag helps to test for this problem.
#------------------------------------------------------
#------------------------------------
# Create & display the complete GUI
#-----------------------------------
if (ACC_STYLE):
self.make_acc_gui()
else:
# Use the TAB style
self.make_tab_gui( SHOW_MAP=SHOW_MAP)
gui_output = widgets.Output()
display(self.gui, gui_output)
# show_gui()
#--------------------------------------------------------------------
def make_acc_gui(self):
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel()
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=None causes all cells to be collapsed
#-------------------------------------------------------
acc = widgets.Accordion( children=[p0, p1, p2, p3, p4],
selected_index=None,
layout=Layout(width=gui_width_px) )
acc.set_title(0, p0_title)
acc.set_title(1, p1_title)
acc.set_title(2, p2_title)
acc.set_title(3, p3_title)
acc.set_title(4, p4_title)
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
        pad = self.get_padding(1, HORIZONTAL=False)  # 1 line
head = widgets.HTML(value=f"<b><font size=4>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
# self.gui = widgets.VBox([pad, head, acc]) # (top padding
self.gui = widgets.VBox([head, acc]) # (no top padding)
# make_acc_gui()
#--------------------------------------------------------------------
def make_tab_gui(self, SHOW_MAP=True):
#---------------------------------------------------------
# If there is a problem with ipyleaflet, it can prevent
# any part of the GUI from being displayed. You can
# set SHOW_MAP=False to remove the map to test for this.
#---------------------------------------------------------
gui_width_px = self.gui_width_px
self.make_data_panel()
self.make_map_panel( SHOW_MAP=SHOW_MAP )
self.make_datetime_panel()
self.make_download_panel()
self.make_prefs_panel()
#---------------------------
p0 = self.data_panel
p1 = self.map_panel
p2 = self.datetime_panel
p3 = self.download_panel
p4 = self.prefs_panel
#---------------------------
p0_title = 'Browse Data'
p1_title = 'Spatial Extent'
p2_title = 'Date Range'
p3_title = 'Download Data'
p4_title = 'Settings'
#-------------------------------------------------------
# selected_index=0 shows Browse Data panel
#-------------------------------------------------------
tab = widgets.Tab( children=[p0, p1, p2, p3, p4],
selected_index=0,
layout=Layout(width=gui_width_px) )
tab.set_title(0, p0_title)
tab.set_title(1, p1_title)
tab.set_title(2, p2_title)
tab.set_title(3, p3_title)
tab.set_title(4, p4_title)
#### tab.titles = [str(i) for i in range(len(children))]
# title = 'BALTO User Interface'
# L_tags = "<b><font size=5>"
# R_tags = "</font></b>"
# heading = (L_tags + title + R_tags)
        pad = self.get_padding(1, HORIZONTAL=False)  # 1 line
head = widgets.HTML(value=f"<b><font size=5>BALTO User Interface</font></b>")
# head = widgets.Label('BALTO User Interface')
## self.gui = widgets.VBox([pad, head, acc])
self.gui = widgets.VBox([head, tab]) # (no padding above)
# make_tab_gui()
#--------------------------------------------------------------------
def get_padding(self, n, HORIZONTAL=True):
#-------------------------------
# Get some white space padding
#-------------------------------
if (HORIZONTAL):
#--------------------------------
# Use overloaded multiplication
#--------------------------------
            ## s = ('&nbsp;' * n)  # overloaded multiplication
            s = "<p>" + ('&nbsp;' * n) + "</p>"
pad = widgets.HTML( value=s )
else:
s = ("<br>" * n)
pad = widgets.HTML( value=s )
return pad
# get_padding()
#--------------------------------------------------------------------
def make_data_panel(self):
#-----------------------------------
# Browse data on an OpenDAP server
#-----------------------------------
left_style = self.left_label_style
next_style = self.next_label_style
full_width_px = self.pix_str( self.full_box_width )
left_width_px = self.pix_str( self.left_box_width )
next_width_px = self.pix_str( self.next_box_width )
btn_width_px = self.pix_str( self.button_width )
#---------------------------------------------------------
o1 = widgets.Text(description='OpenDAP URL Dir:',
value=self.default_url_dir,
disabled=False, style=left_style,
layout=Layout(width=full_width_px))
b1 = widgets.Button(description="Go", layout=Layout(width=btn_width_px))
o2 = widgets.Dropdown( description='Filename:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
#------------------------------------------------------------------
oL = widgets.Text(description='Long name:', style=left_style,
value='', layout=Layout(width=full_width_px) )
## o3 = widgets.Select( description='Variable:',
o3 = widgets.Dropdown( description='Variable:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=left_width_px) )
o4 = widgets.Text(description='Units:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o5 = widgets.Text(description='Dimensions:', style=left_style,
value='', layout=Layout(width=left_width_px) )
o6 = widgets.Text(description='Shape:', style=next_style,
value='', layout=Layout(width=next_width_px) )
#------------------------------------------------------------------
o7 = widgets.Text(description='Data type:', style=left_style,
value='', layout=Layout(width=full_width_px) )
o8 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=left_style,
layout=Layout(width=full_width_px) )
o9 = widgets.Text(description='Status:', style=left_style,
value='Ready.', layout=Layout(width=full_width_px) )
b2 = widgets.Button(description="Reset", layout=Layout(width=btn_width_px))
## pd = widgets.HTML((' ' * 1)) # for padding
#-------------------------------
# Arrange widgets in the panel
#-------------------------------
url_box = widgets.HBox([o1, b1]) # directory + Go button
stat_box = widgets.HBox([o9, b2]) # status + Reset button
name_box = widgets.VBox([o3, o5])
## pad_box = widgets.VBox([pd, pd])
unit_box = widgets.VBox([o4, o6])
mid_box = widgets.HBox([name_box, unit_box])
## mid_box = widgets.HBox([name_box, pad_box, unit_box])
panel = widgets.VBox([url_box, o2, oL, mid_box, o7, o8, stat_box])
self.data_url_dir = o1 # on an OpenDAP server
self.data_filename = o2
self.data_var_long_name = oL
self.data_var_name = o3 # short_name
self.data_var_units = o4
self.data_var_dims = o5
self.data_var_shape = o6
self.data_var_type = o7
self.data_var_atts = o8
self.data_status = o9
self.data_panel = panel
#-----------------
# Event handlers
#-----------------------------------------------------
# Note: NEED to set names='value' here. If names
# keyword is omitted, only works intermittently.
#------------------------------------------------------------
# "on_click" handler function is passed b1 as argument.
# "observe" handler function is passed "change", which
# is a dictionary, as argument. See Traitlet events.
#------------------------------------------------------------
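        # For example (hypothetical handler names), the two calling
        # conventions described above look like:
        #     def on_go(button):             # on_click passes the Button widget
        #         ...
        #     def on_change(change):         # observe passes a dict-like object with
        #         new_value = change['new']  # keys 'name', 'old', 'new', 'owner', 'type'
        #         ...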
b1.on_click( self.update_filename_list )
b2.on_click( self.reset_data_panel )
o2.observe( self.update_data_panel, names=['options','value'] )
o3.observe( self.update_var_info, names=['options', 'value'] )
## o3.observe( self.update_var_info, names='value' )
## o2.observe( self.update_data_panel, names='All' )
## o3.observe( self.update_var_info, names='All' )
#-------------------------------------------------------
# It turned out this wasn't an issue, but interesting.
#-------------------------------------------------------
# Note: Method functions have type "method" instead
# of "function" and therefore can't be passed
# directly to widget handlers like "on_click".
# But we can use the "__func__" attribute.
#-------------------------------------------------------
# b1.on_click( self.update_filename_list.__func__ )
# o2.observe( self.update_data_panel.__func__ )
# o3.observe( self.update_var_info.__func__, names='value' )
# make_data_panel()
#--------------------------------------------------------------------
def reset_data_panel(self, caller_obj=None, KEEP_DIR=False):
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Reset" button beside the status box.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
if not(KEEP_DIR):
self.data_url_dir.value = self.default_url_dir
self.data_filename.options = ['']
self.data_var_name.options = [''] # short names
self.data_var_long_name.value = ''
self.data_var_units.value = ''
self.data_var_shape.value = ''
self.data_var_dims.value = ''
self.data_var_type.value = ''
self.data_var_atts.options = ['']
self.data_status.value = 'Ready.'
#------------------------------------------
self.download_log.value = ''
# reset_data_panel()
#--------------------------------------------------------------------
def make_map_panel(self, SHOW_MAP=True):
map_width_px = self.map_width_px
map_height_px = self.map_height_px
btn_width_px = self.pix_str( self.button_width )
#--------------------------------------------------
# bm_style = {'description_width': '70px'} # for top
bbox_style = {'description_width': '100px'}
bbox_width_px = '260px'
#---------------------------------------
        # Create the map widget with ipyleaflet
# Center lat 20 looks better than 0.
#---------------------------------------
map_center = self.map_center_init # (lat, lon)
m = Map(center=map_center, zoom=1,
layout=Layout(width=map_width_px, height=map_height_px))
#----------------------
# Add more controls ?
#----------------------
if (self.add_fullscreen_control):
m.add_control( FullScreenControl( position='topright' ) )
#---------------------------------------------------------
# Cannot be imported. (2020-05-18)
# if (self.add_scale_control):
# m.add_control(ScaleControl( position='bottomleft' ))
#---------------------------------------------------------
if (self.add_measure_control):
measure = MeasureControl( position='bottomright',
active_color = 'orange',
primary_length_unit = 'kilometers')
m.add_control(measure)
measure.completed_color = 'red'
## measure.add_length_unit('yards', 1.09361, 4)
## measure.secondary_length_unit = 'yards'
## measure.add_area_unit('sqyards', 1.19599, 4)
## measure.secondary_area_unit = 'sqyards'
#-----------------------------------------------------
# Does "step=0.01" restrict accuracy of selection ??
#-----------------------------------------------------
w1 = widgets.BoundedFloatText(
value=-180, step=0.01, min=-360, max=360.0,
description='West edge lon:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w2 = widgets.BoundedFloatText(
value=180, step=0.01, min=-360, max=360.0,
description='East edge lon:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w3 = widgets.BoundedFloatText(
value=90, min=-90, max=90.0, step=0.01,
# description='North latitude:',
description='North edge lat:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
w4 = widgets.BoundedFloatText(
value=-90, min=-90, max=90.0, step=0.01,
# description='South latitude:',
description='South edge lat:',
disabled=False, style=bbox_style,
layout=Layout(width=bbox_width_px) )
pd = widgets.HTML((' ' * 2)) # for padding
b1 = widgets.Button(description="Update",
layout=Layout(width=btn_width_px))
b2 = widgets.Button(description="Reset",
layout=Layout(width=btn_width_px))
#---------------------
# Choose the basemap
#---------------------
options = self.get_basemap_list()
bm = widgets.Dropdown( description='Base map:',
options=options, value=options[0],
disabled=False, style=bbox_style,
layout=Layout(width='360px') )
#-----------------------------------
# Arrange the widgets in the panel
#-----------------------------------
lons = widgets.VBox([w1, w2])
lats = widgets.VBox([w3, w4])
pads = widgets.VBox([pd, pd])
btns = widgets.VBox([b1, b2])
bbox = widgets.HBox( [lons, lats, pads, btns])
#------------------------------------------------------
        # Encountered a problem with ipyleaflet (used for the
        # map panel) that prevented any part of the GUI from
        # being displayed.
# The SHOW_MAP flag helps to test for this problem.
#------------------------------------------------------
if (SHOW_MAP):
panel = widgets.VBox( [m, bbox, bm] )
else:
panel = widgets.VBox( [bbox, bm] )
self.map_window = m
self.map_minlon = w1
self.map_maxlon = w2
self.map_maxlat = w3
self.map_minlat = w4
self.map_basemap = bm
self.map_panel = panel
## self.map_bounds = (-180, -90, 180, 90)
#-----------------
# Event handlers
#-----------------
bm.observe( self.change_base_map, names=['options','value'] )
m.on_interaction( self.replace_map_bounds )
m.observe( self.zoom_out_to_new_bounds, 'bounds' )
m.new_bounds = None # (used for "zoom to fit")
b1.on_click( self.update_map_bounds )
b2.on_click( self.reset_map_panel )
# make_map_panel()
#--------------------------------------------------------------------
def get_basemap_list(self):
basemap_list = [
'OpenStreetMap.Mapnik', 'OpenStreetMap.HOT', 'OpenTopoMap',
'Esri.WorldStreetMap', 'Esri.DeLorme', 'Esri.WorldTopoMap',
'Esri.WorldImagery', 'Esri.NatGeoWorldMap',
'NASAGIBS.ModisTerraTrueColorCR', 'NASAGIBS.ModisTerraBands367CR',
'NASAGIBS.ModisTerraBands721CR', 'NASAGIBS.ModisAquaTrueColorCR',
'NASAGIBS.ModisAquaBands721CR', 'NASAGIBS.ViirsTrueColorCR',
'NASAGIBS.ViirsEarthAtNight2012',
'Strava.All', 'Strava.Ride', 'Strava.Run', 'Strava.Water',
'Strava.Winter', 'Stamen.Terrain', 'Stamen.Toner',
'Stamen.Watercolor' ]
#---------------------------------
# 'HikeBike.HikeBike', 'MtbMap'
# 'OpenStreetMap.BlackAndWhite',
# 'OpenStreetMap.France',
#----------------------------------
return basemap_list
# get_basemap_list()
#--------------------------------------------------------------------
def change_base_map(self, caller_obj=None):
#--------------------------------------------------------
# Cannot directly change the basemap for some reason.
# self.map_window.basemap = basemaps.Esri.WorldStreetMap
# Need to call clear_layers(), then add_layer().
#---------------------------------------------------------
map_choice = self.map_basemap.value
self.map_window.clear_layers()
basemap_layer = eval( 'basemaps.' + map_choice )
self.map_window.add_layer( basemap_layer )
# For testing
# print('map_choice =', map_choice)
# print('Changed the basemap.')
# change_base_map()
#--------------------------------------------------------------------
def update_map_view(self, caller_obj=None):
pass
# update_map_view()
#--------------------------------------------------------------------
def reset_map_panel(self, caller_obj=None):
self.map_window.center = self.map_center_init
self.map_window.zoom = 1
        self.map_minlon.value = -225.0
        self.map_maxlon.value = 225.0
        self.map_minlat.value = -51.6
        self.map_maxlat.value = 70.6
# reset_map_panel()
#--------------------------------------------------------------------
def make_datetime_panel(self):
full_box_width_px = self.pix_str( self.full_box_width )
date_width_px = self.date_width_px
time_width_px = self.time_width_px
hint_width_px = self.hint_width_px
#-----------------------------------
date_style = self.date_style
time_style = self.time_style
d1 = widgets.DatePicker( description='Start Date:',
disabled=False, style=date_style,
layout=Layout(width=date_width_px) )
d2 = widgets.DatePicker( description='End Date:',
disabled=False, style=date_style,
layout=Layout(width=date_width_px) )
d3 = widgets.Text( description='Start Time:',
disabled=False, style=time_style,
layout=Layout(width=time_width_px) )
d4 = widgets.Text( description='End Time:',
disabled=False, style=time_style,
layout=Layout(width=time_width_px) )
d3.value = '00:00:00'
d4.value = '00:00:00'
#-------------------------------
# Add some padding on the left
#-------------------------------
## margin = '0px 0px 2px 10px' # top right bottom left
pp = widgets.HTML((' ' * 3)) # for padding
d5 = widgets.Label( '(hh:mm:ss, 24-hr)',
layout=Layout(width=hint_width_px) )
## layout=Layout(width=hint_width_px, margin=margin) )
## disabled=False, style=hint_style )
d6 = widgets.Label( '(hh:mm:ss, 24-hr)',
layout=Layout(width=hint_width_px) )
## layout=Layout(width=hint_width_px, margin=margin) )
## disabled=False, style=hint_style )
d7 = widgets.Dropdown( description='Attributes:',
options=[''], value='',
disabled=False, style=date_style,
layout=Layout(width=full_box_width_px) )
# d8 = widgets.Text( description='Notes:',
# disabled=False, style=self.date_style,
# layout=Layout(width=full_box_width_px) )
d8 = widgets.Textarea( description='Notes:', value='',
disabled=False, style=self.date_style,
layout=Layout(width=full_box_width_px, height='140px'))
dates = widgets.VBox([d1, d2])
times = widgets.VBox([d3, d4])
hints = widgets.VBox([d5, d6])
pad = widgets.VBox([pp, pp])
top = widgets.HBox([dates, times, pad, hints])
panel = widgets.VBox([top, d7, d8])
## panel = widgets.VBox([top, pp, d7, d8])
self.datetime_start_date = d1
self.datetime_start_time = d3
self.datetime_end_date = d2
self.datetime_end_time = d4
self.datetime_attributes = d7
self.datetime_notes = d8
self.datetime_panel = panel
# make_datetime_panel()
#--------------------------------------------------------------------
def make_download_panel(self):
init_style = self.init_label_style
f1 = widgets.Dropdown( description='Download Format:',
options=['HDF', 'netCDF', 'netCDF4', 'ASCII'],
value='netCDF',
disabled=False, style=init_style)
        pad = widgets.HTML(value="<p> </p>")   # padding
b3 = widgets.Button(description="Download")
h3 = widgets.HBox([f1, pad, b3])
#-----------------------------------
# Could use this for info messages
#-----------------------------------
# status = widgets.Text(description=' Status:', style=self.style0,
# layout=Layout(width='380px') )
width_px = self.log_box_width_px
height_px = self.log_box_height_px
log = widgets.Textarea( description='', value='',
disabled=False, style=init_style,
layout=Layout(width=width_px, height=height_px))
## panel = widgets.VBox([h3, status, log])
panel = widgets.VBox([h3, log])
self.download_format = f1
self.download_button = b3
self.download_log = log
self.download_panel = panel
#-----------------
# Event handlers
#-----------------
b3.on_click( self.download_data )
# make_download_panel()
#--------------------------------------------------------------------
def make_prefs_panel(self):
full_box_width_px = self.pix_str( self.full_box_width )
left_style = self.left_label_style
w1 = widgets.Dropdown( description='OpenDAP package:',
options=['pydap', 'netcdf4'],
value='pydap',
disabled=False, style=left_style)
ts = self.timeout_secs
t1 = widgets.BoundedIntText( description='Timeout:',
value=ts, min=10, max=1000,
step=1, disabled=False,
style=left_style)
t2 = widgets.Label( ' (seconds)',
layout=Layout(width='80px') )
w2 = widgets.HBox([t1, t2])
note = 'Under construction; preferences will go here.'
w3 = widgets.Textarea( description='Notes:', value=note,
disabled=False, style=left_style,
layout=Layout(width=full_box_width_px, height='50px'))
panel = widgets.VBox([w1, w2, w3])
self.prefs_package = w1
self.prefs_timeout = t1
        self.prefs_notes = w3
self.prefs_panel = panel
# make_prefs_panel()
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def get_map_bounds(self, FROM_MAP=True, style='sw_and_ne_corners'):
#-------------------------------------------------------
# Notes: ipyleaflet defines "bounds" as:
# [[minlat, maxlat], [minlon, maxlon]]
# matplotlib.imshow defines "extent" as:
# extent = [minlon, maxlon, minlat, maxlat]
#-------------------------------------------------------
# Return value is a list, not a tuple, but
# ok to use it like this:
# [minlon, minlat, maxlon, maxlat] = get_map_bounds().
#-------------------------------------------------------
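        #-------------------------------------------------------
        # Illustrative calls (argument values assumed):
        #   bbox   = self.get_map_bounds()
        #            # [minlon, minlat, maxlon, maxlat]
        #   extent = self.get_map_bounds( FROM_MAP=False,
        #                       style='pyplot_imshow' )
        #            # [minlon, maxlon, minlat, maxlat]
        #-------------------------------------------------------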
if (FROM_MAP):
#------------------------------------
# Get the visible map bounds, after
# interaction such as pan or zoom
#------------------------------------
# bounds = self.map_window.bounds
# minlat = bounds[0][0]
# minlon = bounds[0][1]
# maxlat = bounds[1][0]
# maxlon = bounds[1][1]
#------------------------------------
# Is this more reliable ?
#------------------------------------
minlon = self.map_window.west
minlat = self.map_window.south
maxlon = self.map_window.east
maxlat = self.map_window.north
else:
#---------------------------------
# Get map bounds from text boxes
#---------------------------------
minlon = self.map_minlon.value
minlat = self.map_minlat.value
maxlon = self.map_maxlon.value
maxlat = self.map_maxlat.value
#------------------------------------------
# Return map bounds in different "styles"
#------------------------------------------
if (style == 'ipyleaflet'):
bounds = [[minlat, maxlat], [minlon, maxlon]]
elif (style == 'pyplot_imshow'):
bounds = [minlon, maxlon, minlat, maxlat]
        elif (style == 'sw_and_ne_corners'):
bounds = [minlon, minlat, maxlon, maxlat]
else:
bounds = [minlon, minlat, maxlon, maxlat]
return bounds
# get_map_bounds()
#--------------------------------------------------------------------
def replace_map_bounds(self, event, type=None, coordinates=None):
#-------------------------------------------
# Get visible map bounds after interaction
# Called by m.on_interaction().
# Don't need to process separate events?
#-------------------------------------------
[minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
#--------------------------------
# Save new values in text boxes
# Format with 8 decimal places.
#--------------------------------
self.map_minlon.value = "{:.8f}".format( minlon )
self.map_maxlon.value = "{:.8f}".format( maxlon )
self.map_maxlat.value = "{:.8f}".format( maxlat )
self.map_minlat.value = "{:.8f}".format( minlat )
# replace_map_bounds()
#--------------------------------------------------------------------
# def replace_map_bounds2(self, event, type=None, coordinates=None):
#
# # events: mouseup, mousedown, mousemove, mouseover,
# # mouseout, click, dblclick, preclick
# event = kwargs.get('type')
# # print('event = ', event)
# if (event == 'mouseup') or (event == 'mousemove') or \
# (event == 'click') or (event == 'dblclick'):
# w1.value = m.west
# w2.value = m.east
# w3.value = m.north
# w4.value = m.south
#
# # status.value = event
#
# # with output2:
# # print( event )
#
#--------------------------------------------------------------------
def update_map_bounds(self, caller_obj=None):
[bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
self.get_map_bounds( FROM_MAP = False )
bb_midlon = (bb_minlon + bb_maxlon) / 2
bb_midlat = (bb_minlat + bb_maxlat) / 2
bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
#----------------------------------------------------------
zoom = self.map_window.max_zoom # (usually 18)
self.map_window.center = bb_center
self.map_window.zoom = zoom
## print('max_zoom =', self.map_window.max_zoom)
## print('map_window.bounds =', self.map_window.bounds )
#------------------------------------
# Add "new_bounds" attribute to map
#------------------------------------
new_bounds = ((bb_minlat, bb_minlon), (bb_maxlat, bb_maxlon))
self.map_window.new_bounds = Tuple()
self.map_window.new_bounds = new_bounds
# update_map_bounds()
#--------------------------------------------------------------------
def zoom_out_to_new_bounds(self, change=None):
# change owner is the widget that triggers the handler
m = change.owner
#-----------------------------------------
# If not zoomed all the way out already,
# and we have a target bounding box
#-----------------------------------------
if (m.zoom > 1 and m.new_bounds):
b = m.new_bounds
n = change.new
if (n[0][0] < b[0][0] and n[0][1] < b[0][1] and
n[1][0] > b[1][0] and n[1][1] > b[1][1]):
#---------------------------------------
# new_bounds are now within map window
# Show bounding box as a rectangle ?
# weight = line/stroke thickness
#---------------------------------------
# rectangle = Rectangle( bounds=b, fill=False, weight=4)
# ## fill_opacity=0.0, \ fill_color="#0033FF" )
# m.add_layer(rectangle)
#-----------------------
m.new_bounds = None # (remove target)
else:
# zoom out
m.zoom = m.zoom - 1
# zoom_out_to_new_bounds()
#--------------------------------------------------------------------
# def zoom_out_to_new_bounds_v0(self, caller_obj=None):
#
# [bb_minlon, bb_minlat, bb_maxlon, bb_maxlat] = \
# self.get_map_bounds( FROM_MAP = False )
# bb_midlon = (bb_minlon + bb_maxlon) / 2
# bb_midlat = (bb_minlat + bb_maxlat) / 2
# bb_center = ( bb_midlat, bb_midlon )
# print('bb_minlon, bb_maxlon =', bb_minlon, bb_maxlon)
# print('bb_minlat, bb_maxlat =', bb_minlat, bb_maxlat)
# zoom = self.map_window.max_zoom # (usually 18)
# zoom = zoom - 1
# ## print('max_zoom =', self.map_window.max_zoom)
#
# self.map_window.center = bb_center
# self.map_window.zoom = zoom
# print('map_window.bounds =', self.map_window.bounds )
# # bounds is read-only
# ## self.map_window.bounds = ((bb_midlat,bb_midlon),(bb_midlat,bb_midlon))
# while (True):
# # time.sleep(0.5) ######
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# [minlon, minlat, maxlon, maxlat] = self.get_map_bounds()
# print('minlon, maxlon =', minlon, maxlon )
# print('minlat, maxlat =', minlat, maxlat )
# if (minlon < bb_minlon) and (maxlon > bb_maxlon) and \
# (minlat < bb_minlat) and (maxlat > bb_maxlat):
# break
# else:
# zoom -= 1
# if (zoom > 0):
# print('zoom =', zoom)
# self.map_window.zoom = zoom
# else:
# break
#
# # zoom_out_to_new_bounds_v0
#--------------------------------------------------------------------
def get_url_dir_filenames(self):
#-----------------------------------------
# Construct a list of filenames that are
# available in the opendap url directory
#-----------------------------------------
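        #---------------------------------------------------------
        # Illustrative (hypothetical) catalog line and result:
        #   "sameAs": "http://server/dap/data/nc/sst.nc.html"
        # would yield the filename:  sst.nc
        #---------------------------------------------------------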
r = requests.get( self.data_url_dir.value )
lines = r.text.splitlines()
# n_lines = len(lines)
filenames = list()
for line in lines:
if ('"sameAs": "http://' in line) and ('www' not in line):
line = line.replace('.html"', '')
parts = line.split("/")
filename = parts[-1]
filenames.append( filename )
return filenames
# get_url_dir_filenames()
#--------------------------------------------------------------------
def update_filename_list(self, caller_obj=None):
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## default_url_dir = 'http://test.opendap.org/dap/data/nc/'
self.data_status.value = 'Retrieving filenames in URL dir...'
filenames = self.get_url_dir_filenames()
if (len(filenames) == 0):
self.reset_data_panel( KEEP_DIR=True )
msg = 'Error: No data files found in URL dir.'
self.data_status.value = msg
return
#-----------------------------------
# Update filename list & selection
#-----------------------------------
self.data_filename.options = filenames
self.data_filename.value = filenames[0]
self.data_status.value = 'Ready.'
# update_filename_list()
#--------------------------------------------------------------------
def get_opendap_file_url(self):
directory = self.data_url_dir.value
if (directory[-1] != '/'):
directory += '/'
#------------------------------------
filename = self.data_filename.value
self.opendap_file_url = (directory + filename)
# get_opendap_file_url()
#--------------------------------------------------------------------
def open_dataset(self):
timeout = self.timeout_secs
opendap_url = self.opendap_file_url
dataset = pydap.client.open_url( opendap_url, timeout=timeout )
self.dataset = dataset
# open_dataset()
#--------------------------------------------------------------------
def update_data_panel(self, change=None):
#-------------------------------------------------------
# Note: When used as a callback/handler function for a
# widget's "observe" method, a dictionary called
# "change" is passed to this function. This
# callback fails without the "change=None".
# The type of "change" is:
# <class 'traitlets.utils.bunch.Bunch'>
#-------------------------------------------------------
# print('type(change) =', type(change))
if (self.data_filename.value == ''):
## self.update_filename_list() # (try this?)
return
self.get_opendap_file_url()
self.open_dataset()
self.get_all_var_shortnames()
self.get_all_var_longnames()
self.get_all_var_units()
#------------------------------------------
# Create map between long and short names
#------------------------------------------
long_names = self.var_long_names
short_names = self.var_short_names
units_names = self.var_units_names
self.short_name_map = dict(zip(long_names, short_names ))
self.units_map = dict(zip(long_names, units_names ))
#-------------------------------------------
# Update variable list and selected value.
#-------------------------------------------
self.data_var_name.options = short_names
self.data_var_name.value = short_names[0]
#------------------------------------
# Show other info for this variable
#------------------------------------
self.update_var_info()
self.clear_download_log() #####
#-------------------------------------------
# Try to show map extent in map panel
#-------------------------------------------
#### self.update_map_panel()
#-------------------------------------------
# Try to show date range in datetime panel
#-------------------------------------------
self.update_datetime_panel() # clears notes, too
# update_data_panel()
#--------------------------------------------------------------------
def update_var_info(self, change=None):
#-------------------------------------------------------
# Note: When used as a callback/handler function for a
# widget's "observe" method, a dictionary called
# "change" is passed to this function. This
# callback fails without the "change=None".
# The type of "change" is:
# <class 'traitlets.utils.bunch.Bunch'>
#-------------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
return
#-----------------------------------------------
# Maybe later wrap this block in "try, except"
#----------------------------------------------
# Note: short_name is selected from Dropdown.
# var = dataset[ short_name ]
#----------------------------------------------
long_name = self.get_var_longname( short_name )
units = self.get_var_units( short_name )
shape = self.get_var_shape( short_name )
dims = self.get_var_dimensions( short_name )
dtype = self.get_var_dtype( short_name )
atts = self.get_var_attributes( short_name )
#---------------------------------------------
self.data_var_long_name.value = long_name
self.data_var_units.value = units
self.data_var_shape.value = shape
self.data_var_dims.value = dims
self.data_var_type.value = dtype
self.data_var_atts.options = atts
# update_var_info()
#--------------------------------------------------------------------
def get_all_var_shortnames(self):
self.var_short_names = list( self.dataset.keys() )
# get_all_var_shortnames()
#--------------------------------------------------------------------
def get_all_var_longnames(self):
if not(hasattr(self, 'var_short_names')):
self.get_all_var_shortnames()
long_names = list()
for name in self.var_short_names:
try:
                long_name = self.get_var_longname( name )
long_names.append( long_name )
except:
# Use short name if there is no long_name.
long_names.append( name )
# print('No long name found for:', name)
self.var_long_names = long_names
# get_all_var_longnames()
#--------------------------------------------------------------------
def get_all_var_units(self):
if not(hasattr(self, 'var_short_names')):
self.get_all_var_shortnames()
units_names = list()
for name in self.var_short_names:
try:
units = self.get_var_units( name )
units_names.append( units )
except:
units_names.append( 'unknown' )
# print('No units name found for:', name)
self.var_units_names = units_names
# get_all_var_units()
#--------------------------------------------------------------------
def get_var_shortname(self):
short_name = self.data_var_name.value
if (short_name == ''):
pass
## print('Short name is not set.')
return short_name
# get_var_shortname()
#--------------------------------------------------------------------
def get_var_longname( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'long_name'):
return var.long_name
else:
return 'Long name not found.'
## return short_name
# get_var_longname()
#--------------------------------------------------------------------
def get_var_units( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'units'):
return var.units
else:
return 'unknown'
# get_var_units()
#--------------------------------------------------------------------
def get_var_shape( self, short_name ):
var = self.dataset[ short_name ]
return str(var.shape)
# get_var_shape()
#--------------------------------------------------------------------
def get_var_dimensions( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'dimensions'):
return str(var.dimensions)
else:
return 'No dimensions found.'
# get_var_dimensions()
#--------------------------------------------------------------------
def get_var_dtype( self, short_name ):
# The old Numeric single-character typecodes:
# ('f','d','h', 's','b','B','c','i','l'),
# corresponding to:
# ('f4','f8','i2','i2','i1','i1','S1','i4','i4'),
# are not yet supported.
type_map = {
'i1' : '1-byte signed integer',
'i2' : '2-byte signed integer',
'i4' : '4-byte signed integer',
'i8' : '8-byte signed integer',
'f4' : '4-byte floating point',
'f8' : '8-byte floating point',
'u1' : '1-byte unsigned integer',
'u2' : '2-byte unsigned integer',
'u4' : '4-byte unsigned integer',
'u8' : '8-byte unsigned integer' }
type_list = list( type_map.keys() )
var = self.dataset[ short_name ]
type_str = str( var.dtype )
#----------------------------------------
# The ">" & "<" indicate big and little
# endian byte order (i.e. MSB or LSB)
#----------------------------------------
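        #----------------------------------------
        # Illustrative examples (assumed dtype
        # strings from pydap/numpy):
        #   '>f4'  ->  '4-byte floating point (big endian)'
        #   '<i2'  ->  '2-byte signed integer (little endian)'
        #   '|S64' ->  'string (64-character max)'
        #----------------------------------------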
endian = ''
if (type_str[0] == '>'):
type_str = type_str[1:]
endian = ' (big endian)'
## endian = ' (MSB)'
if (type_str[0] == '<'):
type_str = type_str[1:]
endian = ' (little endian)'
## endian = ' (LSB)'
#---------------------------------
if (type_str in type_list):
return type_map[ type_str ] + endian
elif (type_str[:2] == '|S'):
try:
num = int( type_str[2:] )
return ('string (' + str(num) + '-character max)')
except:
return type_str
elif (type_str[0] == 'S'):
try:
num = int( type_str[1:] )
return ('string (' + str(num) + '-character max)')
except:
return type_str
else:
return type_str
# get_var_dtype()
#--------------------------------------------------------------------
def get_var_attributes( self, short_name ):
var = self.dataset[ short_name ]
if hasattr(var, 'attributes'):
#----------------------------------------
# Convert dictionary to list of strings
# to be displayed in a droplist.
#----------------------------------------
att_list = []
for key, val in var.attributes.items():
att_list.append( str(key) + ': ' + str(val) )
return att_list
#-------------------------------------------
# Return all attributes as one long string
#-------------------------------------------
### return str( var.attributes ) #### use str()
else:
return 'No attributes found.'
# get_var_attributes()
#--------------------------------------------------------------------
def get_time_attributes( self):
if (hasattr(self.dataset, 'time')):
time = self.dataset.time
        elif (hasattr(self.dataset, 'TIME')):
            time = self.dataset.TIME
        else:
            return 'No time variable found.'
if hasattr(time, 'attributes'):
#----------------------------------------
# Convert dictionary to list of strings
# to be displayed in a droplist.
#----------------------------------------
att_list = []
for key, val in time.attributes.items():
att_list.append( str(key) + ': ' + str(val) )
return att_list
#-------------------------------------------
# Return all attributes as one long string
#-------------------------------------------
### return str( time.attributes ) #### use str()
else:
return 'No time attributes found.'
# get_time_attributes()
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def update_datetime_panel(self):
self.clear_datetime_notes() # erase notes
#-----------------------------------------
# Are there any times for this dataset ?
#-----------------------------------------
short_names = self.var_short_names # self.dataset.keys()
if ('time' in short_names):
self.time_obj = self.dataset.time
self.time_var = self.time_obj.data[:]
elif ('TIME' in short_names):
self.time_obj = self.dataset.TIME
self.time_var = self.time_obj.data[:]
else:
msg = 'Unable to find times for this dataset.'
self.append_datetime_notes( msg )
return
#-----------------------------------------
# Show all time attributes in a droplist
#-----------------------------------------
time_att_list = self.get_time_attributes()
if (time_att_list is not None):
self.datetime_attributes.options = time_att_list
#----------------------------------------------------
# Compute the min and max times; save as time_range
#----------------------------------------------------
min_time = self.time_var.min()
max_time = self.time_var.max()
self.time_range = [min_time, max_time]
msg = 'Time range for this dataset = '
msg += '(' + str(min_time) + ', ' + str(max_time) + ')'
self.append_datetime_notes( msg )
#------------------------------------------------
# Is there an attribute called "actual_range" ?
#------------------------------------------------
# if not(hasattr(self.time_obj, 'actual_range')):
# msg = 'Unable to find "actual range" for times.'
# self.datetime_notes.value = msg
# return
# else:
# self.time_range = self.time_obj.actual_range
#-----------------------------------------
# Is there an attribute called "units" ?
#-----------------------------------------
# The full string may be something like:
# hour since 0000-01-01 00:00:00
# Save both full string and just units.
#-----------------------------------------
if (hasattr(self.time_obj, 'units')):
self.time_units_str = self.time_obj.units
self.get_actual_time_units() # (set self.time_units)
else:
msg = 'Unable to find "units" for time.'
self.append_datetime_notes( msg )
return
#-------------------------------------------
# Is there an attribute called "delta_t" ?
# If so, assume it is in "datetime" form,
        # such as "00-01-00 00:00:00", for 1 month.
#-------------------------------------------
HAS_DELTA_T = hasattr(self.time_obj, 'delta_t')
if (HAS_DELTA_T):
self.time_delta = self.time_obj.delta_t
else:
self.get_time_delta_str()
# For testing:
# print('In update_datetime_panel():' )
# print('self.time_delta =', self.time_delta )
# print('HAS_DELTA_T =', HAS_DELTA_T )
#---------------------------------------------------
# Are time units given as "time since" some date ?
#---------------------------------------------------
# Sample data has cases with:
# 'days since', 'hour since' (vs hours), 'seconds since'
#--------------------------------------------------------
# Already saved "time_units_str" AND "time_units" above.
# strip() removes leading and trailing whitespace
#--------------------------------------------------------
time_units_str = self.time_units_str
if ('since' not in time_units_str):
msg = 'Time units string has no "since" part.'
self.append_datetime_notes( msg )
return
#-------------------------------------
# Process the "origin" date and time
#-------------------------------------
parts = time_units_str.split('since')
odt = parts[1].strip()
self.origin_datetime_str = odt
(date_str, time_str) = self.split_datetime_str( odt )
if (date_str.startswith('0000')):
msg = 'Warning: "Since" year must be > 0, changing to 1.'
self.append_datetime_notes( msg )
date_str = date_str[:3] + '1' + date_str[4:]
self.origin_datetime_obj = self.get_datetime_obj_from_str( date_str, time_str)
#---------------------------------------------
# Now process time_since for start and end
#---------------------------------------------
time_since1 = self.time_range[0]
time_since2 = self.time_range[1]
start_datetime_obj = self.get_datetime_from_time_since(time_since1)
end_datetime_obj = self.get_datetime_from_time_since(time_since2)
start_datetime_str = str(start_datetime_obj)
end_datetime_str = str(end_datetime_obj)
(start_date, start_time) = self.split_datetime_str( start_datetime_str )
(end_date, end_time) = self.split_datetime_str( end_datetime_str )
#-------------------------------
# Save these also, as numbers.
#-------------------------------
self.start_year = start_datetime_obj.year
self.end_year = end_datetime_obj.year
# (y1,m1,d1) = self.split_date_str( start_date )
# (y2,m2,d2) = self.split_date_str( end_date )
# self.start_year = y1
# self.end_year = y2
#-----------------------------------------------------------
# Be sure to set date values as date_obj, not datetime_obj
#-----------------------------------------------------------
self.datetime_start_date.value = start_datetime_obj.date()
self.datetime_end_date.value = end_datetime_obj.date()
self.datetime_start_time.value = start_time
self.datetime_end_time.value = end_time
#----------------------------------
# This also works, but more steps
#----------------------------------
# (y1,m1,d1) = self.split_date_str( start_date )
# (y2,m2,d2) = self.split_date_str( end_date )
# self.datetime_start_date.value = datetime.date(y1, m1, d1)
# self.datetime_end_date.value = datetime.date(y2, m2, d2)
# update_datetime_panel()
#--------------------------------------------------------------------
def get_years_from_time_since(self, data_time_since):
#----------------------------------------------------
# Notes: self.time_var contains "times since" some
# origin time, in days, hours or seconds,
# unrestricted by user start/end times.
# self.time_range[0] = self.time_var.min()
# self.time_range[1] = self.time_var.max()
#----------------------------------------------------
# For plots, want to convert these time
# offsets to decimal years, keeping in mind
# that user may have restricted the time
# range further.
#----------------------------------------------------
units_per_year = {
'years':1.0, 'days':365.0, 'hours':8760.0,
'minutes':525600.0, 'seconds':31536000.0 }
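        #----------------------------------------------------
        # Illustrative example (values assumed):  if units
        # are 'days', start_year is 1950, and data_time_since
        # is 730 days past time_range[0], then:
        #   years_since_start = 730 / 365.0 = 2.0
        #   dec_years         = 1950 + 2.0  = 1952.0
        #----------------------------------------------------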
min_data_time_since = self.time_range[0]
time_since_start = (data_time_since - min_data_time_since)
#----------------------------------------------------
units = self.time_units
if (units in units_per_year.keys()):
factor = units_per_year[ units ]
years_since_start = (time_since_start / factor)
else:
print('ERROR, Unsupported units:', units)
return None
#----------------------------------------------------
start_year = self.start_year
dec_years = (years_since_start + start_year)
return dec_years
# get_years_from_time_since()
#--------------------------------------------------------------------
def clear_datetime_notes(self):
self.datetime_notes.value = ''
# clear_datetime_notes()
#--------------------------------------------------------------------
def append_datetime_notes(self, msg):
self.datetime_notes.value += (msg + '\n')
# append_datetime_notes()
#--------------------------------------------------------------------
# def list_to_string( self, array ):
#
# s = ''
# for item in array:
# s = s + item + '\n'
# return s
#
# # list_to_string()
#--------------------------------------------------------------------
def pad_with_zeros(self, num, target_len):
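        #--------------------------------------------
        # Illustrative examples:
        #   pad_with_zeros(7, 4)     returns  '0007'
        #   pad_with_zeros(31.0, 2)  returns  '31'
        #--------------------------------------------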
num_string = str( int(num) ) # int removes decimal part
n = len( num_string )
m = (target_len - n)
num_string = ('0'*m) + num_string
return num_string
# pad_with_zeros()
#--------------------------------------------------------------------
def get_actual_time_units(self):
# secs_per_unit_list = [1, 60.0, 3600.0, 86400, 31536000.0, -1]
# next_unit_factor = [60.0, 60.0, 24.0, 365.0, -1, -1]
units_list = ['second', 'minute', 'hour',
'day', 'year', 'None'] # ascending, skip month
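        #------------------------------------------------------
        # Illustrative example (assumed units string):
        #   time_units_str = 'hour since 1800-01-01 00:00:00'
        #   startswith('hour') matches, so time_units = 'hours'
        #------------------------------------------------------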
for units in units_list:
if (self.time_units_str.startswith(units)):
break
        if (units != 'None'):
units += 's' # (make units plural now; not before)
else:
print('ERROR: No match found for units.')
return
self.time_units = units
# get_actual_time_units()
#--------------------------------------------------------------------
def get_time_delta_str(self):
## print('### self.time_var.size =', self.time_var.size )
## print('###')
#-----------------------------------
# Check size of the time_var array
#-----------------------------------
if (self.time_var.size == 1):
dt = 0
self.time_delta = '0000-00-00 00:00:00'
# print('At top of get_time_delta_str():')
# print('self.time_var.size =', self.time_var.size )
# print('self.time_delta =', self.time_delta )
return
if (self.time_var.size > 1):
dt = (self.time_var[1] - self.time_var[0])
print('dt1 =', dt)
if (self.time_var.size > 3):
dt2 = (self.time_var[2] - self.time_var[1]) ###
dt3 = (self.time_var[3] - self.time_var[2]) ###
print('dt2 =', dt2) # check if evenly spaced
print('dt3 =', dt3)
#---------------------------------------------------
# Note: Actual time units were stripped from units
# string and saved as self.time_units.
# A full units attribute string may be:
# 'hour since 0000-00-00 00:00:00'
#---------------------------------------------------
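        #---------------------------------------------------
        # Illustrative example (values assumed):  if
        # self.time_units = 'hours' and dt = 744, then dt
        # has 3 digits, so it is converted to days below:
        #   744 / 24 = 31  ->  s = '31'
        #   time_delta = '0000-00-31 00:00:00'
        #---------------------------------------------------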
units_list = ['seconds', 'minutes', 'hours',
'days', 'years', 'None'] # ascending, skip month
secs_per_unit_list = [1, 60.0, 3600.0, 86400, 31536000.0, -1]
next_unit_factor = [60.0, 60.0, 24.0, 365.0, -1, -1]
units = self.time_units
units_index = units_list.index( units )
#----------------------------------------
if (units == 'years'):
s = self.pad_with_zeros(dt,4)
else:
if (len(str(dt)) <= 2):
s = self.pad_with_zeros(dt,2)
else:
#-------------------------------
# Must convert units to get dt
# down to 1 or 2 digits.
#-------------------------------
old_dt = dt
old_units = units
k = units_index
n = len( str(int(dt)) )
while (n > 2) and (units != 'None'):
k = k + 1
dt = (dt / next_unit_factor[k-1])
units = units_list[k]
n = len( str(int(dt)) )
if (units == 'None'):
print('#####################################')
print('ERROR in get_time_delta_str():')
print(' dt has too many digits.')
print('#####################################')
return
else:
# Note that any remainder has been dropped.
s = self.pad_with_zeros(dt,2)
print('Old dt and units =', old_dt, old_units)
print('New dt and units =', dt, units)
print('Remainder not retained yet.')
#----------------------------------------------
if (units == 'years'):
td = (s + '-00-00 00:00:00')
# if (units == 'months'):
# td= ('0000-' + s + '-00 00:00:00')
if (units == 'days'):
td = ('0000-00-' + s + ' 00:00:00')
if (units == 'hours'):
td = ('0000-00-00 ' + s + ':00:00')
if (units == 'minutes'):
td = ('0000-00-00 00:' + s + ':00')
if (units == 'seconds'):
td = ('0000-00-00 00:00:' + s)
#------------------------------------------------
self.time_delta = td
# print('At bottom of get_time_delta_str():')
# print('self.time_delta =', td)
# print()
# get_time_delta_str()
#--------------------------------------------------------------------
def get_datetime_obj_from_str(self, date_str, time_str='00:00:00'):
#---------------------------------------------------
# date_str = 'YYYY-MM-DD', time_str = 'HH:MM:SS'
#---------------------------------------------------
## e.g. d1 = str(self.datetime_end_date.value)
## e.g. t1 = self.datetime_end_time.value
(y, m1, d) = self.split_date_str(date_str)
(h, m2, s) = self.split_time_str(time_str)
if( y <= 0 ):
# msg = 'Year cannot be < 1 in start date.\n'
# msg += 'Changed year from ' + str(y) + ' to 1.'
# self.datetime_notes.value = msg
print('Year cannot be < 1 in start date.')
print('Changed year from ' + str(y) + ' to 1.')
print()
y = 1
datetime_obj = datetime.datetime(y, m1, d, h, m2, s)
return datetime_obj
# get_datetime_obj_from_str()
#--------------------------------------------------------------------
def get_datetime_obj_from_one_str(self, datetime_str):
(date, time) = self.split_datetime_str( datetime_str )
(y, m1, d) = self.split_date_str( date )
(h, m2, s) = self.split_time_str( time )
datetime_obj = datetime.datetime(y, m1, d, h, m2, s)
return datetime_obj
# get_datetime_obj_from_one_str()
#--------------------------------------------------------------------
def get_start_datetime_obj(self):
#---------------------------------------
# d1.value is a datetime "date object"
# t1.value is a time string: 00:00:00
#---------------------------------------
d1 = self.datetime_start_date
t1 = self.datetime_start_time
if (d1.value is None):
return None
date_str = str(d1.value)
time_str = t1.value # (already string)
## print('In get_start_datetime_obj():')
## print('date_str =', date_str)
## print('time_str =', time_str)
datetime_obj = self.get_datetime_obj_from_str(date_str, time_str)
return datetime_obj
# get_start_datetime_obj()
#--------------------------------------------------------------------
def get_end_datetime_obj(self):
#---------------------------------------
# d1.value is a datetime "date object"
# t1.value is a time string: 00:00:00
#---------------------------------------
d1 = self.datetime_end_date
t1 = self.datetime_end_time
if (d1.value is None):
return None
date_str = str(d1.value)
time_str = t1.value # (already string)
## print('In get_end_datetime_obj():')
## print('date_str =', date_str)
## print('time_str =', time_str)
datetime_obj = self.get_datetime_obj_from_str(date_str, time_str)
return datetime_obj
# get_end_datetime_obj()
#--------------------------------------------------------------------
def split_datetime_str(self, datetime_obj, datetime_sep=' ',
ALL=False):
#-----------------------------------------------
# Note: Still works if datetime_obj is string.
#-----------------------------------------------
datetime_str = str(datetime_obj)
parts = datetime_str.split( datetime_sep )
## print('## datetime_str =', datetime_str )
## print('## parts =', str(parts) )
date_str = parts[0]
time_str = parts[1]
if not(ALL):
return (date_str, time_str)
else:
(y,m1,d) = self.split_date_str( date_str )
(h,m2,s) = self.split_time_str( time_str )
return (y,m1,d,h,m2,s)
# split_datetime_str()
#--------------------------------------------------------------------
def split_date_str(self, date_str, date_sep='-'):
date_parts = date_str.split( date_sep )
year = int(date_parts[0])
month = int(date_parts[1]) # NOTE: int('08') = 8
day = int(date_parts[2])
return (year, month, day)
# split_date_str()
#--------------------------------------------------------------------
def split_time_str(self, time_str, time_sep=':'):
time_parts = time_str.split( time_sep )
hour = int(time_parts[0])
minute = int(time_parts[1])
second = int(time_parts[2])
return (hour, minute, second)
# split_time_str()
#--------------------------------------------------------------------
def get_datetime_from_time_since(self, time_since):
# For testing
# print('## type(times_since) =', type(time_since) )
# print('## time_since =', time_since )
# print('## int(time_since) =', int(time_since) )
#---------------------------------------------------
# Note: datetime.timedelta() can take integer or
# float arguments, and the arguments can be
# very large numbers. However, it does not
# accept any numpy types, whether float or
# int (e.g. np.int16, np.float32).
# https://docs.python.org/3/library/datetime.html
#---------------------------------------------------
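        #---------------------------------------------------
        # Illustrative example (per the note above):
        #   datetime.timedelta( days=np.int32(10) ) raises a
        #   TypeError, but the float() cast used below, e.g.
        #   datetime.timedelta( days=float(np.int32(10)) ),
        #   works as expected.
        #---------------------------------------------------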
units = self.time_units # ('days', 'hours', etc.)
delta = None
time_since2 = float(time_since) ## No numpy types
#------------------------------------------------------
if (units == 'days'):
delta = datetime.timedelta( days=time_since2 )
if (units == 'hours'):
delta = datetime.timedelta( hours=time_since2 )
if (units == 'minutes'):
delta = datetime.timedelta( minutes=time_since2 )
if (units == 'seconds'):
delta = datetime.timedelta( seconds=time_since2 )
#------------------------------------------------------
if (delta is None):
msg = 'ERROR: Units: ' + units + ' not supported.'
self.append_datetime_notes( msg )
return
# For testing
## print('#### delta =', delta)
#---------------------------------------------
# Create new datetime object from time_since
#---------------------------------------------
origin_obj = self.origin_datetime_obj
new_dt_obj = (origin_obj + delta)
return new_dt_obj
# get_datetime_from_time_since()
#--------------------------------------------------------------------
# def get_datetime_from_time_since_OLD(self, time_since):
#
# #---------------------------------------------------
# # datetime.timedelta has limits on inputs, e.g.
# # numpy.int32 is unsupported time for seconds arg.
# # So here we adjust big numbers for timedelta.
# # The days argument can handle really big numbers.
# #---------------------------------------------------
# maxint = 32767
# units = self.time_units # ('days', 'hours', etc.)
# n_per_day = {'seconds':86400.0, 'minutes':1440.0,
# 'hours':24.0, 'days':1.0}
# if (time_since > maxint):
# time_since = time_since / n_per_day[ units ]
# units = 'days' # (new units)
#
# #-------------------------------------------------
# # Note: We now save self.time_units_str separate
# # from self.time_units.
# #-------------------------------------------------
# delta = None
# if (units == 'days'):
# delta = datetime.timedelta( days=time_since )
# if (units == 'hours'):
# delta = datetime.timedelta( hours=time_since )
# if (units == 'minutes'):
# delta = datetime.timedelta( minutes=time_since )
# if (units == 'seconds'):
# delta = datetime.timedelta( seconds=time_since )
# #-----------------------------------------------------
# if (delta is None):
# msg = 'ERROR: Units: ' + units + ' not supported.'
# self.append_datetime_notes( msg )
# return
#
# #---------------------------------------------
# # Create new datetime object from time_since
# #---------------------------------------------
# origin_obj = self.origin_datetime_obj
# new_dt_obj = (origin_obj + delta)
# return new_dt_obj
#
# # For testing
# ## print('origin_datetime_obj =', str(origin_obj) )
# ## print('time_since delta =', str(delta) )
# ## print('new_dt_obj =', str(new_dt_obj) )
# ## return new_dt_obj
#
# # get_datetime_from_time_since()
#--------------------------------------------------------------------
def get_time_since_from_datetime(self, datetime_obj, units='days'):
#-------------------------------------------------
# Compute time duration between datetime objects
#-------------------------------------------------
origin_obj = self.origin_datetime_obj
duration_obj = (datetime_obj - origin_obj)
duration_secs = duration_obj.total_seconds()
#---------------------------------------------------
# There is not a fixed number of seconds per month
# Also 52 (weeks/year) * 7 (days/week) = 364.
#---------------------------------------------------
secs_per_unit_map = {
'years':31536000.0, 'weeks':604800.0, 'days':86400.0,
'hours':3600.0, 'minutes':60.0, 'seconds':1 }
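        #---------------------------------------------------
        # Illustrative example (values assumed):  a duration
        # of 129600 seconds with units='days' gives:
        #   time_since = 129600 / 86400.0 = 1.5  (days)
        #---------------------------------------------------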
secs_per_unit = secs_per_unit_map[ units ]
duration = (duration_secs / secs_per_unit )
time_since = duration # (in units provided)
return time_since
# get_time_since_from_datetime()
#--------------------------------------------------------------------
def get_month_difference(self, start_datetime_obj, end_datetime_obj ):
#-------------------------------------------
# Example 0: 2017-09 to 2017-09
# months = (2017-2017)*12 = 0
# months = (months - 9) = (0-9) = -0
# months = (months + 9) = 0 (as index)
#-------------------------------------------
# Example 1: 2017-09 to 2018-02
# 9:10, 10:11, 11:12, 12:1, 1:2 = 5 (if same days)
# months = (2018-2017)*12 = 12
# months = (months - 9) = 3
# months = (months + 2) = 3 + 2 = 5
#-------------------------------------------
start_year = start_datetime_obj.year
end_year = end_datetime_obj.year
months = (end_year - start_year) * 12
#-------------------------------------------
start_month = start_datetime_obj.month
end_month = end_datetime_obj.month
months = months - start_month
months = months + end_month
## months = months + 1 # (no: get 1 if dates same)
## print('month difference =', months)
return months
# get_month_difference()
#--------------------------------------------------------------------
def get_new_time_index_range(self, REPORT=True):
if not(hasattr(self, 'origin_datetime_str')):
msg = 'Sorry, origin datetime is not set.'
self.append_download_log( [msg, ' '] )
if (hasattr(self, 'time_var')):
nt = len(self.time_var)
return (0, nt - 1) # (unrestricted by choices)
else:
return (None, None)
#----------------------------------------------------
        # Get min possible datetime, from time_var.min().
# Every time_var value is measured from an "origin"
# such as: '1800-01-01 00:00:00'
#----------------------------------------------------
## origin_datetime_obj = self.origin_datetime_obj
time_since_min = self.time_var.min()
min_datetime_obj = self.get_datetime_from_time_since( time_since_min )
#-----------------------------------------------
# Get current settings from the datetime panel
#-----------------------------------------------
start_datetime_obj = self.get_start_datetime_obj()
end_datetime_obj = self.get_end_datetime_obj()
#---------------------------------------------------
# Convert dt datetime string to "timedelta" object
# e.g. 00-01-00 00:00:00
#---------------------------------------------------
# Note: datetime.timedelta() does not do "months",
# since they're not a fixed number of days,
# so we use "get_month_difference()". Also
# it does not have a "years" argument.
#---------------------------------------------------
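        #---------------------------------------------------
        # Illustrative examples (assumed time_delta values):
        #   '0000-00-01 00:00:00' -> daily data; m1 is 0, so
        #       the datetime.timedelta loop below is used.
        #   '00-01-00 00:00:00'   -> monthly data; m1 is 1,
        #       so get_month_difference() is used instead.
        #---------------------------------------------------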
## print('In get_new_time_index_range():')
## print('self.time_delta =', self.time_delta)
USE_LOOPS = True
(y,m1,d,h,m2,s) = self.split_datetime_str(self.time_delta, ALL=True)
## print('time_delta =', self.time_delta )
## print('y, m1, d, h, m2, s =', y, m1, d, h, m2, s )
if (m1 == 0):
d = (y*365) + d # python int(), not 2-byte int.
# print('days =', d)
dt_timedelta_obj = datetime.timedelta(days=d, hours=h, minutes=m2, seconds=s)
elif (m1 > 0 and (y+d+h+m2+s == 0)):
n_months1 = self.get_month_difference( min_datetime_obj, start_datetime_obj )
n_months2 = self.get_month_difference( min_datetime_obj, end_datetime_obj )
start_index = int(n_months1 / m1)
end_index = int(n_months2 / m1)
USE_LOOPS = False
else:
# Note: I think there is a "monthdelta" package ?
# Or we may be able to use dateutils.
print('ERROR: Cannot handle this dt case yet.')
return None
#-------------------------------------------------
# Compute start and end index into time array.
# General method, if delta_t is datetime string.
#-------------------------------------------------
if (USE_LOOPS):
start_index = 0
# print('min_datetime_str =', str(min_datetime_obj) )
# print('dt_timedelta_str =', str(dt_timedelta_obj) )
next = copy.copy( min_datetime_obj )
while (True):
next = (next + dt_timedelta_obj)
## print('next =', str(next))
if (next < start_datetime_obj):
start_index += 1
else: break
#-------------------------------------------------
end_index = 0
next = copy.copy( min_datetime_obj )
while (True):
next = (next + dt_timedelta_obj)
if (next < end_datetime_obj):
end_index += 1
else: break
#---------------------------------
# Make sure indices are in range
#---------------------------------
nt = len( self.time_var )
start_index = max(0, start_index)
end_index = min(end_index, nt-1)
#---------------------------------------
# User time period may be smaller than
# time spacing (dt).
#----------------------------------------------------
# We are using these indices like this:
# a[ t_i1:t_i2, lat_i1:lat_i2, lon_i1:lon_i2]
# So if indices are equal, result will be empty.
# If indices differ by 1, get 1 value for that dim.
#----------------------------------------------------
if (start_index == end_index):
end_index = start_index + 1
if (REPORT):
# print('n_times =', nt)
# print('New time indices =', start_index, ',', end_index)
# print()
#--------------------------
i1s = str(start_index)
i2s = str(end_index)
msg1 = 'n_times = ' + str(nt)
msg2 = 'New time indices = ' + i1s + ',' + i2s
self.append_download_log( [msg1, msg2, ' '] )
return (start_index, end_index)
# Not needed for current problem.
# days_since1 = self.get_days_since_from_datetime(start_datetime_obj)
# days_since2 = self.get_days_since_from_datetime(end_datetime_obj)
# For testing
# print('type(start_index) =', type(start_index) )
# print('type(end_index) =', type(end_index) )
# print('start_index =', start_index)
# print('end_index =', end_index)
# print('n_times =', nt)
# return (start_index, end_index)
# get_new_time_index_range()
#--------------------------------------------------------------------
def get_new_lat_index_range(self, REPORT=True):
short_name = self.get_var_shortname()
#-------------------------------------------------
# Note: dimensions can be things like 'ni', 'nj'
        #       so it's better to use the list of all
# variable short names, stored earlier.
# They are valid keys to self.dataset.
#-------------------------------------------------
## dim_list = self.dataset[ short_name ].dimensions
## dim_list = self.dataset[ short_name ].attributes.keys()
dim_list = self.var_short_names
lat_name_list = ['lat', 'LAT', 'coadsy', 'COADSY',
'latitude', 'LATITUDE', 'None']
for lat_name in lat_name_list:
if (lat_name in dim_list):
break
if (lat_name == 'None'):
msg1 = 'Sorry, could not find a "latitude" variable.'
msg2 = 'Checked: lat, LAT, coadsy, COADSY,'
msg3 = ' latitude and LATITUDE.'
self.append_download_log( [msg1, msg2, msg3] )
return (None, None)
#--------------------------------------------
# Are lats for grid cell edges or centers ?
#--------------------------------------------
att_dict = self.dataset[ lat_name ].attributes
CENTERS = False
if ('coordinate_defines' in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
        # Get user-selected minlat and maxlat
#------------------------------------
user_minlat = self.map_minlat.value
user_maxlat = self.map_maxlat.value
#----------------------------------
# Get the array of lats, and info
#-----------------------------------------
# <class 'pydap.model.BaseType'>' object
# has no attribute 'array'
#--------------------------------------------------
# Next line type: <class 'pydap.model.BaseType'>
# and has no attribute "array".
#--------------------------------------------------
# lats = self.dataset[ lat_name ]
# lats = self.dataset[ lat_name ].array
#----------------------------------------------------------
# Next line type: <class 'pydap.handlers.dap.BaseProxy'>
# and has no attribute "size".
#----------------------------------------------------------
# lats = self.dataset[ lat_name ].data
#----------------------------------------------------------
# Next line type: <class 'pydap.model.BaseType'>
# and data is downloaded from server.
#----------------------------------------------------------
# lats = self.dataset[ lat_name ][:]
#----------------------------------------------------------
# Next line type: <class 'numpy.ndarray'>
#----------------------------------------------------------
lats = self.dataset[ lat_name ][:].data
if (lats.ndim > 1):
msg1 = 'Sorry, cannot yet restrict latitude indices'
msg2 = ' when lat array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lats) =', type(lats) )
# print('## lats.shape =', lats.shape )
# print('## lats =', lats )
#------------------------------------------------
# It seems that values may be reverse sorted to
        # indicate that the origin is the upper left corner.
# Don't sort them, need indices into original.
#------------------------------------------------
if (lats[0] > lats[-1]):
origin = 'upper'
else:
origin = 'lower'
#------------------------------------------
# Compute the latitude spacing, dlat
#------------------------------------------
# This only works if lats are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlats = lats.size
minlat = lats.min()
maxlat = lats.max()
dlat = np.abs(lats[1] - lats[0])
#--------------
# Another way
#--------------
# latdif = (maxlat - minlat)
# if (CENTERS):
# dlat = (latdif / (nlats - 1))
# else:
# dlat = (latdif / nlats)
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlats )
w = np.logical_and(lats > user_minlat, lats < user_maxlat) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lat_i1 = indices[0]
lat_i2 = indices[-1]
else:
lat_i1 = 0
lat_i2 = nlats-1
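        #------------------------------------------------------
        # Illustrative example (values assumed):  if lats run
        # from -90 to 90 at 1-degree spacing (lats[i] = i - 90)
        # and the user chose minlat=10.2, maxlat=12.8, then w
        # selects lats 11 and 12, so lat_i1=101 and lat_i2=102.
        #------------------------------------------------------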
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lat_i2 ???
#--------------------------------------
# lat_i1 = int( (user_minlat - minlat) / dlat )
# lat_i2 = int( (user_maxlat - minlat) / dlat )
# lat_i2 = (lat_i2 + 1) ########
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lat_i1 = min( max(lat_i1, 0), nlats-1 )
# lat_i2 = min( max(lat_i2, 0), nlats-1 )
#------------------------------------------
        # User region may be smaller than dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lat_i1 == lat_i2): # (still possible?)
# lat_i2 = lat_i1 + 1
if (REPORT):
print('lat_name =', lat_name)
print('minlat =', minlat, '(var)' )
print('maxlat =', maxlat, '(var)' )
print('dlat =', dlat)
print('u_minlat =', user_minlat, '(user)' )
print('u_maxlat =', user_maxlat, '(user)' )
print('lat_i1 =', lat_i1, '(new index)')
print('lat_i2 =', lat_i2, '(new index)')
# print('nlats =', nlats)
# print('New latitude indices =', lat_i1, ',', lat_i2)
# print()
#-------------------------------
i1s = str(lat_i1)
i2s = str(lat_i2)
msg1 = 'lat_name = ' + lat_name
msg2 = 'dlat = ' + str(dlat)
msg3 = 'nlats = ' + str(nlats)
msg4 = 'min, max = ' + str(minlat) + ', ' + str(maxlat) + ' (data)'
msg5 = 'min, max = ' + str(user_minlat) + ', ' + str(user_maxlat) + ' (user)'
msg6 = 'New latitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lat_i1, lat_i2)
# get_new_lat_index_range()
#--------------------------------------------------------------------
def get_new_lon_index_range(self, REPORT=True):
short_name = self.get_var_shortname()
#-------------------------------------------------
# Note: dimensions can be things like 'ni', 'nj'
        #       so it's better to use the list of all
# variable short names, stored earlier.
# They are valid keys to self.dataset.
#-------------------------------------------------
## dim_list = self.dataset[ short_name ].dimensions
## dim_list = self.dataset[ short_name ].attributes.keys()
dim_list = self.var_short_names
lon_name_list = ['lon', 'LON', 'coadsx', 'COADSX',
'longitude', 'LONGITUDE', 'None']
for lon_name in lon_name_list:
if (lon_name in dim_list):
break
if (lon_name == 'None'):
msg1 = 'Sorry, could not find a "longitude" variable.'
msg2 = 'Checked: lon, LON, coadsx, COADSX,'
msg3 = ' longitude and LONGITUDE.'
self.append_download_log( [msg1, msg2, msg3] )
return (None, None)
#--------------------------------------------
# Are lons for grid cell edges or centers ?
#--------------------------------------------
att_dict = self.dataset[ lon_name ].attributes
CENTERS = False
if ('coordinate_defines' in att_dict.keys() ):
if (att_dict['coordinate_defines'] == 'center'):
CENTERS = True
#------------------------------------
        # Get user-selected minlon and maxlon
#------------------------------------
user_minlon = self.map_minlon.value
user_maxlon = self.map_maxlon.value
#----------------------------------
# Get the array of lons, and info
#----------------------------------
lons = self.dataset[ lon_name ][:].data
if (lons.ndim > 1):
msg1 = 'Sorry, cannot yet restrict longitude indices'
msg2 = ' when lon array has more than 1 dimension.'
self.append_download_log( [msg1, msg2] )
return (None, None)
# print('## type(lons) =', type(lons) )
# print('## lons.shape =', lons.shape )
# print('## lons.ndim =', lons.ndim )
#------------------------------------------
# Compute the longitude spacing, dlon
#------------------------------------------
# This only works if lons are a 1D list.
# If a "list of lists", len() will be for
# the outer list and min() won't work.
# Also, no "size" attribute, etc.
#------------------------------------------
nlons = lons.size
minlon = lons.min()
maxlon = lons.max()
dlon = np.abs(lons[1] - lons[0])
#--------------
# Another way
#--------------
# londif = (maxlon - minlon)
# if (CENTERS):
# dlon = (londif / (nlons - 1))
# else:
# dlon = (londif / nlons)
#-----------------------------------------
# Convert lons to have range [-180,180]?
#-----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #####################
# user_maxlon = ((user_maxlon + 180.0) % 360) - 180
# user_minlon = ((user_minlon + 180.0) % 360) - 180
# if (user_minlon > user_maxlon):
# user_minlon -= 180.0
#-------------------------------------------
# Convert user lons to have range [0,360]?
#-------------------------------------------
if (minlon >= 0) and (maxlon <= 360):
user_minlon = (user_minlon + 360.0) % 360
user_maxlon = (user_maxlon + 360.0) % 360
#--------------------------------------
# Compute the new, restricted indices
# New method: (2020-12-12)
#--------------------------------------
all_indices = np.arange( nlons )
w = np.logical_and(lons > user_minlon, lons < user_maxlon) # boolean array
indices = all_indices[w]
if (indices.size > 0):
lon_i1 = indices[0]
lon_i2 = indices[-1]
else:
lon_i1 = 0
lon_i2 = nlons-1
#--------------------------------------
# Compute the new, restricted indices
#--------------------------------------
# Here, int() behaves like "floor()".
# So maybe add 1 to lon_i2 ???
#--------------------------------------
# lon_i1 = int( (user_minlon - minlon) / dlon )
# lon_i2 = int( (user_maxlon - minlon) / dlon )
# lon_i2 = lon_i2 + 1 #######
#---------------------------------
# Make sure indices are in range
#----------------------------------------
# lon_i1 = min( max(lon_i1, 0), nlons-1 )
# lon_i2 = min( max(lon_i2, 0), nlons-1 )
#------------------------------------------
# User region may be smaller than v_dlat,
# as is the case with Puerto Rico, where
# data grid cells are 1 deg x 1 deg or so.
#------------------------------------------
# if (lon_i1 == lon_i2): # (still needed?)
# lon_i2 = lon_i1 + 1
if (REPORT):
print()
print('lon_name =', lon_name)
print('minlon =', minlon, '(var)')
print('maxlon =', maxlon, '(var)')
print('dlon =', dlon)
print('u_minlon =', user_minlon, '(user)')
print('u_maxlon =', user_maxlon, '(user)')
print('lon_i1 =', lon_i1, '(new index)')
print('lon_i2 =', lon_i2, '(new index)')
# print('nlons =', nlons)
# print('New longitude indices =', lon_i1, ',', lon_i2 )
# print()
#--------------------------------------------------
i1s = str(lon_i1)
i2s = str(lon_i2)
msg1 = 'lon_name = ' + lon_name
msg2 = 'dlon = ' + str(dlon)
msg3 = 'nlons = ' + str(nlons)
msg4 = 'min, max = ' + str(minlon) + ', ' + str(maxlon) + ' (data)'
msg5 = 'min, max = ' + str(user_minlon) + ', ' + str(user_maxlon) + ' (user)'
msg6 = 'New longitude indices = ' + i1s + ', ' + i2s
self.append_download_log([msg1, msg2, msg3, msg4, msg5, msg6, ' '])
return (lon_i1, lon_i2)
# get_new_lon_index_range()
#--------------------------------------------------------------------
def get_duration(self, start_date=None, start_time=None,
end_date=None, end_time=None,
dur_units=None, REPORT=False):
#------------------------------------------------
# Note: Compute time span between 2 datetimes.
#------------------------------------------------
## date_sep = '/'
date_sep = '-'
time_sep = ':'
#-------------------------------------
# Get parts of the start date & time
#-------------------------------------
(y1, m1, d1) = self.split_date_str( start_date )
(h1, mm1, s1) = self.split_time_str( start_time )
#-----------------------------------
# Get parts of the end date & time
#-----------------------------------
(y2, m2, d2) = self.split_date_str( end_date )
(h2, mm2, s2) = self.split_time_str( end_time )
#------------------------------
# Convert to datetime objects
#------------------------------
start_obj = datetime.datetime(y1, m1, d1, h1, mm1, s1)
end_obj = datetime.datetime(y2, m2, d2, h2, mm2, s2)
#---------------------------------------------
        # Compute time duration between start and end
#---------------------------------------------
duration_obj = (end_obj - start_obj)
duration_secs = duration_obj.total_seconds()
#-----------------------------------------
# Convert duration to dur_units provided
#-----------------------------------------
if (dur_units == 'seconds'):
duration = duration_secs
elif (dur_units == 'minutes'):
duration = (duration_secs / 60.0)
elif (dur_units == 'hours'):
duration = (duration_secs / 3600.0)
elif (dur_units == 'days'):
duration = (duration_secs / 86400.0)
elif (dur_units == 'years'):
duration = (duration_secs / 31536000.0)
else:
print('Unknown duration units = ' + dur_units + '.')
print('Returning duration in hours.')
duration = (duration_secs / 3600.0)
if (REPORT):
print( 'duration =', duration, '[' + dur_units + ']' )
return duration
#-----------------------------------------
# Alternate approach, where dur_units is
# determined and then returned
#-----------------------------------------
# if (duration_secs < 60):
# duration = duration_secs
# dur_units = 'seconds'
# elif (duration_secs < 3600):
# duration = divmod( duration_secs, 60 )[0]
# dur_units = 'minutes'
# elif (duration_secs < 86400):
# duration = divmod( duration_secs, 3600 )[0]
# dur_units = 'hours'
# elif (duration_secs < 31536000):
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
# else:
# duration = divmod( duration_secs, 86400 )[0]
# dur_units = 'days'
#
# return (duration, dur_units)
# get_duration()
#--------------------------------------------------------------------
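    #--------------------------------------------------------------------
    # Worked example for get_duration(): a start of 2020-01-01 00:00:00
    # and an end of 2020-01-02 06:00:00 give duration_secs = 108000,
    # i.e. 30.0 for dur_units='hours' or 1.25 for dur_units='days'.
    #--------------------------------------------------------------------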
def get_download_format(self):
return self.download_format.value
# get_download_format()
#--------------------------------------------------------------------
def clear_download_log(self):
self.download_log.value = ''
# clear_download_log()
#--------------------------------------------------------------------
def append_download_log(self, msg):
## type_str = str( type(msg) )
## if (type_str == "<class 'list'>"):
if (isinstance( msg, list)):
for string in msg:
self.download_log.value += (string + '\n')
else:
self.download_log.value += (msg + '\n')
# append_download_log()
#--------------------------------------------------------------------
def print_user_choices(self):
if not(hasattr(self, 'dataset')):
msg = 'ERROR: No dataset has been selected.'
self.append_download_log( msg )
return ############
start_datetime_obj = self.get_start_datetime_obj()
if (start_datetime_obj is not None):
start_date = str( start_datetime_obj.date() )
start_time = str( start_datetime_obj.time() )
else:
start_date = 'unknown'
start_time = 'unknown'
end_datetime_obj = self.get_end_datetime_obj()
if (end_datetime_obj is not None):
end_date = str( end_datetime_obj.date() )
end_time = str( end_datetime_obj.time() )
else:
end_date = 'unknown'
end_time = 'unknown'
#------------------------------------------
# Show message in downloads panel log box
#------------------------------------------
msg1 = 'var short name = ' + self.get_var_shortname()
msg2 = 'download format = ' + self.get_download_format()
msg3 = 'map bounds = ' + str(self.get_map_bounds( FROM_MAP=False ))
msg4 = 'start date and time = ' + start_date + ' ' + start_time
msg5 = 'end date and time = ' + end_date + ' ' + end_time
## msg6 = 'opendap package = ' + self.get_opendap_package()
msgs = [msg1, msg2, msg3, msg4, msg5]
self.append_download_log( msgs )
# print_user_choices()
#--------------------------------------------------------------------
def download_data(self, caller_obj=None):
#-------------------------------------------------
# Note: After a reset, self still has a dataset,
# but short_name was reset to ''.
#-------------------------------------------------
short_name = self.get_var_shortname()
if (short_name == ''):
msg = 'Sorry, no variable has been selected.'
self.download_log.value = msg
return
#----------------------------------------------------
# Note: This is called by the "on_click" method of
# the "Go" button beside the Dropdown of filenames.
# In this case, type(caller_obj) =
# <class 'ipywidgets.widgets.widget_button.Button'>
#----------------------------------------------------
## status = self.download_status
self.print_user_choices()
#--------------------------------------------------
# print_user_choices() already displayed error msg
#--------------------------------------------------
if not(hasattr(self, 'dataset')):
return
#----------------------------------------
# Get names of the variables dimensions
#----------------------------------------
dim_list = self.dataset[ short_name ].dimensions
#--------------------------------------
# Uncomment to test other time_deltas
#------------------------------------------
# If test time_delta is too small, we'll
# get a start_index that is out of range.
# Next 3 worked in some SST tests.
#------------------------------------------
# self.time_delta = '0000-02-00 00:00:00'
# self.time_delta = '0000-00-30 12:00:00'
# self.time_delta = '0001-00-00 00:00:00'
#----------------------------------------------
# Is there a time variable ? If so, use time
# range selected in GUI to clip the data.
#----------------------------------------------
(t_i1, t_i2) = self.get_new_time_index_range( REPORT=True)
#--------------------------------------------
# Is there a lat variable ? If so, use lat
# range selected in GUI to clip the data.
# Default is the full range.
#--------------------------------------------
(lat_i1, lat_i2) = self.get_new_lat_index_range( REPORT=True)
#--------------------------------------------
# Is there a lon variable ? If so, use lon
# range selected in GUI to clip the data.
# Default is the full range.
#--------------------------------------------
(lon_i1, lon_i2) = self.get_new_lon_index_range( REPORT=True)
#--------------------------------------
# Did user set a spatial resolution ?
#--------------------------------------
        # Asynchronous download. How do we know it's here?
# print('Downloading variable:', short_name, '...' )
# print('Variable saved in: balto.user_var')
# print()
msg1 = 'Downloading variable: ' + short_name + '...'
msg2 = 'Variable saved in: balto.user_var'
msg3 = ' '
self.append_download_log( [msg1, msg2, msg3] )
#---------------------------------------------
# Convert reference to actual numpy variable
# which causes it to be downloaded, and then
# store it into balto.user_var.
#---------------------------------------------------
# This grid includes var and its dimension vectors.
# Note: type(pydap_grid) = pydap.model.GridType
#---------------------------------------------------
pydap_grid = self.dataset[ short_name ]
ndims = len( pydap_grid.dimensions ) # (e.g. time, lat, lon)
## data_obj = self.dataset[ short_name ]
## data_dims = data_obj.dimensions
## ndim = len( data_dims )
#------------------------------------------------
# Actually download the data here to a variable
# in the notebook, but restrict indices first,
# to only download the required data.
#------------------------------------------------
if (ndims == 3):
#-------------------------------------
# Assume dims are: (time, lat, lon)
#------------------------------------------
# After subscripting, grid still has type:
# pydap.model.GridType
#------------------------------------------
if (lat_i1 is None) or (lon_i1 is None):
if (t_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[t_i1:t_i2, :, :]
else:
if (t_i1 is None):
grid = pydap_grid[:, lat_i1:lat_i2, lon_i1:lon_i2]
else:
grid = pydap_grid[t_i1:t_i2, lat_i1:lat_i2, lon_i1:lon_i2]
#----------------------------------------
elif (ndims == 1): # time series
if (t_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[t_i1:t_i2]
#-----------------------------------
elif (ndims == 2): # spatial grid
#-------------------------------
# Assume dims are: (lat, lon)
#-------------------------------
if (lat_i1 is None) or (lon_i1 is None):
grid = pydap_grid[:]
else:
grid = pydap_grid[lat_i1:lat_i2, lon_i1:lon_i2]
#------------------------------------
else:
grid = pydap_grid[:]
#--------------------------------------------------
# Note: type(pydap_grid) = pydap.model.gridtype
# type(grid) = pydap.model.gridtype
# type(grid[:].data) = list
# type(grid.data) = list
#--------------------------------------------------
# Subscript by *ranges* doesn't change data type.
#--------------------------------------------------
grid_list = grid.data ########
n_list = len(grid_list)
var = grid_list[0]
# For testing
# print('## type(grid) =', type(grid) )
# print('## type(grid.data) =', type(grid_list) )
# print('## len(grid.data) =', n_list )
# print('## type(var) =', type(var) )
# print()
times = None # (defaults)
lats = None
lons = None
if (n_list > 1):
times = grid_list[1]
if (n_list > 2):
lats = grid_list[2]
if (n_list > 3):
lons = grid_list[3]
#----------------------------------------------
# Are lats in reverse order ? (2020-12-12)
# MUST DO THIS BEFORE SUBSETTING WITH INDICES
#----------------------------------------------
# origin = None
# if (lats is not None):
# if (lats[0] > lats[-1]):
# origin = 'upper' # (row major?)
# lats.sort() #############################
# else:
# origin = 'lower'
#----------------------------------------------
# Adjust the longitudes ?
# MUST DO THIS BEFORE SUBSETTING WITH INDICES
#----------------------------------------------
# if (n_list > 3):
# SIGNED_LONS = True
# if (SIGNED_LONS):
# #----------------------------------------
# # Convert lons to have range [-180,180]
# #----------------------------------------
# lons = ((lons + 180.0) % 360) - 180
# lons.sort() #################
#-----------------------------
# Is there a missing value ?
# Is there a fill value ?
#-----------------------------
atts = pydap_grid.attributes
REPLACE_MISSING = False
if ('missing_value' in atts.keys()):
REPLACE_MISSING = True
missing_value = pydap_grid.attributes['missing_value']
w = (var == missing_value)
#---------------------------------------
# Is there a scale factor and offset ?
#---------------------------------------
if ('scale_factor' in atts.keys()):
#---------------------------------------------------
# Note: var may have type ">i2" while scale_factor
# may have type "float64", so need to upcast
# var and can't use "*="
#---------------------------------------------------
factor = pydap_grid.attributes['scale_factor']
## print('type(var) =', type(var))
## print('type(factor) =', type(factor))
var = var * factor
if ('add_offset' in atts.keys()):
offset = pydap_grid.attributes['add_offset']
## print('type(var) =', type(var))
## print('type(offset) =', type(offset))
var = var + offset
#-----------------------------------------
# Restore missing values after scaling ?
#-----------------------------------------
if (REPLACE_MISSING):
var[w] = missing_value
#-----------------------------------------
# Save var into balto object as user_var
#-----------------------------------------
self.user_var = var
self.user_var_times = times # (maybe None)
self.user_var_lats = lats # (maybe None)
self.user_var_lons = lons # (maybe None)
#----------------------------------------------------
# Could define self.user_var as a list, and append
# new variables to the list as downloaded.
# Could also put them into a dictionary.
#----------------------------------------------------
# download_data()
#--------------------------------------------------------------------
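    #--------------------------------------------------------------------
    # Note on the scale_factor / add_offset handling in download_data():
    # it follows the usual NetCDF packed-data convention,
    #     unpacked = packed * scale_factor + add_offset
    # e.g. a packed int16 value of 1200 with scale_factor=0.01 and
    # add_offset=273.15 decodes to 285.15.  Missing values are restored
    # afterwards so they are not scaled by mistake.
    #--------------------------------------------------------------------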
def show_grid(self, grid, var_name=None, extent=None,
cmap='rainbow', xsize=8, ysize=8 ):
#---------------------------------------------------
# Note: extent = [minlon, maxlon, minlat, maxlat]
# But get_map_bounds() returns:
# (minlon, minlat, maxlon, maxlat)
#---------------------------------------------------
if (grid.ndim != 2):
print('Sorry, show_grid() only works for 2D arrays.')
return
if (var_name is None):
var_name = self.data_var_long_name.value
## var_name = self.data_var_name.value
if (extent is None):
extent = self.get_map_bounds(style='plt.imshow')
## (minlon, minlat, maxlon, maxlat) = self.get_map_bounds()
## extent = [minlon, maxlon, minlat, maxlat]
bp.show_grid_as_image( grid, var_name, extent=extent,
                    cmap=cmap, stretch='hist_equal',
xsize=xsize, ysize=ysize,
nodata_value=None )
## NO_SHOW=False, im_file=None,
# show_grid()
#--------------------------------------------------------------------
def get_opendap_package(self):
return self.prefs_package.value
#--------------------------------------------------------------------
def get_abbreviated_var_name(self, abbreviation ):
        name_map = {
        'lat' : ['geodetic_latitude', 'quantity'],
        'lon' : ['geodetic_longitude', 'quantity'],
        'sst' : ['sea_surface__temperature', 'variable'],
        'temp': ['temperature', 'quantity'],
        'x'   : ['x-coordinate', 'quantity'],
        'y'   : ['y-coordinate', 'quantity'],
        'z'   : ['z-coordinate', 'quantity'] }
        try:
           return name_map[ abbreviation ]
        except KeyError:
           print('Sorry, no matches found for abbreviation.')
# get_abbreviated_var_name()
#--------------------------------------------------------------------
def get_possible_svo_names(self, var_name, SHOW_IRI=False):
#-----------------------------------------------------
# Use the SVO "match phrase" service to get a
# ranked list of possible SVO variable name matches.
#-----------------------------------------------------
# var_name should be a list of words, as a single
# string, separated by underscores.
#-----------------------------------------------------
var_name2 = var_name.replace(' ', '_')
match_phrase_svc = 'http://34.73.227.230:8000/match_phrase/'
match_phrase_url = match_phrase_svc + var_name2 + '/'
print('Working...')
#-----------------------------------------------------------------
# The result is in JSON format, for example:
# result = { "results": [
# {"IRI":"result1_IRI", "label":"result1_label", "matchrank": "result1_rank"},
# {"IRI":"result2_IRI", "label":"result2_label", "matchrank": "result2_rank"} ] }
#------------------------------------------------------------------
result = requests.get( match_phrase_url )
print('Finished.')
print()
json_str = result.text
# print( json_str )
json_data = json.loads( json_str )
match_list = json_data['results']
for item in match_list:
## print('item =', item)
if (SHOW_IRI):
print('IRI =', item['IRI'])
print('label =', item['label'])
print('rank =', item['matchrank'])
print()
# get_possible_svo_names()
#-------------------------------------------------------------------
|
[
"IPython.display.display",
"ipywidgets.VBox",
"ipywidgets.Dropdown",
"ipyleaflet.FullScreenControl",
"ipywidgets.BoundedIntText",
"datetime.timedelta",
"copy.copy",
"numpy.arange",
"ipywidgets.HBox",
"datetime.datetime",
"ipywidgets.Button",
"balto_plot.show_grid_as_image",
"ipywidgets.Output",
"ipyleaflet.MeasureControl",
"traitlets.Tuple",
"ipywidgets.HTML",
"numpy.abs",
"json.loads",
"requests.get",
"ipywidgets.Layout",
"numpy.logical_and"
] |
[((7936, 7952), 'ipywidgets.Output', 'widgets.Output', ([], {}), '()\n', (7950, 7952), True, 'import ipywidgets as widgets\n'), ((7961, 7990), 'IPython.display.display', 'display', (['self.gui', 'gui_output'], {}), '(self.gui, gui_output)\n', (7968, 7990), False, 'from IPython.display import display, HTML\n'), ((9529, 9599), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 'f"""<b><font size=4>BALTO User Interface</font></b>"""'}), "(value=f'<b><font size=4>BALTO User Interface</font></b>')\n", (9541, 9599), True, 'import ipywidgets as widgets\n'), ((9742, 9767), 'ipywidgets.VBox', 'widgets.VBox', (['[head, acc]'], {}), '([head, acc])\n', (9754, 9767), True, 'import ipywidgets as widgets\n'), ((11753, 11823), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 'f"""<b><font size=5>BALTO User Interface</font></b>"""'}), "(value=f'<b><font size=5>BALTO User Interface</font></b>')\n", (11765, 11823), True, 'import ipywidgets as widgets\n'), ((11951, 11976), 'ipywidgets.VBox', 'widgets.VBox', (['[head, tab]'], {}), '([head, tab])\n', (11963, 11976), True, 'import ipywidgets as widgets\n'), ((15890, 15912), 'ipywidgets.HBox', 'widgets.HBox', (['[o1, b1]'], {}), '([o1, b1])\n', (15902, 15912), True, 'import ipywidgets as widgets\n'), ((15961, 15983), 'ipywidgets.HBox', 'widgets.HBox', (['[o9, b2]'], {}), '([o9, b2])\n', (15973, 15983), True, 'import ipywidgets as widgets\n'), ((16032, 16054), 'ipywidgets.VBox', 'widgets.VBox', (['[o3, o5]'], {}), '([o3, o5])\n', (16044, 16054), True, 'import ipywidgets as widgets\n'), ((16119, 16141), 'ipywidgets.VBox', 'widgets.VBox', (['[o4, o6]'], {}), '([o4, o6])\n', (16131, 16141), True, 'import ipywidgets as widgets\n'), ((16161, 16195), 'ipywidgets.HBox', 'widgets.HBox', (['[name_box, unit_box]'], {}), '([name_box, unit_box])\n', (16173, 16195), True, 'import ipywidgets as widgets\n'), ((16281, 16339), 'ipywidgets.VBox', 'widgets.VBox', (['[url_box, o2, oL, mid_box, o7, o8, stat_box]'], {}), '([url_box, o2, oL, mid_box, o7, o8, stat_box])\n', (16293, 16339), True, 'import ipywidgets as widgets\n'), ((22714, 22740), 'ipywidgets.HTML', 'widgets.HTML', (["(' ' * 2)"], {}), "(' ' * 2)\n", (22726, 22740), True, 'import ipywidgets as widgets\n'), ((23519, 23541), 'ipywidgets.VBox', 'widgets.VBox', (['[w1, w2]'], {}), '([w1, w2])\n', (23531, 23541), True, 'import ipywidgets as widgets\n'), ((23558, 23580), 'ipywidgets.VBox', 'widgets.VBox', (['[w3, w4]'], {}), '([w3, w4])\n', (23570, 23580), True, 'import ipywidgets as widgets\n'), ((23597, 23619), 'ipywidgets.VBox', 'widgets.VBox', (['[pd, pd]'], {}), '([pd, pd])\n', (23609, 23619), True, 'import ipywidgets as widgets\n'), ((23636, 23658), 'ipywidgets.VBox', 'widgets.VBox', (['[b1, b2]'], {}), '([b1, b2])\n', (23648, 23658), True, 'import ipywidgets as widgets\n'), ((23675, 23713), 'ipywidgets.HBox', 'widgets.HBox', (['[lons, lats, pads, btns]'], {}), '([lons, lats, pads, btns])\n', (23687, 23713), True, 'import ipywidgets as widgets\n'), ((28738, 28764), 'ipywidgets.HTML', 'widgets.HTML', (["(' ' * 3)"], {}), "(' ' * 3)\n", (28750, 28764), True, 'import ipywidgets as widgets\n'), ((29995, 30017), 'ipywidgets.VBox', 'widgets.VBox', (['[d1, d2]'], {}), '([d1, d2])\n', (30007, 30017), True, 'import ipywidgets as widgets\n'), ((30034, 30056), 'ipywidgets.VBox', 'widgets.VBox', (['[d3, d4]'], {}), '([d3, d4])\n', (30046, 30056), True, 'import ipywidgets as widgets\n'), ((30073, 30095), 'ipywidgets.VBox', 'widgets.VBox', (['[d5, d6]'], {}), '([d5, d6])\n', (30085, 30095), True, 'import ipywidgets as widgets\n'), 
((30112, 30134), 'ipywidgets.VBox', 'widgets.VBox', (['[pp, pp]'], {}), '([pp, pp])\n', (30124, 30134), True, 'import ipywidgets as widgets\n'), ((30151, 30191), 'ipywidgets.HBox', 'widgets.HBox', (['[dates, times, pad, hints]'], {}), '([dates, times, pad, hints])\n', (30163, 30191), True, 'import ipywidgets as widgets\n'), ((30208, 30235), 'ipywidgets.VBox', 'widgets.VBox', (['[top, d7, d8]'], {}), '([top, d7, d8])\n', (30220, 30235), True, 'import ipywidgets as widgets\n'), ((30780, 30929), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""Download Format:"""', 'options': "['HDF', 'netCDF', 'netCDF4', 'ASCII']", 'value': '"""netCDF"""', 'disabled': '(False)', 'style': 'init_style'}), "(description='Download Format:', options=['HDF', 'netCDF',\n 'netCDF4', 'ASCII'], value='netCDF', disabled=False, style=init_style)\n", (30796, 30929), True, 'import ipywidgets as widgets\n'), ((31034, 31065), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 'f"""<p> </p>"""'}), "(value=f'<p> </p>')\n", (31046, 31065), True, 'import ipywidgets as widgets\n'), ((31092, 31130), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Download"""'}), "(description='Download')\n", (31106, 31130), True, 'import ipywidgets as widgets\n'), ((31145, 31172), 'ipywidgets.HBox', 'widgets.HBox', (['[f1, pad, b3]'], {}), '([f1, pad, b3])\n', (31157, 31172), True, 'import ipywidgets as widgets\n'), ((31817, 31840), 'ipywidgets.VBox', 'widgets.VBox', (['[h3, log]'], {}), '([h3, log])\n', (31829, 31840), True, 'import ipywidgets as widgets\n'), ((32418, 32549), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'description': '"""OpenDAP package:"""', 'options': "['pydap', 'netcdf4']", 'value': '"""pydap"""', 'disabled': '(False)', 'style': 'left_style'}), "(description='OpenDAP package:', options=['pydap',\n 'netcdf4'], value='pydap', disabled=False, style=left_style)\n", (32434, 32549), True, 'import ipywidgets as widgets\n'), ((32711, 32831), 'ipywidgets.BoundedIntText', 'widgets.BoundedIntText', ([], {'description': '"""Timeout:"""', 'value': 'ts', 'min': '(10)', 'max': '(1000)', 'step': '(1)', 'disabled': '(False)', 'style': 'left_style'}), "(description='Timeout:', value=ts, min=10, max=1000,\n step=1, disabled=False, style=left_style)\n", (32733, 32831), True, 'import ipywidgets as widgets\n'), ((33034, 33056), 'ipywidgets.HBox', 'widgets.HBox', (['[t1, t2]'], {}), '([t1, t2])\n', (33046, 33056), True, 'import ipywidgets as widgets\n'), ((33378, 33404), 'ipywidgets.VBox', 'widgets.VBox', (['[w1, w2, w3]'], {}), '([w1, w2, w3])\n', (33390, 33404), True, 'import ipywidgets as widgets\n'), ((38674, 38681), 'traitlets.Tuple', 'Tuple', ([], {}), '()\n', (38679, 38681), False, 'from traitlets import Tuple\n'), ((42594, 42631), 'requests.get', 'requests.get', (['self.data_url_dir.value'], {}), '(self.data_url_dir.value)\n', (42606, 42631), False, 'import requests\n'), ((70721, 70758), 'datetime.datetime', 'datetime.datetime', (['y', 'm1', 'd', 'h', 'm2', 's'], {}), '(y, m1, d, h, m2, s)\n', (70738, 70758), False, 'import datetime\n'), ((71182, 71219), 'datetime.datetime', 'datetime.datetime', (['y', 'm1', 'd', 'h', 'm2', 's'], {}), '(y, m1, d, h, m2, s)\n', (71199, 71219), False, 'import datetime\n'), ((91452, 91477), 'numpy.abs', 'np.abs', (['(lats[1] - lats[0])'], {}), '(lats[1] - lats[0])\n', (91458, 91477), True, 'import numpy as np\n'), ((91908, 91924), 'numpy.arange', 'np.arange', (['nlats'], {}), '(nlats)\n', (91917, 91924), True, 'import numpy as np\n'), ((91939, 91993), 'numpy.logical_and', 
'np.logical_and', (['(lats > user_minlat)', '(lats < user_maxlat)'], {}), '(lats > user_minlat, lats < user_maxlat)\n', (91953, 91993), True, 'import numpy as np\n'), ((97331, 97356), 'numpy.abs', 'np.abs', (['(lons[1] - lons[0])'], {}), '(lons[1] - lons[0])\n', (97337, 97356), True, 'import numpy as np\n'), ((98555, 98571), 'numpy.arange', 'np.arange', (['nlons'], {}), '(nlons)\n', (98564, 98571), True, 'import numpy as np\n'), ((98586, 98640), 'numpy.logical_and', 'np.logical_and', (['(lons > user_minlon)', '(lons < user_maxlon)'], {}), '(lons > user_minlon, lons < user_maxlon)\n', (98600, 98640), True, 'import numpy as np\n'), ((102245, 102287), 'datetime.datetime', 'datetime.datetime', (['y1', 'm1', 'd1', 'h1', 'mm1', 's1'], {}), '(y1, m1, d1, h1, mm1, s1)\n', (102262, 102287), False, 'import datetime\n'), ((102308, 102350), 'datetime.datetime', 'datetime.datetime', (['y2', 'm2', 'd2', 'h2', 'mm2', 's2'], {}), '(y2, m2, d2, h2, mm2, s2)\n', (102325, 102350), False, 'import datetime\n'), ((118063, 118202), 'balto_plot.show_grid_as_image', 'bp.show_grid_as_image', (['grid', 'var_name'], {'extent': 'extent', 'cmap': '"""rainbow"""', 'stretch': '"""hist_equal"""', 'xsize': 'xsize', 'ysize': 'ysize', 'nodata_value': 'None'}), "(grid, var_name, extent=extent, cmap='rainbow',\n stretch='hist_equal', xsize=xsize, ysize=ysize, nodata_value=None)\n", (118084, 118202), True, 'import balto_plot as bp\n'), ((120396, 120426), 'requests.get', 'requests.get', (['match_phrase_url'], {}), '(match_phrase_url)\n', (120408, 120426), False, 'import requests\n'), ((120553, 120573), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (120563, 120573), False, 'import json\n'), ((12562, 12583), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 's'}), '(value=s)\n', (12574, 12583), True, 'import ipywidgets as widgets\n'), ((12647, 12668), 'ipywidgets.HTML', 'widgets.HTML', ([], {'value': 's'}), '(value=s)\n', (12659, 12668), True, 'import ipywidgets as widgets\n'), ((21018, 21117), 'ipyleaflet.MeasureControl', 'MeasureControl', ([], {'position': '"""bottomright"""', 'active_color': '"""orange"""', 'primary_length_unit': '"""kilometers"""'}), "(position='bottomright', active_color='orange',\n primary_length_unit='kilometers')\n", (21032, 21117), False, 'from ipyleaflet import MeasureControl, Rectangle\n'), ((24135, 24162), 'ipywidgets.VBox', 'widgets.VBox', (['[m, bbox, bm]'], {}), '([m, bbox, bm])\n', (24147, 24162), True, 'import ipywidgets as widgets\n'), ((24199, 24223), 'ipywidgets.VBox', 'widgets.VBox', (['[bbox, bm]'], {}), '([bbox, bm])\n', (24211, 24223), True, 'import ipywidgets as widgets\n'), ((75577, 75613), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'time_since2'}), '(days=time_since2)\n', (75595, 75613), False, 'import datetime\n'), ((75667, 75704), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'time_since2'}), '(hours=time_since2)\n', (75685, 75704), False, 'import datetime\n'), ((75760, 75799), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'time_since2'}), '(minutes=time_since2)\n', (75778, 75799), False, 'import datetime\n'), ((75855, 75894), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'time_since2'}), '(seconds=time_since2)\n', (75873, 75894), False, 'import datetime\n'), ((83443, 83501), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'd', 'hours': 'h', 'minutes': 'm2', 'seconds': 's'}), '(days=d, hours=h, minutes=m2, seconds=s)\n', (83461, 83501), False, 'import datetime\n'), ((84509, 84536), 'copy.copy', 
'copy.copy', (['min_datetime_obj'], {}), '(min_datetime_obj)\n', (84518, 84536), False, 'import copy\n'), ((84881, 84908), 'copy.copy', 'copy.copy', (['min_datetime_obj'], {}), '(min_datetime_obj)\n', (84890, 84908), False, 'import copy\n'), ((9086, 9112), 'ipywidgets.Layout', 'Layout', ([], {'width': 'gui_width_px'}), '(width=gui_width_px)\n', (9092, 9112), False, 'from ipywidgets import Layout\n'), ((11237, 11263), 'ipywidgets.Layout', 'Layout', ([], {'width': 'gui_width_px'}), '(width=gui_width_px)\n', (11243, 11263), False, 'from ipywidgets import Layout\n'), ((13568, 13595), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (13574, 13595), False, 'from ipywidgets import Layout\n'), ((13650, 13676), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (13656, 13676), False, 'from ipywidgets import Layout\n'), ((13892, 13919), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (13898, 13919), False, 'from ipywidgets import Layout\n'), ((14111, 14138), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (14117, 14138), False, 'from ipywidgets import Layout\n'), ((14412, 14439), 'ipywidgets.Layout', 'Layout', ([], {'width': 'left_width_px'}), '(width=left_width_px)\n', (14418, 14439), False, 'from ipywidgets import Layout\n'), ((14551, 14578), 'ipywidgets.Layout', 'Layout', ([], {'width': 'next_width_px'}), '(width=next_width_px)\n', (14557, 14578), False, 'from ipywidgets import Layout\n'), ((14771, 14798), 'ipywidgets.Layout', 'Layout', ([], {'width': 'left_width_px'}), '(width=left_width_px)\n', (14777, 14798), False, 'from ipywidgets import Layout\n'), ((14910, 14937), 'ipywidgets.Layout', 'Layout', ([], {'width': 'next_width_px'}), '(width=next_width_px)\n', (14916, 14937), False, 'from ipywidgets import Layout\n'), ((15129, 15156), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (15135, 15156), False, 'from ipywidgets import Layout\n'), ((15375, 15402), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (15381, 15402), False, 'from ipywidgets import Layout\n'), ((15521, 15548), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_width_px'}), '(width=full_width_px)\n', (15527, 15548), False, 'from ipywidgets import Layout\n'), ((15619, 15645), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (15625, 15645), False, 'from ipywidgets import Layout\n'), ((20395, 20443), 'ipywidgets.Layout', 'Layout', ([], {'width': 'map_width_px', 'height': 'map_height_px'}), '(width=map_width_px, height=map_height_px)\n', (20401, 20443), False, 'from ipywidgets import Layout\n'), ((20625, 20663), 'ipyleaflet.FullScreenControl', 'FullScreenControl', ([], {'position': '"""topright"""'}), "(position='topright')\n", (20642, 20663), False, 'from ipyleaflet import Map, basemaps, FullScreenControl\n'), ((21874, 21901), 'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (21880, 21901), False, 'from ipywidgets import Layout\n'), ((22110, 22137), 'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (22116, 22137), False, 'from ipywidgets import Layout\n'), ((22389, 22416), 'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (22395, 22416), False, 'from ipywidgets import Layout\n'), ((22669, 22696), 
'ipywidgets.Layout', 'Layout', ([], {'width': 'bbox_width_px'}), '(width=bbox_width_px)\n', (22675, 22696), False, 'from ipywidgets import Layout\n'), ((22843, 22869), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (22849, 22869), False, 'from ipywidgets import Layout\n'), ((22955, 22981), 'ipywidgets.Layout', 'Layout', ([], {'width': 'btn_width_px'}), '(width=btn_width_px)\n', (22961, 22981), False, 'from ipywidgets import Layout\n'), ((23342, 23363), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""360px"""'}), "(width='360px')\n", (23348, 23363), False, 'from ipywidgets import Layout\n'), ((27953, 27980), 'ipywidgets.Layout', 'Layout', ([], {'width': 'date_width_px'}), '(width=date_width_px)\n', (27959, 27980), False, 'from ipywidgets import Layout\n'), ((28122, 28149), 'ipywidgets.Layout', 'Layout', ([], {'width': 'date_width_px'}), '(width=date_width_px)\n', (28128, 28149), False, 'from ipywidgets import Layout\n'), ((28287, 28314), 'ipywidgets.Layout', 'Layout', ([], {'width': 'time_width_px'}), '(width=time_width_px)\n', (28293, 28314), False, 'from ipywidgets import Layout\n'), ((28450, 28477), 'ipywidgets.Layout', 'Layout', ([], {'width': 'time_width_px'}), '(width=time_width_px)\n', (28456, 28477), False, 'from ipywidgets import Layout\n'), ((28859, 28886), 'ipywidgets.Layout', 'Layout', ([], {'width': 'hint_width_px'}), '(width=hint_width_px)\n', (28865, 28886), False, 'from ipywidgets import Layout\n'), ((29101, 29128), 'ipywidgets.Layout', 'Layout', ([], {'width': 'hint_width_px'}), '(width=hint_width_px)\n', (29107, 29128), False, 'from ipywidgets import Layout\n'), ((29482, 29513), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_box_width_px'}), '(width=full_box_width_px)\n', (29488, 29513), False, 'from ipywidgets import Layout\n'), ((29867, 29914), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_box_width_px', 'height': '"""140px"""'}), "(width=full_box_width_px, height='140px')\n", (29873, 29914), False, 'from ipywidgets import Layout\n'), ((31704, 31744), 'ipywidgets.Layout', 'Layout', ([], {'width': 'width_px', 'height': 'height_px'}), '(width=width_px, height=height_px)\n', (31710, 31744), False, 'from ipywidgets import Layout\n'), ((32998, 33018), 'ipywidgets.Layout', 'Layout', ([], {'width': '"""80px"""'}), "(width='80px')\n", (33004, 33018), False, 'from ipywidgets import Layout\n'), ((33291, 33337), 'ipywidgets.Layout', 'Layout', ([], {'width': 'full_box_width_px', 'height': '"""50px"""'}), "(width=full_box_width_px, height='50px')\n", (33297, 33337), False, 'from ipywidgets import Layout\n')]
|
__author__ = 'stephen'
import numpy as np
import scipy.io
import scipy.sparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as pylab
from .utils import get_subindices
import matplotlib.ticker as mtick
from collections import Counter
from sklearn.neighbors import KernelDensity
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1, potential=False):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
plt.rc("font", size=10)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
if potential is False: #plot Alanine Dipeptide
for i in clusters:
if i != outliers:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)#, color=colors_jet[i])
#else:
# point = np.where(labels == i)
# plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7, color='black') # , color=colors_jet[i])
plt.title("Alanine Dipeptide " + name + " states", fontsize=10)
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-110, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
else: # if plot 2D potential
plt.figure(figsize=(10, 10))
for i in clusters:
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)], '.', markersize=1.0, alpha=0.7) #markersize=20.0, color=colors_jet[i])
#plt.plot(phi_angles[np.where(labels == i)],
# psi_angles[np.where(labels == i)],
# '.', color=colors_jet[i], label='State %d' % i)
#plt.title("2D potential " + name + " states", fontsize=20)
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.xlabel(r"$\phi$", fontsize=25)
plt.ylabel(r"$\psi$", fontsize=25)
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
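#-----------------------------------------------------------------------
# Illustrative usage sketch for plot_cluster(); the helper below and its
# random data are hypothetical (not part of the analysis pipeline) and
# only show how the function is called.
#-----------------------------------------------------------------------
def _demo_plot_cluster(n_points=500, n_states=4):
    '''Plot randomly generated phi/psi angles colored by random labels.'''
    rng = np.random.RandomState(0)
    labels = rng.randint(0, n_states, size=n_points)
    phi_angles = rng.uniform(-180.0, 180.0, size=n_points)
    psi_angles = rng.uniform(-180.0, 180.0, size=n_points)
    # Saves ./demo_plot_cluster.png via plot_cluster()
    plot_cluster(labels, phi_angles, psi_angles, name='demo_plot_cluster')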
def plot_each_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
for i in np.unique(clusters):
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)],
'x', color=colors_jet[i], label='State %d' % i)
#plt.title("Alanine Dipeptide " + name + " state_" + str(i))
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
# Save the result figure
plt.savefig('./'+ name + " state_" + str(i)+'.png', dpi = 400)
plt.close()
#plt.show()
def contour_cluster(labels, phi_angles, psi_angles, name, outliers=-1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
# lables_array = np.array(labels)
# colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(lables_array)+1))
for i in np.unique(labels):
#if i != outliers:
if i == 1:
print("i=", i)
x = phi_angles[np.where(labels == i)]
y = psi_angles[np.where(labels == i)]
indices = get_subindices(assignments=x, state=None, samples=1000)
x = x[indices]
y = y[indices]
X, Y= np.meshgrid(x, y)
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
#kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
#kde_results = kde.score_samples([x,y])
#X, Y, Z = np.meshgrid(x, y, kde_results)
#Z = np.reshape(kernel([x,y]).T, x.shape)
#Z1 = mlab.bivariate_normal(X, Y, 5.0, 5.0, 0.0, 0.0)
#Z2 = mlab.bivariate_normal(X, Y, 7.5, 2.5, 5, 5)
# difference of Gaussians
#Z = 10.0 * (Z2 - Z1)
#step = Z.max()-Z.min()/10
#print "Z min:",Z.min(), "Z.max:", Z.max(), "step:", step
#levels = np.arange(Z.min(), Z.min(), Z.max())
#print levels
plt.contour(X, Y, Z, origin='lower') #, linewidths=Z.min(), levels=levels)
plt.title("Alanine Dipeptide " + name + " states")
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
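#-----------------------------------------------------------------------
# Minimal sketch of the density-estimation step used in contour_cluster():
# fit a 2D Gaussian KDE to the samples and evaluate it on a regular mesh,
# which is the form plt.contour() expects.  The helper name and grid size
# are illustrative only.
#-----------------------------------------------------------------------
def _kde_on_grid(x, y, gridsize=100):
    '''Return mesh arrays X, Y and KDE values Z for plt.contour(X, Y, Z).'''
    values = np.vstack([x, y])              # shape (2, n_samples)
    kernel = stats.gaussian_kde(values)
    X, Y = np.meshgrid(np.linspace(x.min(), x.max(), gridsize),
                       np.linspace(y.min(), y.max(), gridsize))
    positions = np.vstack([X.ravel(), Y.ravel()])
    Z = np.reshape(kernel(positions).T, X.shape)
    return X, Y, Z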
def plot_matrix(tProb_=None, name=None):
'''
if labels is not None:
n_states = len(set(labels)) - (1 if -1 in labels else 0)
print 'n_states=', n_states
#diagC = tProb_.diagonal()
length = len(labels)
print "length=", length
Cmn = scipy.sparse.lil_matrix(n_states, n_states, dtype=np.float32)
Cmn = np.zeros((n_states, n_states))
print "size of tProb", tProb_.shape
if scipy.sparse.issparse(tProb_):
tProb_ = tProb_.todense()
for i in xrange(length):
for j in xrange(length):
Cmn[labels[i], labels[j]] += tProb_[i, j]
#for i in xrange(n_states):
#Cmn[i,i] += diagC[i]
# for j in xrange(n_states):
# Cmn[i, j] += Cmn[j, i]
# Cmn[j, i] = Cmn[i, j]
for j in xrange(n_states):
sum_row = np.sum(Cmn[j,:])
if sum_row is not 0:
Cmn[j,:] /= sum_row
pylab.matshow(Cmn, cmap=plt.cm.OrRd)
else:
'''
pylab.matshow(tProb_, cmap=plt.cm.OrRd)
plt.colorbar()
#pylab.show()
plt.savefig('./' + name + 'Matrix.png', dpi=400)
plt.close()
def plot_block_matrix(labels, tProb_, name='BlockMatrix'):
print("Plot Block Matrix")
indices = np.argsort(labels)
#print indices
block_matrix = tProb_[:,indices]
block_matrix = block_matrix[indices,:]
block_matrix = 1 - block_matrix
#print block_matrix
pylab.matshow(block_matrix, cmap=plt.cm.OrRd)
plt.colorbar()
plt.savefig('./' + name + '.png', dpi=400)
#pylab.show()
plt.close()
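#-----------------------------------------------------------------------
# Note: sorting rows and columns by cluster label (np.argsort above)
# groups states of the same cluster together, so a good lumping shows up
# as dark blocks along the diagonal of the reordered (1 - T) matrix.
#-----------------------------------------------------------------------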
def plot_cluster_size_distribution(populations, name='Populations'):
fig = plt.figure(1, (10,6))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.rc("font", size=30)
plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = range(len(populations))
X_xtick = ['']
    for i in range(1, len(populations)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
plt.xticks(np.arange(len(populations)+1), X_xtick)
plt.ylabel(r"Probability")
plt.ylim([0,100])
print("X:", X)
distrib.bar(X, populations*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181',
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
def plot_compare_cluster_size_distribution(populations_1, populations_2, name='Populations'):
fig = plt.figure(1, (10,8))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
bar_width = 0.45
plt.rc("font", size=20)
#plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = np.arange(len(populations_1))
X_xtick = ['']
    for i in range(1, len(populations_1)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X, populations_1*100, facecolor='black', edgecolor='white', width=bar_width,label="kNN Density Peaks 3645 states") #facecolor='#f78181',
# populations_2
#X = range(len(populations_2))
X_xtick = ['']
    for i in range(1, len(populations_2)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X+bar_width, populations_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states") #facecolor='#f78181',
    plt.xticks(np.arange(len(populations_1)+1) + bar_width, X_xtick)
#plt.ylabel(r"Fraction number of clusters")
plt.ylabel(r"Probability")
plt.ylim([0,60])
plt.legend()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
#From Wang Wei's code
def plot_landscape(labels=None, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=80, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
    # Counts are accumulated over 10-degree intervals, so each dimension needs 36 bins
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '.', markersize=10, color='r')
distribution = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
for i in range(0, len(phi_angles)):
if psi_angles[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_angles[i])[0][0] - 1
if phi_angles[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_angles[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_angles)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
    plt.xlabel(r'$\phi$', fontsize=20)
    plt.ylabel(r'$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
if potential is False:
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
else:
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
#Cluster Centers on Free energy landscape distribution
fig = plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.title('Cluster Centers on Free energy landscape distribution', fontsize=20)
plt.xlabel("$k_B T$")
plt.ylabel(r"Probability")
plt.ylim([0, 100])
plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
distrib.bar(np.arange(10), distribution*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181'
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
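#-----------------------------------------------------------------------
# Note: the landscape above is the usual inverse-Boltzmann estimate.
# With bin counts H and the most populated bin maxH, the relative free
# energy per bin is F = -ln(H / maxH) in units of k_B T, so the most
# populated bin sits at 0 k_B T and empty bins are capped at 9 k_B T.
#-----------------------------------------------------------------------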
def plot_compare_distribution(labels_1=None, labels_2=None, phi_angles=None, psi_angles=None, phi_ctr_1=None, psi_ctr_1=None, phi_ctr_2=None, psi_ctr_2=None, name='Energy_Landscape', bins=36, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
    # Counts are accumulated over 10-degree intervals, so each dimension needs 36 bins
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
#extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
#plt.figure(figsize=(10, 10))
#plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
#if labels_1 is not None:
# plt.plot(phi_ctr_1, psi_ctr_1, '*', markersize=8, color='r')
distribution_1 = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
    for i in range(0, len(phi_ctr_1)):
if psi_ctr_1[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_1[i])[0][0] - 1
if phi_ctr_1[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_1[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_1[index_distrib] += 1
distribution_1 /= len(phi_ctr_1)
print(distribution_1)
distribution_2 = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
    for i in range(0, len(phi_ctr_2)):
if psi_ctr_2[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_2[i])[0][0] - 1
if phi_ctr_2[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_2[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_2[index_distrib] += 1
distribution_2 /= len(phi_ctr_2)
print(distribution_2)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
    plt.xlabel(r'$\phi$', fontsize=20)
    plt.ylabel(r'$\Psi$', fontsize=20)
#cbar = plt.colorbar(shrink=0.77)
##plt.title('Free energy landscape', fontsize=20)
#cbar.set_label("$k_B T$", size=20)
#cbar.ax.tick_params(labelsize=20)
#if potential is False:
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-120, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
#else:
# plt.xlim([-75, 75])
# plt.ylim([-75, 75])
# plt.xticks([-50, 0, 50])
# plt.yticks([-50, 0, 50])
#plt.savefig('./' + name + '.png', dpi=400)
##plt.show()
#plt.close()
#Cluster Centers on Free energy landscape distribution
fig=plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
# plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
n_groups = 10
index = np.arange(n_groups)
bar_width = 0.45
distrib.bar(index, distribution_1*100, facecolor='black', edgecolor='white', width=bar_width, label="kNN Density Peaks 3645 states") #facecolor='#f78181'
distrib.bar(index+bar_width, distribution_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states")
#plt.title('Cluster Centers on Free energy landscape distribution', fontsize=10)
plt.xlabel("$k_B T$")
plt.ylabel(r"Fraction number of clusters")
plt.ylim([0, 50])
    plt.xticks(index+bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9'))
plt.legend()
#plt.tight_layout()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_landscape_barrier(labels=None, selected=1, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=36, potential=False, outliers=-1):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
    # Counts are accumulated over 10-degree intervals, so each dimension needs 36 bins
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot points
colors = ['y', 'b', 'tomato', 'm', 'g', 'c', 'yellowgreen']
color_index = 0
clusters = np.unique(labels)
for i in clusters:
if i != outliers:
if i in selected:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '2', alpha=0.20, color=colors[color_index])#, color=colors_jet[i])
color_index += 1
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '*', markersize=10, color='r')
distribution = np.array([0,0,0,0,0,0,0,0,0,0], dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
    for i in range(0, len(phi_ctr)):
if psi_ctr[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr[i])[0][0] - 1
if phi_ctr[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_ctr)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
    plt.xlabel(r'$\phi$', fontsize=20)
    plt.ylabel(r'$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
plt.plot([-103,-103],[30,180],'w') #plot the barrier
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
def calculate_population(labels, name='Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels).values())
total_states = np.max(labels) + 1
#states_magnitude = int(np.ceil(np.log10(total_states)))
total_frames = len(labels)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
            magnitude = int(np.ceil(log_i))
populations[magnitude] += 1
#print magnitude populations
print("Populations Probability:")
#bins = [0]
    for i in range(len(populations)):
populations[i] = populations[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations[i]*100, "%")
#bins.append(10**(i+1))
name += '_Populations'
print("name:", name)
plot_cluster_size_distribution(populations=populations, name=name)
print("Done.")
def compare_population(labels_1, labels_2, name='Compare_Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels_1).values())
total_states = np.max(labels_1) + 1
total_frames = len(labels_1)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_1 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
            magnitude = int(np.ceil(log_i))
populations_1[magnitude] += 1
print("Populations Probability:")
    for i in range(len(populations_1)):
populations_1[i] = populations_1[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_1[i]*100, "%")
counts = list(Counter(labels_2).values())
total_states = np.max(labels_2) + 1
total_frames = len(labels_2)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_2 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
            magnitude = int(np.ceil(log_i))
populations_2[magnitude] += 1
print("Populations Probability:")
    for i in range(len(populations_2)):
populations_2[i] = populations_2[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_2[i]*100, "%")
name += '_Populations'
print("name:", name)
plot_compare_cluster_size_distribution(populations_1=populations_1, populations_2=populations_2, name=name)
#plot_cluster_size_distribution(populations_1=populations_1, name=name)
print("Done.")
def calculate_landscape(labels, centers, phi_angles, psi_angles, potential=False, name='Energy_Landscape'):
print("Calculating and plotting Landscape...")
phi_ctr = phi_angles[centers]
psi_ctr = psi_angles[centers]
labels_ctr = labels[centers]
name = name + '_Energy_Landscape'
print("name:", name)
plot_landscape(labels=labels_ctr, phi_angles=phi_angles, psi_angles=psi_angles, phi_ctr=phi_ctr, psi_ctr=psi_ctr, potential=potential, name=name)
print("Done")
#plot_landscape(labels=None, phi_angles=phi_angles, psi_angles=psi_angles)
|
[
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"scipy.stats.gaussian_kde",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.min",
"numpy.histogram2d",
"matplotlib.pyplot.ylim",
"numpy.meshgrid",
"numpy.ceil",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.legend",
"matplotlib.pylab.matshow",
"matplotlib.pyplot.rc",
"numpy.unique",
"matplotlib.pyplot.colorbar",
"collections.Counter",
"matplotlib.pyplot.figure",
"numpy.zeros"
] |
[((96, 117), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (110, 117), False, 'import matplotlib\n'), ((818, 835), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (827, 835), True, 'import numpy as np\n'), ((840, 863), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(10)'}), "('font', size=10)\n", (846, 863), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(25)'}), "('$\\\\phi$', fontsize=25)\n", (2415, 2439), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {'fontsize': '(25)'}), "('$\\\\psi$', fontsize=25)\n", (2454, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2554), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (2523, 2554), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2566), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2564, 2566), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2966), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (2958, 2966), True, 'import numpy as np\n'), ((3115, 3134), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (3124, 3134), True, 'import numpy as np\n'), ((4273, 4290), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (4282, 4290), True, 'import numpy as np\n'), ((5550, 5600), 'matplotlib.pyplot.title', 'plt.title', (["('Alanine Dipeptide ' + name + ' states')"], {}), "('Alanine Dipeptide ' + name + ' states')\n", (5559, 5600), True, 'import matplotlib.pyplot as plt\n'), ((5605, 5626), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {}), "('$\\\\phi$')\n", (5615, 5626), True, 'import matplotlib.pyplot as plt\n'), ((5631, 5652), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {}), "('$\\\\psi$')\n", (5641, 5652), True, 'import matplotlib.pyplot as plt\n'), ((5658, 5679), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (5666, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5705), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (5692, 5705), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5781), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (5750, 5781), True, 'import matplotlib.pyplot as plt\n'), ((5782, 5793), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5791, 5793), True, 'import matplotlib.pyplot as plt\n'), ((6859, 6898), 'matplotlib.pylab.matshow', 'pylab.matshow', (['tProb_'], {'cmap': 'plt.cm.OrRd'}), '(tProb_, cmap=plt.cm.OrRd)\n', (6872, 6898), True, 'import matplotlib.pylab as pylab\n'), ((6903, 6917), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6915, 6917), True, 'import matplotlib.pyplot as plt\n'), ((6940, 6988), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + 'Matrix.png')"], {'dpi': '(400)'}), "('./' + name + 'Matrix.png', dpi=400)\n", (6951, 6988), True, 'import matplotlib.pyplot as plt\n'), ((6993, 7004), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7002, 7004), True, 'import matplotlib.pyplot as plt\n'), ((7110, 7128), 'numpy.argsort', 'np.argsort', (['labels'], {}), '(labels)\n', (7120, 7128), True, 'import numpy as np\n'), ((7292, 7337), 'matplotlib.pylab.matshow', 'pylab.matshow', (['block_matrix'], {'cmap': 
'plt.cm.OrRd'}), '(block_matrix, cmap=plt.cm.OrRd)\n', (7305, 7337), True, 'import matplotlib.pylab as pylab\n'), ((7342, 7356), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7354, 7356), True, 'import matplotlib.pyplot as plt\n'), ((7361, 7403), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (7372, 7403), True, 'import matplotlib.pyplot as plt\n'), ((7426, 7437), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7435, 7437), True, 'import matplotlib.pyplot as plt\n'), ((7518, 7540), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 6)'], {}), '(1, (10, 6))\n', (7528, 7540), True, 'import matplotlib.pyplot as plt\n'), ((7649, 7678), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (7673, 7678), True, 'import matplotlib.ticker as mtick\n'), ((7729, 7752), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(30)'}), "('font', size=30)\n", (7735, 7752), True, 'import matplotlib.pyplot as plt\n'), ((7757, 7809), 'matplotlib.pyplot.title', 'plt.title', (['"""Cluster size distributions"""'], {'fontsize': '(20)'}), "('Cluster size distributions', fontsize=20)\n", (7766, 7809), True, 'import matplotlib.pyplot as plt\n'), ((8140, 8165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (8150, 8165), True, 'import matplotlib.pyplot as plt\n'), ((8171, 8189), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (8179, 8189), True, 'import matplotlib.pyplot as plt\n'), ((8319, 8374), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (8330, 8374), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8390), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8388, 8390), True, 'import matplotlib.pyplot as plt\n'), ((8513, 8535), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 8)'], {}), '(1, (10, 8))\n', (8523, 8535), True, 'import matplotlib.pyplot as plt\n'), ((8644, 8673), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (8668, 8673), True, 'import matplotlib.ticker as mtick\n'), ((8745, 8768), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(20)'}), "('font', size=20)\n", (8751, 8768), True, 'import matplotlib.pyplot as plt\n'), ((9856, 9881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (9866, 9881), True, 'import matplotlib.pyplot as plt\n'), ((9887, 9904), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 60]'], {}), '([0, 60])\n', (9895, 9904), True, 'import matplotlib.pyplot as plt\n'), ((9908, 9920), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9918, 9920), True, 'import matplotlib.pyplot as plt\n'), ((9925, 9980), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (9936, 9980), True, 'import matplotlib.pyplot as plt\n'), ((9985, 9996), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9994, 9996), True, 'import matplotlib.pyplot as plt\n'), ((10206, 10255), 'numpy.histogram2d', 'np.histogram2d', (['psi_angles', 'phi_angles'], {'bins': 'bins'}), '(psi_angles, phi_angles, bins=bins)\n', (10220, 10255), True, 'import numpy as np\n'), ((10411, 10434), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(25)'}), 
"('font', size=25)\n", (10417, 10434), True, 'import matplotlib.pyplot as plt\n'), ((10446, 10455), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (10452, 10455), True, 'import numpy as np\n'), ((10747, 10775), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (10757, 10775), True, 'import matplotlib.pyplot as plt\n'), ((10780, 10842), 'matplotlib.pyplot.imshow', 'plt.imshow', (['H'], {'extent': 'extent', 'origin': '"""lower"""', 'cmap': 'plt.cm.gray'}), "(H, extent=extent, origin='lower', cmap=plt.cm.gray)\n", (10790, 10842), True, 'import matplotlib.pyplot as plt\n'), ((11008, 11066), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (11016, 11066), True, 'import numpy as np\n'), ((11762, 11796), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(20)'}), "('$\\\\phi$', fontsize=20)\n", (11772, 11796), True, 'import matplotlib.pyplot as plt\n'), ((11800, 11834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Psi$"""'], {'fontsize': '(20)'}), "('$\\\\Psi$', fontsize=20)\n", (11810, 11834), True, 'import matplotlib.pyplot as plt\n'), ((11846, 11871), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.77)'}), '(shrink=0.77)\n', (11858, 11871), True, 'import matplotlib.pyplot as plt\n'), ((12313, 12355), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (12324, 12355), True, 'import matplotlib.pyplot as plt\n'), ((12376, 12387), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12385, 12387), True, 'import matplotlib.pyplot as plt\n'), ((12458, 12480), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 6)'], {}), '(1, (10, 6))\n', (12468, 12480), True, 'import matplotlib.pyplot as plt\n'), ((12484, 12507), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (12490, 12507), True, 'import matplotlib.pyplot as plt\n'), ((12640, 12669), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (12664, 12669), True, 'import matplotlib.ticker as mtick\n'), ((12720, 12799), 'matplotlib.pyplot.title', 'plt.title', (['"""Cluster Centers on Free energy landscape distribution"""'], {'fontsize': '(20)'}), "('Cluster Centers on Free energy landscape distribution', fontsize=20)\n", (12729, 12799), True, 'import matplotlib.pyplot as plt\n'), ((12804, 12825), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k_B T$"""'], {}), "('$k_B T$')\n", (12814, 12825), True, 'import matplotlib.pyplot as plt\n'), ((12830, 12855), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (12840, 12855), True, 'import matplotlib.pyplot as plt\n'), ((12861, 12879), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (12869, 12879), True, 'import matplotlib.pyplot as plt\n'), ((13084, 13139), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (13095, 13139), True, 'import matplotlib.pyplot as plt\n'), ((13160, 13171), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13169, 13171), True, 'import matplotlib.pyplot as plt\n'), ((13407, 13456), 'numpy.histogram2d', 'np.histogram2d', (['psi_angles', 'phi_angles'], {'bins': 'bins'}), '(psi_angles, phi_angles, bins=bins)\n', (13421, 13456), True, 'import numpy 
as np\n'), ((13612, 13635), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(25)'}), "('font', size=25)\n", (13618, 13635), True, 'import matplotlib.pyplot as plt\n'), ((13647, 13656), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (13653, 13656), True, 'import numpy as np\n'), ((14221, 14279), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (14229, 14279), True, 'import numpy as np\n'), ((14825, 14883), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (14833, 14883), True, 'import numpy as np\n'), ((15498, 15532), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(20)'}), "('$\\\\phi$', fontsize=20)\n", (15508, 15532), True, 'import matplotlib.pyplot as plt\n'), ((15536, 15570), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Psi$"""'], {'fontsize': '(20)'}), "('$\\\\Psi$', fontsize=20)\n", (15546, 15570), True, 'import matplotlib.pyplot as plt\n'), ((16209, 16231), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)', '(10, 6)'], {}), '(1, (10, 6))\n', (16219, 16231), True, 'import matplotlib.pyplot as plt\n'), ((16235, 16258), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (16241, 16258), True, 'import matplotlib.pyplot as plt\n'), ((16391, 16420), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['fmt'], {}), '(fmt)\n', (16415, 16420), True, 'import matplotlib.ticker as mtick\n'), ((16580, 16599), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (16589, 16599), True, 'import numpy as np\n'), ((17014, 17035), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$k_B T$"""'], {}), "('$k_B T$')\n", (17024, 17035), True, 'import matplotlib.pyplot as plt\n'), ((17040, 17081), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction number of clusters"""'], {}), "('Fraction number of clusters')\n", (17050, 17081), True, 'import matplotlib.pyplot as plt\n'), ((17087, 17104), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 50]'], {}), '([0, 50])\n', (17095, 17104), True, 'import matplotlib.pyplot as plt\n'), ((17109, 17194), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(index + bar_width)', "('', '1', '', '3', '', '5', '', '7', '', '9', '')"], {}), "(index + bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9', '')\n )\n", (17119, 17194), True, 'import matplotlib.pyplot as plt\n'), ((17192, 17204), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17202, 17204), True, 'import matplotlib.pyplot as plt\n'), ((17233, 17288), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '_Distribution.png')"], {'dpi': '(400)'}), "('./' + name + '_Distribution.png', dpi=400)\n", (17244, 17288), True, 'import matplotlib.pyplot as plt\n'), ((17309, 17320), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17318, 17320), True, 'import matplotlib.pyplot as plt\n'), ((17525, 17574), 'numpy.histogram2d', 'np.histogram2d', (['psi_angles', 'phi_angles'], {'bins': 'bins'}), '(psi_angles, phi_angles, bins=bins)\n', (17539, 17574), True, 'import numpy as np\n'), ((17730, 17753), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(25)'}), "('font', size=25)\n", (17736, 17753), True, 'import matplotlib.pyplot as plt\n'), ((17765, 17774), 'numpy.max', 'np.max', (['H'], {}), '(H)\n', (17771, 17774), True, 'import numpy as np\n'), ((18066, 18094), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (18076, 18094), True, 'import matplotlib.pyplot as plt\n'), ((18099, 18161), 'matplotlib.pyplot.imshow', 'plt.imshow', (['H'], {'extent': 'extent', 'origin': '"""lower"""', 'cmap': 'plt.cm.gray'}), "(H, extent=extent, origin='lower', cmap=plt.cm.gray)\n", (18109, 18161), True, 'import matplotlib.pyplot as plt\n'), ((18292, 18309), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (18301, 18309), True, 'import numpy as np\n'), ((18751, 18809), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'np.float64'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.float64)\n', (18759, 18809), True, 'import numpy as np\n'), ((19488, 19522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {'fontsize': '(20)'}), "('$\\\\phi$', fontsize=20)\n", (19498, 19522), True, 'import matplotlib.pyplot as plt\n'), ((19526, 19560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Psi$"""'], {'fontsize': '(20)'}), "('$\\\\Psi$', fontsize=20)\n", (19536, 19560), True, 'import matplotlib.pyplot as plt\n'), ((19572, 19597), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.77)'}), '(shrink=0.77)\n', (19584, 19597), True, 'import matplotlib.pyplot as plt\n'), ((19732, 19753), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (19740, 19753), True, 'import matplotlib.pyplot as plt\n'), ((19758, 19779), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (19766, 19779), True, 'import matplotlib.pyplot as plt\n'), ((19784, 19819), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (19794, 19819), True, 'import matplotlib.pyplot as plt\n'), ((19824, 19859), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (19834, 19859), True, 'import matplotlib.pyplot as plt\n'), ((19864, 19902), 'matplotlib.pyplot.plot', 'plt.plot', (['[-103, -103]', '[30, 180]', '"""w"""'], {}), "([-103, -103], [30, 180], 'w')\n", (19872, 19902), True, 'import matplotlib.pyplot as plt\n'), ((19921, 19963), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + name + '.png')"], {'dpi': '(400)'}), "('./' + name + '.png', dpi=400)\n", (19932, 19963), True, 'import matplotlib.pyplot as plt\n'), ((19984, 19995), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19993, 19995), True, 'import matplotlib.pyplot as plt\n'), ((20414, 20444), 'numpy.zeros', 'np.zeros', (['(frames_magnitude + 1)'], {}), '(frames_magnitude + 1)\n', (20422, 20444), True, 'import numpy as np\n'), ((21406, 21436), 'numpy.zeros', 'np.zeros', (['(frames_magnitude + 1)'], {}), '(frames_magnitude + 1)\n', (21414, 21436), True, 'import numpy as np\n'), ((22061, 22091), 'numpy.zeros', 'np.zeros', (['(frames_magnitude + 1)'], {}), '(frames_magnitude + 1)\n', (22069, 22091), True, 'import numpy as np\n'), ((1481, 1544), 'matplotlib.pyplot.title', 'plt.title', (["('Alanine Dipeptide ' + name + ' states')"], {'fontsize': '(10)'}), "('Alanine Dipeptide ' + name + ' states', fontsize=10)\n", (1490, 1544), True, 'import matplotlib.pyplot as plt\n'), ((1739, 1767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1749, 1767), True, 'import matplotlib.pyplot as plt\n'), ((2285, 2304), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-75, 75]'], {}), '([-75, 75])\n', (2293, 2304), True, 'import matplotlib.pyplot as plt\n'), ((2313, 2332), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['[-75, 75]'], {}), '([-75, 75])\n', (2321, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2365), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (2351, 2365), True, 'import matplotlib.pyplot as plt\n'), ((2374, 2398), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (2384, 2398), True, 'import matplotlib.pyplot as plt\n'), ((10679, 10693), 'numpy.min', 'np.min', (['xedges'], {}), '(xedges)\n', (10685, 10693), True, 'import numpy as np\n'), ((10695, 10709), 'numpy.max', 'np.max', (['xedges'], {}), '(xedges)\n', (10701, 10709), True, 'import numpy as np\n'), ((10711, 10725), 'numpy.min', 'np.min', (['yedges'], {}), '(yedges)\n', (10717, 10725), True, 'import numpy as np\n'), ((10727, 10741), 'numpy.max', 'np.max', (['yedges'], {}), '(yedges)\n', (10733, 10741), True, 'import numpy as np\n'), ((10930, 10987), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_ctr', 'psi_ctr', '"""."""'], {'markersize': '(10)', 'color': '"""r"""'}), "(phi_ctr, psi_ctr, '.', markersize=10, color='r')\n", (10938, 10987), True, 'import matplotlib.pyplot as plt\n'), ((12037, 12058), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (12045, 12058), True, 'import matplotlib.pyplot as plt\n'), ((12067, 12088), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (12075, 12088), True, 'import matplotlib.pyplot as plt\n'), ((12097, 12132), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (12107, 12132), True, 'import matplotlib.pyplot as plt\n'), ((12141, 12176), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (12151, 12176), True, 'import matplotlib.pyplot as plt\n'), ((12195, 12214), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-75, 75]'], {}), '([-75, 75])\n', (12203, 12214), True, 'import matplotlib.pyplot as plt\n'), ((12223, 12242), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-75, 75]'], {}), '([-75, 75])\n', (12231, 12242), True, 'import matplotlib.pyplot as plt\n'), ((12251, 12275), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (12261, 12275), True, 'import matplotlib.pyplot as plt\n'), ((12284, 12308), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-50, 0, 50]'], {}), '([-50, 0, 50])\n', (12294, 12308), True, 'import matplotlib.pyplot as plt\n'), ((12895, 12908), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (12904, 12908), True, 'import numpy as np\n'), ((12977, 12990), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (12986, 12990), True, 'import numpy as np\n'), ((17998, 18012), 'numpy.min', 'np.min', (['xedges'], {}), '(xedges)\n', (18004, 18012), True, 'import numpy as np\n'), ((18014, 18028), 'numpy.max', 'np.max', (['xedges'], {}), '(xedges)\n', (18020, 18028), True, 'import numpy as np\n'), ((18030, 18044), 'numpy.min', 'np.min', (['yedges'], {}), '(yedges)\n', (18036, 18044), True, 'import numpy as np\n'), ((18046, 18060), 'numpy.max', 'np.max', (['yedges'], {}), '(yedges)\n', (18052, 18060), True, 'import numpy as np\n'), ((18673, 18730), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_ctr', 'psi_ctr', '"""*"""'], {'markersize': '(10)', 'color': '"""r"""'}), "(phi_ctr, psi_ctr, '*', markersize=10, color='r')\n", (18681, 18730), True, 'import matplotlib.pyplot as plt\n'), ((20166, 20180), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (20172, 20180), True, 
'import numpy as np\n'), ((21213, 21229), 'numpy.max', 'np.max', (['labels_1'], {}), '(labels_1)\n', (21219, 21229), True, 'import numpy as np\n'), ((21868, 21884), 'numpy.max', 'np.max', (['labels_2'], {}), '(labels_2)\n', (21874, 21884), True, 'import numpy as np\n'), ((3426, 3447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\phi$"""'], {}), "('$\\\\phi$')\n", (3436, 3447), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3481), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\psi$"""'], {}), "('$\\\\psi$')\n", (3470, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3516), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-180, 180]'], {}), '([-180, 180])\n', (3503, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3529, 3550), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-180, 180]'], {}), '([-180, 180])\n', (3537, 3550), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3598), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (3573, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3646), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-120, -60, 0, 60, 120]'], {}), '([-120, -60, 0, 60, 120])\n', (3621, 3646), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3782), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3780, 3782), True, 'import matplotlib.pyplot as plt\n'), ((4615, 4632), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4626, 4632), True, 'import numpy as np\n'), ((4712, 4729), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (4721, 4729), True, 'import numpy as np\n'), ((4751, 4777), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['values'], {}), '(values)\n', (4769, 4777), False, 'from scipy import stats\n'), ((5470, 5506), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z'], {'origin': '"""lower"""'}), "(X, Y, Z, origin='lower')\n", (5481, 5506), True, 'import matplotlib.pyplot as plt\n'), ((20312, 20334), 'numpy.log10', 'np.log10', (['total_frames'], {}), '(total_frames)\n', (20320, 20334), True, 'import numpy as np\n'), ((20502, 20513), 'numpy.log10', 'np.log10', (['i'], {}), '(i)\n', (20510, 20513), True, 'import numpy as np\n'), ((20538, 20552), 'numpy.ceil', 'np.ceil', (['log_i'], {}), '(log_i)\n', (20545, 20552), True, 'import numpy as np\n'), ((21302, 21324), 'numpy.log10', 'np.log10', (['total_frames'], {}), '(total_frames)\n', (21310, 21324), True, 'import numpy as np\n'), ((21494, 21505), 'numpy.log10', 'np.log10', (['i'], {}), '(i)\n', (21502, 21505), True, 'import numpy as np\n'), ((21530, 21544), 'numpy.ceil', 'np.ceil', (['log_i'], {}), '(log_i)\n', (21537, 21544), True, 'import numpy as np\n'), ((21957, 21979), 'numpy.log10', 'np.log10', (['total_frames'], {}), '(total_frames)\n', (21965, 21979), True, 'import numpy as np\n'), ((22149, 22160), 'numpy.log10', 'np.log10', (['i'], {}), '(i)\n', (22157, 22160), True, 'import numpy as np\n'), ((22185, 22199), 'numpy.ceil', 'np.ceil', (['log_i'], {}), '(log_i)\n', (22192, 22199), True, 'import numpy as np\n'), ((977, 993), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (983, 993), True, 'import numpy as np\n'), ((1130, 1151), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1138, 1151), True, 'import numpy as np\n'), ((1168, 1246), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_angles[point]', 'psi_angles[point]', '"""."""'], {'markersize': '(1.0)', 'alpha': '(0.7)'}), "(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)\n", 
(1176, 1246), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3096), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (3086, 3096), True, 'import numpy as np\n'), ((4392, 4413), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (4400, 4413), True, 'import numpy as np\n'), ((4442, 4463), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (4450, 4463), True, 'import numpy as np\n'), ((18413, 18434), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (18421, 18434), True, 'import numpy as np\n'), ((18451, 18545), 'matplotlib.pyplot.plot', 'plt.plot', (['phi_angles[point]', 'psi_angles[point]', '"""2"""'], {'alpha': '(0.2)', 'color': 'colors[color_index]'}), "(phi_angles[point], psi_angles[point], '2', alpha=0.2, color=colors\n [color_index])\n", (18459, 18545), True, 'import matplotlib.pyplot as plt\n'), ((20121, 20136), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (20128, 20136), False, 'from collections import Counter\n'), ((21166, 21183), 'collections.Counter', 'Counter', (['labels_1'], {}), '(labels_1)\n', (21173, 21183), False, 'from collections import Counter\n'), ((21821, 21838), 'collections.Counter', 'Counter', (['labels_2'], {}), '(labels_2)\n', (21828, 21838), False, 'from collections import Counter\n'), ((3194, 3215), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (3202, 3215), True, 'import numpy as np\n'), ((3249, 3270), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (3257, 3270), True, 'import numpy as np\n'), ((10614, 10636), 'numpy.log', 'np.log', (['(H[i][j] / maxH)'], {}), '(H[i][j] / maxH)\n', (10620, 10636), True, 'import numpy as np\n'), ((13815, 13837), 'numpy.log', 'np.log', (['(H[i][j] / maxH)'], {}), '(H[i][j] / maxH)\n', (13821, 13837), True, 'import numpy as np\n'), ((17933, 17955), 'numpy.log', 'np.log', (['(H[i][j] / maxH)'], {}), '(H[i][j] / maxH)\n', (17939, 17955), True, 'import numpy as np\n'), ((1861, 1882), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1869, 1882), True, 'import numpy as np\n'), ((1920, 1941), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1928, 1941), True, 'import numpy as np\n'), ((11236, 11260), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (11244, 11260), True, 'import numpy as np\n'), ((11307, 11339), 'numpy.where', 'np.where', (['(xedges > psi_angles[i])'], {}), '(xedges > psi_angles[i])\n', (11315, 11339), True, 'import numpy as np\n'), ((11406, 11430), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (11414, 11430), True, 'import numpy as np\n'), ((11477, 11509), 'numpy.where', 'np.where', (['(yedges > phi_angles[i])'], {}), '(yedges > phi_angles[i])\n', (11485, 11509), True, 'import numpy as np\n'), ((14366, 14390), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (14374, 14390), True, 'import numpy as np\n'), ((14437, 14468), 'numpy.where', 'np.where', (['(xedges > psi_ctr_1[i])'], {}), '(xedges > psi_ctr_1[i])\n', (14445, 14468), True, 'import numpy as np\n'), ((14534, 14558), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (14542, 14558), True, 'import numpy as np\n'), ((14605, 14636), 'numpy.where', 'np.where', (['(yedges > phi_ctr_1[i])'], {}), '(yedges > phi_ctr_1[i])\n', (14613, 14636), True, 'import numpy as np\n'), ((14970, 14994), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (14978, 
14994), True, 'import numpy as np\n'), ((15041, 15072), 'numpy.where', 'np.where', (['(xedges > psi_ctr_2[i])'], {}), '(xedges > psi_ctr_2[i])\n', (15049, 15072), True, 'import numpy as np\n'), ((15138, 15162), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (15146, 15162), True, 'import numpy as np\n'), ((15209, 15240), 'numpy.where', 'np.where', (['(yedges > phi_ctr_2[i])'], {}), '(yedges > phi_ctr_2[i])\n', (15217, 15240), True, 'import numpy as np\n'), ((18974, 18998), 'numpy.where', 'np.where', (['(xedges > 179.0)'], {}), '(xedges > 179.0)\n', (18982, 18998), True, 'import numpy as np\n'), ((19045, 19074), 'numpy.where', 'np.where', (['(xedges > psi_ctr[i])'], {}), '(xedges > psi_ctr[i])\n', (19053, 19074), True, 'import numpy as np\n'), ((19138, 19162), 'numpy.where', 'np.where', (['(yedges > 179.0)'], {}), '(yedges > 179.0)\n', (19146, 19162), True, 'import numpy as np\n'), ((19209, 19238), 'numpy.where', 'np.where', (['(yedges > phi_ctr[i])'], {}), '(yedges > phi_ctr[i])\n', (19217, 19238), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
def drawPoint(canvas,x,y):
canvas[y,x] = 0
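# Bresenham-style line rasterizer: walks one pixel at a time along x (direction sx)
# and uses the integer decision parameter p = 2*dy - dx to decide when to step in y.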
def drawLine(canvas,x1,y1,x2,y2):
dx, dy = abs(x2 - x1), abs(y2 - y1)
xi, yi = x1, y1
sx, sy = 1 if (x2 - x1) > 0 else -1, 1 if (y2 - y1) > 0 else -1
pi = 2*dy - dx
    while xi != x2 + sx:
if pi < 0:
pi += 2 * dy
else:
pi += 2 * dy - 2 * dx
yi += 1 * sy
drawPoint(canvas,xi,yi)
xi += 1 * sx
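# Midpoint circle algorithm: two passes, starting from (0, r) and from (r, 0), that together
# cover all octants; each pass plots the four points mirrored about the centre (x, y).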
def drawCircle(canvas,x,y,r):
x0, y0 = x, y
xi = 0
yi = r
pi = 5/4 - r
while xi <= yi:
if pi < 0:
pi += 2 * (xi + 1) + 1
else:
pi += 2 * (xi + 1) + 1 - 2 * (yi - 1)
yi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
xi += 1
xi = r
yi = 0
pi = 5/4 - r
while not (xi == yi+1 or xi == yi):
if pi < 0:
pi += 2 * (yi + 1) + 1
else:
pi += 2 * (yi + 1) + 1 - 2 * (xi - 1)
xi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
yi += 1
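# Midpoint ellipse algorithm: region 1 steps in x while the slope is shallow
# (2*ry^2*x < 2*rx^2*y), region 2 then steps in y down to the axis; every point is
# mirrored into all four quadrants around the centre (x, y).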
def drawEllipse(canvas,x,y,rx,ry):
x0, y0 = x, y
xi, yi = 0, ry
rx2 = rx ** 2
ry2 = ry ** 2
p1i = ry2 - rx2 * ry + rx2 / 4
while 2*ry2*xi < 2*rx2*yi:
if p1i < 0:
p1i += 2 * ry2 * (xi + 1) + ry2
else:
p1i += 2 * ry2 * (xi + 1) - 2* rx2 * (yi - 1) + ry2
yi -= 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
xi += 1
xi -= 1
p2i = ry2 * (xi + .5) ** 2 + rx2 * (yi - 1) ** 2 - rx2 * ry2
while yi >= 0:
if p2i > 0:
p2i += -2 * rx2 * (yi - 1) + rx2
else:
p2i += 2 * ry2 * (xi + 1) - 2 * rx2 * (yi - 1) + rx2
xi += 1
drawPoint(canvas,xi+x0,yi+y0)
drawPoint(canvas,-xi+x0,yi+y0)
drawPoint(canvas,xi+x0,-yi+y0)
drawPoint(canvas,-xi+x0,-yi+y0)
yi -= 1
if __name__ == '__main__':
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawLine(canvas,800,100,100,600)
cv2.imwrite('line.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawCircle(canvas,500,500,300)
cv2.imwrite('circle.png',canvas)
canvas = np.ones([1000,1000],dtype=np.uint8) * 255
drawEllipse(canvas,500,500,100,200)
cv2.imwrite('ellipse.png',canvas)
|
[
"cv2.imwrite",
"numpy.ones"
] |
[((2335, 2366), 'cv2.imwrite', 'cv2.imwrite', (['"""line.png"""', 'canvas'], {}), "('line.png', canvas)\n", (2346, 2366), False, 'import cv2\n'), ((2460, 2493), 'cv2.imwrite', 'cv2.imwrite', (['"""circle.png"""', 'canvas'], {}), "('circle.png', canvas)\n", (2471, 2493), False, 'import cv2\n'), ((2592, 2626), 'cv2.imwrite', 'cv2.imwrite', (['"""ellipse.png"""', 'canvas'], {}), "('ellipse.png', canvas)\n", (2603, 2626), False, 'import cv2\n'), ((2252, 2289), 'numpy.ones', 'np.ones', (['[1000, 1000]'], {'dtype': 'np.uint8'}), '([1000, 1000], dtype=np.uint8)\n', (2259, 2289), True, 'import numpy as np\n'), ((2379, 2416), 'numpy.ones', 'np.ones', (['[1000, 1000]'], {'dtype': 'np.uint8'}), '([1000, 1000], dtype=np.uint8)\n', (2386, 2416), True, 'import numpy as np\n'), ((2506, 2543), 'numpy.ones', 'np.ones', (['[1000, 1000]'], {'dtype': 'np.uint8'}), '([1000, 1000], dtype=np.uint8)\n', (2513, 2543), True, 'import numpy as np\n')]
|
import numpy as np
import numpy.matlib
# sum of the matrices
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A + B
print(C)
# sum of the rows
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
s_linha = sum(A)
print(s_linha)
# sum of all elements
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
soma = sum(sum(A))
print(soma)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = A - B
print(C)
A = np.array([[1,0],[0,2]])
B = np.array([[0,1],[1,0]])
C = np.matmul(A,B)
print(C)
# transpose
A = np.array([[1,0],[0,2]])
A_transposta = A.T
print(A_transposta)
# inverse
from numpy.linalg import *
from numpy import linalg as LA
A = np.array([[1,3],[2,0]])
A_inv = inv(A)
print(A_inv)
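# a matrix times its inverse gives the identity matrix (up to floating-point rounding)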
I = np.matmul(A,A_inv)
print(I)
A = ([2,2],[4,8])
A_det = LA.det(A)
print(A_det)
A = ([[1,2],[1,2]])
A_n = LA.matrix_power(A, 2)
|
[
"numpy.array",
"numpy.linalg.matrix_power",
"numpy.matmul",
"numpy.linalg.det"
] |
[((66, 92), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (74, 92), True, 'import numpy as np\n'), ((94, 120), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (102, 120), True, 'import numpy as np\n'), ((161, 187), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (169, 187), True, 'import numpy as np\n'), ((189, 215), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (197, 215), True, 'import numpy as np\n'), ((270, 296), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (278, 296), True, 'import numpy as np\n'), ((298, 324), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (306, 324), True, 'import numpy as np\n'), ((358, 384), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (366, 384), True, 'import numpy as np\n'), ((386, 412), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (394, 412), True, 'import numpy as np\n'), ((434, 460), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (442, 460), True, 'import numpy as np\n'), ((462, 488), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (470, 488), True, 'import numpy as np\n'), ((490, 505), 'numpy.matmul', 'np.matmul', (['A', 'B'], {}), '(A, B)\n', (499, 505), True, 'import numpy as np\n'), ((532, 558), 'numpy.array', 'np.array', (['[[1, 0], [0, 2]]'], {}), '([[1, 0], [0, 2]])\n', (540, 558), True, 'import numpy as np\n'), ((670, 696), 'numpy.array', 'np.array', (['[[1, 3], [2, 0]]'], {}), '([[1, 3], [2, 0]])\n', (678, 696), True, 'import numpy as np\n'), ((726, 745), 'numpy.matmul', 'np.matmul', (['A', 'A_inv'], {}), '(A, A_inv)\n', (735, 745), True, 'import numpy as np\n'), ((782, 791), 'numpy.linalg.det', 'LA.det', (['A'], {}), '(A)\n', (788, 791), True, 'from numpy import linalg as LA\n'), ((832, 853), 'numpy.linalg.matrix_power', 'LA.matrix_power', (['A', '(2)'], {}), '(A, 2)\n', (847, 853), True, 'from numpy import linalg as LA\n')]
|
import numpy as np
import scipy
import matplotlib.pyplot as plt
import sys
def compute_r_squared(data, predictions):
'''
    In exercise 5, we calculated the R^2 value for you. But why don't you try
    and calculate the R^2 value yourself?
Given a list of original data points, and also a list of predicted data points,
write a function that will compute and return the coefficient of determination (R^2)
for this data. numpy.mean() and numpy.sum() might both be useful here, but
not necessary.
Documentation about numpy.mean() and numpy.sum() below:
http://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html
http://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html
'''
mean = data.mean()
numerator = np.sum((data - predictions)**2)
denom = np.sum((data-mean)**2)
r_squared = 1 - numerator/denom
return r_squared
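# Illustrative check with made-up numbers: for data = np.array([1.0, 2.0, 3.0]) and
# predictions = np.array([1.1, 1.9, 3.2]), SS_res = 0.06 and SS_tot = 2.0,
# so compute_r_squared returns 1 - 0.06/2.0 = 0.97.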
|
[
"numpy.sum"
] |
[((804, 837), 'numpy.sum', 'np.sum', (['((data - predictions) ** 2)'], {}), '((data - predictions) ** 2)\n', (810, 837), True, 'import numpy as np\n'), ((849, 875), 'numpy.sum', 'np.sum', (['((data - mean) ** 2)'], {}), '((data - mean) ** 2)\n', (855, 875), True, 'import numpy as np\n')]
|
from sklearn import preprocessing, svm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import cross_validation
import pandas as pd
import numpy as np
import quandl
import math
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Low']) / df['Adj. Close'] * 100.0
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100.0
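# HL_PCT: intraday high-low range as a percentage of the close;
# PCT_change: open-to-close percentage move. Both act as rough volatility features.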
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
df.fillna(-99999, inplace = True)
forecast_out = int(math.ceil(0.01 * len(df)))
print(forecast_out)
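# Shift the close price up by forecast_out rows so each row's label is the price
# forecast_out rows (~1% of the dataset) into the future.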
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace = True)
X = np.array(df.drop(['label'],1))
y = np.array(df['label'])
X = preprocessing.scale(X)
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y, test_size = 0.2)
clf = LinearRegression()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test,y_test)
print(accuracy)
|
[
"numpy.array",
"quandl.get",
"sklearn.cross_validation.train_test_split",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.scale"
] |
[((250, 274), 'quandl.get', 'quandl.get', (['"""WIKI/GOOGL"""'], {}), "('WIKI/GOOGL')\n", (260, 274), False, 'import quandl\n'), ((826, 847), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (834, 847), True, 'import numpy as np\n'), ((852, 874), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (871, 874), False, 'from sklearn import preprocessing, svm\n'), ((879, 900), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (887, 900), True, 'import numpy as np\n'), ((937, 991), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (970, 991), False, 'from sklearn import cross_validation\n'), ((1000, 1018), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1016, 1018), False, 'from sklearn.linear_model import LinearRegression\n')]
|
import datetime
import os
import yaml
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
import plotly.graph_objs as go
ENV_FILE = '../env.yaml'
with open(ENV_FILE) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
# Initialisation des chemins vers les fichiers
ROOT_DIR = os.path.dirname(os.path.abspath(ENV_FILE))
DATA_FILE = os.path.join(ROOT_DIR,
params['directories']['processed'],
params['files']['all_data'])
#Lecture du fihcier de données
epidemie_df = (pd.read_csv(DATA_FILE, parse_dates=['Last Update'])
.assign(day=lambda _df:_df['Last Update'].dt.date)
.drop_duplicates(subset=['Country/Region', 'Province/State', 'day'])
[lambda df: df['day'] <= datetime.date(2020,3,20)]
)
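# One row per (country, province, day): parse the update timestamps into a calendar day,
# drop duplicate daily reports, and keep only data up to 20 March 2020.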
# replacing Mainland China with just China
cases = ['Confirmed', 'Deaths', 'Recovered']
# After 14/03/2020 the names of the countries are quite different
epidemie_df['Country/Region'] = epidemie_df['Country/Region'].replace('Mainland China', 'China')
# filling missing values
epidemie_df[['Province/State']] = epidemie_df[['Province/State']].fillna('')
epidemie_df[cases] = epidemie_df[cases].fillna(0)
countries=[{'label':c, 'value': c} for c in epidemie_df['Country/Region'].unique()]
app = dash.Dash('C0VID-19 Explorer')
app.layout = html.Div([
html.H1(['C0VID-19 Explorer'], style={'textAlign': 'center', 'color': 'navy', 'font-weight': 'bold'}),
dcc.Tabs([
dcc.Tab(label='Time', children=[
dcc.Markdown("""
Select a country:
""",style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
html.Div([
dcc.Dropdown(
id='country',
options=countries,
placeholder="Select a country...",
)
]),
html.Div([
dcc.Markdown("""You can select a second country:""",
style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.Dropdown(
id='country2',
options=countries,
placeholder="Select a country...",
)
]),
html.Div([dcc.Markdown("""Cases: """,
style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.RadioItems(
id='variable',
options=[
{'label':'Confirmed', 'value': 'Confirmed'},
{'label':'Deaths', 'value': 'Deaths'},
{'label':'Recovered', 'value': 'Recovered'}
],
value='Confirmed',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Graph(id='graph1')
])
]),
dcc.Tab(label='Map', children=[
#html.H6(['COVID-19 in numbers:']),
dcc.Markdown("""
**COVID-19**
This is a graph that shows the evolution of the COVID-19 around the world
** Cases:**
""", style={'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'} ),
dcc.Dropdown(id="value-selected", value='Confirmed',
options=[{'label': "Deaths ", 'value': 'Deaths'},
{'label': "Confirmed", 'value': 'Confirmed'},
{'label': "Recovered", 'value': 'Recovered'}],
placeholder="Select a country...",
style={"display": "inline-block", "margin-left": "auto", "margin-right": "auto",
"width": "70%"}, className="six columns"),
dcc.Graph(id='map1'),
dcc.Slider(
id='map_day',
min=0,
max=(epidemie_df['day'].max() - epidemie_df['day'].min()).days,
value=0,
marks={i:str(i) for i, date in enumerate(epidemie_df['day'].unique())}
)
]),
dcc.Tab(label='SIR Model', children=[
dcc.Markdown("""
**SIR model**
S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. SIR represents the three compartments segmented by the model.
**Select a country:**
""", style={'textAlign': 'left', 'color': 'navy'}),
html.Div([
dcc.Dropdown(
id='Country',
value='Portugal',
options=countries),
]),
dcc.Markdown("""Select:""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Dropdown(id='cases',
options=[
{'label': 'Confirmed', 'value': 'Confirmed'},
{'label': 'Deaths', 'value': 'Deaths'},
{'label': 'Recovered', 'value': 'Recovered'}],
value=['Confirmed','Deaths','Recovered'],
multi=True),
dcc.Markdown("""
**Select your paramaters:**
""", style={'textAlign': 'left', 'color': 'navy'}),
html.Label( style={'textAlign': 'left', 'color': 'navy', "width": "20%"}),
html.Div([
dcc.Markdown(""" Beta:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-beta',
type ='number',
placeholder='Input Beta',
min =-50,
max =100,
step =0.01,
value=0.45
)
]),
html.Div([
dcc.Markdown(""" Gamma:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-gamma',
type ='number',
placeholder='Input Gamma',
min =-50,
max =100,
step =0.01,
value=0.55
)
]),
html.Div([
dcc.Markdown(""" Population:
""", style={'textAlign': 'left', 'color': 'navy'}),
dcc.Input(
id='input-pop',placeholder='Population',
type ='number',
min =1000,
max =1000000000000000,
step =1000,
value=1000,
)
]),
html.Div([
dcc.RadioItems(id='variable2',
options=[
{'label':'Optimize','value':'optimize'}],
value='Confirmed',
labelStyle={'display':'inline-block','color': 'navy', "width": "20%"})
]),
html.Div([
dcc.Graph(id='graph2')
]),
])
]),
])
@app.callback(
Output('graph1', 'figure'),
[
Input('country','value'),
Input('country2','value'),
Input('variable','value'),
]
)
def update_graph(country, country2, variable):
print(country)
if country is None:
graph_df = epidemie_df.groupby('day').agg({variable:'sum'}).reset_index()
else:
graph_df=(epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({variable:'sum'})
.reset_index()
)
if country2 is not None:
graph2_df=(epidemie_df[epidemie_df['Country/Region'] == country2]
.groupby(['Country/Region', 'day'])
.agg({variable:'sum'})
.reset_index()
)
return {
'data':[
dict(
x=graph_df['day'],
y=graph_df[variable],
type='line',
name=country if country is not None else 'Total'
)
] + ([
dict(
x=graph2_df['day'],
y=graph2_df[variable],
type='line',
name=country2
)
] if country2 is not None else [])
}
@app.callback(
Output('map1', 'figure'),
[
Input('map_day','value'),
Input("value-selected", "value")
]
)
def update_map(map_day,selected):
day= epidemie_df['day'].sort_values(ascending=False).unique()[map_day]
map_df = (epidemie_df[epidemie_df['day'] == day]
.groupby(['Country/Region'])
.agg({selected:'sum', 'Latitude': 'mean', 'Longitude': 'mean'})
.reset_index()
)
return {
'data':[
dict(
type='scattergeo',
lon=map_df['Longitude'],
lat=map_df['Latitude'],
text=map_df.apply(lambda r: r['Country/Region'] + '(' + str(r[selected]) + ')', axis=1),
mode='markers',
marker=dict(
size=np.maximum(map_df[selected]/ 1_000, 10)
)
)
],
'layout': dict(
title=str(day),
geo=dict(showland=True)
)
}
@app.callback(
Output('graph2', 'figure'),
[
Input('input-beta', 'value'),
Input('input-gamma','value'),
Input('input-pop','value'),
Input('Country','value')
#Input('variable2','value')
]
)
def update_model(beta, gamma, population, Country):
print(Country)
country=Country
country_df = (epidemie_df[epidemie_df['Country/Region'] == country]
.groupby(['Country/Region', 'day'])
.agg({'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered': 'sum'})
.reset_index())
country_df['Infected'] = country_df['Confirmed'].diff()
steps = len(country_df['Infected'])
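    # SIR right-hand side: dS/dt = -beta*S*I, dI/dt = beta*S*I - gamma*I, dR/dt = gamma*I.
    # solve_ivp integrates it below with one evaluation per time step, starting from
    # [population susceptible, 1 infected, 0 recovered].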
def SIR(t, y):
S = y[0]; I = y[1]; R = y[2]
return([-beta*S*I, beta*S*I-gamma*I, gamma*I])
solution = solve_ivp(SIR, [0, steps], [population, 1, 0], t_eval=np.arange(0, steps, 1))
#def sumsq_error(parameters):
#beta, gamma = parameters
#def SIR(t,y):
#S=y[0]
#I=y[1]
#R=y[2]
#return([-beta*S*I, beta*S*I-gamma*I, gamma*I])
#solution = solve_ivp(SIR,[0,nb_steps-1],[total_population,1,0],t_eval=np.arange(0,nb_steps,1))
#return(sum((solution.y[1]-infected_population)**2))
#msol = minimize(sumsq_error,[0.001,0.1],method='Nelder-Mead')
#if variable2 == 'optimize':
#gamma,beta == msol.x
return {
'data': [
dict(
x=solution.t,
y=solution.y[0],
type='line',
name=country+': Susceptible')
] + ([
dict(
x=solution.t,
y=solution.y[1],
type='line',
name=country+': Infected')
]) + ([
dict(
x=solution.t,
y=solution.y[2],
type='line',
name=country+': Recovered')
]) + ([
dict(
x=solution.t,
y=country_df['Infected'],
type='line',
name=country+': Original Data(Infected)')
])
}
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"pandas.read_csv",
"numpy.arange",
"dash_core_components.RadioItems",
"dash_core_components.Input",
"dash.dependencies.Output",
"os.path.join",
"yaml.load",
"dash.dependencies.Input",
"dash_core_components.Dropdown",
"dash_html_components.Label",
"datetime.date",
"dash_core_components.Markdown",
"dash_html_components.H1",
"os.path.abspath",
"numpy.maximum",
"dash.Dash",
"dash_core_components.Graph"
] |
[((530, 622), 'os.path.join', 'os.path.join', (['ROOT_DIR', "params['directories']['processed']", "params['files']['all_data']"], {}), "(ROOT_DIR, params['directories']['processed'], params['files'][\n 'all_data'])\n", (542, 622), False, 'import os\n'), ((1497, 1527), 'dash.Dash', 'dash.Dash', (['"""C0VID-19 Explorer"""'], {}), "('C0VID-19 Explorer')\n", (1506, 1527), False, 'import dash\n'), ((379, 415), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (388, 415), False, 'import yaml\n'), ((491, 516), 'os.path.abspath', 'os.path.abspath', (['ENV_FILE'], {}), '(ENV_FILE)\n', (506, 516), False, 'import os\n'), ((7634, 7660), 'dash.dependencies.Output', 'Output', (['"""graph1"""', '"""figure"""'], {}), "('graph1', 'figure')\n", (7640, 7660), False, 'from dash.dependencies import Input, Output\n'), ((8901, 8925), 'dash.dependencies.Output', 'Output', (['"""map1"""', '"""figure"""'], {}), "('map1', 'figure')\n", (8907, 8925), False, 'from dash.dependencies import Input, Output\n'), ((9914, 9940), 'dash.dependencies.Output', 'Output', (['"""graph2"""', '"""figure"""'], {}), "('graph2', 'figure')\n", (9920, 9940), False, 'from dash.dependencies import Input, Output\n'), ((1557, 1662), 'dash_html_components.H1', 'html.H1', (["['C0VID-19 Explorer']"], {'style': "{'textAlign': 'center', 'color': 'navy', 'font-weight': 'bold'}"}), "(['C0VID-19 Explorer'], style={'textAlign': 'center', 'color':\n 'navy', 'font-weight': 'bold'})\n", (1564, 1662), True, 'import dash_html_components as html\n'), ((7676, 7701), 'dash.dependencies.Input', 'Input', (['"""country"""', '"""value"""'], {}), "('country', 'value')\n", (7681, 7701), False, 'from dash.dependencies import Input, Output\n'), ((7710, 7736), 'dash.dependencies.Input', 'Input', (['"""country2"""', '"""value"""'], {}), "('country2', 'value')\n", (7715, 7736), False, 'from dash.dependencies import Input, Output\n'), ((7745, 7771), 'dash.dependencies.Input', 'Input', (['"""variable"""', '"""value"""'], {}), "('variable', 'value')\n", (7750, 7771), False, 'from dash.dependencies import Input, Output\n'), ((8941, 8966), 'dash.dependencies.Input', 'Input', (['"""map_day"""', '"""value"""'], {}), "('map_day', 'value')\n", (8946, 8966), False, 'from dash.dependencies import Input, Output\n'), ((8975, 9007), 'dash.dependencies.Input', 'Input', (['"""value-selected"""', '"""value"""'], {}), "('value-selected', 'value')\n", (8980, 9007), False, 'from dash.dependencies import Input, Output\n'), ((9956, 9984), 'dash.dependencies.Input', 'Input', (['"""input-beta"""', '"""value"""'], {}), "('input-beta', 'value')\n", (9961, 9984), False, 'from dash.dependencies import Input, Output\n'), ((9994, 10023), 'dash.dependencies.Input', 'Input', (['"""input-gamma"""', '"""value"""'], {}), "('input-gamma', 'value')\n", (9999, 10023), False, 'from dash.dependencies import Input, Output\n'), ((10032, 10059), 'dash.dependencies.Input', 'Input', (['"""input-pop"""', '"""value"""'], {}), "('input-pop', 'value')\n", (10037, 10059), False, 'from dash.dependencies import Input, Output\n'), ((10068, 10093), 'dash.dependencies.Input', 'Input', (['"""Country"""', '"""value"""'], {}), "('Country', 'value')\n", (10073, 10093), False, 'from dash.dependencies import Input, Output\n'), ((957, 983), 'datetime.date', 'datetime.date', (['(2020)', '(3)', '(20)'], {}), '(2020, 3, 20)\n', (970, 983), False, 'import datetime\n'), ((10788, 10810), 'numpy.arange', 'np.arange', (['(0)', 'steps', '(1)'], {}), '(0, steps, 1)\n', (10797, 10810), True, 
'import numpy as np\n'), ((715, 766), 'pandas.read_csv', 'pd.read_csv', (['DATA_FILE'], {'parse_dates': "['Last Update']"}), "(DATA_FILE, parse_dates=['Last Update'])\n", (726, 766), True, 'import pandas as pd\n'), ((1728, 1888), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n Select a country:\n \n """'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), '(\n """\n Select a country:\n \n """,\n style={\'textAlign\': \'left\', \'color\': \'navy\', \'font-weight\': \'bold\'})\n', (1740, 1888), True, 'import dash_core_components as dcc\n'), ((3275, 3549), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n **COVID-19**\n This is a graph that shows the evolution of the COVID-19 around the world \n \n ** Cases:**\n """'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), '(\n """\n **COVID-19**\n This is a graph that shows the evolution of the COVID-19 around the world \n \n ** Cases:**\n """\n , style={\'textAlign\': \'left\', \'color\': \'navy\', \'font-weight\': \'bold\'})\n', (3287, 3549), True, 'import dash_core_components as dcc\n'), ((3554, 3927), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""value-selected"""', 'value': '"""Confirmed"""', 'options': "[{'label': 'Deaths ', 'value': 'Deaths'}, {'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Recovered', 'value': 'Recovered'}]", 'placeholder': '"""Select a country..."""', 'style': "{'display': 'inline-block', 'margin-left': 'auto', 'margin-right': 'auto',\n 'width': '70%'}", 'className': '"""six columns"""'}), "(id='value-selected', value='Confirmed', options=[{'label':\n 'Deaths ', 'value': 'Deaths'}, {'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Recovered', 'value': 'Recovered'}],\n placeholder='Select a country...', style={'display': 'inline-block',\n 'margin-left': 'auto', 'margin-right': 'auto', 'width': '70%'},\n className='six columns')\n", (3566, 3927), True, 'import dash_core_components as dcc\n'), ((4109, 4129), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""map1"""'}), "(id='map1')\n", (4118, 4129), True, 'import dash_core_components as dcc\n'), ((4489, 4951), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n **SIR model**\n S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. SIR represents the three compartments segmented by the model.\n \n **Select a country:**\n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), '(\n """\n **SIR model**\n S(Susceptible)I(Infectious)R(Recovered) is a model describing the dynamics of infectious disease. The model divides the population into compartments. Each compartment is expected to have the same characteristics. 
SIR represents the three compartments segmented by the model.\n \n **Select a country:**\n """\n , style={\'textAlign\': \'left\', \'color\': \'navy\'})\n', (4501, 4951), True, 'import dash_core_components as dcc\n'), ((5136, 5205), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Select:"""'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "('Select:', style={'textAlign': 'left', 'color': 'navy'})\n", (5148, 5205), True, 'import dash_core_components as dcc\n'), ((5223, 5457), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""cases"""', 'options': "[{'label': 'Confirmed', 'value': 'Confirmed'}, {'label': 'Deaths', 'value':\n 'Deaths'}, {'label': 'Recovered', 'value': 'Recovered'}]", 'value': "['Confirmed', 'Deaths', 'Recovered']", 'multi': '(True)'}), "(id='cases', options=[{'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Deaths', 'value': 'Deaths'}, {'label':\n 'Recovered', 'value': 'Recovered'}], value=['Confirmed', 'Deaths',\n 'Recovered'], multi=True)\n", (5235, 5457), True, 'import dash_core_components as dcc\n'), ((5581, 5739), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""\n \n **Select your paramaters:**\n \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), '(\n """\n \n **Select your paramaters:**\n \n """\n , style={\'textAlign\': \'left\', \'color\': \'navy\'})\n', (5593, 5739), True, 'import dash_core_components as dcc\n'), ((5743, 5815), 'dash_html_components.Label', 'html.Label', ([], {'style': "{'textAlign': 'left', 'color': 'navy', 'width': '20%'}"}), "(style={'textAlign': 'left', 'color': 'navy', 'width': '20%'})\n", (5753, 5815), True, 'import dash_html_components as html\n'), ((9699, 9738), 'numpy.maximum', 'np.maximum', (['(map_df[selected] / 1000)', '(10)'], {}), '(map_df[selected] / 1000, 10)\n', (9709, 9738), True, 'import numpy as np\n'), ((1920, 2005), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""country"""', 'options': 'countries', 'placeholder': '"""Select a country..."""'}), "(id='country', options=countries, placeholder='Select a country...'\n )\n", (1932, 2005), True, 'import dash_core_components as dcc\n'), ((2135, 2256), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""You can select a second country:"""'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), "('You can select a second country:', style={'textAlign': 'left',\n 'color': 'navy', 'font-weight': 'bold'})\n", (2147, 2256), True, 'import dash_core_components as dcc\n'), ((2305, 2391), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""country2"""', 'options': 'countries', 'placeholder': '"""Select a country..."""'}), "(id='country2', options=countries, placeholder=\n 'Select a country...')\n", (2317, 2391), True, 'import dash_core_components as dcc\n'), ((2505, 2601), 'dash_core_components.Markdown', 'dcc.Markdown', (['"""Cases: """'], {'style': "{'textAlign': 'left', 'color': 'navy', 'font-weight': 'bold'}"}), "('Cases: ', style={'textAlign': 'left', 'color': 'navy',\n 'font-weight': 'bold'})\n", (2517, 2601), True, 'import dash_core_components as dcc\n'), ((2650, 2893), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': '"""variable"""', 'options': "[{'label': 'Confirmed', 'value': 'Confirmed'}, {'label': 'Deaths', 'value':\n 'Deaths'}, {'label': 'Recovered', 'value': 'Recovered'}]", 'value': '"""Confirmed"""', 'labelStyle': "{'display': 'inline-block'}"}), "(id='variable', options=[{'label': 'Confirmed', 'value':\n 'Confirmed'}, {'label': 'Deaths', 'value': 
'Deaths'}, {'label':\n 'Recovered', 'value': 'Recovered'}], value='Confirmed', labelStyle={\n 'display': 'inline-block'})\n", (2664, 2893), True, 'import dash_core_components as dcc\n'), ((3125, 3147), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph1"""'}), "(id='graph1')\n", (3134, 3147), True, 'import dash_core_components as dcc\n'), ((4982, 5045), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""Country"""', 'value': '"""Portugal"""', 'options': 'countries'}), "(id='Country', value='Portugal', options=countries)\n", (4994, 5045), True, 'import dash_core_components as dcc\n'), ((5858, 5953), 'dash_core_components.Markdown', 'dcc.Markdown', (['""" Beta: \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "(' Beta: \\n ', style={'textAlign': 'left',\n 'color': 'navy'})\n", (5870, 5953), True, 'import dash_core_components as dcc\n'), ((5970, 6082), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-beta"""', 'type': '"""number"""', 'placeholder': '"""Input Beta"""', 'min': '(-50)', 'max': '(100)', 'step': '(0.01)', 'value': '(0.45)'}), "(id='input-beta', type='number', placeholder='Input Beta', min=-50,\n max=100, step=0.01, value=0.45)\n", (5979, 6082), True, 'import dash_core_components as dcc\n'), ((6303, 6399), 'dash_core_components.Markdown', 'dcc.Markdown', (['""" Gamma: \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "(' Gamma: \\n ', style={'textAlign': 'left',\n 'color': 'navy'})\n", (6315, 6399), True, 'import dash_core_components as dcc\n'), ((6416, 6531), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-gamma"""', 'type': '"""number"""', 'placeholder': '"""Input Gamma"""', 'min': '(-50)', 'max': '(100)', 'step': '(0.01)', 'value': '(0.55)'}), "(id='input-gamma', type='number', placeholder='Input Gamma', min=-\n 50, max=100, step=0.01, value=0.55)\n", (6425, 6531), True, 'import dash_core_components as dcc\n'), ((6747, 6848), 'dash_core_components.Markdown', 'dcc.Markdown', (['""" Population: \n """'], {'style': "{'textAlign': 'left', 'color': 'navy'}"}), "(' Population: \\n ', style={'textAlign':\n 'left', 'color': 'navy'})\n", (6759, 6848), True, 'import dash_core_components as dcc\n'), ((6865, 6990), 'dash_core_components.Input', 'dcc.Input', ([], {'id': '"""input-pop"""', 'placeholder': '"""Population"""', 'type': '"""number"""', 'min': '(1000)', 'max': '(1000000000000000)', 'step': '(1000)', 'value': '(1000)'}), "(id='input-pop', placeholder='Population', type='number', min=1000,\n max=1000000000000000, step=1000, value=1000)\n", (6874, 6990), True, 'import dash_core_components as dcc\n'), ((7187, 7371), 'dash_core_components.RadioItems', 'dcc.RadioItems', ([], {'id': '"""variable2"""', 'options': "[{'label': 'Optimize', 'value': 'optimize'}]", 'value': '"""Confirmed"""', 'labelStyle': "{'display': 'inline-block', 'color': 'navy', 'width': '20%'}"}), "(id='variable2', options=[{'label': 'Optimize', 'value':\n 'optimize'}], value='Confirmed', labelStyle={'display': 'inline-block',\n 'color': 'navy', 'width': '20%'})\n", (7201, 7371), True, 'import dash_core_components as dcc\n'), ((7539, 7561), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""graph2"""'}), "(id='graph2')\n", (7548, 7561), True, 'import dash_core_components as dcc\n')]
|
import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
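        # Read the raw flux column straight from the FITS HDU so it can be compared
        # against the flux parsed by read_kepseismic_lightcurve() below.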
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
# print(flux_lc, fluxes)
assert np.sum(fluxes) == np.sum(flux_lc)
|
[
"lightkurve.io.kepseismic.read_kepseismic_lightcurve",
"lightkurve.io.detect.detect_filetype",
"numpy.sum",
"astropy.io.fits.open"
] |
[((461, 475), 'astropy.io.fits.open', 'fits.open', (['url'], {}), '(url)\n', (470, 475), False, 'from astropy.io import fits\n'), ((878, 909), 'lightkurve.io.kepseismic.read_kepseismic_lightcurve', 'read_kepseismic_lightcurve', (['url'], {}), '(url)\n', (904, 909), False, 'from lightkurve.io.kepseismic import read_kepseismic_lightcurve\n'), ((488, 506), 'lightkurve.io.detect.detect_filetype', 'detect_filetype', (['f'], {}), '(f)\n', (503, 506), False, 'from lightkurve.io.detect import detect_filetype\n'), ((783, 814), 'astropy.io.fits.open', 'fits.open', (['url'], {'mode': '"""readonly"""'}), "(url, mode='readonly')\n", (792, 814), False, 'from astropy.io import fits\n'), ((980, 994), 'numpy.sum', 'np.sum', (['fluxes'], {}), '(fluxes)\n', (986, 994), True, 'import numpy as np\n'), ((998, 1013), 'numpy.sum', 'np.sum', (['flux_lc'], {}), '(flux_lc)\n', (1004, 1013), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import sys
from io import open
import numpy as np
import torch
import json
from torch.utils.data import (DataLoader, SequentialSampler, RandomSampler, TensorDataset)
from tqdm import tqdm, trange
import ray
from ray import tune
from ray.tune.schedulers import HyperBandScheduler
from models.modeling_bert import QuestionAnswering, Config
from utils.optimization import AdamW, WarmupLinearSchedule
from utils.tokenization import BertTokenizer
from utils.korquad_utils import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions)
from debug.evaluate_korquad import evaluate as korquad_eval
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
# In[2]:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# In[3]:
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
# In[4]:
from ray import tune
from ray.tune import track
from ray.tune.schedulers import HyperBandScheduler
from ray.tune.suggest.bayesopt import BayesOptSearch
ray.shutdown()
ray.init(webui_host='127.0.0.1')
# In[5]:
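# Ray Tune search space: fixed training settings plus per-trial sampled values (query length,
# n-best size, answer length, seed) and a grid search over the number of training epochs.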
search_space = {
"max_seq_length": 512,
"doc_stride": 128,
"max_query_length": tune.sample_from(lambda _: int(np.random.uniform(50, 100))), #tune.uniform(50, 100),
"train_batch_size": 32,
"learning_rate": tune.loguniform(5e-4, 5e-7, 10),
"num_train_epochs": tune.grid_search([4, 8, 12, 16]),
"max_grad_norm": 1.0,
"adam_epsilon": 1e-6,
"warmup_proportion": 0.1,
"n_best_size": tune.sample_from(lambda _: int(np.random.uniform(50, 100))), #tune.uniform(50, 100),
"max_answer_length": tune.sample_from(lambda _: int(np.random.uniform(12, 25))), #tune.uniform(12, 25),
"seed": tune.sample_from(lambda _: int(np.random.uniform(1e+6, 1e+8)))
}
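# Example of what a single sampled trial config could look like (illustrative values only):
#   {'max_seq_length': 512, 'doc_stride': 128, 'max_query_length': 73, 'train_batch_size': 32,
#    'learning_rate': 3.1e-05, 'num_train_epochs': 8, 'max_grad_norm': 1.0, 'adam_epsilon': 1e-06,
#    'warmup_proportion': 0.1, 'n_best_size': 64, 'max_answer_length': 17, 'seed': 48211937}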
# In[ ]:
def load_and_cache_examples(predict_file, max_seq_length, doc_stride, max_query_length, tokenizer):
# Load data features from cache or dataset file
examples = read_squad_examples(input_file=predict_file,
is_training=False,
version_2_with_negative=False)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=False)
return examples, features
# In[ ]:
def evaluate(predict_file, batch_size, device, output_dir, n_best_size, max_answer_length, model, eval_examples, eval_features):
""" Eval """
all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
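    # example indices let each batch row be mapped back to its feature when collecting RawResults below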
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
logger.info("***** Evaluating *****")
logger.info(" Num features = %d", len(dataset))
logger.info(" Batch size = %d", batch_size)
model.eval()
all_results = []
    # set_seed(args) # Added here for reproducibility (even between python 2 and 3)
logger.info("Start evaluating!")
for input_ids, input_mask, segment_ids, example_indices in tqdm(dataloader, desc="Evaluating"):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
for i, example_index in enumerate(example_indices):
start_logits = batch_start_logits[i].detach().cpu().tolist()
end_logits = batch_end_logits[i].detach().cpu().tolist()
eval_feature = eval_features[example_index.item()]
unique_id = int(eval_feature.unique_id)
all_results.append(RawResult(unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(output_dir, "predictions.json")
output_nbest_file = os.path.join(output_dir, "nbest_predictions.json")
write_predictions(eval_examples, eval_features, all_results,
n_best_size, max_answer_length,
False, output_prediction_file, output_nbest_file,
None, False, False, 0.0)
expected_version = 'KorQuAD_v1.0'
with open(predict_file) as dataset_file:
dataset_json = json.load(dataset_file)
read_version = "_".join(dataset_json['version'].split("_")[:-1])
if (read_version != expected_version):
            logger.info('Evaluation expects ' + expected_version + ', but got dataset with ' + read_version)
dataset = dataset_json['data']
with open(os.path.join(output_dir, "predictions.json")) as prediction_file:
predictions = json.load(prediction_file)
_eval = korquad_eval(dataset, predictions)
logger.info(json.dumps(_eval))
return _eval
# In[6]:
def train_korquad(train_config):
# setup
basepath = '/jupyterhome/enpline_bert_competition/korquad-challenge/src'
logger.info("train_config : %s" % str(train_config))
output_dir='output'
checkpoint=os.path.join(basepath,'data/bert_small_ckpt.bin')
model_config=os.path.join(basepath,'data/bert_small.json')
vocab_file=os.path.join(basepath,'data/ko_vocab_32k.txt')
train_file=os.path.join(basepath, 'data/KorQuAD_v1.0_train.json')
predict_file=os.path.join(basepath, 'data/KorQuAD_v1.0_dev.json')
null_score_diff_threshold = 0.0
no_cuda = False
verbose_logging = False
fp16 = True
fp16_opt_level = 'O2'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device: {} n_gpu: {}, 16-bits training: {}".format(device, n_gpu, fp16))
random.seed(train_config['seed'])
np.random.seed(train_config['seed'])
torch.manual_seed(train_config['seed'])
if n_gpu > 0:
torch.cuda.manual_seed_all(train_config['seed'])
if not os.path.exists(output_dir):
os.makedirs(output_dir)
tokenizer = BertTokenizer(vocab_file, max_len=train_config['max_seq_length'], do_basic_tokenize=True)
# Prepare model
config = Config.from_json_file(model_config)
model = QuestionAnswering(config)
model.bert.load_state_dict(torch.load(checkpoint))
num_params = count_parameters(model)
logger.info("Total Parameter: %d" % num_params)
logger.info("Hyper-parameters: %s" % str(train_config))
paramfile_path = os.path.join(output_dir, 'hyperparameters.txt')
with open(paramfile_path, "w") as paramfile:
        logger.info("writing hyperparameters at %s", paramfile_path)
paramfile.write("%s" % str(train_config))
model.to(device)
cached_train_features_file = train_file + '_{0}_{1}_{2}'.format(str(train_config['max_seq_length']), str(train_config['doc_stride']),
str(train_config['max_query_length']))
train_examples = read_squad_examples(input_file=train_file, is_training=True, version_2_with_negative=False)
try:
with open(cached_train_features_file, "rb") as reader:
train_features = pickle.load(reader)
except:
train_features = convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=train_config['max_seq_length'],
doc_stride=train_config['doc_stride'],
max_query_length=train_config['max_query_length'],
is_training=True)
logger.info(" Saving train features into cached file %s", cached_train_features_file)
with open(cached_train_features_file, "wb") as writer:
pickle.dump(train_features, writer)
num_train_optimization_steps = int(len(train_features) / train_config['train_batch_size']) * train_config['num_train_epochs']
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
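    # Standard BERT fine-tuning practice: biases and LayerNorm weights are excluded from weight decay.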
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=train_config['learning_rate'],
eps=train_config['adam_epsilon'])
scheduler = WarmupLinearSchedule(optimizer,
warmup_steps=num_train_optimization_steps*0.1,
t_total=num_train_optimization_steps)
if fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=fp16_opt_level)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Batch size = %d", train_config['train_batch_size'])
logger.info(" Num steps = %d", num_train_optimization_steps)
num_train_step = num_train_optimization_steps
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=train_config['train_batch_size'])
model.train()
global_step = 0
epoch = 0
output_model_file = ''
# training
# for epoch_idx in trange(int(train_config['num_train_epochs'])):
# iter_bar = tqdm(train_dataloader, desc="Train(XX Epoch) Step(XX/XX) (Mean loss=X.X) (loss=X.X)")
for epoch_idx in range(int(train_config['num_train_epochs'])):
tr_step, total_loss, mean_loss = 0, 0., 0.
for step, batch in enumerate(train_dataloader):
if n_gpu == 1:
                batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering itself
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
loss = model(input_ids, segment_ids, input_mask, start_positions, end_positions)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), train_config['max_grad_norm'])
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), train_config['max_grad_norm'])
scheduler.step()
optimizer.step()
optimizer.zero_grad()
global_step += 1
tr_step += 1
total_loss += loss
mean_loss = total_loss / tr_step
# iter_bar.set_description("Train Step(%d / %d) (Mean loss=%5.5f) (loss=%5.5f)" %
# (global_step, num_train_step, mean_loss, loss.item()))
epoch += 1
logger.info("** ** * Saving file * ** **")
model_checkpoint = "korquad_%d.bin" % (epoch)
logger.info(model_checkpoint)
#save the last model
output_model_file = os.path.join(output_dir, model_checkpoint)
if n_gpu > 1:
torch.save(model.module.state_dict(), output_model_file)
else:
torch.save(model.state_dict(), output_model_file)
# Evaluate with final model
examples, features = load_and_cache_examples(predict_file, train_config['max_seq_length'], train_config['doc_stride'],
train_config['max_query_length'], tokenizer)
eval = evaluate(predict_file=predict_file, batch_size=16, device=device, output_dir=output_dir, n_best_size=train_config['n_best_size'], max_answer_length=train_config['max_answer_length'],
model=model, eval_examples=examples, eval_features=features)
    logger.info("-" * 16 + " evaluation " + "-" * 16)
logger.info(eval)
    track.log(f1=eval['f1'])
# In[ ]:
analysis = tune.run(train_korquad, config=search_space, scheduler=HyperBandScheduler(metric='f1', mode='max'), resources_per_trial={'gpu':1})
# In[ ]:
dfs = analysis.trial_dataframes
# In[ ]:
# ax = None
# for d in dfs.values():
# ax = d.mean_loss.plot(ax=ax, legend=True)
# ax.set_xlabel("Epochs")
# ax.set_ylabel("Mean Loss")
|
[
"logging.getLogger",
"models.modeling_bert.Config.from_json_file",
"ray.tune.track.log",
"apex.amp.scale_loss",
"utils.korquad_utils.RawResult",
"torch.cuda.device_count",
"io.open",
"ray.tune.grid_search",
"apex.amp.initialize",
"torch.cuda.is_available",
"ray.init",
"os.path.exists",
"json.dumps",
"utils.tokenization.BertTokenizer",
"ray.tune.loguniform",
"utils.korquad_utils.write_predictions",
"numpy.random.seed",
"apex.amp.master_params",
"ray.tune.schedulers.HyperBandScheduler",
"utils.optimization.AdamW",
"torch.utils.data.SequentialSampler",
"pickle.load",
"torch.utils.data.TensorDataset",
"numpy.random.uniform",
"utils.optimization.WarmupLinearSchedule",
"models.modeling_bert.QuestionAnswering",
"logging.basicConfig",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"pickle.dump",
"ray.shutdown",
"os.makedirs",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"random.seed",
"torch.utils.data.RandomSampler",
"debug.evaluate_korquad.evaluate",
"utils.korquad_utils.read_squad_examples",
"utils.korquad_utils.convert_examples_to_features",
"torch.tensor",
"torch.utils.data.DataLoader",
"json.load",
"torch.no_grad"
] |
[((887, 1030), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (906, 1030), False, 'import logging\n'), ((1070, 1097), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1087, 1097), False, 'import logging\n'), ((1380, 1394), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (1392, 1394), False, 'import ray\n'), ((1395, 1427), 'ray.init', 'ray.init', ([], {'webui_host': '"""127.0.0.1"""'}), "(webui_host='127.0.0.1')\n", (1403, 1427), False, 'import ray\n'), ((1666, 1700), 'ray.tune.loguniform', 'tune.loguniform', (['(0.0005)', '(5e-07)', '(10)'], {}), '(0.0005, 5e-07, 10)\n', (1681, 1700), False, 'from ray import tune\n'), ((1723, 1755), 'ray.tune.grid_search', 'tune.grid_search', (['[4, 8, 12, 16]'], {}), '([4, 8, 12, 16])\n', (1739, 1755), False, 'from ray import tune\n'), ((2309, 2407), 'utils.korquad_utils.read_squad_examples', 'read_squad_examples', ([], {'input_file': 'predict_file', 'is_training': '(False)', 'version_2_with_negative': '(False)'}), '(input_file=predict_file, is_training=False,\n version_2_with_negative=False)\n', (2328, 2407), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((2494, 2679), 'utils.korquad_utils.convert_examples_to_features', 'convert_examples_to_features', ([], {'examples': 'examples', 'tokenizer': 'tokenizer', 'max_seq_length': 'max_seq_length', 'doc_stride': 'doc_stride', 'max_query_length': 'max_query_length', 'is_training': '(False)'}), '(examples=examples, tokenizer=tokenizer,\n max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=\n max_query_length, is_training=False)\n', (2522, 2679), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((3100, 3168), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in eval_features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in eval_features], dtype=torch.long)\n', (3112, 3168), False, 'import torch\n'), ((3190, 3259), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in eval_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in eval_features], dtype=torch.long)\n', (3202, 3259), False, 'import torch\n'), ((3282, 3352), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in eval_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in eval_features], dtype=torch.long)\n', (3294, 3352), False, 'import torch\n'), ((3445, 3530), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_example_index'], {}), '(all_input_ids, all_input_mask, all_segment_ids, all_example_index\n )\n', (3458, 3530), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((3540, 3566), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (3557, 3566), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((3584, 3643), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': 'batch_size'}), '(dataset, sampler=sampler, batch_size=batch_size)\n', (3594, 3643), False, 'from torch.utils.data import DataLoader, 
SequentialSampler, RandomSampler, TensorDataset\n'), ((4014, 4049), 'tqdm.tqdm', 'tqdm', (['dataloader'], {'desc': '"""Evaluating"""'}), "(dataloader, desc='Evaluating')\n", (4018, 4049), False, 'from tqdm import tqdm, trange\n'), ((4844, 4888), 'os.path.join', 'os.path.join', (['output_dir', '"""predictions.json"""'], {}), "(output_dir, 'predictions.json')\n", (4856, 4888), False, 'import os\n'), ((4913, 4963), 'os.path.join', 'os.path.join', (['output_dir', '"""nbest_predictions.json"""'], {}), "(output_dir, 'nbest_predictions.json')\n", (4925, 4963), False, 'import os\n'), ((4968, 5143), 'utils.korquad_utils.write_predictions', 'write_predictions', (['eval_examples', 'eval_features', 'all_results', 'n_best_size', 'max_answer_length', '(False)', 'output_prediction_file', 'output_nbest_file', 'None', '(False)', '(False)', '(0.0)'], {}), '(eval_examples, eval_features, all_results, n_best_size,\n max_answer_length, False, output_prediction_file, output_nbest_file,\n None, False, False, 0.0)\n', (4985, 5143), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((5759, 5793), 'debug.evaluate_korquad.evaluate', 'korquad_eval', (['dataset', 'predictions'], {}), '(dataset, predictions)\n', (5771, 5793), True, 'from debug.evaluate_korquad import evaluate as korquad_eval\n'), ((6093, 6143), 'os.path.join', 'os.path.join', (['basepath', '"""data/bert_small_ckpt.bin"""'], {}), "(basepath, 'data/bert_small_ckpt.bin')\n", (6105, 6143), False, 'import os\n'), ((6160, 6206), 'os.path.join', 'os.path.join', (['basepath', '"""data/bert_small.json"""'], {}), "(basepath, 'data/bert_small.json')\n", (6172, 6206), False, 'import os\n'), ((6221, 6268), 'os.path.join', 'os.path.join', (['basepath', '"""data/ko_vocab_32k.txt"""'], {}), "(basepath, 'data/ko_vocab_32k.txt')\n", (6233, 6268), False, 'import os\n'), ((6283, 6337), 'os.path.join', 'os.path.join', (['basepath', '"""data/KorQuAD_v1.0_train.json"""'], {}), "(basepath, 'data/KorQuAD_v1.0_train.json')\n", (6295, 6337), False, 'import os\n'), ((6355, 6407), 'os.path.join', 'os.path.join', (['basepath', '"""data/KorQuAD_v1.0_dev.json"""'], {}), "(basepath, 'data/KorQuAD_v1.0_dev.json')\n", (6367, 6407), False, 'import os\n'), ((6636, 6661), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6659, 6661), False, 'import torch\n'), ((6751, 6784), 'random.seed', 'random.seed', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6762, 6784), False, 'import random\n'), ((6789, 6825), 'numpy.random.seed', 'np.random.seed', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6803, 6825), True, 'import numpy as np\n'), ((6830, 6869), 'torch.manual_seed', 'torch.manual_seed', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6847, 6869), False, 'import torch\n'), ((7034, 7127), 'utils.tokenization.BertTokenizer', 'BertTokenizer', (['vocab_file'], {'max_len': "train_config['max_seq_length']", 'do_basic_tokenize': '(True)'}), "(vocab_file, max_len=train_config['max_seq_length'],\n do_basic_tokenize=True)\n", (7047, 7127), False, 'from utils.tokenization import BertTokenizer\n'), ((7162, 7197), 'models.modeling_bert.Config.from_json_file', 'Config.from_json_file', (['model_config'], {}), '(model_config)\n', (7183, 7197), False, 'from models.modeling_bert import QuestionAnswering, Config\n'), ((7210, 7235), 'models.modeling_bert.QuestionAnswering', 'QuestionAnswering', (['config'], {}), '(config)\n', (7227, 7235), False, 'from models.modeling_bert 
import QuestionAnswering, Config\n'), ((7465, 7512), 'os.path.join', 'os.path.join', (['output_dir', '"""hyperparameters.txt"""'], {}), "(output_dir, 'hyperparameters.txt')\n", (7477, 7512), False, 'import os\n'), ((7976, 8071), 'utils.korquad_utils.read_squad_examples', 'read_squad_examples', ([], {'input_file': 'train_file', 'is_training': '(True)', 'version_2_with_negative': '(False)'}), '(input_file=train_file, is_training=True,\n version_2_with_negative=False)\n', (7995, 8071), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((9284, 9392), 'utils.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': "train_config['learning_rate']", 'eps': "train_config['adam_epsilon']"}), "(optimizer_grouped_parameters, lr=train_config['learning_rate'], eps=\n train_config['adam_epsilon'])\n", (9289, 9392), False, 'from utils.optimization import AdamW, WarmupLinearSchedule\n'), ((9448, 9570), 'utils.optimization.WarmupLinearSchedule', 'WarmupLinearSchedule', (['optimizer'], {'warmup_steps': '(num_train_optimization_steps * 0.1)', 't_total': 'num_train_optimization_steps'}), '(optimizer, warmup_steps=num_train_optimization_steps *\n 0.1, t_total=num_train_optimization_steps)\n', (9468, 9570), False, 'from utils.optimization import AdamW, WarmupLinearSchedule\n'), ((10349, 10418), 'torch.tensor', 'torch.tensor', (['[f.input_ids for f in train_features]'], {'dtype': 'torch.long'}), '([f.input_ids for f in train_features], dtype=torch.long)\n', (10361, 10418), False, 'import torch\n'), ((10440, 10510), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in train_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in train_features], dtype=torch.long)\n', (10452, 10510), False, 'import torch\n'), ((10533, 10604), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in train_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in train_features], dtype=torch.long)\n', (10545, 10604), False, 'import torch\n'), ((10631, 10705), 'torch.tensor', 'torch.tensor', (['[f.start_position for f in train_features]'], {'dtype': 'torch.long'}), '([f.start_position for f in train_features], dtype=torch.long)\n', (10643, 10705), False, 'import torch\n'), ((10730, 10802), 'torch.tensor', 'torch.tensor', (['[f.end_position for f in train_features]'], {'dtype': 'torch.long'}), '([f.end_position for f in train_features], dtype=torch.long)\n', (10742, 10802), False, 'import torch\n'), ((10820, 10925), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_start_positions', 'all_end_positions'], {}), '(all_input_ids, all_input_mask, all_segment_ids,\n all_start_positions, all_end_positions)\n', (10833, 10925), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((10974, 10999), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (10987, 10999), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((11023, 11118), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': "train_config['train_batch_size']"}), "(train_data, sampler=train_sampler, batch_size=train_config[\n 'train_batch_size'])\n", (11033, 11118), False, 'from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n'), ((12999, 13041), 'os.path.join', 'os.path.join', (['output_dir', 
'model_checkpoint'], {}), '(output_dir, model_checkpoint)\n', (13011, 13041), False, 'import os\n'), ((13799, 13823), 'ray.tune.track.log', 'track.log', ([], {'f1': "eval['f1']"}), "(f1=eval['f1'])\n", (13808, 13823), False, 'from ray.tune import track\n'), ((5250, 5268), 'io.open', 'open', (['predict_file'], {}), '(predict_file)\n', (5254, 5268), False, 'from io import open\n'), ((5309, 5332), 'json.load', 'json.load', (['dataset_file'], {}), '(dataset_file)\n', (5318, 5332), False, 'import json\n'), ((5720, 5746), 'json.load', 'json.load', (['prediction_file'], {}), '(prediction_file)\n', (5729, 5746), False, 'import json\n'), ((5810, 5827), 'json.dumps', 'json.dumps', (['_eval'], {}), '(_eval)\n', (5820, 5827), False, 'import json\n'), ((6896, 6944), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (["train_config['seed']"], {}), "(train_config['seed'])\n", (6922, 6944), False, 'import torch\n'), ((6957, 6983), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (6971, 6983), False, 'import os\n'), ((6993, 7016), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (7004, 7016), False, 'import os\n'), ((7267, 7289), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (7277, 7289), False, 'import torch\n'), ((7527, 7552), 'io.open', 'open', (['paramfile_path', '"""w"""'], {}), "(paramfile_path, 'w')\n", (7531, 7552), False, 'from io import open\n'), ((9902, 9960), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': 'fp16_opt_level'}), '(model, optimizer, opt_level=fp16_opt_level)\n', (9916, 9960), False, 'from apex import amp\n'), ((13910, 13953), 'ray.tune.schedulers.HyperBandScheduler', 'HyperBandScheduler', ([], {'metric': '"""f1"""', 'mode': '"""max"""'}), "(metric='f1', mode='max')\n", (13928, 13953), False, 'from ray.tune.schedulers import HyperBandScheduler\n'), ((4193, 4208), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4206, 4208), False, 'import torch\n'), ((5632, 5676), 'os.path.join', 'os.path.join', (['output_dir', '"""predictions.json"""'], {}), "(output_dir, 'predictions.json')\n", (5644, 5676), False, 'import os\n'), ((6586, 6611), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6609, 6611), False, 'import torch\n'), ((8095, 8133), 'io.open', 'open', (['cached_train_features_file', '"""rb"""'], {}), "(cached_train_features_file, 'rb')\n", (8099, 8133), False, 'from io import open\n'), ((8174, 8193), 'pickle.load', 'pickle.load', (['reader'], {}), '(reader)\n', (8185, 8193), False, 'import pickle\n'), ((8231, 8473), 'utils.korquad_utils.convert_examples_to_features', 'convert_examples_to_features', ([], {'examples': 'train_examples', 'tokenizer': 'tokenizer', 'max_seq_length': "train_config['max_seq_length']", 'doc_stride': "train_config['doc_stride']", 'max_query_length': "train_config['max_query_length']", 'is_training': '(True)'}), "(examples=train_examples, tokenizer=tokenizer,\n max_seq_length=train_config['max_seq_length'], doc_stride=train_config[\n 'doc_stride'], max_query_length=train_config['max_query_length'],\n is_training=True)\n", (8259, 8473), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((1563, 1589), 'numpy.random.uniform', 'np.random.uniform', (['(50)', '(100)'], {}), '(50, 100)\n', (1580, 1589), True, 'import numpy as np\n'), ((1889, 1915), 'numpy.random.uniform', 'np.random.uniform', (['(50)', '(100)'], {}), '(50, 100)\n', (1906, 1915), True, 
'import numpy as np\n'), ((1999, 2024), 'numpy.random.uniform', 'np.random.uniform', (['(12)', '(25)'], {}), '(12, 25)\n', (2016, 2024), True, 'import numpy as np\n'), ((2095, 2136), 'numpy.random.uniform', 'np.random.uniform', (['(1000000.0)', '(100000000.0)'], {}), '(1000000.0, 100000000.0)\n', (2112, 2136), True, 'import numpy as np\n'), ((4651, 4736), 'utils.korquad_utils.RawResult', 'RawResult', ([], {'unique_id': 'unique_id', 'start_logits': 'start_logits', 'end_logits': 'end_logits'}), '(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits\n )\n', (4660, 4736), False, 'from utils.korquad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions\n'), ((8642, 8680), 'io.open', 'open', (['cached_train_features_file', '"""wb"""'], {}), "(cached_train_features_file, 'wb')\n", (8646, 8680), False, 'from io import open\n'), ((8704, 8739), 'pickle.dump', 'pickle.dump', (['train_features', 'writer'], {}), '(train_features, writer)\n', (8715, 8739), False, 'import pickle\n'), ((12012, 12043), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (12026, 12043), False, 'from apex import amp\n'), ((12150, 12178), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (12167, 12178), False, 'from apex import amp\n')]
|
import torch
from lib.utils import is_parallel
import numpy as np
np.set_printoptions(threshold=np.inf)
import cv2
from sklearn.cluster import DBSCAN
def build_targets(cfg, predictions, targets, model, bdd=True):
'''
predictions
[16, 3, 32, 32, 85]
[16, 3, 16, 16, 85]
[16, 3, 8, 8, 85]
torch.tensor(predictions[i].shape)[[3, 2, 3, 2]]
[32,32,32,32]
[16,16,16,16]
[8,8,8,8]
targets[3,x,7]
t [index, class, x, y, w, h, head_index]
'''
# Build targets for compute_loss(), input targets(image,class,x,y,w,h)
if bdd:
if is_parallel(model):
det = model.module.det_out_bdd
else:
det = model.det_out_bdd
else:
if is_parallel(model):
det = model.module.det_out_bosch
else:
det = model.det_out_bosch
# print(type(model))
# det = model.model[model.detector_index]
# print(type(det))
na, nt = det.na, targets.shape[0] # number of anchors, targets
tcls, tbox, indices, anch = [], [], [], []
gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
g = 0.5 # bias
off = torch.tensor([[0, 0],
[1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
# [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
], device=targets.device).float() * g # offsets
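    # YOLOv5-style assignment: each ground-truth box is matched to its own grid cell plus up to two
    # neighbouring cells selected via the offsets above, which increases the number of positive samples.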
for i in range(det.nl):
anchors = det.anchors[i] #[3,2]
gain[2:6] = torch.tensor(predictions[i].shape)[[3, 2, 3, 2]] # xyxy gain
# Match targets to anchors
t = targets * gain
if nt:
# Matches
r = t[:, :, 4:6] / anchors[:, None] # wh ratio
j = torch.max(r, 1. / r).max(2)[0] < cfg.TRAIN.ANCHOR_THRESHOLD # compare
# j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
t = t[j] # filter
# Offsets
gxy = t[:, 2:4] # grid xy
gxi = gain[[2, 3]] - gxy # inverse
j, k = ((gxy % 1. < g) & (gxy > 1.)).T
l, m = ((gxi % 1. < g) & (gxi > 1.)).T
j = torch.stack((torch.ones_like(j), j, k, l, m))
t = t.repeat((5, 1, 1))[j]
offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
else:
t = targets[0]
offsets = 0
# Define
b, c = t[:, :2].long().T # image, class
gxy = t[:, 2:4] # grid xy
gwh = t[:, 4:6] # grid wh
gij = (gxy - offsets).long()
gi, gj = gij.T # grid xy indices
# Append
a = t[:, 6].long() # anchor indices
indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
anch.append(anchors[a]) # anchors
tcls.append(c) # class
return tcls, tbox, indices, anch
def morphological_process(image, kernel_size=5, func_type=cv2.MORPH_CLOSE):
"""
    morphological process to fill holes in the binary segmentation result
:param image:
:param kernel_size:
:return:
"""
if len(image.shape) == 3:
raise ValueError('Binary segmentation result image should be a single channel image')
    if image.dtype != np.uint8:
image = np.array(image, np.uint8)
kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size, kernel_size))
# close operation fille hole
closing = cv2.morphologyEx(image, func_type, kernel, iterations=1)
return closing
def connect_components_analysis(image):
"""
    connected components analysis to remove the small components
:param image:
:return:
"""
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
# print(gray_image.dtype)
return cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
def if_y(samples_x):
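    """Heuristic used by fitlane(): return False as soon as any sampled row intersects the
    component in a single column, in which case the caller fits y as a function of x instead
    of x as a function of y."""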
for sample_x in samples_x:
if len(sample_x):
# if len(sample_x) != (sample_x[-1] - sample_x[0] + 1) or sample_x[-1] == sample_x[0]:
if sample_x[-1] == sample_x[0]:
return False
return True
def fitlane(mask, sel_labels, labels, stats):
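    """For each group of selected component labels, sample 30 points across the component, fit a
    2nd-order polynomial (x over y when if_y() holds, otherwise y over x), extrapolate the curve
    across the image, and draw it back onto `mask` as a thick polyline so that broken lane
    segments are connected. Returns the updated mask."""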
H, W = mask.shape
for label_group in sel_labels:
states = [stats[k] for k in label_group]
x, y, w, h, _ = states[0]
# if len(label_group) > 1:
# print('in')
# for m in range(len(label_group)-1):
# labels[labels == label_group[m+1]] = label_group[0]
t = label_group[0]
# samples_y = np.linspace(y, H-1, 30)
# else:
samples_y = np.linspace(y, y+h-1, 30)
samples_x = [np.where(labels[int(sample_y)]==t)[0] for sample_y in samples_y]
if if_y(samples_x):
samples_x = [int(np.mean(sample_x)) if len(sample_x) else -1 for sample_x in samples_x]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_y = samples_y[samples_x != -1]
samples_x = samples_x[samples_x != -1]
func = np.polyfit(samples_y, samples_x, 2)
x_limits = np.polyval(func, H-1)
# if (y_max + h - 1) >= 720:
if x_limits < 0 or x_limits > W:
# if (y_max + h - 1) > 720:
# draw_y = np.linspace(y, 720-1, 720-y)
draw_y = np.linspace(y, y+h-1, h)
else:
# draw_y = np.linspace(y, y+h-1, y+h-y)
draw_y = np.linspace(y, H-1, H-y)
draw_x = np.polyval(func, draw_y)
# draw_y = draw_y[draw_x < W]
# draw_x = draw_x[draw_x < W]
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
else:
# if ( + w - 1) >= 1280:
samples_x = np.linspace(x, W-1, 30)
# else:
# samples_x = np.linspace(x, x_max+w-1, 30)
samples_y = [np.where(labels[:, int(sample_x)]==t)[0] for sample_x in samples_x]
samples_y = [int(np.mean(sample_y)) if len(sample_y) else -1 for sample_y in samples_y]
samples_x = np.array(samples_x)
samples_y = np.array(samples_y)
samples_x = samples_x[samples_y != -1]
samples_y = samples_y[samples_y != -1]
try:
func = np.polyfit(samples_x, samples_y, 2)
            except:
                continue  # polyfit failed; `func` would be undefined below, so skip this component
# y_limits = np.polyval(func, 0)
# if y_limits > 720 or y_limits < 0:
# if (x + w - 1) >= 1280:
# draw_x = np.linspace(x, 1280-1, 1280-x)
# else:
y_limits = np.polyval(func, 0)
if y_limits >= H or y_limits < 0:
draw_x = np.linspace(x, x+w-1, w+x-x)
else:
y_limits = np.polyval(func, W-1)
if y_limits >= H or y_limits < 0:
draw_x = np.linspace(x, x+w-1, w+x-x)
# if x+w-1 < 640:
# draw_x = np.linspace(0, x+w-1, w+x-x)
else:
draw_x = np.linspace(x, W-1, W-x)
draw_y = np.polyval(func, draw_x)
draw_points = (np.asarray([draw_x, draw_y]).T).astype(np.int32)
cv2.polylines(mask, [draw_points], False, 1, thickness=15)
return mask
def connect_lane(image, shadow_height=0):
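    """Post-process a binary lane mask: optionally zero out the top `shadow_height` rows, keep
    connected components larger than 400 pixels, and redraw each of them as a smooth polynomial
    curve via fitlane(). Returns the reconstructed single-channel mask."""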
if len(image.shape) == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
if shadow_height:
image[:shadow_height] = 0
mask = np.zeros((image.shape[0], image.shape[1]), np.uint8)
num_labels, labels, stats, centers = cv2.connectedComponentsWithStats(gray_image, connectivity=8, ltype=cv2.CV_32S)
# ratios = []
selected_label = []
for t in range(1, num_labels, 1):
_, _, _, _, area = stats[t]
if area > 400:
selected_label.append(t)
if len(selected_label) == 0:
return mask
else:
split_labels = [[label,] for label in selected_label]
mask_post = fitlane(mask, split_labels, labels, stats)
return mask_post
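# Minimal usage sketch (assumes `ll_seg_mask` is a single-channel uint8 binary lane mask produced
# elsewhere, e.g. by a segmentation head; the variable names are illustrative only):
#   post = morphological_process(ll_seg_mask, kernel_size=5)
#   lanes = connect_lane(post)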
|
[
"lib.utils.is_parallel",
"numpy.polyfit",
"torch.max",
"numpy.array",
"torch.arange",
"numpy.mean",
"numpy.asarray",
"numpy.linspace",
"numpy.polyval",
"torch.zeros_like",
"torch.ones_like",
"cv2.polylines",
"cv2.morphologyEx",
"cv2.cvtColor",
"torch.cat",
"numpy.set_printoptions",
"torch.tensor",
"numpy.zeros",
"cv2.connectedComponentsWithStats",
"cv2.getStructuringElement",
"torch.ones"
] |
[((66, 103), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (85, 103), True, 'import numpy as np\n'), ((1059, 1095), 'torch.ones', 'torch.ones', (['(7)'], {'device': 'targets.device'}), '(7, device=targets.device)\n', (1069, 1095), False, 'import torch\n'), ((3616, 3704), 'cv2.getStructuringElement', 'cv2.getStructuringElement', ([], {'shape': 'cv2.MORPH_ELLIPSE', 'ksize': '(kernel_size, kernel_size)'}), '(shape=cv2.MORPH_ELLIPSE, ksize=(kernel_size,\n kernel_size))\n', (3641, 3704), False, 'import cv2\n'), ((3749, 3805), 'cv2.morphologyEx', 'cv2.morphologyEx', (['image', 'func_type', 'kernel'], {'iterations': '(1)'}), '(image, func_type, kernel, iterations=1)\n', (3765, 3805), False, 'import cv2\n'), ((4146, 4224), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['gray_image'], {'connectivity': '(8)', 'ltype': 'cv2.CV_32S'}), '(gray_image, connectivity=8, ltype=cv2.CV_32S)\n', (4178, 4224), False, 'import cv2\n'), ((7970, 8022), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1])', 'np.uint8'], {}), '((image.shape[0], image.shape[1]), np.uint8)\n', (7978, 8022), True, 'import numpy as np\n'), ((8069, 8147), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['gray_image'], {'connectivity': '(8)', 'ltype': 'cv2.CV_32S'}), '(gray_image, connectivity=8, ltype=cv2.CV_32S)\n', (8101, 8147), False, 'import cv2\n'), ((587, 605), 'lib.utils.is_parallel', 'is_parallel', (['model'], {}), '(model)\n', (598, 605), False, 'from lib.utils import is_parallel\n'), ((721, 739), 'lib.utils.is_parallel', 'is_parallel', (['model'], {}), '(model)\n', (732, 739), False, 'from lib.utils import is_parallel\n'), ((3576, 3601), 'numpy.array', 'np.array', (['image', 'np.uint8'], {}), '(image, np.uint8)\n', (3584, 3601), True, 'import numpy as np\n'), ((4028, 4067), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (4040, 4067), False, 'import cv2\n'), ((4973, 5002), 'numpy.linspace', 'np.linspace', (['y', '(y + h - 1)', '(30)'], {}), '(y, y + h - 1, 30)\n', (4984, 5002), True, 'import numpy as np\n'), ((7826, 7865), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (7838, 7865), False, 'import cv2\n'), ((1710, 1744), 'torch.tensor', 'torch.tensor', (['predictions[i].shape'], {}), '(predictions[i].shape)\n', (1722, 1744), False, 'import torch\n'), ((3022, 3052), 'torch.cat', 'torch.cat', (['(gxy - gij, gwh)', '(1)'], {}), '((gxy - gij, gwh), 1)\n', (3031, 3052), False, 'import torch\n'), ((5247, 5266), 'numpy.array', 'np.array', (['samples_x'], {}), '(samples_x)\n', (5255, 5266), True, 'import numpy as np\n'), ((5291, 5310), 'numpy.array', 'np.array', (['samples_y'], {}), '(samples_y)\n', (5299, 5310), True, 'import numpy as np\n'), ((5432, 5467), 'numpy.polyfit', 'np.polyfit', (['samples_y', 'samples_x', '(2)'], {}), '(samples_y, samples_x, 2)\n', (5442, 5467), True, 'import numpy as np\n'), ((5491, 5514), 'numpy.polyval', 'np.polyval', (['func', '(H - 1)'], {}), '(func, H - 1)\n', (5501, 5514), True, 'import numpy as np\n'), ((5890, 5914), 'numpy.polyval', 'np.polyval', (['func', 'draw_y'], {}), '(func, draw_y)\n', (5900, 5914), True, 'import numpy as np\n'), ((6087, 6145), 'cv2.polylines', 'cv2.polylines', (['mask', '[draw_points]', '(False)', '(1)'], {'thickness': '(15)'}), '(mask, [draw_points], False, 1, thickness=15)\n', (6100, 6145), False, 'import cv2\n'), ((6221, 6246), 'numpy.linspace', 
'np.linspace', (['x', '(W - 1)', '(30)'], {}), '(x, W - 1, 30)\n', (6232, 6246), True, 'import numpy as np\n'), ((6542, 6561), 'numpy.array', 'np.array', (['samples_x'], {}), '(samples_x)\n', (6550, 6561), True, 'import numpy as np\n'), ((6586, 6605), 'numpy.array', 'np.array', (['samples_y'], {}), '(samples_y)\n', (6594, 6605), True, 'import numpy as np\n'), ((7058, 7077), 'numpy.polyval', 'np.polyval', (['func', '(0)'], {}), '(func, 0)\n', (7068, 7077), True, 'import numpy as np\n'), ((7544, 7568), 'numpy.polyval', 'np.polyval', (['func', 'draw_x'], {}), '(func, draw_x)\n', (7554, 7568), True, 'import numpy as np\n'), ((7657, 7715), 'cv2.polylines', 'cv2.polylines', (['mask', '[draw_points]', '(False)', '(1)'], {'thickness': '(15)'}), '(mask, [draw_points], False, 1, thickness=15)\n', (7670, 7715), False, 'import cv2\n'), ((1376, 1455), 'torch.tensor', 'torch.tensor', (['[[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]'], {'device': 'targets.device'}), '([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device)\n', (1388, 1455), False, 'import torch\n'), ((5720, 5748), 'numpy.linspace', 'np.linspace', (['y', '(y + h - 1)', 'h'], {}), '(y, y + h - 1, h)\n', (5731, 5748), True, 'import numpy as np\n'), ((5844, 5872), 'numpy.linspace', 'np.linspace', (['y', '(H - 1)', '(H - y)'], {}), '(y, H - 1, H - y)\n', (5855, 5872), True, 'import numpy as np\n'), ((6748, 6783), 'numpy.polyfit', 'np.polyfit', (['samples_x', 'samples_y', '(2)'], {}), '(samples_x, samples_y, 2)\n', (6758, 6783), True, 'import numpy as np\n'), ((7149, 7185), 'numpy.linspace', 'np.linspace', (['x', '(x + w - 1)', '(w + x - x)'], {}), '(x, x + w - 1, w + x - x)\n', (7160, 7185), True, 'import numpy as np\n'), ((7223, 7246), 'numpy.polyval', 'np.polyval', (['func', '(W - 1)'], {}), '(func, W - 1)\n', (7233, 7246), True, 'import numpy as np\n'), ((2400, 2418), 'torch.ones_like', 'torch.ones_like', (['j'], {}), '(j)\n', (2415, 2418), False, 'import torch\n'), ((7324, 7360), 'numpy.linspace', 'np.linspace', (['x', '(x + w - 1)', '(w + x - x)'], {}), '(x, x + w - 1, w + x - x)\n', (7335, 7360), True, 'import numpy as np\n'), ((7498, 7526), 'numpy.linspace', 'np.linspace', (['x', '(W - 1)', '(W - x)'], {}), '(x, W - 1, W - x)\n', (7509, 7526), True, 'import numpy as np\n'), ((2495, 2516), 'torch.zeros_like', 'torch.zeros_like', (['gxy'], {}), '(gxy)\n', (2511, 2516), False, 'import torch\n'), ((5152, 5169), 'numpy.mean', 'np.mean', (['sample_x'], {}), '(sample_x)\n', (5159, 5169), True, 'import numpy as np\n'), ((6026, 6054), 'numpy.asarray', 'np.asarray', (['[draw_x, draw_y]'], {}), '([draw_x, draw_y])\n', (6036, 6054), True, 'import numpy as np\n'), ((6447, 6464), 'numpy.mean', 'np.mean', (['sample_y'], {}), '(sample_y)\n', (6454, 6464), True, 'import numpy as np\n'), ((7596, 7624), 'numpy.asarray', 'np.asarray', (['[draw_x, draw_y]'], {}), '([draw_x, draw_y])\n', (7606, 7624), True, 'import numpy as np\n'), ((1137, 1176), 'torch.arange', 'torch.arange', (['na'], {'device': 'targets.device'}), '(na, device=targets.device)\n', (1149, 1176), False, 'import torch\n'), ((1948, 1969), 'torch.max', 'torch.max', (['r', '(1.0 / r)'], {}), '(r, 1.0 / r)\n', (1957, 1969), False, 'import torch\n')]
|
"""
ckwg +31
Copyright 2016 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Interface to VITAL camera_intrinsics objects
"""
import collections
import ctypes
import numpy
from vital.types.eigen import EigenArray
from vital.util import VitalErrorHandle, VitalObject
class CameraIntrinsics (VitalObject):
def __init__(self, focal_length=1., principle_point=(0, 0),
aspect_ratio=1., skew=0., dist_coeffs=(), from_cptr=None):
"""
:param focal_length: Focal length (default=1.0)
:type focal_length: float
:param principle_point: Principle point (default: [0,0]).
Values are copied into this structure.
:type principle_point: collections.Sequence[float]
:param aspect_ratio: Aspect ratio (default: 1.0)
:type aspect_ratio: float
:param skew: Skew (default: 0.0)
:type skew: float
:param dist_coeffs: Existing distortion coefficients (Default: empty).
Values are copied into this structure.
:type dist_coeffs: collections.Sequence[float]
"""
super(CameraIntrinsics, self).__init__(from_cptr, focal_length,
principle_point, aspect_ratio,
skew, dist_coeffs)
def _new(self, focal_length, principle_point, aspect_ratio, skew,
dist_coeffs):
"""
Construct a new vital::camera_intrinsics instance
:type focal_length: float
:type principle_point: collections.Sequence[float]
:type aspect_ratio: float
:type skew: float
:type dist_coeffs: collections.Sequence[float]
"""
ci_new = self.VITAL_LIB['vital_camera_intrinsics_new']
ci_new.argtypes = [
ctypes.c_double,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
ctypes.c_double,
ctypes.c_double,
EigenArray.c_ptr_type('X', 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR,
]
ci_new.restype = self.C_TYPE_PTR
# Make "vectors"
pp = EigenArray.from_iterable(principle_point, target_shape=(2, 1))
dc = EigenArray(len(dist_coeffs), dynamic_rows=True)
if len(dist_coeffs):
dc.T[:] = dist_coeffs
with VitalErrorHandle() as eh:
return ci_new(focal_length, pp, aspect_ratio, skew, dc, eh)
def _destroy(self):
ci_dtor = self.VITAL_LIB['vital_camera_intrinsics_destroy']
ci_dtor.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
with VitalErrorHandle() as eh:
ci_dtor(self, eh)
@property
def focal_length(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_focal_length']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def principle_point(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_principle_point']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(2, from_cptr=m_ptr, owns_data=True)
@property
def aspect_ratio(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_aspect_ratio']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def skew(self):
f = self.VITAL_LIB['vital_camera_intrinsics_get_skew']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = ctypes.c_double
with VitalErrorHandle() as eh:
return f(self, eh)
@property
def dist_coeffs(self):
""" Get the distortion coefficients array """
f = self.VITAL_LIB['vital_camera_intrinsics_get_dist_coeffs']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type('X', 1, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(dynamic_rows=1, from_cptr=m_ptr, owns_data=True)
def __eq__(self, other):
if isinstance(other, CameraIntrinsics):
return (
self.focal_length == other.focal_length and
numpy.allclose(self.principle_point, other.principle_point) and
self.aspect_ratio == other.aspect_ratio and
self.skew == other.skew and
numpy.allclose(self.dist_coeffs, other.dist_coeffs)
)
return False
def __ne__(self, other):
return not (self == other)
def as_matrix(self):
"""
Access the intrinsics as an upper triangular matrix
**Note:** *This matrix includes the focal length, principal point,
aspect ratio, and skew, but does not model distortion.*
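        With the usual pinhole convention the matrix has the form shown below (illustrative;
        the exact aspect-ratio handling is defined by the underlying vital C++ implementation)::

            [[f, skew, pp_x],
             [0,  f_y, pp_y],
             [0,    0,    1]]
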
:return: 3x3 upper triangular matrix
"""
f = self.VITAL_LIB['vital_camera_intrinsics_as_matrix']
f.argtypes = [self.C_TYPE_PTR, VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(3, 3, ctypes.c_double)
with VitalErrorHandle() as eh:
m_ptr = f(self, eh)
return EigenArray(3, 3, from_cptr=m_ptr, owns_data=True)
def map_2d(self, norm_pt):
"""
Map normalized image coordinates into actual image coordinates
        This function applies both the distortion model and the calibration
        matrix to map into actual image coordinates.
:param norm_pt: Normalized image coordinate to map to an image
coordinate (2-element sequence).
:type norm_pt: collections.Sequence[float]
:return: Mapped 2D image coordinate
:rtype: EigenArray[float]
"""
assert len(norm_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_map_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = norm_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def map_3d(self, norm_hpt):
"""
Map a 3D point in camera coordinates into actual image coordinates
:param norm_hpt: Normalized coordinate to map to an image coordinate
(3-element sequence)
:type norm_hpt: collections.Sequence[float]
:return: Mapped 2D image coordinate
:rtype: EigenArray[float]
"""
assert len(norm_hpt) == 3, "Input sequence was not of length 3"
f = self.VITAL_LIB['vital_camera_intrinsics_map_3d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(3, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(3)
p.T[:] = norm_hpt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def unmap_2d(self, pt):
"""
Unmap actual image coordinates back into normalized image coordinates
        This function applies both the inverse calibration matrix
        and undistortion of the normalized coordinates.
:param pt: Actual image 2D point to un-map.
:return: Un-mapped normalized image coordinate.
"""
assert len(pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_unmap_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def distort_2d(self, norm_pt):
"""
Map normalized image coordinates into distorted coordinates
:param norm_pt: Normalized 2D image coordinate.
:return: Distorted 2D coordinate.
"""
assert len(norm_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_distort_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = norm_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
def undistort_2d(self, dist_pt):
"""
Unmap distorted normalized coordinates into normalized coordinates
:param dist_pt: Distorted 2D coordinate to un-distort.
:return: Normalized 2D image coordinate.
"""
assert len(dist_pt) == 2, "Input sequence was not of length 2"
f = self.VITAL_LIB['vital_camera_intrinsics_undistort_2d']
f.argtypes = [self.C_TYPE_PTR,
EigenArray.c_ptr_type(2, 1, ctypes.c_double),
VitalErrorHandle.C_TYPE_PTR]
f.restype = EigenArray.c_ptr_type(2, 1, ctypes.c_double)
p = EigenArray(2)
p.T[:] = dist_pt
with VitalErrorHandle() as eh:
m_ptr = f(self, p, eh)
return EigenArray(2, 1, from_cptr=m_ptr, owns_data=True)
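# Illustrative usage (hypothetical values; assumes `K` is an intrinsics instance
# created elsewhere with this class):
#   img_pt  = K.map_2d([0.25, -0.1])      # normalized -> actual image coordinates
#   norm_pt = K.unmap_2d([640.0, 480.0])  # actual image -> normalized coordinates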
|
[
"vital.types.eigen.EigenArray.from_iterable",
"numpy.allclose",
"vital.types.eigen.EigenArray.c_ptr_type",
"vital.types.eigen.EigenArray",
"vital.util.VitalErrorHandle"
] |
[((3614, 3676), 'vital.types.eigen.EigenArray.from_iterable', 'EigenArray.from_iterable', (['principle_point'], {'target_shape': '(2, 1)'}), '(principle_point, target_shape=(2, 1))\n', (3638, 3676), False, 'from vital.types.eigen import EigenArray\n'), ((4645, 4689), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (4666, 4689), False, 'from vital.types.eigen import EigenArray\n'), ((5641, 5687), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['"""X"""', '(1)', 'ctypes.c_double'], {}), "('X', 1, ctypes.c_double)\n", (5662, 5687), False, 'from vital.types.eigen import EigenArray\n'), ((6798, 6842), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(3)', '(3)', 'ctypes.c_double'], {}), '(3, 3, ctypes.c_double)\n', (6819, 6842), False, 'from vital.types.eigen import EigenArray\n'), ((7803, 7847), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (7824, 7847), False, 'from vital.types.eigen import EigenArray\n'), ((7860, 7873), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (7870, 7873), False, 'from vital.types.eigen import EigenArray\n'), ((8728, 8772), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (8749, 8772), False, 'from vital.types.eigen import EigenArray\n'), ((8785, 8798), 'vital.types.eigen.EigenArray', 'EigenArray', (['(3)'], {}), '(3)\n', (8795, 8798), False, 'from vital.types.eigen import EigenArray\n'), ((9654, 9698), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (9675, 9698), False, 'from vital.types.eigen import EigenArray\n'), ((9711, 9724), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (9721, 9724), False, 'from vital.types.eigen import EigenArray\n'), ((10431, 10475), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (10452, 10475), False, 'from vital.types.eigen import EigenArray\n'), ((10488, 10501), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (10498, 10501), False, 'from vital.types.eigen import EigenArray\n'), ((11238, 11282), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (11259, 11282), False, 'from vital.types.eigen import EigenArray\n'), ((11295, 11308), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {}), '(2)\n', (11305, 11308), False, 'from vital.types.eigen import EigenArray\n'), ((3320, 3364), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (3341, 3364), False, 'from vital.types.eigen import EigenArray\n'), ((3436, 3482), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['"""X"""', '(1)', 'ctypes.c_double'], {}), "('X', 1, ctypes.c_double)\n", (3457, 3482), False, 'from vital.types.eigen import EigenArray\n'), ((3815, 3833), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (3831, 3833), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4093, 4111), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (4109, 4111), False, 'from vital.util import 
VitalErrorHandle, VitalObject\n'), ((4380, 4398), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (4396, 4398), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4703, 4721), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (4719, 4721), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((4780, 4826), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, from_cptr=m_ptr, owns_data=True)\n', (4790, 4826), False, 'from vital.types.eigen import EigenArray\n'), ((5058, 5076), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (5074, 5076), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((5330, 5348), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (5346, 5348), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((5701, 5719), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (5717, 5719), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((5778, 5837), 'vital.types.eigen.EigenArray', 'EigenArray', ([], {'dynamic_rows': '(1)', 'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(dynamic_rows=1, from_cptr=m_ptr, owns_data=True)\n', (5788, 5837), False, 'from vital.types.eigen import EigenArray\n'), ((6856, 6874), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (6872, 6874), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((6933, 6982), 'vital.types.eigen.EigenArray', 'EigenArray', (['(3)', '(3)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(3, 3, from_cptr=m_ptr, owns_data=True)\n', (6943, 6982), False, 'from vital.types.eigen import EigenArray\n'), ((7686, 7730), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (7707, 7730), False, 'from vital.types.eigen import EigenArray\n'), ((7912, 7930), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (7928, 7930), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((7992, 8041), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (8002, 8041), False, 'from vital.types.eigen import EigenArray\n'), ((8611, 8655), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(3)', '(1)', 'ctypes.c_double'], {}), '(3, 1, ctypes.c_double)\n', (8632, 8655), False, 'from vital.types.eigen import EigenArray\n'), ((8838, 8856), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (8854, 8856), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((8918, 8967), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (8928, 8967), False, 'from vital.types.eigen import EigenArray\n'), ((9537, 9581), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (9558, 9581), False, 'from vital.types.eigen import EigenArray\n'), ((9758, 9776), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (9774, 9776), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((9838, 9887), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', 
(9848, 9887), False, 'from vital.types.eigen import EigenArray\n'), ((10314, 10358), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (10335, 10358), False, 'from vital.types.eigen import EigenArray\n'), ((10540, 10558), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (10556, 10558), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((10620, 10669), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (10630, 10669), False, 'from vital.types.eigen import EigenArray\n'), ((11121, 11165), 'vital.types.eigen.EigenArray.c_ptr_type', 'EigenArray.c_ptr_type', (['(2)', '(1)', 'ctypes.c_double'], {}), '(2, 1, ctypes.c_double)\n', (11142, 11165), False, 'from vital.types.eigen import EigenArray\n'), ((11347, 11365), 'vital.util.VitalErrorHandle', 'VitalErrorHandle', ([], {}), '()\n', (11363, 11365), False, 'from vital.util import VitalErrorHandle, VitalObject\n'), ((11427, 11476), 'vital.types.eigen.EigenArray', 'EigenArray', (['(2)', '(1)'], {'from_cptr': 'm_ptr', 'owns_data': '(True)'}), '(2, 1, from_cptr=m_ptr, owns_data=True)\n', (11437, 11476), False, 'from vital.types.eigen import EigenArray\n'), ((6013, 6072), 'numpy.allclose', 'numpy.allclose', (['self.principle_point', 'other.principle_point'], {}), '(self.principle_point, other.principle_point)\n', (6027, 6072), False, 'import numpy\n'), ((6197, 6248), 'numpy.allclose', 'numpy.allclose', (['self.dist_coeffs', 'other.dist_coeffs'], {}), '(self.dist_coeffs, other.dist_coeffs)\n', (6211, 6248), False, 'import numpy\n')]
|
import numpy as np
import util.data
def ndcg(X_test, y_test, y_pred):
    # DataFrame.copy() takes no column list, so copy the frame and attach the scores
    Xy_pred = X_test.copy()
    Xy_pred['score_pred'] = y_pred
    Xy_pred['score'] = y_test
    # sort_values returns a new frame, so the result must be assigned back
    Xy_pred = Xy_pred.sort_values(['srch_id', 'score_pred'], ascending=[True, False])
dcg_test = DCG_dict(Xy_pred)
ndcg = np.mean(np.array(list(dcg_test.values())))
return ndcg
def sort_pred_test(x_test, y_test, y_pred):
# calculate dcg of test set per srch_id
Xy_pred = util.data.Xy_pred(x_test, y_pred)
# put true y values on indexes, do not sort !
Xy_true = util.data.Xy_pred(x_test, y_test)
return Xy_pred, Xy_true
def dcg_at_k(r, k, method=0):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=0):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
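# Worked example (illustrative): for relevance scores r = [3, 2, 3, 0, 1, 2],
# dcg_at_k(r, 6) = 3 + 2/log2(2) + 3/log2(3) + 0 + 1/log2(5) + 2/log2(6) ~ 8.10,
# the ideal ordering [3, 3, 2, 2, 1, 0] gives ~ 8.69, so ndcg_at_k(r, 6) ~ 0.93.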
def DCG_dict(data):
DCG = {}
# for id in data['srch_id']:
# rows = rows_srch_id(data, id)
# r = relevance_scores(rows)
r = []
prev_srch_id = -1
position = 0
for i in data.index.tolist():
if prev_srch_id == -1:
row = data.loc[i]
cur_srch_id = row.srch_id
prev_srch_id = 0
row = data.loc[i]
next_id = row.srch_id
score = row.score
# compute position
if cur_srch_id != next_id:
DCG[cur_srch_id] = ndcg_at_k(r, k=len(r))
cur_srch_id = next_id
r = []
r.append(score)
position += 1
else:
r.append(score)
position += 1
DCG[cur_srch_id] = ndcg_at_k(r, k=len(r))
return DCG
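# Note: `data` is expected to be a DataFrame with 'srch_id' and 'score' columns,
# already sorted by srch_id and descending predicted score (as prepared in ndcg()
# above); DCG_dict then returns one NDCG value per srch_id.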
|
[
"numpy.asfarray",
"numpy.arange"
] |
[((683, 697), 'numpy.asfarray', 'np.asfarray', (['r'], {}), '(r)\n', (694, 697), True, 'import numpy as np\n'), ((790, 814), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 1)'], {}), '(2, r.size + 1)\n', (799, 814), True, 'import numpy as np\n'), ((881, 905), 'numpy.arange', 'np.arange', (['(2)', '(r.size + 2)'], {}), '(2, r.size + 2)\n', (890, 905), True, 'import numpy as np\n')]
|
from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import gsd
import gsd.fl
import numpy as np
import os
import sys
import datetime
import time
import pickle
from shutil import copyfile
import inspect
import md_tools27 as md_tools
from multiprocessing import Pool
"""
This script plots diffusion vs Gamma in log(D)-log(Gamma) or log(D)-Gamma format. The data are read from a .dat file that must be precalculated by plotDiff_pG_parallel.py.
Arguments: --cmfree, --cmfixed for the freely moving center of mass regime and for the v_cm-subtracted regime, respectively.
--sf <subfolder>: subfolder to process (e.g. p32)
--NP <number>: number of subprocesses to use for parallelization; gives a nearly proportional speed-up by a factor of <number>.
"""
#Use LaTeX for text
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
rc('text', usetex=True)
def read_log(path):
coulomb_status = ''
with open(path + '/log.txt', 'r') as f:
for i, line in enumerate(f.readlines()):
if i == 0:
timestamp = line.rstrip()
if line[:10] == '# Periodic':
words = line.split(' ')
p = int(words[9])
A = float(words[6])
if line[:4] == '# a ':
words = line.split(' ')
repeat_x = int(words[6])
repeat_y = int(words[9])
Np = 2*repeat_x*repeat_y
if line[:7] == '# Gamma':
words = line.split(' ')
dt = float(words[9])
if line[:9] == '# Coulomb':
words = line.split(' ')
coulomb_status = words[-1]
if line[:9] == '# N_therm':
words = line.split(' ')
snap_period = int(float(words[5]))
# T_gamma = 31.8265130646
if line[:9] == '# T_gamma':
words = line.split(' ')
T_gamma = float(words[3])
return {'timestamp': timestamp,'A':A, 'p':p, 'Np': Np, 'coulomb_status':coulomb_status, 'snap_period':snap_period,\
'dt':dt, 'T_gamma':T_gamma}
def OLS(x, y):
'''OLS: x must be a vertical two-dimensional array'''
X = np.hstack((np.reshape(np.ones(x.shape[0]), (-1,1)), x))#.transpose()
Xpr = X.transpose()
beta = np.dot(np.dot(np.linalg.inv(np.dot(Xpr, X)), Xpr), y)
#Estimate errors
sigma_sq = np.dot(y - np.dot(X, beta), y - np.dot(X, beta))/(len(y) - 1.)
sigma_beta_sq = sigma_sq*np.linalg.inv(np.dot(Xpr, X))
return beta, sigma_beta_sq # = [f_0, df/d(A^2)]
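# Minimal check of OLS (hypothetical numbers): x = [[0], [1], [2]], y = [1, 3, 5]
# lie exactly on a line, so OLS(np.array([[0.], [1.], [2.]]), np.array([1., 3., 5.]))
# returns beta ~ [1, 2] (intercept 1, slope 2) with vanishing error estimates.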
def diffusion_from_transport_gsd(folder_path, f_name, center_fixed = True, useframes = -1):
"""
Diffusion constant D is calculated from 4Dt = <(r(t) - r(0))^2>, or 2D_x*t = <(x(t) - x(0))^2>.
The average is calculated over all particles and over different time origins.
Time origins go from 0 to n_frames/2, and t goes from 0 to n_frames/2. This way,
the data are always within the trajectory.
    center_fixed = True: eliminate overall motion of center of mass
return D_x, D_y
D_x, D_y diffusion for x- and y-coordinates;
"""
params = read_log(folder_path)
if folder_path[-1] != '/':
folder_path = folder_path + '/'
with gsd.fl.GSDFile(folder_path + f_name, 'rb') as f:
n_frames = f.nframes
box = f.read_chunk(frame=0, name='configuration/box')
    half_frames = int(n_frames/2) - 1 # slightly less than half to avoid out-of-bounds indexing
if useframes < 1 or useframes > half_frames:
useframes = half_frames
t_step = f.read_chunk(frame=0, name='configuration/step')
n_p = f.read_chunk(frame=0, name='particles/N')
x_sq_av = np.zeros(useframes)
y_sq_av = np.zeros(useframes)
for t_origin in range(n_frames - useframes - 1):
pos_0 = f.read_chunk(frame=t_origin, name='particles/position')
mean_pos_0 = np.mean(pos_0, axis = 0)
pos = pos_0
pos_raw = pos_0
for j_frame in range(useframes):
pos_m1 = pos
pos_m1_raw = pos_raw
pos_raw = f.read_chunk(frame=j_frame + t_origin, name='particles/position') - pos_0
pos = md_tools.correct_jumps(pos_raw, pos_m1, pos_m1_raw, box[0], box[1])
if center_fixed:
pos -= np.mean(pos, axis = 0) - mean_pos_0 #correct for center of mass movement
x_sq_av[j_frame] += np.mean(pos[:,0]**2)
y_sq_av[j_frame] += np.mean(pos[:,1]**2)
x_sq_av /= (n_frames - useframes - 1)
y_sq_av /= (n_frames - useframes - 1)
# OLS estimate for beta_x[0] + beta_x[1]*t = <|x_i(t) - x_i(0)|^2>
a = np.ones((useframes, 2)) # matrix a = ones(half_frames) | (0; dt; 2dt; 3dt; ...)
a[:,1] = params['snap_period']*params['dt']*np.cumsum(np.ones(useframes), axis = 0) - params['dt']
b_cutoff = int(useframes/10) #cutoff to get only linear part of x_sq_av, makes results a bit more clean
beta_x = np.linalg.lstsq(a[b_cutoff:, :], x_sq_av[b_cutoff:], rcond=-1)
beta_y = np.linalg.lstsq(a[b_cutoff:, :], y_sq_av[b_cutoff:], rcond=-1)
fig, ax = plt.subplots(1,1, figsize=(7,5))
ax.scatter(a[:,1], x_sq_av, label='$\\langle x^2\\rangle$')
ax.scatter(a[:,1], y_sq_av, label='$\\langle y^2\\rangle$')
ax.legend(loc=7)
ax.set_xlabel('$t$')
ax.set_ylabel('$\\langle r_i^2 \\rangle$')
if center_fixed:
center_fixed_str = 'cm_fixed'
else:
center_fixed_str = 'cm_free'
fig.savefig(folder_path + 'r2_diff_' + f_name +'_' + center_fixed_str + '.png')
plt.close('all')
D_x = beta_x[0][1]/2
D_y = beta_y[0][1]/2
print('D_x = {}'.format(D_x))
print('D_y = {}'.format(D_y))
return (D_x, D_y)
def diffusion_helper(arg_dict):
return diffusion_from_transport_gsd(arg_dict['sf'], arg_dict['fname'], center_fixed=arg_dict['center_fixed'], useframes = arg_dict['useframes'])
def Teff_from_gsd(args):
fpath = args['sf'] + '/' + args['fname']
with gsd.fl.GSDFile(fpath, 'rb') as f:
n_frames = f.nframes
N = f.read_chunk(frame=0, name='particles/N')
v = np.zeros((n_frames, int(N), 2))
for t in range(n_frames):
v_t = f.read_chunk(frame=t, name='particles/velocity')
v[t, :, 0] = v_t[:,0]
v[t, :, 1] = v_t[:,1]
#v_cm = np.mean(v, axis=1)
#mean_v_cmx = np.mean(v_cm[:,0])
#print("mean v_cm = {}".format(mean_v_cmx))
#sigma_v_cmx = np.sqrt(np.mean((v_cm[:,0] - mean_v_cmx)**2))/np.sqrt(n_frames)
#print("error = {}".format(sigma_v_cmx))
#mean_v_cmy = np.mean(v_cm[:,1])
#print("mean v_cm_y = {}".format(mean_v_cmy))
#sigma_v_cmy = np.sqrt(np.mean((v_cm[:,1] - mean_v_cmy)**2))/np.sqrt(n_frames)
#print("error_y = {}".format(sigma_v_cmy))
#v_rel = np.swapaxes(v, 0,1) - v_cm
v_swap = np.swapaxes(v, 0,1)
#T_eff = 0.5*np.mean(v_rel[:,:,0]**2 + v_rel[:,:,1]**2, axis = 0)
T_eff = 0.5*np.mean(v_swap[:,:,0]**2 + v_swap[:,:,1]**2, axis = 0)
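    # With m = k_B = 1 (simulation units), 2D equipartition gives T = <v_x^2 + v_y^2>/2
    # per particle; the line above evaluates this, averaged over frames.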
print('T_eff = {}'.format(np.mean(T_eff)))
return np.mean(T_eff)
def print_help():
print('This script plots diffusion vs Gamma for data taken in diffusion measurements.')
print('===========================================================')
print('Usage: python plotDiff_pG.py diffusion_data/a32x32_* [--options]')
print('This will process all folders that match mobility_data/a32x32_*')
print('===========================================================')
print('Options:')
print('\t--cmfixed will subtract the displacement of the center of mass in diffusion calculation (default behavior)')
    print('\t--cmfree will NOT subtract the displacement of the center of mass in diffusion calculation')
print('\t--showtext will print text info on the plots')
print('\t--NP N - will use N parallel processes in the calculations')
print('\t--sf [subfolder] - will only process the specified subfolder in all folders')
print('\t--help or -h will print this help')
## =======================================================================
# Units
unit_M = 9.10938356e-31 # kg, electron mass
unit_D = 1e-6 # m, micron
unit_E = 1.38064852e-23 # m^2*kg/s^2
unit_t = np.sqrt(unit_M*unit_D**2/unit_E) # = 2.568638150515e-10 s
epsilon_0 = 8.854187817e-12 # F/m = C^2/(J*m), vacuum permittivity
hbar = 1.0545726e-27/(unit_E*1e7)/unit_t
m_e = 9.10938356e-31/unit_M
unit_Q = np.sqrt(unit_E*1e7*unit_D*1e2) # Coulombs
unit_Qe = unit_Q/4.8032068e-10 # e, unit charge in units of elementary charge e
e_charge = 1/unit_Qe # electron charge in units of unit_Q
curr_fname = inspect.getfile(inspect.currentframe())
curr_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
##=======================================================================
# Make a list of folders we want to process
cm_fixed = True #default that can be changed by --cmfree
cm_fixed_str = 'cm_fixed'
show_text = False
Nproc = 1
selected_subfolders = []
folder_list = []
for i in range(len(sys.argv)):
if os.path.isdir(sys.argv[i]):
folder_list.append(sys.argv[i])
elif sys.argv[i] == '--sf':
try:
selected_subfolders.append(sys.argv[i+1])
except:
            raise RuntimeError('Could not recognize the value of --sf. argv={}'.format(sys.argv))
elif sys.argv[i] == '--showtext':
show_text = True
elif sys.argv[i] == '--GC':
gamma_c = float(sys.argv[i+1])
elif sys.argv[i] == '--help' or sys.argv[i] == '-h':
print_help()
exit()
try:
print('Gamma_c = {}'.format(gamma_c))
except:
raise RuntimeError('Gamma_c not specified. Use --GC argument.')
print('Selected subfolders: {}'.format(selected_subfolders))
# Make a list of subfolders p### in each folders
subfolder_lists = []
for folder in folder_list:
sf_list = []
for item in os.walk(folder):
# subfolder name and contained files
sf_list.append((item[0], item[2]))
sf_list = sf_list[1:]
subfolder_lists.append(sf_list)
##=======================================================================
for ifold, folder in enumerate(folder_list):
print('==========================================================')
print(folder)
print('==========================================================')
# Keep only selected subfolders in the list is there is selection
if len(selected_subfolders) > 0:
sf_lists_to_go = []
for isf, sf in enumerate(subfolder_lists[ifold]):
sf_words = sf[0].split('/')
if sf_words[-1] in selected_subfolders:
sf_lists_to_go.append(sf)
else:
sf_lists_to_go = subfolder_lists[ifold]
for isf, sf in enumerate(sf_lists_to_go):
sf_words = sf[0].split('/')
print(sf_words[-1])
if sf_words[-1][0] != 'p':
raise ValueError("Expected subfolder name to start with `p`, in {}".format(fname))
log_data = read_log(sf[0])
folder_name = folder.split('/')[-1]
if sf[0][-1] == '/':
sf[0] = sf[0][:-1]
sf_name = sf[0].split('/')[-1]
#Read Dx Dy vs Gamma from the .dat file
#DxDy_data = {'Dx_arr':Dx_arr, 'Dy_arr':Dy_arr, 'Dx_arr_gauss': Dx_arr*cm2s_convert, 'Dy_arr_gauss':Dy_arr*cm2s_convert, \
# 'gamma_arr':gamma_arr, 'gamma_eff_arr':gamma_eff_arr}
cm_fixed_str = 'cm_fixed'
with open(sf[0] + '/DxDy_data_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.dat', 'r') as ff:
DxDy_data = pickle.load(ff)
Dx_arr = DxDy_data['Dx_arr']
Dy_arr = DxDy_data['Dy_arr']
gamma_eff_arr = DxDy_data['gamma_eff_arr']
# Remove points where gamma > gamma_c
clip_ind = np.where(gamma_eff_arr < gamma_c)[0]
Dx_arr_clip = Dx_arr[clip_ind]
Dy_arr_clip = Dy_arr[clip_ind]
gamma_arr_clip = gamma_eff_arr[clip_ind]
print('Dx_arr = {}'.format(Dx_arr_clip))
print('Dy_arr = {}'.format(Dy_arr_clip))
## ======================================================================
## Plot Dx,Dy vs effective G (calculated from data rather then read from the log)
# in Gaussian units
labelfont = 28
tickfont = labelfont - 4
legendfont = labelfont - 4
cm2s_convert = unit_D**2/unit_t*1e4
fig, ax1 = plt.subplots(1,1, figsize=(7,6))
scatter1 = ax1.scatter(gamma_arr_clip, np.log(Dx_arr_clip*cm2s_convert), label='$D_\\perp$', color = 'green', marker='o')
ax1.set_xlabel('$\\Gamma$', fontsize=labelfont)
ax1.set_ylabel('$\\log(D/D_0)$', fontsize=labelfont)
scatter2 = ax1.scatter(gamma_arr_clip, np.log(Dy_arr_clip*cm2s_convert), label='$D_\\parallel$', color = 'red', marker='s')
#ax1.set_xlim([np.min(gamma_eff_arr) - 2, np.max(gamma_eff_arr) + 2])
ax1.legend(loc=1, fontsize=legendfont)
ax1.tick_params(labelsize= tickfont)
ax1.locator_params(nbins=6, axis='y')
formatter = mticker.ScalarFormatter(useMathText=True)
formatter.set_powerlimits((-3,2))
ax1.yaxis.set_major_formatter(formatter)
#Place text
if show_text:
text_list = ['$\\Gamma_c = {:.1f}$'.format(gamma_c)]
y_lim = ax1.get_ylim()
x_lim = ax1.get_xlim()
h = y_lim[1] - y_lim[0]
w = x_lim[1] - x_lim[0]
text_x = x_lim[0] + 0.5*w
text_y = y_lim[1] - 0.05*h
if type(text_list) == list:
n_str = len(text_list)
for i_fig in range(n_str):
ax1.text(text_x, text_y - 0.05*h*i_fig, text_list[i_fig])
elif type(text_list) == str:
ax1.text(text_x, text_y, text_list)
else:
raise TypeError('text_list must be a list of strings or a string')
#fig.patch.set_alpha(alpha=1)
plt.tight_layout()
fig.savefig(folder + '/' + 'DxDy_G_log_' + sf_name + '_' + folder_name + '_{:.2f}'.format(gamma_c) + '.pdf')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.png')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.eps')
#fig.savefig(sf[0] + '/' + 'DxDy_Geff_' + cm_fixed_str + '_' + sf_name + '_' + folder_name + '.pdf')
plt.close('all')
|
[
"numpy.sqrt",
"numpy.log",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.rc",
"os.walk",
"numpy.mean",
"md_tools27.correct_jumps",
"numpy.where",
"matplotlib.pyplot.close",
"numpy.dot",
"os.path.isdir",
"numpy.linalg.lstsq",
"numpy.ones",
"matplotlib.use",
"pickle.load",
"inspect.currentframe",
"numpy.swapaxes",
"gsd.fl.GSDFile",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] |
[((59, 73), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (66, 73), True, 'import matplotlib as mpl\n'), ((880, 949), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\n", (882, 949), False, 'from matplotlib import rc\n'), ((947, 970), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (949, 970), False, 'from matplotlib import rc\n'), ((8605, 8643), 'numpy.sqrt', 'np.sqrt', (['(unit_M * unit_D ** 2 / unit_E)'], {}), '(unit_M * unit_D ** 2 / unit_E)\n', (8612, 8643), True, 'import numpy as np\n'), ((8812, 8857), 'numpy.sqrt', 'np.sqrt', (['(unit_E * 10000000.0 * unit_D * 100.0)'], {}), '(unit_E * 10000000.0 * unit_D * 100.0)\n', (8819, 8857), True, 'import numpy as np\n'), ((4963, 4986), 'numpy.ones', 'np.ones', (['(useframes, 2)'], {}), '((useframes, 2))\n', (4970, 4986), True, 'import numpy as np\n'), ((5270, 5332), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a[b_cutoff:, :]', 'x_sq_av[b_cutoff:]'], {'rcond': '(-1)'}), '(a[b_cutoff:, :], x_sq_av[b_cutoff:], rcond=-1)\n', (5285, 5332), True, 'import numpy as np\n'), ((5347, 5409), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['a[b_cutoff:, :]', 'y_sq_av[b_cutoff:]'], {'rcond': '(-1)'}), '(a[b_cutoff:, :], y_sq_av[b_cutoff:], rcond=-1)\n', (5362, 5409), True, 'import numpy as np\n'), ((5431, 5465), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 5)'}), '(1, 1, figsize=(7, 5))\n', (5443, 5465), True, 'import matplotlib.pyplot as plt\n'), ((5890, 5906), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5899, 5906), True, 'import matplotlib.pyplot as plt\n'), ((7189, 7209), 'numpy.swapaxes', 'np.swapaxes', (['v', '(0)', '(1)'], {}), '(v, 0, 1)\n', (7200, 7209), True, 'import numpy as np\n'), ((7412, 7426), 'numpy.mean', 'np.mean', (['T_eff'], {}), '(T_eff)\n', (7419, 7426), True, 'import numpy as np\n'), ((9030, 9052), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (9050, 9052), False, 'import inspect\n'), ((9462, 9488), 'os.path.isdir', 'os.path.isdir', (['sys.argv[i]'], {}), '(sys.argv[i])\n', (9475, 9488), False, 'import os\n'), ((10307, 10322), 'os.walk', 'os.walk', (['folder'], {}), '(folder)\n', (10314, 10322), False, 'import os\n'), ((3473, 3515), 'gsd.fl.GSDFile', 'gsd.fl.GSDFile', (['(folder_path + f_name)', '"""rb"""'], {}), "(folder_path + f_name, 'rb')\n", (3487, 3515), False, 'import gsd\n'), ((3940, 3959), 'numpy.zeros', 'np.zeros', (['useframes'], {}), '(useframes)\n', (3948, 3959), True, 'import numpy as np\n'), ((3979, 3998), 'numpy.zeros', 'np.zeros', (['useframes'], {}), '(useframes)\n', (3987, 3998), True, 'import numpy as np\n'), ((6325, 6352), 'gsd.fl.GSDFile', 'gsd.fl.GSDFile', (['fpath', '"""rb"""'], {}), "(fpath, 'rb')\n", (6339, 6352), False, 'import gsd\n'), ((7297, 7357), 'numpy.mean', 'np.mean', (['(v_swap[:, :, 0] ** 2 + v_swap[:, :, 1] ** 2)'], {'axis': '(0)'}), '(v_swap[:, :, 0] ** 2 + v_swap[:, :, 1] ** 2, axis=0)\n', (7304, 7357), True, 'import numpy as np\n'), ((12908, 12942), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 6)'}), '(1, 1, figsize=(7, 6))\n', (12920, 12942), True, 'import matplotlib.pyplot as plt\n'), ((13577, 13618), 'matplotlib.ticker.ScalarFormatter', 'mticker.ScalarFormatter', ([], {'useMathText': '(True)'}), '(useMathText=True)\n', (13600, 13618), True, 'import matplotlib.ticker as mticker\n'), ((14540, 14558), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', 
([], {}), '()\n', (14556, 14558), True, 'import matplotlib.pyplot as plt\n'), ((15020, 15036), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (15029, 15036), True, 'import matplotlib.pyplot as plt\n'), ((2635, 2649), 'numpy.dot', 'np.dot', (['Xpr', 'X'], {}), '(Xpr, X)\n', (2641, 2649), True, 'import numpy as np\n'), ((4160, 4182), 'numpy.mean', 'np.mean', (['pos_0'], {'axis': '(0)'}), '(pos_0, axis=0)\n', (4167, 4182), True, 'import numpy as np\n'), ((7383, 7397), 'numpy.mean', 'np.mean', (['T_eff'], {}), '(T_eff)\n', (7390, 7397), True, 'import numpy as np\n'), ((9115, 9137), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (9135, 9137), False, 'import inspect\n'), ((12043, 12058), 'pickle.load', 'pickle.load', (['ff'], {}), '(ff)\n', (12054, 12058), False, 'import pickle\n'), ((12254, 12287), 'numpy.where', 'np.where', (['(gamma_eff_arr < gamma_c)'], {}), '(gamma_eff_arr < gamma_c)\n', (12262, 12287), True, 'import numpy as np\n'), ((12989, 13023), 'numpy.log', 'np.log', (['(Dx_arr_clip * cm2s_convert)'], {}), '(Dx_arr_clip * cm2s_convert)\n', (12995, 13023), True, 'import numpy as np\n'), ((13239, 13273), 'numpy.log', 'np.log', (['(Dy_arr_clip * cm2s_convert)'], {}), '(Dy_arr_clip * cm2s_convert)\n', (13245, 13273), True, 'import numpy as np\n'), ((2352, 2371), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (2359, 2371), True, 'import numpy as np\n'), ((2464, 2478), 'numpy.dot', 'np.dot', (['Xpr', 'X'], {}), '(Xpr, X)\n', (2470, 2478), True, 'import numpy as np\n'), ((2539, 2554), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2545, 2554), True, 'import numpy as np\n'), ((2560, 2575), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (2566, 2575), True, 'import numpy as np\n'), ((4477, 4544), 'md_tools27.correct_jumps', 'md_tools.correct_jumps', (['pos_raw', 'pos_m1', 'pos_m1_raw', 'box[0]', 'box[1]'], {}), '(pos_raw, pos_m1, pos_m1_raw, box[0], box[1])\n', (4499, 4544), True, 'import md_tools27 as md_tools\n'), ((4717, 4740), 'numpy.mean', 'np.mean', (['(pos[:, 0] ** 2)'], {}), '(pos[:, 0] ** 2)\n', (4724, 4740), True, 'import numpy as np\n'), ((4775, 4798), 'numpy.mean', 'np.mean', (['(pos[:, 1] ** 2)'], {}), '(pos[:, 1] ** 2)\n', (4782, 4798), True, 'import numpy as np\n'), ((5102, 5120), 'numpy.ones', 'np.ones', (['useframes'], {}), '(useframes)\n', (5109, 5120), True, 'import numpy as np\n'), ((4607, 4627), 'numpy.mean', 'np.mean', (['pos'], {'axis': '(0)'}), '(pos, axis=0)\n', (4614, 4627), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import matplotlib
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import numpy as np
from multi_isotope_calculator import Multi_isotope
import plotsettings as ps
plt.style.use('seaborn-darkgrid')
plt.rcParams.update(ps.tex_fonts())
def main():
plot()
#figure5()
def figure1():
"""Compare data to Sharp paper (tails U234 vs product U235)"""
data = np.genfromtxt("../data/sharp_fig1.csv", delimiter=",")
data = data[np.argsort(data[:,0])]
composition = {'234': 5.5e-3, '235': (0.72, 3, 0.2)}
calculator = Multi_isotope(composition, feed=1, process='diffusion',
downblend=False)
results = np.empty(shape=data.shape, dtype=float)
for i, xp in enumerate(data[:,0]):
calculator.set_product_enrichment(xp*100)
calculator.calculate_staging()
results[i,0] = calculator.xp[3]
results[i,1] = calculator.xt[2]
data *= 100
results *= 100
pulls = 100 * (data[:,1]-results[:,1]) / data[:,1]
ylims = (1e299, 0)
for values in (data, results):
ylims = (min(ylims[0], min(values[:,1])),
max(ylims[1], max(values[:,1])))
return data, results, pulls
def figure5():
"""Compare data to Sharp paper (tails qty vs product qty)"""
sharp = np.genfromtxt("../data/sharp_fig5.csv", delimiter=",")
sharp = sharp[np.argsort(sharp[:,0])]
calc = Multi_isotope({'235': (0.711, 5, 0.2)}, max_swu=15000,
process='diffusion', downblend=False)
results = np.empty(shape=sharp.shape, dtype=float)
for i, xp in enumerate(sharp[:,0]):
calc.set_product_enrichment(xp*100)
calc.calculate_staging()
results[i,0] = calc.xp[3] * 100
results[i,1] = calc.t
sharp[:,0] *= 100
pulls = 100 * (sharp[:,1]-results[:,1]) / sharp[:,1]
return sharp, results, pulls
def plot():
fig1 = figure1()
fig5 = figure5()
figsize = ps.set_size(subplots=(2,2))
fig, ax = plt.subplots(figsize=figsize, nrows=2, ncols=2)
plt.rcParams.update({'lines.markersize': 4})
for i, (data, result, pulls) in enumerate((fig1, fig5)):
ax[0,i].plot(result[:,0], result[:,1], color=ps.colors(0),
label="MARC algorithm", zorder=2, linewidth=1)
ax[0,i].scatter(data[::3,0], data[::3,1], marker="x",
color=ps.colors(1), label="Sharp 2013", zorder=3)
ax[1,i].scatter(data[:,0], pulls, s=1, zorder=2)
ax[0,i].legend()
ax[0,i].set_xlim(0, 100)
ax[1,i].set_xlim(0, 100)
ax[1,i].set_xlabel(r"$x_{235,P}$ [\%at]")
ax[1,i].axhline(0, color="C3", zorder=1, linewidth=1)
ax[0,1].ticklabel_format(axis="y", style="sci", scilimits=(-2,2))
ax[0,0].set_ylabel(r"$x_{234,T}$ [\%at]")
ax[1,0].set_ylabel(r"relative difference [%]")
ax[0,1].set_ylabel(r"$T$ [kg/yr]")
ax[1,1].set_ylabel(r"relative difference [%]")
plt.tight_layout()
plt.savefig("../plots/checks_marc_sharp1.pdf")
plt.close()
return
if __name__=='__main__':
main()
|
[
"plotsettings.set_size",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"multi_isotope_calculator.Multi_isotope",
"plotsettings.tex_fonts",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"numpy.argsort",
"numpy.empty",
"matplotlib.pyplot.tight_layout",
"plotsettings.colors",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots"
] |
[((42, 63), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (56, 63), False, 'import matplotlib\n'), ((194, 227), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-darkgrid"""'], {}), "('seaborn-darkgrid')\n", (207, 227), True, 'import matplotlib.pyplot as plt\n'), ((248, 262), 'plotsettings.tex_fonts', 'ps.tex_fonts', ([], {}), '()\n', (260, 262), True, 'import plotsettings as ps\n'), ((398, 452), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../data/sharp_fig1.csv"""'], {'delimiter': '""","""'}), "('../data/sharp_fig1.csv', delimiter=',')\n", (411, 452), True, 'import numpy as np\n'), ((567, 639), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (['composition'], {'feed': '(1)', 'process': '"""diffusion"""', 'downblend': '(False)'}), "(composition, feed=1, process='diffusion', downblend=False)\n", (580, 639), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((685, 724), 'numpy.empty', 'np.empty', ([], {'shape': 'data.shape', 'dtype': 'float'}), '(shape=data.shape, dtype=float)\n', (693, 724), True, 'import numpy as np\n'), ((1322, 1376), 'numpy.genfromtxt', 'np.genfromtxt', (['"""../data/sharp_fig5.csv"""'], {'delimiter': '""","""'}), "('../data/sharp_fig5.csv', delimiter=',')\n", (1335, 1376), True, 'import numpy as np\n'), ((1435, 1531), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (["{'235': (0.711, 5, 0.2)}"], {'max_swu': '(15000)', 'process': '"""diffusion"""', 'downblend': '(False)'}), "({'235': (0.711, 5, 0.2)}, max_swu=15000, process='diffusion',\n downblend=False)\n", (1448, 1531), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((1568, 1608), 'numpy.empty', 'np.empty', ([], {'shape': 'sharp.shape', 'dtype': 'float'}), '(shape=sharp.shape, dtype=float)\n', (1576, 1608), True, 'import numpy as np\n'), ((1993, 2021), 'plotsettings.set_size', 'ps.set_size', ([], {'subplots': '(2, 2)'}), '(subplots=(2, 2))\n', (2004, 2021), True, 'import plotsettings as ps\n'), ((2035, 2082), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'nrows': '(2)', 'ncols': '(2)'}), '(figsize=figsize, nrows=2, ncols=2)\n', (2047, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2092, 2136), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'lines.markersize': 4}"], {}), "({'lines.markersize': 4})\n", (2111, 2136), True, 'import matplotlib.pyplot as plt\n'), ((3008, 3026), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3024, 3026), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3077), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots/checks_marc_sharp1.pdf"""'], {}), "('../plots/checks_marc_sharp1.pdf')\n", (3042, 3077), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3093), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3091, 3093), True, 'import matplotlib.pyplot as plt\n'), ((469, 491), 'numpy.argsort', 'np.argsort', (['data[:, 0]'], {}), '(data[:, 0])\n', (479, 491), True, 'import numpy as np\n'), ((1395, 1418), 'numpy.argsort', 'np.argsort', (['sharp[:, 0]'], {}), '(sharp[:, 0])\n', (1405, 1418), True, 'import numpy as np\n'), ((2252, 2264), 'plotsettings.colors', 'ps.colors', (['(0)'], {}), '(0)\n', (2261, 2264), True, 'import plotsettings as ps\n'), ((2426, 2438), 'plotsettings.colors', 'ps.colors', (['(1)'], {}), '(1)\n', (2435, 2438), True, 'import plotsettings as ps\n')]
|
#!/usr/bin/env python3
import datetime
import time
import os
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
class handle_data:
data_file = "./data/data.log"
data_list = []
def __init__(self):
pass
def insert_data(self, timestamp, temp, state_onoff, state_light, state_cooling, state_heating):
"""
Insert data to log file and add timestamp.
"""
        # encode each 'on'/'off' state as 1/0
        state_onoff = 1 if state_onoff == 'on' else 0
        state_light = 1 if state_light == 'on' else 0
        state_cooling = 1 if state_cooling == 'on' else 0
        state_heating = 1 if state_heating == 'on' else 0
data_string = str(timestamp) + ";" + str(temp) + ";" + str(state_onoff) + ";" + str(state_light) + ";" + str(state_cooling) + ";" + str(state_heating) + "\n"
self.data_list.append(data_string)
#print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tInserted data: data_list.append len=", len(self.data_list))
return
def append_data_to_file(self):
"""
Append data to log file.
"""
try:
with open(self.data_file, "a") as outfile:
for entry in self.data_list:
outfile.write(str(entry))
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError opening data.log for appending data")
self.data_list.clear()
return
def clean_file(self):
"""
Clean log file in order to reset measurement.
"""
try:
with open(self.data_file, "w") as outfile:
outfile.write("Timestamp; Temp; State_onoff; State_light; State_cooling; State_heating\n")
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError opening data.log for writing")
return
def update_graph(self, path):
"""
Generate or update graph from data file.
"""
        with open(self.data_file) as infile:
            lines = sum(1 for _ in infile)  # count lines without leaking the file handle
if lines > 1:
data=np.genfromtxt(self.data_file, delimiter=';', skip_header=1, names=['Time', 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating'], dtype=([('Time', '<U30'), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), ('Cooling', '<f8'), ('Heating', '<f8')]))
fig, ax1 = plt.subplots()
if data['Temp'].shape:
if data['Temp'].shape[0] > 120:
ax1.plot(data['Temp'][((data['Temp'].shape[0])-120):(data['Temp'].shape[0])], color = 'r', label = 'Temp.')
else:
ax1.plot(data['Temp'], color = 'r', label = 'Temp.')
else:
ax1.plot(data['Temp'], color = 'r', label = 'Temp.')
ax1.set_xlim([0,120])
ax1.set_xticks([0,30,60,90,120])
ax1.set_ylabel('Temp (°C)', color='r')
ax1.tick_params('y', colors='r')
yt=range(-1,41,1)
ax1.set_yticks(yt, minor=True)
ax1.set_xlabel('last two hours (scale:min.)')
"""
ax2 = ax1.twinx()
ax2.plot(data['Light'], color = 'g', label = 'Light', marker = 'o')
ax2.plot(data['Onoff'], color = 'y', label = 'Onoff', marker = '*')
ax2.plot(data['Heating'], color = 'r', label = 'Heating')
ax2.plot(data['Cooling'], color = 'b', label = 'Cooling')
ax2.set_ylabel('Light (on=1/off=0)', color='b')
ax2.tick_params('y', colors='b')
ax2.set_yticks([0,1], minor=False)
"""
fig.tight_layout()
#plt.legend(['Temp. inside'], loc='upper left')
plt.savefig(path, bbox_inches='tight')
plt.close(fig)
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tGraph generated/updated.")
else:
#os.remove(path)
#os.mknod(path)
#os.chmod(path, 0o644)
try:
with open(path, "w") as outfile:
outfile.write("")
except IOError:
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tIOError: Could not generate empty graph file.")
print(datetime.datetime.now().strftime('%Y-%m-%d_%a_%H:%M:%S.%f'), "\tNo data, graph is empty.")
return
# Test:
if __name__ == '__main__':
hd = handle_data()
#hd.clean_file()
hd.update_graph('./static/data_log.png')
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots"
] |
[((1935, 2190), 'numpy.genfromtxt', 'np.genfromtxt', (['self.data_file'], {'delimiter': '""";"""', 'skip_header': '(1)', 'names': "['Time', 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating']", 'dtype': "[('Time', '<U30'), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), (\n 'Cooling', '<f8'), ('Heating', '<f8')]"}), "(self.data_file, delimiter=';', skip_header=1, names=['Time',\n 'Temp', 'Onoff', 'Light', 'Cooling', 'Heating'], dtype=[('Time', '<U30'\n ), ('Temp', '<f8'), ('Onoff', '<f8'), ('Light', '<f8'), ('Cooling',\n '<f8'), ('Heating', '<f8')])\n", (1948, 2190), True, 'import numpy as np\n'), ((2194, 2208), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2206, 2208), True, 'import matplotlib.pyplot as plt\n'), ((3266, 3304), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""'}), "(path, bbox_inches='tight')\n", (3277, 3304), True, 'import matplotlib.pyplot as plt\n'), ((3308, 3322), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3317, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3355), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3353, 3355), False, 'import datetime\n'), ((3715, 3738), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3736, 3738), False, 'import datetime\n'), ((1259, 1282), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1280, 1282), False, 'import datetime\n'), ((1664, 1687), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1685, 1687), False, 'import datetime\n'), ((3594, 3617), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3615, 3617), False, 'import datetime\n')]
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
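# For unit-length embeddings, Euclidean distance is a monotone function of cosine
# similarity (||u - v||^2 = 2 - 2*cos(u, v)), so the distance threshold below acts as
# an (approximate, linkage-dependent) similarity cut-off.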
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i+1)
print(cluster)
print("")
|
[
"sklearn.cluster.AgglomerativeClustering",
"sentence_transformers.SentenceTransformer",
"numpy.linalg.norm"
] |
[((318, 364), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""paraphrase-MiniLM-L6-v2"""'], {}), "('paraphrase-MiniLM-L6-v2')\n", (337, 364), False, 'from sentence_transformers import SentenceTransformer\n'), ((1165, 1229), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'distance_threshold': '(1.5)'}), '(n_clusters=None, distance_threshold=1.5)\n', (1188, 1229), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((1061, 1117), 'numpy.linalg.norm', 'np.linalg.norm', (['corpus_embeddings'], {'axis': '(1)', 'keepdims': '(True)'}), '(corpus_embeddings, axis=1, keepdims=True)\n', (1075, 1117), True, 'import numpy as np\n')]
|
import pandas as pd
wine = pd.read_csv('https://bit.ly/wine-date')
# wine = pd.read_csv('../data/wine.csv')
print(wine.head())
data = wine[['alcohol', 'sugar', 'pH']].to_numpy()
target = wine['class'].to_numpy()
from sklearn.model_selection import train_test_split
train_input, test_input, train_target, test_target = train_test_split(data, target, test_size=0.2, random_state=42)
print(train_input.shape, test_input.shape)
sub_input, val_input, sub_target, val_target = train_test_split(train_input, train_target, test_size=0.2, random_state=42)
print(sub_input.shape, val_input.shape)
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=42)
dt.fit(sub_input, sub_target)
print(dt.score(sub_input, sub_target))
print(dt.score(val_input, val_target))
from sklearn.model_selection import cross_validate
scores = cross_validate(dt, train_input, train_target)
print(scores)
import numpy as np
print(np.mean(scores['test_score']))
from sklearn.model_selection import StratifiedKFold
scores = cross_validate(dt, train_input, train_target, cv=StratifiedKFold())
print(np.mean(scores['test_score']))
splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
scores = cross_validate(dt, train_input, train_target, cv=splitter)
print(np.mean(scores['test_score']))
from sklearn.model_selection import GridSearchCV
params = {'min_impurity_decrease': [0.0001, 0.0002, 0.0003, 0.0004, 0.0005]}
gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=1)
gs.fit(train_input, train_target)
dt = gs.best_estimator_
print(dt.score(train_input, train_target))
print(gs.best_params_)
print(gs.cv_results_['mean_test_score'])
best_index = np.argmax(gs.cv_results_['mean_test_score'])
print(gs.cv_results_['params'][best_index])
params = {'min_impurity_decrease': np.arange(0.0001, 0.001, 0.0001),
'max_depth': range(5, 20, 1),
'min_samples_split': range(2, 100, 10)
}
gs = GridSearchCV(DecisionTreeClassifier(random_state=42), params, n_jobs=-1)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
from scipy.stats import uniform, randint
rgen = randint(0, 10)
print(rgen.rvs(10))
print(np.unique(rgen.rvs(1000), return_counts=True))
ugen = uniform(0, 1)
print(ugen.rvs(10))
params = {'min_impurity_decrease': uniform(0.0001, 0.001),
'max_depth': randint(20, 50),
'min_samples_split': randint(2, 25),
'min_samples_leaf': randint(1, 25)
}
from sklearn.model_selection import RandomizedSearchCV
gs = RandomizedSearchCV(DecisionTreeClassifier(random_state=42), params, n_iter=100, n_jobs=-1, random_state=42)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
dt = gs.best_estimator_
print(dt.score(test_input, test_target))
# Exercise: repeat the randomized search with a randomized splitter
gs = RandomizedSearchCV(DecisionTreeClassifier(splitter='random', random_state=42), params, n_iter=100, n_jobs=-1, random_state=42)
gs.fit(train_input, train_target)
print(gs.best_params_)
print(np.max(gs.cv_results_['mean_test_score']))
dt = gs.best_estimator_
print(dt.score(test_input, test_target))
|
[
"scipy.stats.randint",
"numpy.mean",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_validate",
"sklearn.tree.DecisionTreeClassifier",
"scipy.stats.uniform",
"numpy.argmax",
"numpy.max",
"sklearn.model_selection.StratifiedKFold",
"numpy.arange"
] |
[((28, 67), 'pandas.read_csv', 'pd.read_csv', (['"""https://bit.ly/wine-date"""'], {}), "('https://bit.ly/wine-date')\n", (39, 67), True, 'import pandas as pd\n'), ((322, 384), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, target, test_size=0.2, random_state=42)\n', (338, 384), False, 'from sklearn.model_selection import train_test_split\n'), ((476, 551), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_input', 'train_target'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(train_input, train_target, test_size=0.2, random_state=42)\n', (492, 551), False, 'from sklearn.model_selection import train_test_split\n'), ((647, 686), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (669, 686), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((857, 902), 'sklearn.model_selection.cross_validate', 'cross_validate', (['dt', 'train_input', 'train_target'], {}), '(dt, train_input, train_target)\n', (871, 902), False, 'from sklearn.model_selection import cross_validate\n'), ((1155, 1214), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(42)'}), '(n_splits=10, shuffle=True, random_state=42)\n', (1170, 1214), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1224, 1282), 'sklearn.model_selection.cross_validate', 'cross_validate', (['dt', 'train_input', 'train_target'], {'cv': 'splitter'}), '(dt, train_input, train_target, cv=splitter)\n', (1238, 1282), False, 'from sklearn.model_selection import cross_validate\n'), ((1706, 1750), 'numpy.argmax', 'np.argmax', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (1715, 1750), True, 'import numpy as np\n'), ((2203, 2217), 'scipy.stats.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2210, 2217), False, 'from scipy.stats import uniform, randint\n'), ((2300, 2313), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2307, 2313), False, 'from scipy.stats import uniform, randint\n'), ((944, 973), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (951, 973), True, 'import numpy as np\n'), ((1112, 1141), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1119, 1141), True, 'import numpy as np\n'), ((1289, 1318), 'numpy.mean', 'np.mean', (["scores['test_score']"], {}), "(scores['test_score'])\n", (1296, 1318), True, 'import numpy as np\n'), ((1466, 1505), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (1488, 1505), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1831, 1863), 'numpy.arange', 'np.arange', (['(0.0001)', '(0.001)', '(0.0001)'], {}), '(0.0001, 0.001, 0.0001)\n', (1840, 1863), True, 'import numpy as np\n'), ((1985, 2024), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2007, 2024), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2110, 2151), 'numpy.max', 'np.max', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (2116, 2151), True, 'import numpy as np\n'), ((2370, 2392), 'scipy.stats.uniform', 'uniform', (['(0.0001)', '(0.001)'], {}), '(0.0001, 0.001)\n', (2377, 2392), False, 'from scipy.stats import uniform, randint\n'), 
((2417, 2432), 'scipy.stats.randint', 'randint', (['(20)', '(50)'], {}), '(20, 50)\n', (2424, 2432), False, 'from scipy.stats import uniform, randint\n'), ((2465, 2479), 'scipy.stats.randint', 'randint', (['(2)', '(25)'], {}), '(2, 25)\n', (2472, 2479), False, 'from scipy.stats import uniform, randint\n'), ((2511, 2525), 'scipy.stats.randint', 'randint', (['(1)', '(25)'], {}), '(1, 25)\n', (2518, 2525), False, 'from scipy.stats import uniform, randint\n'), ((2619, 2658), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(42)'}), '(random_state=42)\n', (2641, 2658), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2773, 2814), 'numpy.max', 'np.max', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (2779, 2814), True, 'import numpy as np\n'), ((2915, 2973), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'splitter': '"""random"""', 'random_state': '(42)'}), "(splitter='random', random_state=42)\n", (2937, 2973), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3088, 3129), 'numpy.max', 'np.max', (["gs.cv_results_['mean_test_score']"], {}), "(gs.cv_results_['mean_test_score'])\n", (3094, 3129), True, 'import numpy as np\n'), ((1087, 1104), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (1102, 1104), False, 'from sklearn.model_selection import StratifiedKFold\n')]
|
# Module to build a potential landscape
import numpy as np
def gauss(x,mean=0.0,stddev=0.02,peak=1.0):
'''
Input:
x : x-coordintes
Output:
f(x) where f is a Gaussian with the given mean, stddev and peak value
'''
    # NOTE: the stddev argument is overridden here; the barrier width is tied to the grid spacing
    stddev = 5*(x[1] - x[0])
return peak*np.exp(-(x-mean)**2/(2*stddev**2))
def init_ndot(x,n_dot):
'''
Input:
x : 1d grid for the dots
    n_dot : number of dots
    Output:
    y : values of the potential on the grid with n_dot dots
    The potential barriers are modelled as Gaussians
'''
# n dots imply n+1 barriers
bar_centers = x[0] + (x[-1] - x[0])*np.random.rand(n_dot+1)
bar_heights = np.random.rand(n_dot+1)
#bar_heights = 0.5*np.ones(n_dot+1)
N = len(x)
y = np.zeros(N)
    # No real need to optimize this loop: the number of dots is small, and gauss() is already vectorised over x.
for j in range(n_dot+1):
y += gauss(x-bar_centers[j],peak=bar_heights[j])
return y
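# Illustrative usage (hypothetical grid):
#   x = np.linspace(0, 1, 200)
#   V = init_ndot(x, n_dot=3)  # potential with 3 dots enclosed by 4 Gaussian barriers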
|
[
"numpy.exp",
"numpy.zeros",
"numpy.random.rand"
] |
[((659, 684), 'numpy.random.rand', 'np.random.rand', (['(n_dot + 1)'], {}), '(n_dot + 1)\n', (673, 684), True, 'import numpy as np\n'), ((747, 758), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (755, 758), True, 'import numpy as np\n'), ((283, 327), 'numpy.exp', 'np.exp', (['(-(x - mean) ** 2 / (2 * stddev ** 2))'], {}), '(-(x - mean) ** 2 / (2 * stddev ** 2))\n', (289, 327), True, 'import numpy as np\n'), ((616, 641), 'numpy.random.rand', 'np.random.rand', (['(n_dot + 1)'], {}), '(n_dot + 1)\n', (630, 641), True, 'import numpy as np\n')]
|
import sys, os, seaborn as sns, rasterio, pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config.definitions import ROOT_DIR, ancillary_path, city,year
attr_value ="totalpop"
gtP = ROOT_DIR + "/Evaluation/{0}_groundTruth/{2}_{0}_{1}.tif".format(city,attr_value,year)
srcGT= rasterio.open(gtP)
popGT = srcGT.read(1)
print(popGT.min(),popGT.max(), popGT.mean())
#prP = ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value)
def scatterplot(prP):
cp = "C:/Users/NM12LQ/OneDrive - Aalborg Universitet/PopNetV2_backup/data_prep/ams_ProjectData/temp_tif/ams_CLC_2012_2018Reclas3.tif"
srcC= rasterio.open(cp)
corine = srcC.read(1)
name = prP.split(".tif")[0].split("/")[-1]
print(name)
gtP = ROOT_DIR + "/Evaluation/{0}_groundTruth/{2}_{0}_{1}.tif".format(city,attr_value,year)
srcGT= rasterio.open(gtP)
popGT = srcGT.read(1)
print(popGT.min(),popGT.max(), popGT.mean())
srcPR= rasterio.open(prP)
popPR = srcPR.read(1)
popPR[(np.where(popPR <= -9999))] = 0
print(popPR.min(),popPR.max(), popPR.mean())
cr=corine.flatten()
x=popGT.flatten()
y=popPR.flatten()
df = pd.DataFrame(data={"gt": x, "predictions":y, "cr":cr})
plt.figure(figsize=(20,20))
g= sns.lmplot(data=df, x="gt", y="predictions", hue="cr", palette=["#0d2dc1","#ff9c1c","#71b951","#24f33d","#90308f", "#a8a8a8"],ci = None, order=2, scatter_kws={"s":0.5, "alpha": 0.5}, line_kws={"lw":2, "alpha": 0.5}, legend=False)
plt.legend(title= "Land Cover", labels= ['Water','Urban Fabric', 'Agriculture', 'Green Spaces','Industry','Transportation' ], loc='lower right', fontsize=5)
plt.title('{0}'.format( name), fontsize=11)
# Set x-axis label
plt.xlabel('Ground Truth (persons)', fontsize=11)
# Set y-axis label
plt.ylabel('Predictions (persons)', fontsize=11)
#total pop
#plt.xscale('log')
#plt.yscale('log')
#mobile Adults
#plt.xlim((0,200))
    #plt.ylim((-100,500))
plt.axis('square')
plt.xlim((0,400))
plt.ylim((0,350))
plt.tight_layout()
#plt.show()
plt.savefig(ROOT_DIR + "/Evaluation/{0}/ScatterPlots/SP4_{2}.png".format(city,attr_value, name),format='png',dpi=300)
evalFiles = [#gtP,
#ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
#ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
]
evalFilesMAEbp = [ROOT_DIR + "/Evaluation/{0}/Pycno/mae_{0}_{2}_{0}_{1}_pycno.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/Dasy/mae_{0}_{2}_{0}_{1}_dasyWIESMN.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/mae_{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/mae_{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/mae_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_3AIL5_12IL_it10_ag_{1}.tif".format(city,attr_value)]
evalFilesPEbp = [ROOT_DIR + "/Evaluation/{0}/Pycno/div_{0}_{2}_{0}_{1}_pycno.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/Dasy/div_{0}_{2}_{0}_{1}_dasyWIESMN.tif".format(city,attr_value,year),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever00/div_{0}_dissever00WIESMN_2018_ams_Dasy_aprf_p[1]_12AIL12_1IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/aprf/dissever01/div_{0}_dissever01WIESMN_100_2018_ams_DasyA_aprf_p[1]_12AIL12_13IL_it10_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_100_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_250_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value),
ROOT_DIR + "/Evaluation/{0}/apcatbr/div_{0}_dissever01WIESMN_500_2018_ams_DasyA_apcatbr_p[1]_12AIL12_12IL_it10_ag_{1}.tif".format(city,attr_value)]
for i in evalFiles:
scatterplot(i)
|
[
"seaborn.lmplot",
"matplotlib.pyplot.ylabel",
"numpy.where",
"rasterio.open",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"os.path.abspath",
"matplotlib.pyplot.legend"
] |
[((378, 396), 'rasterio.open', 'rasterio.open', (['gtP'], {}), '(gtP)\n', (391, 396), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((789, 806), 'rasterio.open', 'rasterio.open', (['cp'], {}), '(cp)\n', (802, 806), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1003, 1021), 'rasterio.open', 'rasterio.open', (['gtP'], {}), '(gtP)\n', (1016, 1021), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1108, 1126), 'rasterio.open', 'rasterio.open', (['prP'], {}), '(prP)\n', (1121, 1126), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1322, 1378), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'gt': x, 'predictions': y, 'cr': cr}"}), "(data={'gt': x, 'predictions': y, 'cr': cr})\n", (1334, 1378), True, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1382, 1410), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1392, 1410), True, 'import matplotlib.pyplot as plt\n'), ((1418, 1666), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'df', 'x': '"""gt"""', 'y': '"""predictions"""', 'hue': '"""cr"""', 'palette': "['#0d2dc1', '#ff9c1c', '#71b951', '#24f33d', '#90308f', '#a8a8a8']", 'ci': 'None', 'order': '(2)', 'scatter_kws': "{'s': 0.5, 'alpha': 0.5}", 'line_kws': "{'lw': 2, 'alpha': 0.5}", 'legend': '(False)'}), "(data=df, x='gt', y='predictions', hue='cr', palette=['#0d2dc1',\n '#ff9c1c', '#71b951', '#24f33d', '#90308f', '#a8a8a8'], ci=None, order=\n 2, scatter_kws={'s': 0.5, 'alpha': 0.5}, line_kws={'lw': 2, 'alpha': \n 0.5}, legend=False)\n", (1428, 1666), True, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n'), ((1653, 1818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Land Cover"""', 'labels': "['Water', 'Urban Fabric', 'Agriculture', 'Green Spaces', 'Industry',\n 'Transportation']", 'loc': '"""lower right"""', 'fontsize': '(5)'}), "(title='Land Cover', labels=['Water', 'Urban Fabric',\n 'Agriculture', 'Green Spaces', 'Industry', 'Transportation'], loc=\n 'lower right', fontsize=5)\n", (1663, 1818), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ground Truth (persons)"""'], {'fontsize': '(11)'}), "('Ground Truth (persons)', fontsize=11)\n", (1895, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1962, 2010), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predictions (persons)"""'], {'fontsize': '(11)'}), "('Predictions (persons)', fontsize=11)\n", (1972, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2151, 2169), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (2159, 2169), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2192), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 400)'], {}), '((0, 400))\n', (2182, 2192), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2214), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 350)'], {}), '((0, 350))\n', (2204, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2236), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2234, 2236), True, 'import matplotlib.pyplot as plt\n'), ((1164, 1188), 'numpy.where', 'np.where', (['(popPR <= -9999)'], {}), '(popPR <= -9999)\n', (1172, 1188), True, 'import numpy as np\n'), ((154, 179), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (169, 179), False, 'import sys, os, seaborn as sns, rasterio, pandas as pd\n')]
|
import numpy as np
import pytest
import nengo
from nengo.builder import Builder
from nengo.builder.operator import Reset, Copy
from nengo.builder.signal import Signal
from nengo.dists import UniformHypersphere
from nengo.exceptions import ValidationError
from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja
from nengo.processes import WhiteSignal
from nengo.synapses import Alpha, Lowpass
def best_weights(weight_data):
return np.argmax(np.sum(np.var(weight_data, axis=0), axis=0))
def _test_pes(
Simulator,
nl,
plt,
seed,
allclose,
pre_neurons=False,
post_neurons=False,
weight_solver=False,
vin=np.array([0.5, -0.5]),
vout=None,
n=200,
function=None,
transform=np.array(1.0),
rate=1e-3,
):
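    """Parametrised helper: learn a connection from `pre` to `post` with PES and assert it converges to ``vout``."""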
vout = np.array(vin) if vout is None else vout
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = nl()
stim = nengo.Node(output=vin)
target = nengo.Node(output=vout)
pre = nengo.Ensemble(n, dimensions=stim.size_out)
post = nengo.Ensemble(n, dimensions=stim.size_out)
error = nengo.Ensemble(n, dimensions=target.size_out)
nengo.Connection(stim, pre)
postslice = post[: target.size_out] if target.size_out < stim.size_out else post
pre = pre.neurons if pre_neurons else pre
post = post.neurons if post_neurons else postslice
conn = nengo.Connection(
pre,
post,
function=function,
transform=transform,
learning_rule_type=PES(rate),
)
if weight_solver:
conn.solver = nengo.solvers.LstsqL2(weights=True)
nengo.Connection(target, error, transform=-1)
nengo.Connection(postslice, error)
nengo.Connection(error, conn.learning_rule)
post_p = nengo.Probe(postslice, synapse=0.03)
error_p = nengo.Probe(error, synapse=0.03)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
with Simulator(model) as sim:
sim.run(0.5)
t = sim.trange()
weights = sim.data[weights_p]
plt.subplot(211)
plt.plot(t, sim.data[post_p])
plt.ylabel("Post decoded value")
plt.subplot(212)
plt.plot(t, sim.data[error_p])
plt.ylabel("Error decoded value")
plt.xlabel("Time (s)")
tend = t > 0.4
assert allclose(sim.data[post_p][tend], vout, atol=0.05)
assert allclose(sim.data[error_p][tend], 0, atol=0.05)
assert not allclose(weights[0], weights[-1], atol=1e-5, record_rmse=False)
def test_pes_ens_ens(Simulator, nl_nodirect, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(Simulator, nl_nodirect, plt, seed, allclose, function=function)
def test_pes_weight_solver(Simulator, plt, seed, allclose):
function = lambda x: [x[1], x[0]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, function=function, weight_solver=True
)
def test_pes_ens_slice(Simulator, plt, seed, allclose):
vin = [0.5, -0.5]
vout = [vin[0] ** 2 + vin[1] ** 2]
function = lambda x: [x[0] - x[1]]
_test_pes(
Simulator, nengo.LIF, plt, seed, allclose, vin=vin, vout=vout, function=function
)
def test_pes_neuron_neuron(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=4e-4, size=(n, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=True,
n=n,
transform=initial_weights,
rate=7e-4,
)
def test_pes_neuron_ens(Simulator, plt, seed, rng, allclose):
n = 200
initial_weights = rng.uniform(high=1e-4, size=(2, n))
_test_pes(
Simulator,
nengo.LIF,
plt,
seed,
allclose,
pre_neurons=True,
post_neurons=False,
n=n,
transform=initial_weights,
)
def test_pes_transform(Simulator, seed, allclose):
"""Test behaviour of PES when function and transform both defined."""
n = 200
# error must be with respect to transformed vector (conn.size_out)
T = np.asarray([[0.5], [-0.5]]) # transform to output
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(output=[1])
a = nengo.Ensemble(n, dimensions=1)
b = nengo.Node(size_in=2)
e = nengo.Node(size_in=1)
nengo.Connection(u, a)
learned_conn = nengo.Connection(
a,
b,
function=lambda x: [0],
transform=T,
learning_rule_type=nengo.PES(learning_rate=1e-3),
)
assert T.shape[0] == learned_conn.size_out
assert T.shape[1] == learned_conn.size_mid
nengo.Connection(b[0], e, synapse=None)
nengo.Connection(nengo.Node(output=-1), e)
nengo.Connection(e, learned_conn.learning_rule, transform=T, synapse=None)
p_b = nengo.Probe(b, synapse=0.05)
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.7
assert allclose(sim.data[p_b][tend], [1, -1], atol=1e-2)
def test_pes_multidim_error(Simulator, seed):
"""Test that PES works on error connections mapping from N to 1 dims.
Note that the transform is applied before the learning rule, so the error
signal should be 1-dimensional.
"""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[0])
ens1 = nengo.Ensemble(20, 3)
ens2 = nengo.Ensemble(10, 1)
# Case 1: ens -> ens, weights=False
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=False),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 2: ens -> ens, weights=True
conn = nengo.Connection(
ens1,
ens2,
transform=np.ones((1, 3)),
solver=nengo.solvers.LstsqL2(weights=True),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
# Case 3: neurons -> ens
conn = nengo.Connection(
ens1.neurons,
ens2,
transform=np.ones((1, ens1.n_neurons)),
learning_rule_type={"pes": nengo.PES()},
)
nengo.Connection(err, conn.learning_rule["pes"])
with Simulator(net) as sim:
sim.run(0.01)
@pytest.mark.parametrize("pre_synapse", [0, Lowpass(tau=0.05), Alpha(tau=0.005)])
def test_pes_synapse(Simulator, seed, pre_synapse, allclose):
rule = PES(pre_synapse=pre_synapse)
with nengo.Network(seed=seed) as model:
stim = nengo.Node(output=WhiteSignal(0.5, high=10))
x = nengo.Ensemble(100, 1)
nengo.Connection(stim, x, synapse=None)
conn = nengo.Connection(x, x, learning_rule_type=rule)
p_neurons = nengo.Probe(x.neurons, synapse=pre_synapse)
p_pes = nengo.Probe(conn.learning_rule, "activities")
with Simulator(model) as sim:
sim.run(0.5)
assert allclose(sim.data[p_neurons][1:, :], sim.data[p_pes][:-1, :])
@pytest.mark.parametrize("weights", [False, True])
def test_pes_recurrent_slice(Simulator, seed, weights, allclose):
"""Test that PES works on recurrent connections from N to 1 dims."""
with nengo.Network(seed=seed) as net:
err = nengo.Node(output=[-1])
stim = nengo.Node(output=[0, 0])
post = nengo.Ensemble(50, 2, radius=2)
nengo.Connection(stim, post)
conn = nengo.Connection(
post,
post[1],
function=lambda x: 0.0,
solver=nengo.solvers.LstsqL2(weights=weights),
learning_rule_type=nengo.PES(learning_rate=5e-4),
)
nengo.Connection(err, conn.learning_rule)
p = nengo.Probe(post, synapse=0.025)
with Simulator(net) as sim:
sim.run(0.2)
# Learning rule should drive second dimension high, but not first
assert allclose(sim.data[p][-10:, 0], 0, atol=0.2)
assert np.all(sim.data[p][-10:, 1] > 0.8)
def test_pes_cycle(Simulator):
"""Test that PES works when connection output feeds back into error."""
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Node(size_in=1)
c = nengo.Connection(a, b, synapse=None, learning_rule_type=nengo.PES())
nengo.Connection(b, c.learning_rule, synapse=None)
with Simulator(net):
# just checking that this builds without error
pass
@pytest.mark.parametrize(
"rule_type, solver",
[
(BCM(learning_rate=1e-8), False),
(Oja(learning_rate=1e-5), False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], False),
([Oja(learning_rate=1e-5), BCM(learning_rate=1e-8)], True),
],
)
def test_unsupervised(Simulator, rule_type, solver, seed, rng, plt, allclose):
n = 200
m = nengo.Network(seed=seed)
with m:
u = nengo.Node(WhiteSignal(0.5, high=10), size_out=2)
a = nengo.Ensemble(n, dimensions=2)
b = nengo.Ensemble(n + 1, dimensions=2)
nengo.Connection(u, a)
if solver:
conn = nengo.Connection(a, b, solver=nengo.solvers.LstsqL2(weights=True))
else:
initial_weights = rng.uniform(high=1e-3, size=(b.n_neurons, a.n_neurons))
conn = nengo.Connection(a.neurons, b.neurons, transform=initial_weights)
conn.learning_rule_type = rule_type
inp_p = nengo.Probe(u)
weights_p = nengo.Probe(conn, "weights", sample_every=0.01)
ap = nengo.Probe(a, synapse=0.03)
up = nengo.Probe(b, synapse=0.03)
with Simulator(m, seed=seed + 1) as sim:
sim.run(0.5)
t = sim.trange()
plt.subplot(2, 1, 1)
plt.plot(t, sim.data[inp_p], label="Input")
plt.plot(t, sim.data[ap], label="Pre")
plt.plot(t, sim.data[up], label="Post")
plt.legend(loc="best", fontsize="x-small")
plt.subplot(2, 1, 2)
best_ix = best_weights(sim.data[weights_p])
plt.plot(sim.trange(sample_every=0.01), sim.data[weights_p][..., best_ix])
plt.xlabel("Time (s)")
plt.ylabel("Weights")
assert not allclose(
sim.data[weights_p][0], sim.data[weights_p][-1], record_rmse=False
)
def learning_net(learning_rule=nengo.PES, net=None, rng=np.random):
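    """Build a small network with a learned neuron-to-neuron connection for the dt/reset tests below."""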
net = nengo.Network() if net is None else net
with net:
if learning_rule is nengo.PES:
learning_rule_type = learning_rule(learning_rate=1e-5)
else:
learning_rule_type = learning_rule()
u = nengo.Node(output=1.0)
pre = nengo.Ensemble(10, dimensions=1)
post = nengo.Ensemble(10, dimensions=1)
initial_weights = rng.uniform(high=1e-3, size=(pre.n_neurons, post.n_neurons))
conn = nengo.Connection(
pre.neurons,
post.neurons,
transform=initial_weights,
learning_rule_type=learning_rule_type,
)
if learning_rule is nengo.PES:
err = nengo.Ensemble(10, dimensions=1)
nengo.Connection(u, err)
nengo.Connection(err, conn.learning_rule)
net.activity_p = nengo.Probe(pre.neurons, synapse=0.01)
net.weights_p = nengo.Probe(conn, "weights", synapse=None, sample_every=0.01)
return net
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_dt_dependence(Simulator, plt, learning_rule, seed, rng, allclose):
"""Learning rules should work the same regardless of dt."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
trans_data = []
# Using dts greater near tau_ref (0.002 by default) causes learning to
# differ due to lowered presynaptic firing rate
dts = (0.0001, 0.001)
colors = ("b", "g", "r")
ax1 = plt.subplot(2, 1, 1)
ax2 = plt.subplot(2, 1, 2)
for c, dt in zip(colors, dts):
with Simulator(m, dt=dt) as sim:
sim.run(0.1)
trans_data.append(sim.data[m.weights_p])
best_ix = best_weights(sim.data[m.weights_p])
ax1.plot(
sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c=c
)
ax2.plot(sim.trange(), sim.data[m.activity_p], c=c)
ax1.set_xlim(right=sim.trange()[-1])
ax1.set_ylabel("Connection weight")
ax2.set_xlim(right=sim.trange()[-1])
ax2.set_ylabel("Presynaptic activity")
assert allclose(trans_data[0], trans_data[1], atol=3e-3)
assert not allclose(
sim.data[m.weights_p][0], sim.data[m.weights_p][-1], record_rmse=False
)
@pytest.mark.parametrize("learning_rule", [nengo.PES, nengo.BCM, nengo.Oja])
def test_reset(Simulator, learning_rule, plt, seed, rng, allclose):
"""Make sure resetting learning rules resets all state."""
m = learning_net(learning_rule, nengo.Network(seed=seed), rng)
with Simulator(m) as sim:
sim.run(0.1)
sim.run(0.2)
first_t = sim.trange()
first_t_trans = sim.trange(sample_every=0.01)
first_activity_p = np.array(sim.data[m.activity_p], copy=True)
first_weights_p = np.array(sim.data[m.weights_p], copy=True)
sim.reset()
sim.run(0.3)
plt.subplot(2, 1, 1)
plt.ylabel("Neural activity")
plt.plot(first_t, first_activity_p, c="b")
plt.plot(sim.trange(), sim.data[m.activity_p], c="g")
plt.subplot(2, 1, 2)
plt.ylabel("Connection weight")
best_ix = best_weights(first_weights_p)
plt.plot(first_t_trans, first_weights_p[..., best_ix], c="b")
plt.plot(sim.trange(sample_every=0.01), sim.data[m.weights_p][..., best_ix], c="g")
assert allclose(sim.trange(), first_t)
assert allclose(sim.trange(sample_every=0.01), first_t_trans)
assert allclose(sim.data[m.activity_p], first_activity_p)
assert allclose(sim.data[m.weights_p], first_weights_p)
def test_learningruletypeparam():
"""LearningRuleTypeParam must be one or many learning rules."""
class Test:
lrp = LearningRuleTypeParam("lrp", default=None)
inst = Test()
assert inst.lrp is None
inst.lrp = Oja()
assert isinstance(inst.lrp, Oja)
inst.lrp = [Oja(), Oja()]
for lr in inst.lrp:
assert isinstance(lr, Oja)
# Non-LR no good
with pytest.raises(ValueError):
inst.lrp = "a"
# All elements in list must be LR
with pytest.raises(ValueError):
inst.lrp = [Oja(), "a", Oja()]
def test_learningrule_attr(seed):
"""Test learning_rule attribute on Connection"""
def check_rule(rule, conn, rule_type):
assert rule.connection is conn and rule.learning_rule_type is rule_type
with nengo.Network(seed=seed):
a, b, e = [nengo.Ensemble(10, 2) for i in range(3)]
T = np.ones((10, 10))
r1 = PES()
c1 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r1)
check_rule(c1.learning_rule, c1, r1)
r2 = [PES(), BCM()]
c2 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r2, transform=T)
assert isinstance(c2.learning_rule, list)
for rule, rule_type in zip(c2.learning_rule, r2):
check_rule(rule, c2, rule_type)
r3 = dict(oja=Oja(), bcm=BCM())
c3 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r3, transform=T)
assert isinstance(c3.learning_rule, dict)
assert set(c3.learning_rule) == set(r3) # assert same keys
for key in r3:
check_rule(c3.learning_rule[key], c3, r3[key])
def test_voja_encoders(Simulator, nl_nodirect, rng, seed, allclose):
"""Tests that voja changes active encoders to the input."""
n = 200
learned_vector = np.asarray([0.3, -0.4, 0.6])
learned_vector /= np.linalg.norm(learned_vector)
n_change = n // 2 # modify first half of the encoders
# Set the first half to always fire with random encoders, and the
# remainder to never fire due to their encoder's dot product with the input
intercepts = np.asarray([-1] * n_change + [0.99] * (n - n_change))
rand_encoders = UniformHypersphere(surface=True).sample(
n_change, len(learned_vector), rng=rng
)
encoders = np.append(rand_encoders, [-learned_vector] * (n - n_change), axis=0)
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = nl_nodirect()
u = nengo.Node(output=learned_vector)
x = nengo.Ensemble(
n,
dimensions=len(learned_vector),
intercepts=intercepts,
encoders=encoders,
max_rates=nengo.dists.Uniform(300.0, 400.0),
radius=2.0,
) # to test encoder scaling
conn = nengo.Connection(
u, x, synapse=None, learning_rule_type=Voja(learning_rate=1e-1)
)
p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
p_enc_ens = nengo.Probe(x, "scaled_encoders")
with Simulator(m) as sim:
sim.run(1.0)
t = sim.trange()
tend = t > 0.5
# Voja's rule relies on knowing exactly how the encoders were scaled
# during the build process, because it modifies the scaled_encoders signal
# proportional to this factor. Therefore, we should check that its
# assumption actually holds.
encoder_scale = (sim.data[x].gain / x.radius)[:, np.newaxis]
assert allclose(sim.data[x].encoders, sim.data[x].scaled_encoders / encoder_scale)
# Check that the last half kept the same encoders throughout the simulation
assert allclose(sim.data[p_enc][0, n_change:], sim.data[p_enc][:, n_change:])
# and that they are also equal to their originally assigned value
assert allclose(
sim.data[p_enc][0, n_change:] / encoder_scale[n_change:], -learned_vector
)
# Check that the first half converged to the input
assert allclose(
sim.data[p_enc][tend, :n_change] / encoder_scale[:n_change],
learned_vector,
atol=0.01,
)
# Check that encoders probed from ensemble equal encoders probed from Voja
assert allclose(sim.data[p_enc], sim.data[p_enc_ens])
def test_voja_modulate(Simulator, nl_nodirect, seed, allclose):
"""Tests that voja's rule can be modulated on/off."""
n = 200
learned_vector = np.asarray([0.5])
def control_signal(t):
"""Modulates the learning on/off."""
return 0 if t < 0.5 else -1
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = nl_nodirect()
control = nengo.Node(output=control_signal)
u = nengo.Node(output=learned_vector)
x = nengo.Ensemble(n, dimensions=len(learned_vector))
conn = nengo.Connection(
u, x, synapse=None, learning_rule_type=Voja(post_synapse=None)
)
nengo.Connection(control, conn.learning_rule, synapse=None)
p_enc = nengo.Probe(conn.learning_rule, "scaled_encoders")
with Simulator(m) as sim:
sim.run(1.0)
tend = sim.trange() > 0.5
# Check that encoders stop changing after 0.5s
assert allclose(sim.data[p_enc][tend], sim.data[p_enc][-1])
# Check that encoders changed during first 0.5s
i = np.where(tend)[0][0] # first time point after changeover
assert not allclose(sim.data[p_enc][0], sim.data[p_enc][i], record_rmse=False)
def test_frozen():
"""Test attributes inherited from FrozenObject"""
a = PES(learning_rate=2e-3, pre_synapse=4e-3)
b = PES(learning_rate=2e-3, pre_synapse=4e-3)
c = PES(learning_rate=2e-3, pre_synapse=5e-3)
assert hash(a) == hash(a)
assert hash(b) == hash(b)
assert hash(c) == hash(c)
assert a == b
assert hash(a) == hash(b)
assert a != c
assert hash(a) != hash(c) # not guaranteed, but highly likely
assert b != c
assert hash(b) != hash(c) # not guaranteed, but highly likely
with pytest.raises((ValueError, RuntimeError)):
a.learning_rate = 1e-1
def test_pes_direct_errors():
"""Test that applying a learning rule to a direct ensemble errors."""
with nengo.Network():
pre = nengo.Ensemble(10, 1, neuron_type=nengo.Direct())
post = nengo.Ensemble(10, 1)
conn = nengo.Connection(pre, post)
with pytest.raises(ValidationError):
conn.learning_rule_type = nengo.PES()
def test_custom_type(Simulator, allclose):
"""Test with custom learning rule type.
A custom learning type may have ``size_in`` not equal to 0, 1, or None.
"""
class TestRule(nengo.learning_rules.LearningRuleType):
modifies = "decoders"
def __init__(self):
super().__init__(1.0, size_in=3)
@Builder.register(TestRule)
def build_test_rule(model, test_rule, rule):
error = Signal(np.zeros(rule.connection.size_in))
model.add_op(Reset(error))
model.sig[rule]["in"] = error[: rule.size_in]
model.add_op(Copy(error, model.sig[rule]["delta"]))
with nengo.Network() as net:
a = nengo.Ensemble(10, 1)
b = nengo.Ensemble(10, 1)
conn = nengo.Connection(
a.neurons, b, transform=np.zeros((1, 10)), learning_rule_type=TestRule()
)
err = nengo.Node([1, 2, 3])
nengo.Connection(err, conn.learning_rule, synapse=None)
p = nengo.Probe(conn, "weights")
with Simulator(net) as sim:
sim.run(sim.dt * 5)
assert allclose(sim.data[p][:, 0, :3], np.outer(np.arange(1, 6), np.arange(1, 4)))
assert allclose(sim.data[p][:, :, 3:], 0)
@pytest.mark.parametrize("LearningRule", (nengo.PES, nengo.BCM, nengo.Voja, nengo.Oja))
def test_tau_deprecation(LearningRule):
params = [
("pre_tau", "pre_synapse"),
("post_tau", "post_synapse"),
("theta_tau", "theta_synapse"),
]
kwargs = {}
for i, (p0, p1) in enumerate(params):
if hasattr(LearningRule, p0):
kwargs[p0] = i
with pytest.warns(DeprecationWarning):
l_rule = LearningRule(learning_rate=0, **kwargs)
for i, (p0, p1) in enumerate(params):
if hasattr(LearningRule, p0):
assert getattr(l_rule, p0) == i
assert getattr(l_rule, p1) == Lowpass(i)
def test_slicing(Simulator, seed, allclose):
with nengo.Network(seed=seed) as model:
a = nengo.Ensemble(50, 1)
b = nengo.Ensemble(30, 2)
conn = nengo.Connection(
a, b, learning_rule_type=PES(), function=lambda x: (0, 0)
)
nengo.Connection(nengo.Node(1.0), a)
err1 = nengo.Node(lambda t, x: x - 0.75, size_in=1)
nengo.Connection(b[0], err1)
nengo.Connection(err1, conn.learning_rule[0])
err2 = nengo.Node(lambda t, x: x + 0.5, size_in=1)
nengo.Connection(b[1], err2)
nengo.Connection(err2, conn.learning_rule[1])
p = nengo.Probe(b, synapse=0.03)
with Simulator(model) as sim:
sim.run(1.0)
t = sim.trange() > 0.8
assert allclose(sim.data[p][t, 0], 0.75, atol=0.15)
assert allclose(sim.data[p][t, 1], -0.5, atol=0.15)
|
[
"nengo.learning_rules.BCM",
"nengo.dists.Uniform",
"nengo.processes.WhiteSignal",
"nengo.Node",
"numpy.array",
"numpy.var",
"numpy.linalg.norm",
"numpy.arange",
"nengo.builder.operator.Copy",
"nengo.builder.operator.Reset",
"nengo.Ensemble",
"numpy.where",
"numpy.asarray",
"nengo.learning_rules.Voja",
"numpy.ones",
"nengo.dists.UniformHypersphere",
"nengo.synapses.Alpha",
"pytest.raises",
"nengo.PES",
"nengo.synapses.Lowpass",
"nengo.learning_rules.LearningRuleTypeParam",
"nengo.Network",
"nengo.Probe",
"nengo.solvers.LstsqL2",
"nengo.learning_rules.Oja",
"numpy.append",
"pytest.mark.parametrize",
"nengo.learning_rules.PES",
"numpy.zeros",
"nengo.builder.Builder.register",
"nengo.Connection",
"nengo.Direct",
"numpy.all",
"pytest.warns"
] |
[((7196, 7245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""weights"""', '[False, True]'], {}), "('weights', [False, True])\n", (7219, 7245), False, 'import pytest\n'), ((11392, 11467), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""learning_rule"""', '[nengo.PES, nengo.BCM, nengo.Oja]'], {}), "('learning_rule', [nengo.PES, nengo.BCM, nengo.Oja])\n", (11415, 11467), False, 'import pytest\n'), ((12657, 12732), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""learning_rule"""', '[nengo.PES, nengo.BCM, nengo.Oja]'], {}), "('learning_rule', [nengo.PES, nengo.BCM, nengo.Oja])\n", (12680, 12732), False, 'import pytest\n'), ((21534, 21624), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""LearningRule"""', '(nengo.PES, nengo.BCM, nengo.Voja, nengo.Oja)'], {}), "('LearningRule', (nengo.PES, nengo.BCM, nengo.Voja,\n nengo.Oja))\n", (21557, 21624), False, 'import pytest\n'), ((666, 687), 'numpy.array', 'np.array', (['[0.5, -0.5]'], {}), '([0.5, -0.5])\n', (674, 687), True, 'import numpy as np\n'), ((748, 761), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (756, 761), True, 'import numpy as np\n'), ((4152, 4179), 'numpy.asarray', 'np.asarray', (['[[0.5], [-0.5]]'], {}), '([[0.5], [-0.5]])\n', (4162, 4179), True, 'import numpy as np\n'), ((4212, 4236), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (4225, 4236), False, 'import nengo\n'), ((6655, 6683), 'nengo.learning_rules.PES', 'PES', ([], {'pre_synapse': 'pre_synapse'}), '(pre_synapse=pre_synapse)\n', (6658, 6683), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8118, 8152), 'numpy.all', 'np.all', (['(sim.data[p][-10:, 1] > 0.8)'], {}), '(sim.data[p][-10:, 1] > 0.8)\n', (8124, 8152), True, 'import numpy as np\n'), ((8987, 9011), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (9000, 9011), False, 'import nengo\n'), ((14168, 14173), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14171, 14173), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((15738, 15766), 'numpy.asarray', 'np.asarray', (['[0.3, -0.4, 0.6]'], {}), '([0.3, -0.4, 0.6])\n', (15748, 15766), True, 'import numpy as np\n'), ((15789, 15819), 'numpy.linalg.norm', 'np.linalg.norm', (['learned_vector'], {}), '(learned_vector)\n', (15803, 15819), True, 'import numpy as np\n'), ((16047, 16100), 'numpy.asarray', 'np.asarray', (['([-1] * n_change + [0.99] * (n - n_change))'], {}), '([-1] * n_change + [0.99] * (n - n_change))\n', (16057, 16100), True, 'import numpy as np\n'), ((16230, 16298), 'numpy.append', 'np.append', (['rand_encoders', '([-learned_vector] * (n - n_change))'], {'axis': '(0)'}), '(rand_encoders, [-learned_vector] * (n - n_change), axis=0)\n', (16239, 16298), True, 'import numpy as np\n'), ((16308, 16332), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (16321, 16332), False, 'import nengo\n'), ((18296, 18313), 'numpy.asarray', 'np.asarray', (['[0.5]'], {}), '([0.5])\n', (18306, 18313), True, 'import numpy as np\n'), ((18432, 18456), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (18445, 18456), False, 'import nengo\n'), ((19428, 19471), 'nengo.learning_rules.PES', 'PES', ([], {'learning_rate': '(0.002)', 'pre_synapse': '(0.004)'}), '(learning_rate=0.002, pre_synapse=0.004)\n', (19431, 19471), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((19478, 19521), 
'nengo.learning_rules.PES', 'PES', ([], {'learning_rate': '(0.002)', 'pre_synapse': '(0.004)'}), '(learning_rate=0.002, pre_synapse=0.004)\n', (19481, 19521), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((19528, 19571), 'nengo.learning_rules.PES', 'PES', ([], {'learning_rate': '(0.002)', 'pre_synapse': '(0.005)'}), '(learning_rate=0.002, pre_synapse=0.005)\n', (19531, 19571), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((20679, 20705), 'nengo.builder.Builder.register', 'Builder.register', (['TestRule'], {}), '(TestRule)\n', (20695, 20705), False, 'from nengo.builder import Builder\n'), ((792, 805), 'numpy.array', 'np.array', (['vin'], {}), '(vin)\n', (800, 805), True, 'import numpy as np\n'), ((842, 866), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (855, 866), False, 'import nengo\n'), ((949, 971), 'nengo.Node', 'nengo.Node', ([], {'output': 'vin'}), '(output=vin)\n', (959, 971), False, 'import nengo\n'), ((989, 1012), 'nengo.Node', 'nengo.Node', ([], {'output': 'vout'}), '(output=vout)\n', (999, 1012), False, 'import nengo\n'), ((1027, 1070), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': 'stim.size_out'}), '(n, dimensions=stim.size_out)\n', (1041, 1070), False, 'import nengo\n'), ((1086, 1129), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': 'stim.size_out'}), '(n, dimensions=stim.size_out)\n', (1100, 1129), False, 'import nengo\n'), ((1146, 1191), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': 'target.size_out'}), '(n, dimensions=target.size_out)\n', (1160, 1191), False, 'import nengo\n'), ((1201, 1228), 'nengo.Connection', 'nengo.Connection', (['stim', 'pre'], {}), '(stim, pre)\n', (1217, 1228), False, 'import nengo\n'), ((1710, 1755), 'nengo.Connection', 'nengo.Connection', (['target', 'error'], {'transform': '(-1)'}), '(target, error, transform=-1)\n', (1726, 1755), False, 'import nengo\n'), ((1764, 1798), 'nengo.Connection', 'nengo.Connection', (['postslice', 'error'], {}), '(postslice, error)\n', (1780, 1798), False, 'import nengo\n'), ((1807, 1850), 'nengo.Connection', 'nengo.Connection', (['error', 'conn.learning_rule'], {}), '(error, conn.learning_rule)\n', (1823, 1850), False, 'import nengo\n'), ((1869, 1905), 'nengo.Probe', 'nengo.Probe', (['postslice'], {'synapse': '(0.03)'}), '(postslice, synapse=0.03)\n', (1880, 1905), False, 'import nengo\n'), ((1924, 1956), 'nengo.Probe', 'nengo.Probe', (['error'], {'synapse': '(0.03)'}), '(error, synapse=0.03)\n', (1935, 1956), False, 'import nengo\n'), ((1978, 2025), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {'sample_every': '(0.01)'}), "(conn, 'weights', sample_every=0.01)\n", (1989, 2025), False, 'import nengo\n'), ((4261, 4283), 'nengo.Node', 'nengo.Node', ([], {'output': '[1]'}), '(output=[1])\n', (4271, 4283), False, 'import nengo\n'), ((4296, 4327), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': '(1)'}), '(n, dimensions=1)\n', (4310, 4327), False, 'import nengo\n'), ((4340, 4361), 'nengo.Node', 'nengo.Node', ([], {'size_in': '(2)'}), '(size_in=2)\n', (4350, 4361), False, 'import nengo\n'), ((4374, 4395), 'nengo.Node', 'nengo.Node', ([], {'size_in': '(1)'}), '(size_in=1)\n', (4384, 4395), False, 'import nengo\n'), ((4405, 4427), 'nengo.Connection', 'nengo.Connection', (['u', 'a'], {}), '(u, a)\n', (4421, 4427), False, 'import nengo\n'), ((4743, 4782), 'nengo.Connection', 'nengo.Connection', (['b[0]', 'e'], {'synapse': 'None'}), '(b[0], e, 
synapse=None)\n', (4759, 4782), False, 'import nengo\n'), ((4842, 4916), 'nengo.Connection', 'nengo.Connection', (['e', 'learned_conn.learning_rule'], {'transform': 'T', 'synapse': 'None'}), '(e, learned_conn.learning_rule, transform=T, synapse=None)\n', (4858, 4916), False, 'import nengo\n'), ((4932, 4960), 'nengo.Probe', 'nengo.Probe', (['b'], {'synapse': '(0.05)'}), '(b, synapse=0.05)\n', (4943, 4960), False, 'import nengo\n'), ((5360, 5384), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (5373, 5384), False, 'import nengo\n'), ((5407, 5429), 'nengo.Node', 'nengo.Node', ([], {'output': '[0]'}), '(output=[0])\n', (5417, 5429), False, 'import nengo\n'), ((5445, 5466), 'nengo.Ensemble', 'nengo.Ensemble', (['(20)', '(3)'], {}), '(20, 3)\n', (5459, 5466), False, 'import nengo\n'), ((5482, 5503), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (5496, 5503), False, 'import nengo\n'), ((5785, 5833), 'nengo.Connection', 'nengo.Connection', (['err', "conn.learning_rule['pes']"], {}), "(err, conn.learning_rule['pes'])\n", (5801, 5833), False, 'import nengo\n'), ((6112, 6160), 'nengo.Connection', 'nengo.Connection', (['err', "conn.learning_rule['pes']"], {}), "(err, conn.learning_rule['pes'])\n", (6128, 6160), False, 'import nengo\n'), ((6394, 6442), 'nengo.Connection', 'nengo.Connection', (['err', "conn.learning_rule['pes']"], {}), "(err, conn.learning_rule['pes'])\n", (6410, 6442), False, 'import nengo\n'), ((6694, 6718), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (6707, 6718), False, 'import nengo\n'), ((6801, 6823), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (6815, 6823), False, 'import nengo\n'), ((6833, 6872), 'nengo.Connection', 'nengo.Connection', (['stim', 'x'], {'synapse': 'None'}), '(stim, x, synapse=None)\n', (6849, 6872), False, 'import nengo\n'), ((6888, 6935), 'nengo.Connection', 'nengo.Connection', (['x', 'x'], {'learning_rule_type': 'rule'}), '(x, x, learning_rule_type=rule)\n', (6904, 6935), False, 'import nengo\n'), ((6957, 7000), 'nengo.Probe', 'nengo.Probe', (['x.neurons'], {'synapse': 'pre_synapse'}), '(x.neurons, synapse=pre_synapse)\n', (6968, 7000), False, 'import nengo\n'), ((7017, 7062), 'nengo.Probe', 'nengo.Probe', (['conn.learning_rule', '"""activities"""'], {}), "(conn.learning_rule, 'activities')\n", (7028, 7062), False, 'import nengo\n'), ((6544, 6561), 'nengo.synapses.Lowpass', 'Lowpass', ([], {'tau': '(0.05)'}), '(tau=0.05)\n', (6551, 6561), False, 'from nengo.synapses import Alpha, Lowpass\n'), ((6563, 6579), 'nengo.synapses.Alpha', 'Alpha', ([], {'tau': '(0.005)'}), '(tau=0.005)\n', (6568, 6579), False, 'from nengo.synapses import Alpha, Lowpass\n'), ((7395, 7419), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (7408, 7419), False, 'import nengo\n'), ((7442, 7465), 'nengo.Node', 'nengo.Node', ([], {'output': '[-1]'}), '(output=[-1])\n', (7452, 7465), False, 'import nengo\n'), ((7481, 7506), 'nengo.Node', 'nengo.Node', ([], {'output': '[0, 0]'}), '(output=[0, 0])\n', (7491, 7506), False, 'import nengo\n'), ((7522, 7553), 'nengo.Ensemble', 'nengo.Ensemble', (['(50)', '(2)'], {'radius': '(2)'}), '(50, 2, radius=2)\n', (7536, 7553), False, 'import nengo\n'), ((7562, 7590), 'nengo.Connection', 'nengo.Connection', (['stim', 'post'], {}), '(stim, post)\n', (7578, 7590), False, 'import nengo\n'), ((7840, 7881), 'nengo.Connection', 'nengo.Connection', (['err', 'conn.learning_rule'], {}), '(err, conn.learning_rule)\n', (7856, 
7881), False, 'import nengo\n'), ((7894, 7926), 'nengo.Probe', 'nengo.Probe', (['post'], {'synapse': '(0.025)'}), '(post, synapse=0.025)\n', (7905, 7926), False, 'import nengo\n'), ((8272, 8287), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (8285, 8287), False, 'import nengo\n'), ((8308, 8329), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (8322, 8329), False, 'import nengo\n'), ((8342, 8363), 'nengo.Node', 'nengo.Node', ([], {'size_in': '(1)'}), '(size_in=1)\n', (8352, 8363), False, 'import nengo\n'), ((8453, 8503), 'nengo.Connection', 'nengo.Connection', (['b', 'c.learning_rule'], {'synapse': 'None'}), '(b, c.learning_rule, synapse=None)\n', (8469, 8503), False, 'import nengo\n'), ((9098, 9129), 'nengo.Ensemble', 'nengo.Ensemble', (['n'], {'dimensions': '(2)'}), '(n, dimensions=2)\n', (9112, 9129), False, 'import nengo\n'), ((9142, 9177), 'nengo.Ensemble', 'nengo.Ensemble', (['(n + 1)'], {'dimensions': '(2)'}), '(n + 1, dimensions=2)\n', (9156, 9177), False, 'import nengo\n'), ((9186, 9208), 'nengo.Connection', 'nengo.Connection', (['u', 'a'], {}), '(u, a)\n', (9202, 9208), False, 'import nengo\n'), ((9561, 9575), 'nengo.Probe', 'nengo.Probe', (['u'], {}), '(u)\n', (9572, 9575), False, 'import nengo\n'), ((9596, 9643), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {'sample_every': '(0.01)'}), "(conn, 'weights', sample_every=0.01)\n", (9607, 9643), False, 'import nengo\n'), ((9658, 9686), 'nengo.Probe', 'nengo.Probe', (['a'], {'synapse': '(0.03)'}), '(a, synapse=0.03)\n', (9669, 9686), False, 'import nengo\n'), ((9700, 9728), 'nengo.Probe', 'nengo.Probe', (['b'], {'synapse': '(0.03)'}), '(b, synapse=0.03)\n', (9711, 9728), False, 'import nengo\n'), ((10417, 10432), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (10430, 10432), False, 'import nengo\n'), ((10653, 10675), 'nengo.Node', 'nengo.Node', ([], {'output': '(1.0)'}), '(output=1.0)\n', (10663, 10675), False, 'import nengo\n'), ((10690, 10722), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)'], {'dimensions': '(1)'}), '(10, dimensions=1)\n', (10704, 10722), False, 'import nengo\n'), ((10738, 10770), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)'], {'dimensions': '(1)'}), '(10, dimensions=1)\n', (10752, 10770), False, 'import nengo\n'), ((10873, 10986), 'nengo.Connection', 'nengo.Connection', (['pre.neurons', 'post.neurons'], {'transform': 'initial_weights', 'learning_rule_type': 'learning_rule_type'}), '(pre.neurons, post.neurons, transform=initial_weights,\n learning_rule_type=learning_rule_type)\n', (10889, 10986), False, 'import nengo\n'), ((11249, 11287), 'nengo.Probe', 'nengo.Probe', (['pre.neurons'], {'synapse': '(0.01)'}), '(pre.neurons, synapse=0.01)\n', (11260, 11287), False, 'import nengo\n'), ((11312, 11373), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {'synapse': 'None', 'sample_every': '(0.01)'}), "(conn, 'weights', synapse=None, sample_every=0.01)\n", (11323, 11373), False, 'import nengo\n'), ((11644, 11668), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (11657, 11668), False, 'import nengo\n'), ((12900, 12924), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (12913, 12924), False, 'import nengo\n'), ((13117, 13160), 'numpy.array', 'np.array', (['sim.data[m.activity_p]'], {'copy': '(True)'}), '(sim.data[m.activity_p], copy=True)\n', (13125, 13160), True, 'import numpy as np\n'), ((13187, 13229), 'numpy.array', 'np.array', (['sim.data[m.weights_p]'], {'copy': '(True)'}), '(sim.data[m.weights_p], copy=True)\n', 
(13195, 13229), True, 'import numpy as np\n'), ((14063, 14105), 'nengo.learning_rules.LearningRuleTypeParam', 'LearningRuleTypeParam', (['"""lrp"""'], {'default': 'None'}), "('lrp', default=None)\n", (14084, 14105), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14227, 14232), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14230, 14232), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14234, 14239), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14237, 14239), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14330, 14355), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14343, 14355), False, 'import pytest\n'), ((14427, 14452), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14440, 14452), False, 'import pytest\n'), ((14716, 14740), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (14729, 14740), False, 'import nengo\n'), ((14814, 14831), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (14821, 14831), True, 'import numpy as np\n'), ((14846, 14851), 'nengo.learning_rules.PES', 'PES', ([], {}), '()\n', (14849, 14851), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14865, 14926), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'learning_rule_type': 'r1'}), '(a.neurons, b.neurons, learning_rule_type=r1)\n', (14881, 14926), False, 'import nengo\n'), ((15014, 15088), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'learning_rule_type': 'r2', 'transform': 'T'}), '(a.neurons, b.neurons, learning_rule_type=r2, transform=T)\n', (15030, 15088), False, 'import nengo\n'), ((15295, 15369), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'learning_rule_type': 'r3', 'transform': 'T'}), '(a.neurons, b.neurons, learning_rule_type=r3, transform=T)\n', (15311, 15369), False, 'import nengo\n'), ((16418, 16451), 'nengo.Node', 'nengo.Node', ([], {'output': 'learned_vector'}), '(output=learned_vector)\n', (16428, 16451), False, 'import nengo\n'), ((16859, 16909), 'nengo.Probe', 'nengo.Probe', (['conn.learning_rule', '"""scaled_encoders"""'], {}), "(conn.learning_rule, 'scaled_encoders')\n", (16870, 16909), False, 'import nengo\n'), ((16930, 16963), 'nengo.Probe', 'nengo.Probe', (['x', '"""scaled_encoders"""'], {}), "(x, 'scaled_encoders')\n", (16941, 16963), False, 'import nengo\n'), ((18548, 18581), 'nengo.Node', 'nengo.Node', ([], {'output': 'control_signal'}), '(output=control_signal)\n', (18558, 18581), False, 'import nengo\n'), ((18594, 18627), 'nengo.Node', 'nengo.Node', ([], {'output': 'learned_vector'}), '(output=learned_vector)\n', (18604, 18627), False, 'import nengo\n'), ((18817, 18876), 'nengo.Connection', 'nengo.Connection', (['control', 'conn.learning_rule'], {'synapse': 'None'}), '(control, conn.learning_rule, synapse=None)\n', (18833, 18876), False, 'import nengo\n'), ((18894, 18944), 'nengo.Probe', 'nengo.Probe', (['conn.learning_rule', '"""scaled_encoders"""'], {}), "(conn.learning_rule, 'scaled_encoders')\n", (18905, 18944), False, 'import nengo\n'), ((19890, 19931), 'pytest.raises', 'pytest.raises', (['(ValueError, RuntimeError)'], {}), '((ValueError, RuntimeError))\n', (19903, 19931), False, 'import pytest\n'), ((20079, 20094), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (20092, 20094), False, 'import nengo\n'), ((20175, 20196), 
'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (20189, 20196), False, 'import nengo\n'), ((20212, 20239), 'nengo.Connection', 'nengo.Connection', (['pre', 'post'], {}), '(pre, post)\n', (20228, 20239), False, 'import nengo\n'), ((20973, 20988), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (20986, 20988), False, 'import nengo\n'), ((21009, 21030), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (21023, 21030), False, 'import nengo\n'), ((21043, 21064), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(1)'], {}), '(10, 1)\n', (21057, 21064), False, 'import nengo\n'), ((21208, 21229), 'nengo.Node', 'nengo.Node', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (21218, 21229), False, 'import nengo\n'), ((21238, 21293), 'nengo.Connection', 'nengo.Connection', (['err', 'conn.learning_rule'], {'synapse': 'None'}), '(err, conn.learning_rule, synapse=None)\n', (21254, 21293), False, 'import nengo\n'), ((21307, 21335), 'nengo.Probe', 'nengo.Probe', (['conn', '"""weights"""'], {}), "(conn, 'weights')\n", (21318, 21335), False, 'import nengo\n'), ((21929, 21961), 'pytest.warns', 'pytest.warns', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (21941, 21961), False, 'import pytest\n'), ((22254, 22278), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (22267, 22278), False, 'import nengo\n'), ((22301, 22322), 'nengo.Ensemble', 'nengo.Ensemble', (['(50)', '(1)'], {}), '(50, 1)\n', (22315, 22322), False, 'import nengo\n'), ((22335, 22356), 'nengo.Ensemble', 'nengo.Ensemble', (['(30)', '(2)'], {}), '(30, 2)\n', (22349, 22356), False, 'import nengo\n'), ((22531, 22575), 'nengo.Node', 'nengo.Node', (['(lambda t, x: x - 0.75)'], {'size_in': '(1)'}), '(lambda t, x: x - 0.75, size_in=1)\n', (22541, 22575), False, 'import nengo\n'), ((22584, 22612), 'nengo.Connection', 'nengo.Connection', (['b[0]', 'err1'], {}), '(b[0], err1)\n', (22600, 22612), False, 'import nengo\n'), ((22621, 22666), 'nengo.Connection', 'nengo.Connection', (['err1', 'conn.learning_rule[0]'], {}), '(err1, conn.learning_rule[0])\n', (22637, 22666), False, 'import nengo\n'), ((22683, 22726), 'nengo.Node', 'nengo.Node', (['(lambda t, x: x + 0.5)'], {'size_in': '(1)'}), '(lambda t, x: x + 0.5, size_in=1)\n', (22693, 22726), False, 'import nengo\n'), ((22735, 22763), 'nengo.Connection', 'nengo.Connection', (['b[1]', 'err2'], {}), '(b[1], err2)\n', (22751, 22763), False, 'import nengo\n'), ((22772, 22817), 'nengo.Connection', 'nengo.Connection', (['err2', 'conn.learning_rule[1]'], {}), '(err2, conn.learning_rule[1])\n', (22788, 22817), False, 'import nengo\n'), ((22831, 22859), 'nengo.Probe', 'nengo.Probe', (['b'], {'synapse': '(0.03)'}), '(b, synapse=0.03)\n', (22842, 22859), False, 'import nengo\n'), ((475, 502), 'numpy.var', 'np.var', (['weight_data'], {'axis': '(0)'}), '(weight_data, axis=0)\n', (481, 502), True, 'import numpy as np\n'), ((1665, 1700), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(True)'}), '(weights=True)\n', (1686, 1700), False, 'import nengo\n'), ((4808, 4829), 'nengo.Node', 'nengo.Node', ([], {'output': '(-1)'}), '(output=-1)\n', (4818, 4829), False, 'import nengo\n'), ((9047, 9072), 'nengo.processes.WhiteSignal', 'WhiteSignal', (['(0.5)'], {'high': '(10)'}), '(0.5, high=10)\n', (9058, 9072), False, 'from nengo.processes import WhiteSignal\n'), ((9434, 9499), 'nengo.Connection', 'nengo.Connection', (['a.neurons', 'b.neurons'], {'transform': 'initial_weights'}), '(a.neurons, b.neurons, transform=initial_weights)\n', (9450, 
9499), False, 'import nengo\n'), ((8666, 8690), 'nengo.learning_rules.BCM', 'BCM', ([], {'learning_rate': '(1e-08)'}), '(learning_rate=1e-08)\n', (8669, 8690), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8708, 8732), 'nengo.learning_rules.Oja', 'Oja', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (8711, 8732), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((11099, 11131), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)'], {'dimensions': '(1)'}), '(10, dimensions=1)\n', (11113, 11131), False, 'import nengo\n'), ((11144, 11168), 'nengo.Connection', 'nengo.Connection', (['u', 'err'], {}), '(u, err)\n', (11160, 11168), False, 'import nengo\n'), ((11181, 11222), 'nengo.Connection', 'nengo.Connection', (['err', 'conn.learning_rule'], {}), '(err, conn.learning_rule)\n', (11197, 11222), False, 'import nengo\n'), ((14474, 14479), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14477, 14479), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14486, 14491), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (14489, 14491), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14761, 14782), 'nengo.Ensemble', 'nengo.Ensemble', (['(10)', '(2)'], {}), '(10, 2)\n', (14775, 14782), False, 'import nengo\n'), ((14987, 14992), 'nengo.learning_rules.PES', 'PES', ([], {}), '()\n', (14990, 14992), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((14994, 14999), 'nengo.learning_rules.BCM', 'BCM', ([], {}), '()\n', (14997, 14999), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((16121, 16153), 'nengo.dists.UniformHypersphere', 'UniformHypersphere', ([], {'surface': '(True)'}), '(surface=True)\n', (16139, 16153), False, 'from nengo.dists import UniformHypersphere\n'), ((19204, 19218), 'numpy.where', 'np.where', (['tend'], {}), '(tend)\n', (19212, 19218), True, 'import numpy as np\n'), ((20253, 20283), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (20266, 20283), False, 'import pytest\n'), ((20323, 20334), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (20332, 20334), False, 'import nengo\n'), ((20778, 20811), 'numpy.zeros', 'np.zeros', (['rule.connection.size_in'], {}), '(rule.connection.size_in)\n', (20786, 20811), True, 'import numpy as np\n'), ((20834, 20846), 'nengo.builder.operator.Reset', 'Reset', (['error'], {}), '(error)\n', (20839, 20846), False, 'from nengo.builder.operator import Reset, Copy\n'), ((20924, 20961), 'nengo.builder.operator.Copy', 'Copy', (['error', "model.sig[rule]['delta']"], {}), "(error, model.sig[rule]['delta'])\n", (20928, 20961), False, 'from nengo.builder.operator import Reset, Copy\n'), ((21450, 21465), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (21459, 21465), True, 'import numpy as np\n'), ((21467, 21482), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (21476, 21482), True, 'import numpy as np\n'), ((22495, 22510), 'nengo.Node', 'nengo.Node', (['(1.0)'], {}), '(1.0)\n', (22505, 22510), False, 'import nengo\n'), ((1592, 1601), 'nengo.learning_rules.PES', 'PES', (['rate'], {}), '(rate)\n', (1595, 1601), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((4591, 4621), 'nengo.PES', 'nengo.PES', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (4600, 4621), False, 'import nengo\n'), ((5640, 
5655), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (5647, 5655), True, 'import numpy as np\n'), ((5676, 5712), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(False)'}), '(weights=False)\n', (5697, 5712), False, 'import nengo\n'), ((5968, 5983), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (5975, 5983), True, 'import numpy as np\n'), ((6004, 6039), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(True)'}), '(weights=True)\n', (6025, 6039), False, 'import nengo\n'), ((6293, 6321), 'numpy.ones', 'np.ones', (['(1, ens1.n_neurons)'], {}), '((1, ens1.n_neurons))\n', (6300, 6321), True, 'import numpy as np\n'), ((6762, 6787), 'nengo.processes.WhiteSignal', 'WhiteSignal', (['(0.5)'], {'high': '(10)'}), '(0.5, high=10)\n', (6773, 6787), False, 'from nengo.processes import WhiteSignal\n'), ((7719, 7757), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': 'weights'}), '(weights=weights)\n', (7740, 7757), False, 'import nengo\n'), ((7790, 7821), 'nengo.PES', 'nengo.PES', ([], {'learning_rate': '(0.0005)'}), '(learning_rate=0.0005)\n', (7799, 7821), False, 'import nengo\n'), ((8432, 8443), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (8441, 8443), False, 'import nengo\n'), ((8751, 8775), 'nengo.learning_rules.Oja', 'Oja', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (8754, 8775), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8776, 8800), 'nengo.learning_rules.BCM', 'BCM', ([], {'learning_rate': '(1e-08)'}), '(learning_rate=1e-08)\n', (8779, 8800), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8820, 8844), 'nengo.learning_rules.Oja', 'Oja', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (8823, 8844), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((8845, 8869), 'nengo.learning_rules.BCM', 'BCM', ([], {'learning_rate': '(1e-08)'}), '(learning_rate=1e-08)\n', (8848, 8869), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((15264, 15269), 'nengo.learning_rules.Oja', 'Oja', ([], {}), '()\n', (15267, 15269), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((15275, 15280), 'nengo.learning_rules.BCM', 'BCM', ([], {}), '()\n', (15278, 15280), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((16627, 16660), 'nengo.dists.Uniform', 'nengo.dists.Uniform', (['(300.0)', '(400.0)'], {}), '(300.0, 400.0)\n', (16646, 16660), False, 'import nengo\n'), ((16808, 16831), 'nengo.learning_rules.Voja', 'Voja', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (16812, 16831), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((18775, 18798), 'nengo.learning_rules.Voja', 'Voja', ([], {'post_synapse': 'None'}), '(post_synapse=None)\n', (18779, 18798), False, 'from nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((20144, 20158), 'nengo.Direct', 'nengo.Direct', ([], {}), '()\n', (20156, 20158), False, 'import nengo\n'), ((21134, 21151), 'numpy.zeros', 'np.zeros', (['(1, 10)'], {}), '((1, 10))\n', (21142, 21151), True, 'import numpy as np\n'), ((22187, 22197), 'nengo.synapses.Lowpass', 'Lowpass', (['i'], {}), '(i)\n', (22194, 22197), False, 'from nengo.synapses import Alpha, Lowpass\n'), ((22427, 22432), 'nengo.learning_rules.PES', 'PES', ([], {}), '()\n', (22430, 22432), False, 'from 
nengo.learning_rules import LearningRuleTypeParam, PES, BCM, Oja, Voja\n'), ((5753, 5764), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (5762, 5764), False, 'import nengo\n'), ((6080, 6091), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (6089, 6091), False, 'import nengo\n'), ((6362, 6373), 'nengo.PES', 'nengo.PES', ([], {}), '()\n', (6371, 6373), False, 'import nengo\n'), ((9278, 9313), 'nengo.solvers.LstsqL2', 'nengo.solvers.LstsqL2', ([], {'weights': '(True)'}), '(weights=True)\n', (9299, 9313), False, 'import nengo\n')]
|
import numpy as np
from django.core.management.base import BaseCommand
from oscar.core.loading import get_classes
StatsSpe, StatsItem, Test, Speciality, Item, Conference = get_classes(
'confs.models',
(
"StatsSpe", "StatsItem", "Test", "Speciality", "Item", "Conference"
)
)
class Command(BaseCommand):
    help = 'Evaluate new stats for all specialities and items'
def handle(self, *args, **options):
for spe in Speciality.objects.all():
stats = StatsSpe.objects.get_or_create(speciality=spe)[0]
l = [
test.score for test
in Test.objects.filter(conf__specialities__in=[spe], finished=True).all()
]
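            # Fall back to [0] so mean/median/std stay defined when no finished test exists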
l = l if l != [] else [0]
stats.average = np.mean(l)
stats.median = np.median(l)
stats.std_dev = np.std(l)
stats.save()
for item in Item.objects.all():
stats = StatsItem.objects.get_or_create(item=item)[0]
l = [
test.score for test
in Test.objects.filter(conf__items__in=[item], finished=True).all()
]
l = l if l != [] else [0]
stats.average = np.mean(l)
stats.median = np.median(l)
stats.std_dev = np.std(l)
stats.save()
for conf in Conference.objects.filter(tests__isnull=False, for_sale=True).distinct():
conf.update_stats()
|
[
"numpy.mean",
"numpy.median",
"oscar.core.loading.get_classes",
"numpy.std"
] |
[((175, 277), 'oscar.core.loading.get_classes', 'get_classes', (['"""confs.models"""', "('StatsSpe', 'StatsItem', 'Test', 'Speciality', 'Item', 'Conference')"], {}), "('confs.models', ('StatsSpe', 'StatsItem', 'Test', 'Speciality',\n 'Item', 'Conference'))\n", (186, 277), False, 'from oscar.core.loading import get_classes\n'), ((769, 779), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (776, 779), True, 'import numpy as np\n'), ((807, 819), 'numpy.median', 'np.median', (['l'], {}), '(l)\n', (816, 819), True, 'import numpy as np\n'), ((848, 857), 'numpy.std', 'np.std', (['l'], {}), '(l)\n', (854, 857), True, 'import numpy as np\n'), ((1208, 1218), 'numpy.mean', 'np.mean', (['l'], {}), '(l)\n', (1215, 1218), True, 'import numpy as np\n'), ((1246, 1258), 'numpy.median', 'np.median', (['l'], {}), '(l)\n', (1255, 1258), True, 'import numpy as np\n'), ((1287, 1296), 'numpy.std', 'np.std', (['l'], {}), '(l)\n', (1293, 1296), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interactive, interactive_output, fixed, HBox, VBox
import ipywidgets as widgets
def true_function_old(x):
x_copy = -1 * x
f = 2 * x_copy * np.sin(0.8*x_copy) + 0.5 * x_copy**2 - 5
return f
def sigmoid(x, L=10, k=2, x_0=20):
return L / (1 + np.exp(-k * (x - x_0)))
def true_function(x):
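    # Ground-truth curve: constant + linear + quadratic terms plus two downward
    # sigmoid steps (around x = 30 and x = 37).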
const = 17
lin = -0.25 * x
quad = 0.2*(x-20)**2
sig = sigmoid(x, L=-20, k=0.6, x_0=30)
# quad_sig = - sigmoid(xx, L=1, k=0.6, x_0=30) * (0.1 * (x-40)**2)
sig2 = sigmoid(x, L=-50, k=0.8, x_0=37)
f = const + lin + quad + sig + sig2
return f
def generate_data(n_samples=20, random_state=None):
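    # Draw n_samples x-values uniformly from [0, 40) and add Gaussian noise
    # (standard deviation 7) to the true function values.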
rng = np.random.RandomState(random_state)
    # Observations
x_sample = 40 * rng.rand(n_samples)
    # Labels
f_sample = true_function(x_sample)
noise = 7 * rng.randn(n_samples)
y_sample = f_sample + noise
return x_sample[:, np.newaxis], y_sample
|
[
"numpy.exp",
"numpy.sin",
"numpy.random.RandomState"
] |
[((717, 752), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (738, 752), True, 'import numpy as np\n'), ((334, 356), 'numpy.exp', 'np.exp', (['(-k * (x - x_0))'], {}), '(-k * (x - x_0))\n', (340, 356), True, 'import numpy as np\n'), ((223, 243), 'numpy.sin', 'np.sin', (['(0.8 * x_copy)'], {}), '(0.8 * x_copy)\n', (229, 243), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import os
import sys
import jieba
import numpy as np
jieba.setLogLevel(60) # quiet
fname = sys.argv[1]
with open(fname) as f:
text = f.read()
tokenizer = jieba.Tokenizer()
tokens = list(tokenizer.cut(text))
occurences = np.array([tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ])
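# Per-token difficulty is the inverse corpus frequency: rare words score close to 1,
# very common words close to 0.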
difficulties = 1 / (occurences + 1)
max_occurence = np.max(list(tokenizer.FREQ.values()))
min_score = 1 / (max_occurence + 1)
max_score = 1
perc = 75
mean = np.mean(difficulties)
median = np.percentile(difficulties, perc)
def norm(x):
return (x - min_score) / (max_score - min_score)
normalized_mean = norm(mean)
normalized_median = norm(median)
print(
f"{os.path.basename(fname)}: "
f"mean: {normalized_mean:.6f}, {perc}th percentile: {normalized_median:.6f} "
f"in [0: trivial, 1: hardest]"
)
import matplotlib.pyplot as plt
clipped = difficulties[(difficulties <= 0.01) & (difficulties >= 0.0001)]
plt.hist(clipped, bins=20, density=True)
ax = plt.gca()
ax.set_title(fname)
plt.show()
|
[
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.gca",
"numpy.array",
"os.path.basename",
"numpy.percentile",
"jieba.setLogLevel",
"jieba.Tokenizer",
"matplotlib.pyplot.show"
] |
[((77, 98), 'jieba.setLogLevel', 'jieba.setLogLevel', (['(60)'], {}), '(60)\n', (94, 98), False, 'import jieba\n'), ((186, 203), 'jieba.Tokenizer', 'jieba.Tokenizer', ([], {}), '()\n', (201, 203), False, 'import jieba\n'), ((252, 320), 'numpy.array', 'np.array', (['[tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ]'], {}), '([tokenizer.FREQ[w] for w in tokens if w in tokenizer.FREQ])\n', (260, 320), True, 'import numpy as np\n'), ((481, 502), 'numpy.mean', 'np.mean', (['difficulties'], {}), '(difficulties)\n', (488, 502), True, 'import numpy as np\n'), ((512, 545), 'numpy.percentile', 'np.percentile', (['difficulties', 'perc'], {}), '(difficulties, perc)\n', (525, 545), True, 'import numpy as np\n'), ((949, 989), 'matplotlib.pyplot.hist', 'plt.hist', (['clipped'], {'bins': '(20)', 'density': '(True)'}), '(clipped, bins=20, density=True)\n', (957, 989), True, 'import matplotlib.pyplot as plt\n'), ((995, 1004), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1033, 1035), True, 'import matplotlib.pyplot as plt\n'), ((693, 716), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (709, 716), False, 'import os\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Finding Lane Lines on the Road**
# ***
# In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
#
# Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
#
# In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
#
# ---
# Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
#
# **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
#
# ---
# **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
#
# ---
#
# <figure>
# <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
# </figcaption>
# </figure>
# <p></p>
# <figure>
# <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
# </figcaption>
# </figure>
# **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
# ## Import Packages
# In[1]:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
# ## Read in an Image
# In[2]:
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
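# A minimal sketch (not part of the original notebook) of how `cv2.inRange()` could be
# used for the color-selection step mentioned above; the white/yellow RGB thresholds
# below are assumptions and would need tuning for real road images.
def select_white_yellow(img):
    """Keep only roughly white or yellow pixels of an RGB image."""
    white = cv2.inRange(img, np.array([200, 200, 200]), np.array([255, 255, 255]))
    yellow = cv2.inRange(img, np.array([190, 150, 0]), np.array([255, 255, 160]))
    mask = cv2.bitwise_or(white, yellow)
    return cv2.bitwise_and(img, img, mask=mask)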
# ## Helper Functions
# Below are some helper functions to help get you started. They should look familiar from the lesson!
# In[3]:
import math
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines_new(img, lines, color=[255, 0, 0], thickness=6):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
## create an empty array with all the line slope
all_slopes = np.zeros((len(lines)))
## create an empty array for left lines
left_line_slope = []
## create an empty array for right lines
right_line_slope = []
# keep each line slope in the array
for index,line in enumerate(lines):
for x1,y1,x2,y2 in line:
all_slopes[index] = (y2-y1)/(x2-x1)
# get all left line slope if it is positive
left_line_slope = all_slopes[all_slopes > 0]
    # get all right line slopes (negative values)
right_line_slope = all_slopes[all_slopes < 0]
## mean value of left slope and right slope
m_l = left_line_slope.mean()
m_r = right_line_slope.mean()
# Create empty list for all the left points and right points
final_x4_l = []
final_x3_l = []
final_x4_r = []
final_x3_r = []
    ## fixed y-coordinates for the top and bottom of the drawn lane lines
y4 = 320
y3 = img.shape[0]
    ## For each line, compute the top and bottom x-coordinates of the
    ## left and right lane lines from the averaged slopes
for index,line in enumerate(lines):
for x1,y1,x2,y2 in line:
m = (y2-y1)/(x2-x1)
if m > 0 :
final_x4_l.append(int(((x1 + (y4 - y1) / m_l) + (x2 + (y4 - y2) / m_l))/ 2))
final_x3_l.append(int(((x1 + (y3 - y1) / m_l) + (x2 + (y3 - y2) / m_l))/ 2))
else:
final_x4_r.append(int(((x1 + (y4 - y1) / m_r) + (x2 + (y4 - y2) / m_r))/ 2))
final_x3_r.append(int(((x1 + (y3 - y1) / m_r) + (x2 + (y3 - y2) / m_r))/ 2))
try :
## taking average of each points
x4_l = int(sum(final_x4_l)/ len(final_x4_l))
x4_r = int(sum(final_x4_r)/ len(final_x4_r))
x3_l = int(sum(final_x3_l)/ len(final_x3_l))
x3_r = int(sum(final_x3_r)/ len(final_x3_r))
## Draw the left line and right line
cv2.line(img, (x4_l, y4), (x3_l, y3), color, thickness)
cv2.line(img, (x4_r, y4), (x3_r, y3), color, thickness)
except:
pass
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines_new(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# In[4]:
import os
os.listdir("test_images/")
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# In[18]:
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def preprocess_image(image_path):
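    # Pipeline: grayscale -> Canny edges -> trapezoidal region-of-interest mask ->
    # Hough line fit -> blend the drawn lanes onto the original image.
    # (The Gaussian-blurred image is computed below but not passed to Canny.)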
image = mpimg.imread(image_path)
gray_image = grayscale(image)
blured_image = gaussian_blur(gray_image, 5)
canny_image = canny(gray_image, low_threshold=100, high_threshold=170)
vertices = np.array([[(80,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
roi_image = region_of_interest(canny_image, vertices)
hough_img = hough_lines(roi_image, rho=2, theta=np.pi/180, threshold=50, min_line_len=100, max_line_gap=160)
final_img= weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
return final_img
def process_test_images(source_folder,destination_folder):
## create destination folder if not present
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
## Get all input files from the source folder
list_test_files = os.listdir(source_folder)
## process all the input files
for file in list_test_files:
output = preprocess_image(source_folder+ '/' + file)
cv2.imwrite(destination_folder+'/'+ file, cv2.cvtColor(output, cv2.COLOR_RGB2BGR))
process_test_images('test_images','test_images_output')
# In[19]:
# In[20]:
os.listdir('test_images')
# In[21]:
# Checking in an image
plt.figure(figsize=(15,8))
plt.subplot(121)
image = mpimg.imread('test_images/solidYellowCurve.jpg')
plt.imshow(image)
plt.title('Original image')
plt.subplot(122)
image = mpimg.imread('test_images_output/whiteCarLaneSwitch.jpg')
plt.imshow(image)
plt.title('Output image')
plt.show()
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# In[9]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# In[10]:
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
gray_image = grayscale(image)
blured_image = gaussian_blur(gray_image, 5)
canny_image = canny(gray_image, low_threshold=100, high_threshold=170)
vertices = np.array([[(80,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
roi_image = region_of_interest(canny_image, vertices)
hough_img = hough_lines(roi_image, rho=2, theta=np.pi/180, threshold=50, min_line_len=100, max_line_gap=160)
result= weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
return result
# Let's try the one with the solid white lane on the right first ...
# In[11]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
white_clip.write_videofile(white_output, audio=False)
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# Now for the one with the solid yellow lane on the left. This one's more tricky!
# In[13]:
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
cv2.imwrite('image_test.jpg',image)
gray_image = grayscale(image)
blured_image = gaussian_blur(gray_image, 5)
canny_image = canny(gray_image, low_threshold=100, high_threshold=170)
cv2.imwrite('image_test_canny.jpg',canny_image)
x_size = image.shape[1]
y_size = image.shape[0]
left_bottom = (80, y_size)
left_top = (x_size / 2 - 50, y_size / 2 + 50)
right_bottom = (x_size - 80, y_size)
right_top = (x_size / 2 + 50, y_size / 2 + 50)
#vertices = np.array([[left_bottom, left_top, right_top, right_bottom]], dtype=np.int32)
#vertices = np.array([[(280,image.shape[0]),(450, 320), (490, 320), (image.shape[1],image.shape[0])]], dtype=np.int32)
vertices = np.array([[(300,680),(620, 460), (720, 460), (1085,673)]], dtype=np.int32)
roi_image = region_of_interest(canny_image, vertices)
try:
hough_img = hough_lines(roi_image, rho=2, theta=np.pi/180, threshold=50, min_line_len=100, max_line_gap=160)
result= weighted_img(hough_img, image, α=0.8, β=1., γ=0.)
return result
except:
return image
# In[16]:
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
|
[
"matplotlib.image.imread",
"numpy.array",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"cv2.line",
"cv2.addWeighted",
"moviepy.editor.VideoFileClip",
"cv2.fillPoly",
"matplotlib.pyplot.subplot",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"cv2.GaussianBlur",
"cv2.Canny",
"matplotlib.pyplot.show",
"cv2.imwrite",
"os.makedirs",
"cv2.bitwise_and",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.zeros_like"
] |
[((3572, 3619), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/solidWhiteRight.jpg"""'], {}), "('test_images/solidWhiteRight.jpg')\n", (3584, 3619), True, 'import matplotlib.image as mpimg\n'), ((3729, 3746), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (3739, 3746), True, 'import matplotlib.pyplot as plt\n'), ((10679, 10705), 'os.listdir', 'os.listdir', (['"""test_images/"""'], {}), "('test_images/')\n", (10689, 10705), False, 'import os\n'), ((12431, 12456), 'os.listdir', 'os.listdir', (['"""test_images"""'], {}), "('test_images')\n", (12441, 12456), False, 'import os\n'), ((12494, 12521), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)'}), '(figsize=(15, 8))\n', (12504, 12521), True, 'import matplotlib.pyplot as plt\n'), ((12521, 12537), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (12532, 12537), True, 'import matplotlib.pyplot as plt\n'), ((12546, 12594), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/solidYellowCurve.jpg"""'], {}), "('test_images/solidYellowCurve.jpg')\n", (12558, 12594), True, 'import matplotlib.image as mpimg\n'), ((12595, 12612), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (12605, 12612), True, 'import matplotlib.pyplot as plt\n'), ((12613, 12640), 'matplotlib.pyplot.title', 'plt.title', (['"""Original image"""'], {}), "('Original image')\n", (12622, 12640), True, 'import matplotlib.pyplot as plt\n'), ((12642, 12658), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (12653, 12658), True, 'import matplotlib.pyplot as plt\n'), ((12667, 12724), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images_output/whiteCarLaneSwitch.jpg"""'], {}), "('test_images_output/whiteCarLaneSwitch.jpg')\n", (12679, 12724), True, 'import matplotlib.image as mpimg\n'), ((12725, 12742), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (12735, 12742), True, 'import matplotlib.pyplot as plt\n'), ((12743, 12768), 'matplotlib.pyplot.title', 'plt.title', (['"""Output image"""'], {}), "('Output image')\n", (12752, 12768), True, 'import matplotlib.pyplot as plt\n'), ((12769, 12779), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12777, 12779), True, 'import matplotlib.pyplot as plt\n'), ((15152, 15200), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_videos/solidWhiteRight.mp4"""'], {}), "('test_videos/solidWhiteRight.mp4')\n", (15165, 15200), False, 'from moviepy.editor import VideoFileClip\n'), ((16952, 17000), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_videos/solidYellowLeft.mp4"""'], {}), "('test_videos/solidYellowLeft.mp4')\n", (16965, 17000), False, 'from moviepy.editor import VideoFileClip\n'), ((18950, 18992), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_videos/challenge.mp4"""'], {}), "('test_videos/challenge.mp4')\n", (18963, 18992), False, 'from moviepy.editor import VideoFileClip\n'), ((4914, 4951), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (4926, 4951), False, 'import cv2\n'), ((5165, 5210), 'cv2.Canny', 'cv2.Canny', (['img', 'low_threshold', 'high_threshold'], {}), '(img, low_threshold, high_threshold)\n', (5174, 5210), False, 'import cv2\n'), ((5302, 5354), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(kernel_size, kernel_size)', '(0)'], {}), '(img, (kernel_size, kernel_size), 0)\n', (5318, 5354), False, 'import cv2\n'), ((5682, 5700), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), 
'(img)\n', (5695, 5700), True, 'import numpy as np\n'), ((6099, 6146), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'vertices', 'ignore_mask_color'], {}), '(mask, vertices, ignore_mask_color)\n', (6111, 6146), False, 'import cv2\n'), ((6231, 6257), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (6246, 6257), False, 'import cv2\n'), ((9824, 9881), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1], 3)'], {'dtype': 'np.uint8'}), '((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n', (9832, 9881), True, 'import numpy as np\n'), ((10420, 10462), 'cv2.addWeighted', 'cv2.addWeighted', (['initial_img', 'α', 'img', 'β', 'γ'], {}), '(initial_img, α, img, β, γ)\n', (10435, 10462), False, 'import cv2\n'), ((11229, 11253), 'matplotlib.image.imread', 'mpimg.imread', (['image_path'], {}), '(image_path)\n', (11241, 11253), True, 'import matplotlib.image as mpimg\n'), ((11426, 11538), 'numpy.array', 'np.array', (['[[(80, image.shape[0]), (450, 320), (490, 320), (image.shape[1], image.\n shape[0])]]'], {'dtype': 'np.int32'}), '([[(80, image.shape[0]), (450, 320), (490, 320), (image.shape[1],\n image.shape[0])]], dtype=np.int32)\n', (11434, 11538), True, 'import numpy as np\n'), ((12073, 12098), 'os.listdir', 'os.listdir', (['source_folder'], {}), '(source_folder)\n', (12083, 12098), False, 'import os\n'), ((14204, 14316), 'numpy.array', 'np.array', (['[[(80, image.shape[0]), (450, 320), (490, 320), (image.shape[1], image.\n shape[0])]]'], {'dtype': 'np.int32'}), '([[(80, image.shape[0]), (450, 320), (490, 320), (image.shape[1],\n image.shape[0])]], dtype=np.int32)\n', (14212, 14316), True, 'import numpy as np\n'), ((17347, 17383), 'cv2.imwrite', 'cv2.imwrite', (['"""image_test.jpg"""', 'image'], {}), "('image_test.jpg', image)\n", (17358, 17383), False, 'import cv2\n'), ((17544, 17592), 'cv2.imwrite', 'cv2.imwrite', (['"""image_test_canny.jpg"""', 'canny_image'], {}), "('image_test_canny.jpg', canny_image)\n", (17555, 17592), False, 'import cv2\n'), ((18062, 18139), 'numpy.array', 'np.array', (['[[(300, 680), (620, 460), (720, 460), (1085, 673)]]'], {'dtype': 'np.int32'}), '([[(300, 680), (620, 460), (720, 460), (1085, 673)]], dtype=np.int32)\n', (18070, 18139), True, 'import numpy as np\n'), ((9339, 9394), 'cv2.line', 'cv2.line', (['img', '(x4_l, y4)', '(x3_l, y3)', 'color', 'thickness'], {}), '(img, (x4_l, y4), (x3_l, y3), color, thickness)\n', (9347, 9394), False, 'import cv2\n'), ((9405, 9460), 'cv2.line', 'cv2.line', (['img', '(x4_r, y4)', '(x3_r, y3)', 'color', 'thickness'], {}), '(img, (x4_r, y4), (x3_r, y3), color, thickness)\n', (9413, 9460), False, 'import cv2\n'), ((9742, 9754), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9750, 9754), True, 'import numpy as np\n'), ((11920, 11954), 'os.path.exists', 'os.path.exists', (['destination_folder'], {}), '(destination_folder)\n', (11934, 11954), False, 'import os\n'), ((11964, 11995), 'os.makedirs', 'os.makedirs', (['destination_folder'], {}), '(destination_folder)\n', (11975, 11995), False, 'import os\n'), ((12283, 12322), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_RGB2BGR'], {}), '(output, cv2.COLOR_RGB2BGR)\n', (12295, 12322), False, 'import cv2\n')]
|
import numpy
from scipy.spatial import distance
import matplotlib.pyplot as plt
import math
import matplotlib.ticker as mtick
freqs = [20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000, 10000, 12500]
def cosine_distance(a, b, weight = None):
assert len(a) == len(b)
if weight is None:
weight = [1.0] * len(a)
ab_sum, a_sum, b_sum = 0, 0, 0
for ai, bi, wi in zip(a, b, weight):
ab_sum += ai * bi
a_sum += ai * ai
b_sum += bi * bi
return 1 - ab_sum / math.sqrt(a_sum * b_sum)
# from scipy
def _validate_weights(w, dtype=numpy.double):
w = _validate_vector(w, dtype=dtype)
if numpy.any(w < 0):
raise ValueError("Input weights should be all non-negative")
return w
# from scipy
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = numpy.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = numpy.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
# from scipy
def dist_cosine(u, v, w=None):
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
uv = numpy.average(u * v, weights=w)
uu = numpy.average(numpy.square(u), weights=w)
vv = numpy.average(numpy.square(v), weights=w)
dist = 1.0 - uv / numpy.sqrt(uu * vv)
return dist
def autocolor(bar):
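    # Color bars red when the cosine similarity exceeds 0.995.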
for col in bar:
if col.get_height() > 0.995:
col.set_color('r')
trigger = [40.49, 39.14, 34.47, 30.5, 39.54, 31.98, 38.37, 43.84, 36.09, 43.72, 40.55, 39.25, 39.15, 38.36, 38.3, 36.58,
39.9, 47.76, 51.64, 37.2, 44.89, 46.6, 51.08, 37.77, 28, 29.59, 30.25, 23.16, 25.74]
weight = [0.04,0.04,0.04,0.04,0.04,0.04,0.04,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14,0.14, 0.24, 0.41,
0.60, 0.80, 0.94, 1.0, 0.94, 0.80, 0.60, 0.41]
ref_spectrum = numpy.genfromtxt('test/test2_far.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test1_spectrum = numpy.genfromtxt('test/test1_near.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test2_spectrum = numpy.genfromtxt('test/test2_far_far.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
test3_spectrum = numpy.genfromtxt('test/test_background.csv', delimiter=',', skip_header=1, usecols=range(5, 34))
dist0 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, ref_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist1 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test1_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist2 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test2_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist3 = numpy.ones(len(ref_spectrum)) - [distance.cosine(trigger, test3_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
dist0_bis = numpy.ones(len(ref_spectrum)) - [dist_cosine(trigger, ref_spectrum[idfreq], w=weight) for idfreq in range(len(ref_spectrum))]
#print(numpy.around(dist0_bis - dist0, 3))
ref_spectrum = numpy.rot90(ref_spectrum)
test1_spectrum = numpy.rot90(test1_spectrum)
test2_spectrum = numpy.rot90(test2_spectrum)
test3_spectrum = numpy.rot90(test3_spectrum)
fig, axes = plt.subplots(nrows=4, ncols=3, constrained_layout=True)
gs = axes[0, 0].get_gridspec()
axes[0, 1].imshow(ref_spectrum)
autocolor(axes[0, 2].bar(numpy.arange(len(dist0)), dist0))
axes[1, 1].imshow(test1_spectrum)
autocolor(axes[1, 2].bar(numpy.arange(len(dist1)), dist1))
axes[2, 1].imshow(test2_spectrum)
autocolor(axes[2, 2].bar(numpy.arange(len(dist2)), dist2))
axes[3, 1].imshow(test3_spectrum)
axes[3, 2].bar(numpy.arange(len(dist2)), dist3)
for ax in axes[0:, 0]:
ax.remove()
axbig = fig.add_subplot(gs[0:, 0])
axbig.set_title("Spectrum trigger")
axbig.imshow(numpy.rot90([trigger]))
for i in range(len(axes)):
axes[i, 2].set_ylim([0.95, 1.0])
axes[i, 1].set_yticks(range(len(freqs))[::5])
axes[i, 1].set_yticklabels([str(ylab) + " Hz" for ylab in freqs[::5]][::-1])
axes[i, 1].set_xticks(range(len(ref_spectrum[0]))[::20])
    axes[i, 1].set_xticklabels([str(xlabel) + " s" for xlabel in numpy.arange(0, 10, 0.125)][::20])
axes[i, 2].set_xticks(range(len(ref_spectrum[0]))[::20])
    axes[i, 2].set_xticklabels([str(xlabel) + " s" for xlabel in numpy.arange(0, 10, 0.125)][::20])
axes[i, 2].set_ylabel("Cosine similarity (%)")
axes[i, 2].yaxis.set_major_formatter(mtick.PercentFormatter(1.0))
axes[i, 1].set_title("Spectrogram "+str(i)+" (dB)")
axbig.set_yticks(range(len(freqs)))
axbig.set_yticklabels([str(ylab) + " Hz" for ylab in freqs][::-1])
axbig.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
plt.show()
|
[
"scipy.spatial.distance.cosine",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.ticker.PercentFormatter",
"math.sqrt",
"numpy.asarray",
"numpy.any",
"numpy.square",
"numpy.rot90",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.atleast_1d"
] |
[((3222, 3247), 'numpy.rot90', 'numpy.rot90', (['ref_spectrum'], {}), '(ref_spectrum)\n', (3233, 3247), False, 'import numpy\n'), ((3266, 3293), 'numpy.rot90', 'numpy.rot90', (['test1_spectrum'], {}), '(test1_spectrum)\n', (3277, 3293), False, 'import numpy\n'), ((3312, 3339), 'numpy.rot90', 'numpy.rot90', (['test2_spectrum'], {}), '(test2_spectrum)\n', (3323, 3339), False, 'import numpy\n'), ((3358, 3385), 'numpy.rot90', 'numpy.rot90', (['test3_spectrum'], {}), '(test3_spectrum)\n', (3369, 3385), False, 'import numpy\n'), ((3399, 3454), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'ncols': '(3)', 'constrained_layout': '(True)'}), '(nrows=4, ncols=3, constrained_layout=True)\n', (3411, 3454), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5147, 5149), True, 'import matplotlib.pyplot as plt\n'), ((726, 742), 'numpy.any', 'numpy.any', (['(w < 0)'], {}), '(w < 0)\n', (735, 742), False, 'import numpy\n'), ((1052, 1071), 'numpy.atleast_1d', 'numpy.atleast_1d', (['u'], {}), '(u)\n', (1068, 1071), False, 'import numpy\n'), ((1325, 1356), 'numpy.average', 'numpy.average', (['(u * v)'], {'weights': 'w'}), '(u * v, weights=w)\n', (1338, 1356), False, 'import numpy\n'), ((3981, 4003), 'numpy.rot90', 'numpy.rot90', (['[trigger]'], {}), '([trigger])\n', (3992, 4003), False, 'import numpy\n'), ((1380, 1395), 'numpy.square', 'numpy.square', (['u'], {}), '(u)\n', (1392, 1395), False, 'import numpy\n'), ((1431, 1446), 'numpy.square', 'numpy.square', (['v'], {}), '(v)\n', (1443, 1446), False, 'import numpy\n'), ((2506, 2562), 'scipy.spatial.distance.cosine', 'distance.cosine', (['trigger', 'ref_spectrum[idfreq]'], {'w': 'weight'}), '(trigger, ref_spectrum[idfreq], w=weight)\n', (2521, 2562), False, 'from scipy.spatial import distance\n'), ((2644, 2702), 'scipy.spatial.distance.cosine', 'distance.cosine', (['trigger', 'test1_spectrum[idfreq]'], {'w': 'weight'}), '(trigger, test1_spectrum[idfreq], w=weight)\n', (2659, 2702), False, 'from scipy.spatial import distance\n'), ((2784, 2842), 'scipy.spatial.distance.cosine', 'distance.cosine', (['trigger', 'test2_spectrum[idfreq]'], {'w': 'weight'}), '(trigger, test2_spectrum[idfreq], w=weight)\n', (2799, 2842), False, 'from scipy.spatial import distance\n'), ((2924, 2982), 'scipy.spatial.distance.cosine', 'distance.cosine', (['trigger', 'test3_spectrum[idfreq]'], {'w': 'weight'}), '(trigger, test3_spectrum[idfreq], w=weight)\n', (2939, 2982), False, 'from scipy.spatial import distance\n'), ((4629, 4656), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (4651, 4656), True, 'import matplotlib.ticker as mtick\n'), ((593, 617), 'math.sqrt', 'math.sqrt', (['(a_sum * b_sum)'], {}), '(a_sum * b_sum)\n', (602, 617), False, 'import math\n'), ((926, 966), 'numpy.asarray', 'numpy.asarray', (['u'], {'dtype': 'dtype', 'order': '"""c"""'}), "(u, dtype=dtype, order='c')\n", (939, 966), False, 'import numpy\n'), ((1481, 1500), 'numpy.sqrt', 'numpy.sqrt', (['(uu * vv)'], {}), '(uu * vv)\n', (1491, 1500), False, 'import numpy\n'), ((4334, 4360), 'numpy.arange', 'numpy.arange', (['(0)', '(10)', '(0.125)'], {}), '(0, 10, 0.125)\n', (4346, 4360), False, 'import numpy\n'), ((4502, 4528), 'numpy.arange', 'numpy.arange', (['(0)', '(10)', '(0.125)'], {}), '(0, 10, 0.125)\n', (4514, 4528), False, 'import numpy\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 21 17:05:48 2022
@author: <NAME>
"""
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, balanced_accuracy_score, confusion_matrix
from ibllib.atlas import BrainRegions
from joblib import load
from model_functions import load_channel_data, load_trained_model
import matplotlib.pyplot as plt
import seaborn as sns
br = BrainRegions()
# Settings
FEATURES = ['psd_delta', 'psd_theta', 'psd_alpha', 'psd_beta', 'psd_gamma', 'rms_ap', 'rms_lf',
'spike_rate', 'axial_um', 'x', 'y', 'depth']
# Load in data
chan_volt = load_channel_data()
# chan_volt = pd.read_parquet("/home/sebastian/Downloads/FlatIron/tables/channels_voltage_features.pqt")
chan_volt = chan_volt.loc[~chan_volt['rms_ap'].isnull()] # remove NaNs
# 31d8dfb1-71fd-4c53-9229-7cd48bee07e4 64d04585-67e7-4320-baad-8d4589fd18f7
if True:
test = chan_volt.loc[['31d8dfb1-71fd-4c53-9229-7cd48bee07e4', '64d04585-67e7-4320-baad-8d4589fd18f7'], : ]
else:
test = chan_volt
feature_arr = test[FEATURES].to_numpy()
regions = test['cosmos_acronyms'].values
# Load model
clf = load_trained_model('channels', 'cosmos')
# Decode brain regions
print('Decoding brain regions..')
predictions = clf.predict(feature_arr)
probs = clf.predict_proba(feature_arr)
# histogram of response probabilities
certainties = probs.max(1)
plt.hist(certainties)
plt.close()
# calibration plot: how confident correct versus incorrect predictions are
plt.hist(certainties[regions == predictions], label='Correct predictions')
plt.hist(certainties[regions != predictions], label='Wrong predictions')
plt.title("Model calibration", size=24)
plt.legend(frameon=False, fontsize=16)
plt.ylabel("Occurences", size=21)
plt.xlabel("Prob for predicted region", size=21)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
sns.despine()
plt.tight_layout()
plt.savefig("/home/sebastian/Pictures/calibration")
plt.close()
# compute accuracy and balanced accuracy for our highly imbalanced dataset
acc = accuracy_score(regions, predictions)
bacc = balanced_accuracy_score(regions, predictions)
print(f'Accuracy: {acc*100:.1f}%')
print(f'Balanced accuracy: {bacc*100:.1f}%')
# compute confusion matrix
names = np.unique(np.append(regions, predictions))
cm = confusion_matrix(regions, predictions, labels=names)
cm = cm / cm.sum(1)[:, None]
cm_copy = cm.copy()
# list top n classifications
n = 10
np.max(cm[~np.isnan(cm)])
cm[np.isnan(cm)] = 0
for i in range(n):
ind = np.unravel_index(np.argmax(cm, axis=None), cm.shape)
if ind[0] != ind[1]:
print("Top {} classification, mistake: {} gets classified as {}".format(i+1, names[ind[0]], names[ind[1]]))
else:
print("Top {} classification, success: {} gets classified as {}".format(i+1, names[ind[0]], names[ind[1]]))
cm[ind] = 0
# plot confusion matrix
plt.imshow(cm_copy)
plt.yticks(range(len(names)), names)
plt.xticks(range(len(names)), names, rotation='65')
plt.show()
|
[
"matplotlib.pyplot.hist",
"sklearn.metrics.balanced_accuracy_score",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.imshow",
"seaborn.despine",
"model_functions.load_channel_data",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"numpy.argmax",
"numpy.isnan",
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"model_functions.load_trained_model",
"numpy.append",
"ibllib.atlas.BrainRegions",
"matplotlib.pyplot.tight_layout"
] |
[((450, 464), 'ibllib.atlas.BrainRegions', 'BrainRegions', ([], {}), '()\n', (462, 464), False, 'from ibllib.atlas import BrainRegions\n'), ((658, 677), 'model_functions.load_channel_data', 'load_channel_data', ([], {}), '()\n', (675, 677), False, 'from model_functions import load_channel_data, load_trained_model\n'), ((1180, 1220), 'model_functions.load_trained_model', 'load_trained_model', (['"""channels"""', '"""cosmos"""'], {}), "('channels', 'cosmos')\n", (1198, 1220), False, 'from model_functions import load_channel_data, load_trained_model\n'), ((1423, 1444), 'matplotlib.pyplot.hist', 'plt.hist', (['certainties'], {}), '(certainties)\n', (1431, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1456), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1454, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1535, 1609), 'matplotlib.pyplot.hist', 'plt.hist', (['certainties[regions == predictions]'], {'label': '"""Correct predictions"""'}), "(certainties[regions == predictions], label='Correct predictions')\n", (1543, 1609), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1682), 'matplotlib.pyplot.hist', 'plt.hist', (['certainties[regions != predictions]'], {'label': '"""Wrong predictions"""'}), "(certainties[regions != predictions], label='Wrong predictions')\n", (1618, 1682), True, 'import matplotlib.pyplot as plt\n'), ((1683, 1722), 'matplotlib.pyplot.title', 'plt.title', (['"""Model calibration"""'], {'size': '(24)'}), "('Model calibration', size=24)\n", (1692, 1722), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1761), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'fontsize': '(16)'}), '(frameon=False, fontsize=16)\n', (1733, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1795), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Occurences"""'], {'size': '(21)'}), "('Occurences', size=21)\n", (1772, 1795), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1844), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Prob for predicted region"""'], {'size': '(21)'}), "('Prob for predicted region', size=21)\n", (1806, 1844), True, 'import matplotlib.pyplot as plt\n'), ((1845, 1868), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (1855, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1869, 1892), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (1879, 1892), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1907), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1905, 1907), True, 'import seaborn as sns\n'), ((1908, 1926), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1924, 1926), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1978), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/home/sebastian/Pictures/calibration"""'], {}), "('/home/sebastian/Pictures/calibration')\n", (1938, 1978), True, 'import matplotlib.pyplot as plt\n'), ((1979, 1990), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1988, 1990), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2100), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['regions', 'predictions'], {}), '(regions, predictions)\n', (2078, 2100), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, confusion_matrix\n'), ((2108, 2153), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['regions', 'predictions'], {}), '(regions, predictions)\n', (2131, 2153), False, 'from sklearn.metrics import 
accuracy_score, balanced_accuracy_score, confusion_matrix\n'), ((2319, 2371), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['regions', 'predictions'], {'labels': 'names'}), '(regions, predictions, labels=names)\n', (2335, 2371), False, 'from sklearn.metrics import accuracy_score, balanced_accuracy_score, confusion_matrix\n'), ((2896, 2915), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm_copy'], {}), '(cm_copy)\n', (2906, 2915), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3013, 3015), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2312), 'numpy.append', 'np.append', (['regions', 'predictions'], {}), '(regions, predictions)\n', (2290, 2312), True, 'import numpy as np\n'), ((2488, 2500), 'numpy.isnan', 'np.isnan', (['cm'], {}), '(cm)\n', (2496, 2500), True, 'import numpy as np\n'), ((2552, 2576), 'numpy.argmax', 'np.argmax', (['cm'], {'axis': 'None'}), '(cm, axis=None)\n', (2561, 2576), True, 'import numpy as np\n'), ((2470, 2482), 'numpy.isnan', 'np.isnan', (['cm'], {}), '(cm)\n', (2478, 2482), True, 'import numpy as np\n')]
|
import struct
import numpy as np
import pandas as pd
df_train = pd.read_csv('../data/train_data.csv')
df_valid = pd.read_csv('../data/valid_data.csv')
df_test = pd.read_csv('../data/test_data.csv')
with open('result.dat', 'rb') as f:
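    # Binary layout: int32 N, int32 no_dims, then N * no_dims float64 coordinates.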
N, = struct.unpack('i', f.read(4))
no_dims, = struct.unpack('i', f.read(4))
print(N, no_dims)
mappedX = struct.unpack('{}d'.format(N * no_dims), f.read(8 * N * no_dims))
mappedX = np.array(mappedX).reshape((N, no_dims))
print(mappedX)
tsne_train = mappedX[:len(df_train)]
tsne_valid = mappedX[len(df_train):len(df_train)+len(df_valid)]
tsne_test = mappedX[len(df_train)+len(df_valid):]
assert(len(tsne_train) == len(df_train))
assert(len(tsne_valid) == len(df_valid))
assert(len(tsne_test) == len(df_test))
save_path = '../data/tsne_{}d_30p.npz'.format(no_dims)
np.savez(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)
print('Saved: {}'.format(save_path))
# landmarks, = struct.unpack('{}i'.format(N), f.read(4 * N))
# costs, = struct.unpack('{}d'.format(N), f.read(8 * N))
|
[
"numpy.array",
"numpy.savez",
"pandas.read_csv"
] |
[((65, 102), 'pandas.read_csv', 'pd.read_csv', (['"""../data/train_data.csv"""'], {}), "('../data/train_data.csv')\n", (76, 102), True, 'import pandas as pd\n'), ((114, 151), 'pandas.read_csv', 'pd.read_csv', (['"""../data/valid_data.csv"""'], {}), "('../data/valid_data.csv')\n", (125, 151), True, 'import pandas as pd\n'), ((162, 198), 'pandas.read_csv', 'pd.read_csv', (['"""../data/test_data.csv"""'], {}), "('../data/test_data.csv')\n", (173, 198), True, 'import pandas as pd\n'), ((858, 929), 'numpy.savez', 'np.savez', (['save_path'], {'train': 'tsne_train', 'valid': 'tsne_valid', 'test': 'tsne_test'}), '(save_path, train=tsne_train, valid=tsne_valid, test=tsne_test)\n', (866, 929), True, 'import numpy as np\n'), ((437, 454), 'numpy.array', 'np.array', (['mappedX'], {}), '(mappedX)\n', (445, 454), True, 'import numpy as np\n')]
|
import pygame;
import numpy as np;
from math import sin, cos;
pygame.init();
width, height, depth = 640, 480, 800;
camera = [width // 2, height // 2, depth];
units_x, units_y, units_z = 8, 8, 8;
scale_x, scale_y, scale_z = width / units_x, height / units_y, depth / units_z;
screen = pygame.display.set_mode((width, height));
pygame.display.set_caption("3D perspective projection test");
pygame.key.set_repeat(100, 50);
def scale(p):
""" scale a point by the number of pixels per unit in each direction """
return p[0] * scale_x, p[1] * scale_y, p[2] * scale_z;
def translate_to_screen(p):
""" convert from projected cartesian coordinates to canvas coordinates """
return p[0] + width // 2, height // 2 - p[1];
def project(p):
""" project a point onto the 2D plane """
proj_x = (camera[2] * (p[0] - camera[0])) / (camera[2] + p[2]) + camera[0];
proj_y = (camera[2] * (p[1] - camera[1])) / (camera[2] + p[2]) + camera[1];
return proj_x, proj_y;
def rproj(a, tx, ty, tz):
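    """ rotate the point about the camera by angles tx, ty, tz, then perspective-project onto the screen plane """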
rotation = rot_mat_x(tx).dot(rot_mat_y(ty)).dot(rot_mat_z(tz));
sub = np.array([a]) - np.array([camera]);
d = list(sub.dot(rotation)[0]);
e = width, height, depth;
return e[2] / d[2] * d[0] + e[0], e[2] / d[2] * d[1] + e[1];
def screen_point(p):
""" convert a point in 3D cartesian space to a point in 2D canvas space """
return translate_to_screen(project(scale(p)));
def project_triangle(tri):
""" return the screen coordinates of a triangle """
angs = (tx, ty, tz);
return rproj(tri[0], *angs), rproj(tri[1], *angs), rproj(tri[2], *angs);
## return screen_point(tri[0]), screen_point(tri[1]), screen_point(tri[2]);
def project_line(line):
""" return the screen coordinates of a line """
return screen_point(line[0]), screen_point(line[1]);
def rot_mat_x(theta):
return np.array([
[1, 0, 0],
[0, cos(theta), -sin(theta)],
[0, sin(theta), cos(theta)],
]);
def rot_mat_y(theta):
return np.array([
[cos(theta), 0, sin(theta)],
[0, 1, 0],
[-sin(theta), 0, cos(theta)],
]);
def rot_mat_z(theta):
return np.array([
[cos(theta), -sin(theta), 0],
[sin(theta), cos(theta), 0],
[0, 0, 1],
]);
triangle = ((1, 1, 1), (2, 2, 2), (1, 2, 1));
x_axis = ((-2, 0, 0), (2, 0, 0));
y_axis = ((0, -2, 0), (0, 2, 0));
z_axis = ((0, 0, -2), (0, 0, 2));
tx, ty, tz = 0, 0, 0;
clock = pygame.time.Clock();
running = True;
while running:
screen.fill((255, 255, 200));
proj_triangle = project_triangle(triangle);
pygame.draw.polygon(screen, (255, 0, 200), proj_triangle);
pygame.draw.polygon(screen, (0, 0, 0), proj_triangle, 1);
pygame.draw.rect(screen, (255, 0, 0), (*proj_triangle[0], 10, 10));
pygame.draw.rect(screen, (0, 255, 0), (*proj_triangle[1], 10, 10));
pygame.draw.rect(screen, (0, 0, 255), (*proj_triangle[2], 10, 10));
## proj_ax, proj_ay, proj_az = project_line(x_axis), project_line(y_axis), project_line(z_axis);
## pygame.draw.line(screen, (255, 0, 0), proj_ax[0], proj_ax[1], 1);
## pygame.draw.line(screen, (0, 255, 0), proj_ay[0], proj_ay[1], 1);
## pygame.draw.line(screen, (0, 0, 255), proj_az[0], proj_az[1], 1);
pygame.display.flip();
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False;
break;
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
#camera[0] -= 25;
## camera = list(np.array([camera]).dot(rot_mat_y(0.2).dot(rot_mat_z(0.1)))[0]);
tx += 0.1;
elif event.key == pygame.K_RIGHT:
#camera[0] += 25;
## camera = list(np.array([camera]).dot(rot_mat_z(-0.1))[0]);
tx -= 0.1;
elif event.key == pygame.K_UP:
ty += 0.1;
elif event.key == pygame.K_DOWN:
ty -= 0.1;
elif event.key == pygame.K_SPACE:
print(camera);
elif event.key == pygame.K_ESCAPE:
running = False;
break;
clock.tick(30);
pygame.quit();
|
[
"pygame.key.set_repeat",
"pygame.display.set_caption",
"pygame.draw.polygon",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"math.cos",
"numpy.array",
"pygame.draw.rect",
"pygame.time.Clock",
"math.sin"
] |
[((62, 75), 'pygame.init', 'pygame.init', ([], {}), '()\n', (73, 75), False, 'import pygame\n'), ((286, 326), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)'], {}), '((width, height))\n', (309, 326), False, 'import pygame\n'), ((328, 388), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""3D perspective projection test"""'], {}), "('3D perspective projection test')\n", (354, 388), False, 'import pygame\n'), ((390, 420), 'pygame.key.set_repeat', 'pygame.key.set_repeat', (['(100)', '(50)'], {}), '(100, 50)\n', (411, 420), False, 'import pygame\n'), ((2407, 2426), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2424, 2426), False, 'import pygame\n'), ((4128, 4141), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4139, 4141), False, 'import pygame\n'), ((2546, 2603), 'pygame.draw.polygon', 'pygame.draw.polygon', (['screen', '(255, 0, 200)', 'proj_triangle'], {}), '(screen, (255, 0, 200), proj_triangle)\n', (2565, 2603), False, 'import pygame\n'), ((2609, 2665), 'pygame.draw.polygon', 'pygame.draw.polygon', (['screen', '(0, 0, 0)', 'proj_triangle', '(1)'], {}), '(screen, (0, 0, 0), proj_triangle, 1)\n', (2628, 2665), False, 'import pygame\n'), ((2671, 2737), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(255, 0, 0)', '(*proj_triangle[0], 10, 10)'], {}), '(screen, (255, 0, 0), (*proj_triangle[0], 10, 10))\n', (2687, 2737), False, 'import pygame\n'), ((2743, 2809), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(0, 255, 0)', '(*proj_triangle[1], 10, 10)'], {}), '(screen, (0, 255, 0), (*proj_triangle[1], 10, 10))\n', (2759, 2809), False, 'import pygame\n'), ((2815, 2881), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', '(0, 0, 255)', '(*proj_triangle[2], 10, 10)'], {}), '(screen, (0, 0, 255), (*proj_triangle[2], 10, 10))\n', (2831, 2881), False, 'import pygame\n'), ((3203, 3224), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3222, 3224), False, 'import pygame\n'), ((3248, 3266), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3264, 3266), False, 'import pygame\n'), ((1070, 1083), 'numpy.array', 'np.array', (['[a]'], {}), '([a])\n', (1078, 1083), True, 'import numpy as np\n'), ((1086, 1104), 'numpy.array', 'np.array', (['[camera]'], {}), '([camera])\n', (1094, 1104), True, 'import numpy as np\n'), ((1861, 1871), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1864, 1871), False, 'from math import sin, cos\n'), ((1899, 1909), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1902, 1909), False, 'from math import sin, cos\n'), ((1911, 1921), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1914, 1921), False, 'from math import sin, cos\n'), ((1986, 1996), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1989, 1996), False, 'from math import sin, cos\n'), ((2001, 2011), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2004, 2011), False, 'from math import sin, cos\n'), ((2058, 2068), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2061, 2068), False, 'from math import sin, cos\n'), ((2133, 2143), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2136, 2143), False, 'from math import sin, cos\n'), ((2171, 2181), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2174, 2181), False, 'from math import sin, cos\n'), ((2183, 2193), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2186, 2193), False, 'from math import sin, cos\n'), ((1874, 1884), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1877, 1884), False, 'from math import sin, cos\n'), ((2043, 2053), 'math.sin', 'sin', (['theta'], {}), 
'(theta)\n', (2046, 2053), False, 'from math import sin, cos\n'), ((2146, 2156), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2149, 2156), False, 'from math import sin, cos\n')]
|
import pytest
from easydict import EasyDict
import numpy as np
import gym
from copy import deepcopy
from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure
from ding.envs.env.tests import DemoEnv
@pytest.mark.unittest
def test_an_implemented_env():
demo_env = DemoEnv({})
check_all(demo_env)
demonstrate_correct_procedure(DemoEnv)
@pytest.mark.unittest
def test_check_array_space():
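    # The float32 entries do not match the int64 Box spaces, so each check below is expected to raise.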
seq_array = (np.array([1, 2, 3], dtype=np.int64), np.array([4., 5., 6.], dtype=np.float32))
seq_space = [gym.spaces.Box(low=0, high=10, shape=(3, ), dtype=np.int64) for _ in range(2)]
with pytest.raises(AssertionError):
check_array_space(seq_array, seq_space, 'test_sequence')
dict_array = {'a': np.array([1, 2, 3], dtype=np.int64), 'b': np.array([4., 5., 6.], dtype=np.float32)}
int_box = gym.spaces.Box(low=0, high=10, shape=(3, ), dtype=np.int64)
dict_space = {'a': deepcopy(int_box), 'b': deepcopy(int_box)}
with pytest.raises(AssertionError):
check_array_space(dict_array, dict_space, 'test_dict')
with pytest.raises(TypeError):
check_array_space(1, dict_space, 'test_type_error')
@pytest.mark.unittest
def test_check_different_memory():
int_seq = np.array([1, 2, 3], dtype=np.int64)
seq_array1 = (int_seq, np.array([4., 5., 6.], dtype=np.float32))
seq_array2 = (int_seq, np.array([4., 5., 6.], dtype=np.float32))
with pytest.raises(AssertionError):
check_different_memory(seq_array1, seq_array2, -1)
dict_array1 = {'a': np.array([4., 5., 6.], dtype=np.float32), 'b': int_seq}
dict_array2 = {'a': np.array([4., 5., 6.], dtype=np.float32), 'b': int_seq}
with pytest.raises(AssertionError):
check_different_memory(dict_array1, dict_array2, -1)
with pytest.raises(AssertionError):
check_different_memory(1, dict_array1, -1)
with pytest.raises(TypeError):
check_different_memory(1, 2, -1)
|
[
"ding.envs.env.tests.DemoEnv",
"ding.envs.env.demonstrate_correct_procedure",
"gym.spaces.Box",
"numpy.array",
"pytest.raises",
"ding.envs.env.check_array_space",
"ding.envs.env.check_different_memory",
"copy.deepcopy",
"ding.envs.env.check_all"
] |
[((321, 332), 'ding.envs.env.tests.DemoEnv', 'DemoEnv', (['{}'], {}), '({})\n', (328, 332), False, 'from ding.envs.env.tests import DemoEnv\n'), ((337, 356), 'ding.envs.env.check_all', 'check_all', (['demo_env'], {}), '(demo_env)\n', (346, 356), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((361, 399), 'ding.envs.env.demonstrate_correct_procedure', 'demonstrate_correct_procedure', (['DemoEnv'], {}), '(DemoEnv)\n', (390, 399), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((873, 931), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(10)', 'shape': '(3,)', 'dtype': 'np.int64'}), '(low=0, high=10, shape=(3,), dtype=np.int64)\n', (887, 931), False, 'import gym\n'), ((1271, 1306), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int64'}), '([1, 2, 3], dtype=np.int64)\n', (1279, 1306), True, 'import numpy as np\n'), ((471, 506), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int64'}), '([1, 2, 3], dtype=np.int64)\n', (479, 506), True, 'import numpy as np\n'), ((508, 551), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0]'], {'dtype': 'np.float32'}), '([4.0, 5.0, 6.0], dtype=np.float32)\n', (516, 551), True, 'import numpy as np\n'), ((567, 625), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(10)', 'shape': '(3,)', 'dtype': 'np.int64'}), '(low=0, high=10, shape=(3,), dtype=np.int64)\n', (581, 625), False, 'import gym\n'), ((655, 684), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (668, 684), False, 'import pytest\n'), ((694, 750), 'ding.envs.env.check_array_space', 'check_array_space', (['seq_array', 'seq_space', '"""test_sequence"""'], {}), "(seq_array, seq_space, 'test_sequence')\n", (711, 750), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((775, 810), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int64'}), '([1, 2, 3], dtype=np.int64)\n', (783, 810), True, 'import numpy as np\n'), ((817, 860), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0]'], {'dtype': 'np.float32'}), '([4.0, 5.0, 6.0], dtype=np.float32)\n', (825, 860), True, 'import numpy as np\n'), ((956, 973), 'copy.deepcopy', 'deepcopy', (['int_box'], {}), '(int_box)\n', (964, 973), False, 'from copy import deepcopy\n'), ((980, 997), 'copy.deepcopy', 'deepcopy', (['int_box'], {}), '(int_box)\n', (988, 997), False, 'from copy import deepcopy\n'), ((1008, 1037), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1021, 1037), False, 'import pytest\n'), ((1047, 1101), 'ding.envs.env.check_array_space', 'check_array_space', (['dict_array', 'dict_space', '"""test_dict"""'], {}), "(dict_array, dict_space, 'test_dict')\n", (1064, 1101), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((1112, 1136), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1125, 1136), False, 'import pytest\n'), ((1146, 1197), 'ding.envs.env.check_array_space', 'check_array_space', (['(1)', 'dict_space', '"""test_type_error"""'], {}), "(1, dict_space, 'test_type_error')\n", (1163, 1197), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((1334, 1377), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0]'], {'dtype': 'np.float32'}), '([4.0, 5.0, 6.0], 
dtype=np.float32)\n', (1342, 1377), True, 'import numpy as np\n'), ((1403, 1446), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0]'], {'dtype': 'np.float32'}), '([4.0, 5.0, 6.0], dtype=np.float32)\n', (1411, 1446), True, 'import numpy as np\n'), ((1454, 1483), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1467, 1483), False, 'import pytest\n'), ((1493, 1543), 'ding.envs.env.check_different_memory', 'check_different_memory', (['seq_array1', 'seq_array2', '(-1)'], {}), '(seq_array1, seq_array2, -1)\n', (1515, 1543), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((1569, 1612), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0]'], {'dtype': 'np.float32'}), '([4.0, 5.0, 6.0], dtype=np.float32)\n', (1577, 1612), True, 'import numpy as np\n'), ((1649, 1692), 'numpy.array', 'np.array', (['[4.0, 5.0, 6.0]'], {'dtype': 'np.float32'}), '([4.0, 5.0, 6.0], dtype=np.float32)\n', (1657, 1692), True, 'import numpy as np\n'), ((1714, 1743), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1727, 1743), False, 'import pytest\n'), ((1753, 1805), 'ding.envs.env.check_different_memory', 'check_different_memory', (['dict_array1', 'dict_array2', '(-1)'], {}), '(dict_array1, dict_array2, -1)\n', (1775, 1805), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((1816, 1845), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1829, 1845), False, 'import pytest\n'), ((1855, 1897), 'ding.envs.env.check_different_memory', 'check_different_memory', (['(1)', 'dict_array1', '(-1)'], {}), '(1, dict_array1, -1)\n', (1877, 1897), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n'), ((1907, 1931), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1920, 1931), False, 'import pytest\n'), ((1941, 1973), 'ding.envs.env.check_different_memory', 'check_different_memory', (['(1)', '(2)', '(-1)'], {}), '(1, 2, -1)\n', (1963, 1973), False, 'from ding.envs.env import check_array_space, check_different_memory, check_all, demonstrate_correct_procedure\n')]
|
import sys
sys.path.insert(0, '/home/hena/caffe-ocr/buildcmake/install/python')
sys.path.insert(0, '/home/hena/tool/protobuf-3.1.0/python')
import caffe
import math
import numpy as np
def SoftMax(net_ans):
tmp_net = [math.exp(i) for i in net_ans]
sum_exp = sum(tmp_net)
return [i/sum_exp for i in tmp_net]
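# Note (not part of the original layer): math.exp overflows for large logits; a
# common numerically safer variant subtracts the max first, e.g.
#   m = max(net_ans); tmp_net = [math.exp(i - m) for i in net_ans]
# which leaves the returned probabilities unchanged.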
class AggregationCrossEntropyLayer(caffe.Layer):
"""
    Compute the Aggregation Cross Entropy (ACE) loss for the OCR recognition plan.
    Note: the forward/backward passes below currently implement a plain
    Euclidean (L2) loss; the ACE computation itself is still a TODO.
"""
def setup(self, bottom, top):
print("==============================================================Hi")
self.dict_size = 1220
if len(bottom) != 2:
raise Exception("Need two inputs to computer loss.")
def reshape(self, bottom, top):
        self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
        # the loss output is a scalar, so top[0] must hold a single value
        top[0].reshape(1)
def forward(self, bottom, top):
print("==============================================================Hi1")
# score = bottom[0].data
# label = bottom[1].data
# print(score)
# print(type(score))
# print(score.shape)
# T_ = len(score)
self.diff[...] = bottom[0].data - bottom[1].data
top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
def backward(self, top, propagate_down, bottom):
for i in range(2):
if not propagate_down[i]:
continue
if i == 0:
sign = 1
else:
sign = -1
bottom[i].diff[...] = sign * self.diff / bottom[i].num
def get_n_k(self, label):
pass
|
[
"numpy.sum",
"math.exp",
"sys.path.insert",
"numpy.zeros_like"
] |
[((11, 79), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/hena/caffe-ocr/buildcmake/install/python"""'], {}), "(0, '/home/hena/caffe-ocr/buildcmake/install/python')\n", (26, 79), False, 'import sys\n'), ((80, 139), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/hena/tool/protobuf-3.1.0/python"""'], {}), "(0, '/home/hena/tool/protobuf-3.1.0/python')\n", (95, 139), False, 'import sys\n'), ((223, 234), 'math.exp', 'math.exp', (['i'], {}), '(i)\n', (231, 234), False, 'import math\n'), ((747, 794), 'numpy.zeros_like', 'np.zeros_like', (['bottom[0].data'], {'dtype': 'np.float32'}), '(bottom[0].data, dtype=np.float32)\n', (760, 794), True, 'import numpy as np\n'), ((1234, 1256), 'numpy.sum', 'np.sum', (['(self.diff ** 2)'], {}), '(self.diff ** 2)\n', (1240, 1256), True, 'import numpy as np\n')]
|
"""
Tools for creating and working with Line (Station) Grids
"""
from typing import Union
import pyproj
import numpy as np
_atype = Union[type(None), np.ndarray]
_ptype = Union[type(None), pyproj.Proj]
class StaHGrid:
"""
Stations Grid
EXAMPLES:
--------
>>> x = arange(8)
>>> y = arange(8)*2-1
>>> grd = pyroms.grid.StaHGrid(x, y)
    >>> print(grd.x)
    [0 1 2 3 4 5 6 7]
"""
def __init__(self, x: np.ndarray, y: np.ndarray, angle: _atype = None):
assert x.ndim == 1 and y.ndim == 1 and x.shape == y.shape, \
            'x and y must be 1D arrays of the same size.'
mask = np.isnan(x) | np.isnan(y)
if np.any(mask):
x = np.ma.masked_where(mask, x)
y = np.ma.masked_where(mask, y)
self.spherical = False
self._x, self._y = x, y
if angle is None:
self.angle = np.zeros(len(self.y))
else:
self.angle = angle
return
x = property(lambda self: self._x)
y = property(lambda self: self._y)
class StaHGridGeo(StaHGrid):
"""
Stations Grid
EXAMPLES:
--------
>>> lon = arange(8)
>>> lat = arange(8)*2-1
>>> proj = pyproj()
>>> grd = pyroms.grid.StaHGridGeo(lon, lat, proj)
>>> print grd.x
[xxx, xxx, xxx, xxx, xxx, xxx, xxx, xxx]
"""
def __init__(self, lon: np.ndarray, lat: np.ndarray,
x: _atype = None, y: _atype = None,
angle: _atype = None, proj: _ptype = None):
self.spherical = True
self._lon, self._lat = lon, lat
self.proj = proj
if x is not None and y is not None:
super(StaHGridGeo, self).__init__(x, y, angle)
self.spherical = True
else:
if proj is not None:
self._x, self._y = proj(lon, lat)
else:
raise ValueError('Projection transformer must be ' +
'provided if x/y are missing.')
return
@property
def lon(self):
return self._lon
@lon.setter
def lon(self, lon):
if self.proj is not None:
self.__init__(lon, self._lat, angle=self.angle, proj=self.proj)
else:
self._lon = lon
@property
def lat(self):
return self._lat
@lat.setter
def lat(self, lat):
if self.proj is not None:
self.__init__(self._lon, lat, angle=self.angle, proj=self.proj)
else:
self._lat = lat
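# Minimal usage sketch (the projection parameters are illustrative, not taken
# from this module):
#   import numpy as np, pyproj
#   proj = pyproj.Proj(proj='merc', ellps='WGS84')
#   grd = StaHGridGeo(np.array([0., 1., 2.]), np.array([10., 11., 12.]), proj=proj)
#   grd.x, grd.y   # projected coordinates computed via proj(lon, lat)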
|
[
"numpy.isnan",
"numpy.any",
"numpy.ma.masked_where"
] |
[((690, 702), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (696, 702), True, 'import numpy as np\n'), ((653, 664), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (661, 664), True, 'import numpy as np\n'), ((667, 678), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (675, 678), True, 'import numpy as np\n'), ((720, 747), 'numpy.ma.masked_where', 'np.ma.masked_where', (['mask', 'x'], {}), '(mask, x)\n', (738, 747), True, 'import numpy as np\n'), ((764, 791), 'numpy.ma.masked_where', 'np.ma.masked_where', (['mask', 'y'], {}), '(mask, y)\n', (782, 791), True, 'import numpy as np\n')]
|
import numpy as np
from ..reco.disp import disp_vector
import astropy.units as u
import matplotlib.pyplot as plt
from ctapipe.visualization import CameraDisplay
__all__ = [
'overlay_disp_vector',
'overlay_hillas_major_axis',
'overlay_source',
'display_dl1_event',
]
def display_dl1_event(event, camera_geometry, tel_id=1, axes=None, **kwargs):
"""
Display a DL1 event (image and pulse time map) side by side
Parameters
----------
event: ctapipe event
tel_id: int
axes: list of `matplotlib.pyplot.axes` of shape (2,) or None
kwargs: kwargs for `ctapipe.visualization.CameraDisplay`
Returns
-------
axes: `matplotlib.pyplot.axes`
"""
if axes is None:
fig, axes = plt.subplots(1, 2, figsize=(12, 5))
image = event.dl1.tel[tel_id].image
peak_time = event.dl1.tel[tel_id].peak_time
if image is None or peak_time is None:
raise Exception(f"There is no calibrated image or pulse time map for telescope {tel_id}")
d1 = CameraDisplay(camera_geometry, image, ax=axes[0], **kwargs)
d1.add_colorbar(ax=axes[0])
d2 = CameraDisplay(camera_geometry, peak_time, ax=axes[1], **kwargs)
d2.add_colorbar(ax=axes[1])
return axes
def overlay_source(display, source_pos_x, source_pos_y, **kwargs):
"""
Display the source (event) position in the camera
Parameters
----------
display: `ctapipe.visualization.CameraDisplay`
source_pos_x: `astropy.units.Quantity`
source_pos_y: `astropy.units.Quantity`
kwargs: args for `matplotlib.pyplot.scatter`
Returns
-------
`matplotlib.pyplot.axes`
"""
kwargs['marker'] = 'x' if 'marker' not in kwargs else kwargs['marker']
kwargs['color'] = 'red' if 'color' not in kwargs else kwargs['color']
display.axes.scatter(source_pos_x, source_pos_y, **kwargs)
def overlay_disp_vector(display, disp, hillas, **kwargs):
"""
Overlay disp vector on a CameraDisplay
Parameters
----------
display: `ctapipe.visualization.CameraDisplay`
disp: `DispContainer`
hillas: `ctapipe.containers.HillasParametersContainer`
kwargs: args for `matplotlib.pyplot.quiver`
"""
assert np.isfinite([hillas.x.value, hillas.y.value]).all()
if not np.isfinite([disp.dx.value, disp.dy.value]).all():
disp_vector(disp)
display.axes.quiver(hillas.x, hillas.y,
disp.dx, disp.dy,
units='xy', scale=1*u.m,
angles='xy',
**kwargs,
)
display.axes.quiver(hillas.x.value, hillas.y.value, disp.dx.value, disp.dy.value, units='xy', scale=1)
def overlay_hillas_major_axis(display, hillas, **kwargs):
"""
Overlay hillas ellipse major axis on a CameraDisplay.
Parameters
----------
display: `ctapipe.visualization.CameraDisplay`
    hillas: `ctapipe.containers.HillasParametersContainer`
kwargs: args for `matplotlib.pyplot.plot`
"""
kwargs['color'] = 'black' if 'color' not in kwargs else kwargs['color']
length = hillas.length * 2
x = -length + 2 * length * np.arange(10) / 10
display.axes.plot(hillas.x + x * np.cos(hillas.psi.to(u.rad).value),
hillas.y + x * np.sin(hillas.psi.to(u.rad).value),
**kwargs,
)
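# The segment above samples x from -2*hillas.length up to (but not including)
# +2*hillas.length along the ellipse major axis (length = hillas.length*2) and
# rotates it by the Hillas angle psi, so the drawn points are
# (hillas.x + x*cos(psi), hillas.y + x*sin(psi)).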
|
[
"matplotlib.pyplot.subplots",
"numpy.isfinite",
"ctapipe.visualization.CameraDisplay",
"numpy.arange"
] |
[((1019, 1078), 'ctapipe.visualization.CameraDisplay', 'CameraDisplay', (['camera_geometry', 'image'], {'ax': 'axes[0]'}), '(camera_geometry, image, ax=axes[0], **kwargs)\n', (1032, 1078), False, 'from ctapipe.visualization import CameraDisplay\n'), ((1120, 1183), 'ctapipe.visualization.CameraDisplay', 'CameraDisplay', (['camera_geometry', 'peak_time'], {'ax': 'axes[1]'}), '(camera_geometry, peak_time, ax=axes[1], **kwargs)\n', (1133, 1183), False, 'from ctapipe.visualization import CameraDisplay\n'), ((742, 777), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 5)'}), '(1, 2, figsize=(12, 5))\n', (754, 777), True, 'import matplotlib.pyplot as plt\n'), ((2201, 2246), 'numpy.isfinite', 'np.isfinite', (['[hillas.x.value, hillas.y.value]'], {}), '([hillas.x.value, hillas.y.value])\n', (2212, 2246), True, 'import numpy as np\n'), ((2264, 2307), 'numpy.isfinite', 'np.isfinite', (['[disp.dx.value, disp.dy.value]'], {}), '([disp.dx.value, disp.dy.value])\n', (2275, 2307), True, 'import numpy as np\n'), ((3142, 3155), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3151, 3155), True, 'import numpy as np\n')]
|
import numpy as np
import random
from scipy.stats import skew as scipy_skew
from skimage.transform import resize as skimage_resize
from QFlow import config
## set of functions for loading and preparing a dataset for training.
def get_num_min_class(labels):
'''
Get the number of the minimum represented class in label vector.
Used for resampling data.
input:
labels: np.ndarray of labels
outputs:
num_samples: int number of samples for minimum class
'''
# use argmax as example's class
argmax_labels = np.argmax(labels, axis=-1)
# max of num_samples is all one label
num_samples = labels.shape[0]
for i in range(labels.shape[-1]):
lab_elems = np.sum(argmax_labels==i)
if lab_elems < num_samples:
num_samples = lab_elems
return num_samples
def resample_data(features, state_labels, labels=None, seed=None):
'''
Resample data to be evenly distributed across classes in labels by cutting
number of examples for each class to be equal to the number of examples
in the least represented class. (classes assumed to be last axis of
labels). Shuffles after resampling.
inputs:
features: ndarray of features to be resampled. Resample along first axis.
state_labels: ndarray of labels to be used for resampling
labels: ndarray of labels to be resampled.
seed: Seed of random number generator for shuffling idxs during resample
and for shuffling resampled features and labels.
outputs:
features: list of resampled features
labels: list of resampled labels
'''
rng = np.random.default_rng(seed)
num_samples = get_num_min_class(state_labels)
features_resamp = []; state_labels_resamp = []; labels_resamp = []
for i in range(state_labels.shape[-1]):
s_idxs = state_labels.argmax(axis=-1)==i
# first get full array of single state
features_s_full = features[s_idxs]
state_labels_s_full = state_labels[s_idxs]
if labels is not None:
labels_s_full = labels[s_idxs]
# then get idxs (0-length), shuffle, and slice to num_samples
# shuffle idxs to be sure labels and features are shuffled together
idxs = list(range(features_s_full.shape[0]))
rng.shuffle(idxs)
features_resamp.append(features_s_full[idxs[:num_samples]])
state_labels_resamp.append(state_labels_s_full[idxs[:num_samples]])
if labels is not None:
labels_resamp.append(labels_s_full[idxs[:num_samples]])
features_resamp_arr = np.concatenate(features_resamp, axis=0)
state_labels_resamp_arr = np.concatenate(state_labels_resamp, axis=0)
if labels is not None:
labels_resamp_arr = np.concatenate(labels_resamp, axis=0)
idxs = list(range(features_resamp_arr.shape[0]))
rng.shuffle(idxs)
if labels is not None:
return features_resamp_arr[idxs], labels_resamp_arr[idxs]
elif labels is None:
return features_resamp_arr[idxs], state_labels_resamp_arr[idxs]
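# Usage sketch (the shapes are illustrative, not prescribed by this module):
#   features: (N, 50, 50, 1) float array, state_labels: (N, 5) one-hot array
#   feats, labs = resample_data(features, state_labels, seed=0)
# Afterwards every argmax state occurs equally often and the examples are shuffled.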
def noise_mag_to_class(state_labels, noise_mags,
low_thresholds=None, high_thresholds=None):
'''
Function to convert noise magnitudes to noise classes.
Noise class thresholds are defined here. Thresholds for states
order is: no dot, left dot, central dot, right dot, double dot
    Default low thresholds are the linear extrapolation to 100 % accuracy
    of an average noisy-trained model vs. noise_mag. Default high
    thresholds are the linear extrapolation to 0 % accuracy of an
    average noisy-trained model vs. noise_mag.
inputs:
state_labels: list of state labels. shape assumed to be
(num_examples, num_states).
noise_mags: list of float noise_mags for state_labels. shape assumed
to be (num_examples, ).
        low_thresholds: list of floats of shape (num_state, ) specifying
            the thresholds separating the high and moderate signal to
            noise classes.
        high_thresholds: list of floats of shape (num_state, ) specifying
            the thresholds separating the moderate and low signal to
            noise classes.
'''
# set number of noise classes and states.
# length of thresholds must be equal to num_states.
    # only num_quality_classes == 3 is supported.
num_quality_classes = config.NUM_QUALITY_CLASSES
num_states = config.NUM_STATES
# set default thresholds
if high_thresholds is None:
high_thresholds = [1.22, 1.00, 1.21, 0.68, 2.00]
if low_thresholds is None:
low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
low_thresholds = np.array(low_thresholds)
high_thresholds = np.array(high_thresholds)
quality_classes = np.zeros(noise_mags.shape+(num_quality_classes,))
# use fractional labels by taking weighted average after
# applying thresholds
num_states = state_labels.shape[-1]
# get per state classes then sum across last axis later
per_state_classes = np.zeros(
noise_mags.shape + (num_quality_classes,) + (num_states,))
# use boolean indexing to define classes from noise mags/threshold arrays
for i in range(num_states):
per_state_classes[noise_mags <= low_thresholds[i],0, i] = 1
per_state_classes[(noise_mags > low_thresholds[i]) &\
(noise_mags <= high_thresholds[i]), 1, i] = 1
per_state_classes[noise_mags > high_thresholds[i], 2, i] = 1
# multiply each first axis element then sum across last axes
quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)
return quality_classes
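# Worked example, assuming the default thresholds above: a pure double-dot label
# (state index 4) with noise_mag = 0.3 lies below low_thresholds[4] = 0.47, so it
# maps to quality class [1, 0, 0] (high SNR); with noise_mag = 2.5 it exceeds
# high_thresholds[4] = 2.00 and maps to [0, 0, 1] (low SNR).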
def get_data(f, train_test_split=0.9,
dat_key='sensor', label_key='state',
resample=True, seed=None,
low_thresholds=None, high_thresholds=None):
'''
Reads in the subregion data and converts it to a format useful for training
Note that the data is shuffled after reading in.
inputs:
f: one of:
str path to .npz file containing cropped data
dict of cropped data.
train_test_split: float fraction of data to use for training.
resample: bool specifying whether to resample data to get even state
representation.
seed: int random seed for file shuffling.
label_key: string key for data used for the label. One of:
'data_quality', 'noise_mag_factor', 'state'.
low_threshold: list of noise levels to use for high/moderate signal
to noise ratio threshold.
high_threshold: list of noise levels to use for moderate/low signal
to noise ratio threshold.
outputs:
train_data: np.ndarray of training data.
train_labels: np.ndarray of training labels.
eval_data: np.ndarray of training data.
eval_labels: np.ndarray of training labels.
'''
# treat f as path, or if TypeError treat as dict.
try:
dict_of_dicts = np.load(f, allow_pickle = True)
file_on_disk = True
except TypeError:
dict_of_dicts = f
file_on_disk = False
files = list(dict_of_dicts.keys())
random.Random(seed).shuffle(files)
inp = []
oup_state = []
# if we want a nonstate label load it so we can resample
if label_key!='state':
oup_labels = []
else:
oup_labels = None
train_labels = None
eval_labels = None
# if label is noise class, we need to get noise mag labels first
# then process to turn the mag into a class label
if label_key == 'data_quality':
data_quality = True
label_key = 'noise_mag_factor'
else:
data_quality = False
for file in files:
# for compressed data, file is the key of the dict of dicts
if file_on_disk:
data_dict = dict_of_dicts[file].item()
else:
data_dict = dict_of_dicts[file]
dat = data_dict[dat_key]
# generates a list of arrays
inp.append(dat.reshape(config.SUB_SIZE,config.SUB_SIZE,1))
oup_state.append(data_dict['state']) # generates a list of arrays
if oup_labels is not None:
oup_labels.append(data_dict[label_key])
inp = np.array(inp) # converts the list to np.array
oup_state = np.array(oup_state) # converts the list to np.array
if oup_labels is not None:
oup_labels = np.array(oup_labels)
    # split data into train and evaluation data/labels
n_samples = inp.shape[0]
print("Total number of samples :", n_samples)
n_train = int(train_test_split * n_samples)
train_data = inp[:n_train]
print("Training data info:", train_data.shape)
train_states = oup_state[:n_train]
if oup_labels is not None:
train_labels = oup_labels[:n_train]
eval_data = inp[n_train:]
print("Evaluation data info:", eval_data.shape)
eval_states = oup_state[n_train:]
if oup_labels is not None:
eval_labels = oup_labels[n_train:]
# convert noise mag to class before resampling/getting noise mags if
    # needed because resampling doesn't return state labels
if data_quality:
train_labels = noise_mag_to_class(
train_states, train_labels,
low_thresholds=low_thresholds,
high_thresholds=high_thresholds,
)
eval_labels = noise_mag_to_class(
eval_states, eval_labels,
low_thresholds=low_thresholds,
high_thresholds=high_thresholds,
)
# resample to make state representation even
if resample:
train_data, train_labels = resample_data(
train_data, train_states, train_labels)
eval_data, eval_labels = resample_data(
eval_data, eval_states, eval_labels)
elif not resample and label_key=='state':
train_labels = train_states
eval_labels = eval_states
# expand dim of labels to make sure that they have proper shape
if oup_labels is not None and len(train_labels.shape)==1:
np.expand_dims(train_labels, 1)
if oup_labels is not None and len(eval_labels.shape)==1:
np.expand_dims(eval_labels, 1)
return train_data, train_labels, eval_data, eval_labels
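# Typical call (the file name is illustrative only):
#   train_x, train_y, eval_x, eval_y = get_data('cropped_subregions.npz',
#                                               label_key='data_quality',
#                                               train_test_split=0.9)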
## preprocess functions
def gradient(x):
'''
Take gradient of an ndarray in specified direction. Thin wrapper around
np.gradient(). Also note that x -> axis=1 and y-> axis=0
input:
x: An numpy ndarray to take the gradient of
output:
numpy ndarray containing gradient in x direction.
'''
return np.gradient(x, axis=1)
def apply_threshold(x, threshold_val=10, threshold_to=0):
'''
Thresholds an numpy ndarray to remove
Args:
x = numpy array with data to be filtered
threshold_val = percentile below which to set values to zero
'''
x[x < np.abs(np.percentile(x.flatten(),threshold_val))] = threshold_to
return x
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
'''
Clip input symmetrically at clip_val number of std devs.
Do not zscore norm x, but apply thresholds using normed x
'''
x_clipped = np.copy(x)
mean = np.mean(x)
std = np.std(x)
norm_x = (x - mean) / std
# set clipped values to either the mean or clip threshold
if clip_to.lower() == 'clip_val':
x_clipped[norm_x < -clip_val] = -clip_val * std + mean
x_clipped[norm_x > clip_val] = clip_val * std + mean
elif clip_to.lower() == 'mean':
x_clipped[norm_x < -clip_val] = mean
x_clipped[norm_x > clip_val] = mean
else:
raise KeyError('"clip_to" option not valid: ' +str(clip_to) +\
'Valid options: clip_val, mean')
return x_clipped
def autoflip_skew(data):
'''
Autoflip a numpy ndarray based on the skew of the values
(effective for gradient data).
'''
skew_sign = np.sign(scipy_skew(np.ravel(data)))
return data*skew_sign
def zscore_norm(x):
'''
Takes a numpy ndarray and returns a z-score normalized version
'''
return (x-x.mean())/x.std()
class Preprocessor():
def __init__(self, autoflip=False, denoising=[],
clip_val=None, thresh_val=None):
'''
Class for doing preprocessing of data.
inputs:
autoflip: bool specifying whether to autoflip data.
denoising: list of str specifying denoising to apply to data.
clip_val: value for clipping denoising. Unused if 'clip' not in
denoising.
            thresh_val: percentile value used for threshold denoising. Unused
                if 'threshold' not in denoising.
'''
self.autoflip = autoflip
valid_denoising = ['threshold', 'clip']
if not set(denoising).issubset(valid_denoising):
raise ValueError(
'invalid denoising ', denoising,
' Valid values:', valid_denoising)
self.denoising = denoising
self.clip_val = clip_val
self.thresh_val = thresh_val
def proc_subimage(self, x):
'''
Takes the gradient of the measured data, applies denoising if specified,
normalizes, autoflips if specified,
and then adjusts the size (if necessary)
Args:
x = an array with data
'''
# take gradient
x = gradient(x)
# apply thresholding
        if 'threshold' in self.denoising:
            if self.thresh_val is not None:
                x = apply_threshold(x, self.thresh_val)
            else:
                x = apply_threshold(x)
        # apply clipping
        if 'clip' in self.denoising:
            if self.clip_val is not None:
                x = apply_clipping(x, self.clip_val)
            else:
                x = apply_clipping(x)
# normalize with zscore normalization
x = zscore_norm(x)
# autoflip by skew of image gradient
if self.autoflip:
x = autoflip_skew(x)
target_shape = (config.SUB_SIZE, config.SUB_SIZE, 1)
if x.shape != target_shape:
x = skimage_resize(x, target_shape)
return x
def proc_subimage_set(self, x_arr):
'''
Loop through subimages and apply preprocessing to each one.
inputs:
x: full dataset of images. First axis assumed to be example index.
returns:
Full dataset of images with same shape, processed.
'''
return np.array([self.proc_subimage(x) for x in x_arr])
|
[
"numpy.copy",
"numpy.mean",
"numpy.random.default_rng",
"random.Random",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.einsum",
"numpy.concatenate",
"numpy.std",
"numpy.expand_dims",
"numpy.gradient",
"numpy.ravel",
"skimage.transform.resize",
"numpy.load"
] |
[((558, 584), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(-1)'}), '(labels, axis=-1)\n', (567, 584), True, 'import numpy as np\n'), ((1729, 1756), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (1750, 1756), True, 'import numpy as np\n'), ((2684, 2723), 'numpy.concatenate', 'np.concatenate', (['features_resamp'], {'axis': '(0)'}), '(features_resamp, axis=0)\n', (2698, 2723), True, 'import numpy as np\n'), ((2754, 2797), 'numpy.concatenate', 'np.concatenate', (['state_labels_resamp'], {'axis': '(0)'}), '(state_labels_resamp, axis=0)\n', (2768, 2797), True, 'import numpy as np\n'), ((4690, 4714), 'numpy.array', 'np.array', (['low_thresholds'], {}), '(low_thresholds)\n', (4698, 4714), True, 'import numpy as np\n'), ((4737, 4762), 'numpy.array', 'np.array', (['high_thresholds'], {}), '(high_thresholds)\n', (4745, 4762), True, 'import numpy as np\n'), ((4786, 4837), 'numpy.zeros', 'np.zeros', (['(noise_mags.shape + (num_quality_classes,))'], {}), '(noise_mags.shape + (num_quality_classes,))\n', (4794, 4837), True, 'import numpy as np\n'), ((5049, 5116), 'numpy.zeros', 'np.zeros', (['(noise_mags.shape + (num_quality_classes,) + (num_states,))'], {}), '(noise_mags.shape + (num_quality_classes,) + (num_states,))\n', (5057, 5116), True, 'import numpy as np\n'), ((5598, 5654), 'numpy.einsum', 'np.einsum', (['"""ijk,ik->ij"""', 'per_state_classes', 'state_labels'], {}), "('ijk,ik->ij', per_state_classes, state_labels)\n", (5607, 5654), True, 'import numpy as np\n'), ((8279, 8292), 'numpy.array', 'np.array', (['inp'], {}), '(inp)\n', (8287, 8292), True, 'import numpy as np\n'), ((8341, 8360), 'numpy.array', 'np.array', (['oup_state'], {}), '(oup_state)\n', (8349, 8360), True, 'import numpy as np\n'), ((10654, 10676), 'numpy.gradient', 'np.gradient', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (10665, 10676), True, 'import numpy as np\n'), ((11221, 11231), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (11228, 11231), True, 'import numpy as np\n'), ((11243, 11253), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (11250, 11253), True, 'import numpy as np\n'), ((11264, 11273), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (11270, 11273), True, 'import numpy as np\n'), ((720, 746), 'numpy.sum', 'np.sum', (['(argmax_labels == i)'], {}), '(argmax_labels == i)\n', (726, 746), True, 'import numpy as np\n'), ((2853, 2890), 'numpy.concatenate', 'np.concatenate', (['labels_resamp'], {'axis': '(0)'}), '(labels_resamp, axis=0)\n', (2867, 2890), True, 'import numpy as np\n'), ((7025, 7054), 'numpy.load', 'np.load', (['f'], {'allow_pickle': '(True)'}), '(f, allow_pickle=True)\n', (7032, 7054), True, 'import numpy as np\n'), ((8445, 8465), 'numpy.array', 'np.array', (['oup_labels'], {}), '(oup_labels)\n', (8453, 8465), True, 'import numpy as np\n'), ((10076, 10107), 'numpy.expand_dims', 'np.expand_dims', (['train_labels', '(1)'], {}), '(train_labels, 1)\n', (10090, 10107), True, 'import numpy as np\n'), ((10177, 10207), 'numpy.expand_dims', 'np.expand_dims', (['eval_labels', '(1)'], {}), '(eval_labels, 1)\n', (10191, 10207), True, 'import numpy as np\n'), ((7206, 7225), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (7219, 7225), False, 'import random\n'), ((11976, 11990), 'numpy.ravel', 'np.ravel', (['data'], {}), '(data)\n', (11984, 11990), True, 'import numpy as np\n'), ((14116, 14147), 'skimage.transform.resize', 'skimage_resize', (['x', 'target_shape'], {}), '(x, target_shape)\n', (14130, 14147), True, 'from skimage.transform import resize as 
skimage_resize\n')]
|
'''
This file implements various optimization methods, including
-- SGD with gradient norm clipping
-- AdaGrad
-- AdaDelta
-- Adam
Transparent to switch between CPU / GPU.
@author: <NAME> (<EMAIL>)
'''
import random
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import HostFromGpu
from theano.sandbox.cuda.var import CudaNdarraySharedVariable
from theano.printing import debugprint
from .initialization import default_mrng
def create_optimization_updates(
cost, params, method="sgd",
max_norm=5, updates=None, gradients=None,
lr=0.01, eps=None, rho=0.99, gamma=0.999,
beta1=0.9, beta2=0.999, momentum=0.0):
_momentum = momentum
lr = theano.shared(np.float64(lr).astype(theano.config.floatX))
rho = theano.shared(np.float64(rho).astype(theano.config.floatX))
beta1 = theano.shared(np.float64(beta1).astype(theano.config.floatX))
beta2 = theano.shared(np.float64(beta2).astype(theano.config.floatX))
momentum = theano.shared(np.float64(momentum).astype(theano.config.floatX))
gamma = theano.shared(np.float64(gamma).astype(theano.config.floatX))
if eps is None:
eps = 1e-8 if method.lower() != "esgd" else 1e-4
eps = np.float64(eps).astype(theano.config.floatX)
gparams = T.grad(cost, params) if gradients is None else gradients
g_norm = 0
for g in gparams:
g_norm = g_norm + g.norm(2)**2
g_norm = T.sqrt(g_norm)
# max_norm is useful for sgd
if method != "sgd": max_norm = None
if max_norm is not None and max_norm is not False:
max_norm = theano.shared(np.float64(max_norm).astype(theano.config.floatX))
shrink_factor = T.minimum(max_norm, g_norm + eps) / (g_norm + eps)
gparams_clipped = [ ]
for g in gparams:
g = shrink_factor * g
gparams_clipped.append(g)
gparams = gparams_clipped
if updates is None:
updates = OrderedDict()
gsums = create_accumulators(params) if method != "sgd" or _momentum > 0.0 else \
[ None for p in params ]
xsums = create_accumulators(params) if method != "sgd" and method != "adagrad" else None
if method == "sgd":
create_sgd_updates(updates, params, gparams, gsums, lr, momentum)
elif method == "adagrad":
create_adagrad_updates(updates, params, gparams, gsums, lr, eps)
elif method == "adadelta":
create_adadelta_updates(updates, params, gparams, gsums, xsums, lr, eps, rho)
elif method == "adam":
create_adam_updates(updates, params, gparams, gsums, xsums, lr, eps, beta1, beta2)
elif method == "esgd":
create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum)
else:
raise Exception("Unknown optim method: {}\n".format(method))
if method == "adadelta":
lr = rho
return updates, lr, g_norm, gsums, xsums, max_norm
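# Usage sketch (the symbolic inputs x, y and the model cost are illustrative):
#   updates, lr, g_norm, gsums, xsums, max_norm = create_optimization_updates(
#           cost, params, method="adam", lr=0.001)
#   train_fn = theano.function(inputs=[x, y], outputs=cost, updates=updates)
# Each call to train_fn then applies one optimizer step to the shared params.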
def is_subtensor_op(p):
if hasattr(p, 'owner') and hasattr(p.owner, 'op'):
return isinstance(p.owner.op, T.AdvancedSubtensor1) or \
isinstance(p.owner.op, T.Subtensor)
return False
def get_subtensor_op_inputs(p):
origin, indexes = p.owner.inputs
if hasattr(origin, 'owner') and hasattr(origin.owner, 'op') and \
isinstance(origin.owner.op, HostFromGpu):
origin = origin.owner.inputs[0]
assert isinstance(origin, CudaNdarraySharedVariable)
return origin, indexes
def get_similar_subtensor(matrix, indexes, param_op):
'''
So far there is only two possible subtensor operation used.
'''
if isinstance(param_op.owner.op, T.AdvancedSubtensor1):
return matrix[indexes]
else:
# indexes is start index in this case
return matrix[indexes:]
def create_accumulators(params):
accums = [ ]
for p in params:
if is_subtensor_op(p):
origin, _ = get_subtensor_op_inputs(p)
acc = theano.shared(np.zeros_like(origin.get_value(borrow=True), \
dtype=theano.config.floatX))
else:
acc = theano.shared(np.zeros_like(p.get_value(borrow=True), \
dtype=theano.config.floatX))
accums.append(acc)
return accums
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
has_momentum = momentum.get_value() > 0.0
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
if has_momentum:
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices*momentum + g
updates[acc] = T.set_subtensor(acc_slices, new_acc)
else:
new_acc = g
updates[origin] = T.inc_subtensor(p, - lr * new_acc)
else:
if has_momentum:
new_acc = acc*momentum + g
updates[acc] = new_acc
else:
new_acc = g
updates[p] = p - lr * new_acc
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
for p, g, acc in zip(params, gparams, gsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
#acc_slices = acc[indexes]
acc_slices = get_similar_subtensor(acc, indexes, p)
new_acc = acc_slices + g**2
updates[acc] = T.set_subtensor(acc_slices, new_acc)
updates[origin] = T.inc_subtensor(p, \
- lr * (g / T.sqrt(new_acc + eps)))
else:
new_acc = acc + g**2
updates[acc] = new_acc
updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
#updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
# which one to use?
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
lr, eps, rho):
for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
gacc_slices = gacc[indexes]
xacc_slices = xacc[indexes]
new_gacc = rho * gacc_slices + (1.0-rho) * g**2
d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc_slices + (1.0-rho) * d**2
updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
updates[origin] = T.inc_subtensor(p, d)
else:
new_gacc = rho * gacc + (1.0-rho) * g**2
d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
new_xacc = rho * xacc + (1.0-rho) * d**2
updates[gacc] = new_gacc
updates[xacc] = new_xacc
updates[p] = p + d
def create_adam_updates(updates, params, gparams, gsums, xsums, \
lr, eps, beta1, beta2):
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omb1_t = 1.0 - beta1**i_t
omb2_t = 1.0 - beta2**i_t
lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
for p, g, m, v in zip(params, gparams, gsums, xsums):
if is_subtensor_op(p):
origin, indexes = get_subtensor_op_inputs(p)
m_sub = m[indexes]
v_sub = v[indexes]
m_t = beta1*m_sub + (1.0-beta1)*g
v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = T.set_subtensor(m_sub, m_t)
updates[v] = T.set_subtensor(v_sub, v_t)
updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
else:
m_t = beta1*m + (1.0-beta1)*g
v_t = beta2*v + (1.0-beta2)*T.sqr(g)
g_t = m_t / (T.sqrt(v_t) + eps)
updates[m] = m_t
updates[v] = v_t
updates[p] = p - lr_t*g_t
updates[i] = i_t
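# The dense branch of create_adam_updates above follows the standard Adam rule
#   m_t = beta1*m + (1-beta1)*g,   v_t = beta2*v + (1-beta2)*g**2,
#   p  <- p - lr * sqrt(1-beta2**t)/(1-beta1**t) * m_t / (sqrt(v_t) + eps),
# with the bias-correction factor folded into lr_t.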
def create_esgd_updates(updates, params, gparams, gsums, xsums, lr, eps, gamma, momentum):
has_momentum = momentum.get_value() > 0.0
samples = [ default_mrng.normal(size=p.shape, avg=0, std=1,
dtype=theano.config.floatX) for p in params ]
HVs = T.Lop(gparams, params, samples)
i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
i_t = i + 1.0
omg_t = 1.0 - gamma**i_t
for p, g, m, D, Hv in zip(params, gparams, gsums, xsums, HVs):
if is_subtensor_op(p):
raise Exception("ESGD subtensor update not implemented!")
else:
D_t = D * gamma + T.sqr(Hv) * (1.0-gamma)
if has_momentum:
m_t = m*momentum + g
updates[m] = m_t
else:
m_t = g
g_t = m_t / ( T.sqrt(D_t/omg_t + eps) )
#g_t = m_t / ( T.sqrt(D_t + eps) )
updates[D] = D_t
updates[p] = p - lr*g_t
updates[i] = i_t
|
[
"theano.tensor.Lop",
"collections.OrderedDict",
"theano.tensor.sqrt",
"theano.tensor.minimum",
"numpy.float64",
"theano.tensor.sqr",
"theano.tensor.set_subtensor",
"theano.tensor.inc_subtensor",
"theano.tensor.grad"
] |
[((1566, 1580), 'theano.tensor.sqrt', 'T.sqrt', (['g_norm'], {}), '(g_norm)\n', (1572, 1580), True, 'import theano.tensor as T\n'), ((8330, 8361), 'theano.tensor.Lop', 'T.Lop', (['gparams', 'params', 'samples'], {}), '(gparams, params, samples)\n', (8335, 8361), True, 'import theano.tensor as T\n'), ((1419, 1439), 'theano.tensor.grad', 'T.grad', (['cost', 'params'], {}), '(cost, params)\n', (1425, 1439), True, 'import theano.tensor as T\n'), ((2075, 2088), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2086, 2088), False, 'from collections import OrderedDict\n'), ((1359, 1374), 'numpy.float64', 'np.float64', (['eps'], {}), '(eps)\n', (1369, 1374), True, 'import numpy as np\n'), ((1819, 1852), 'theano.tensor.minimum', 'T.minimum', (['max_norm', '(g_norm + eps)'], {}), '(max_norm, g_norm + eps)\n', (1828, 1852), True, 'import theano.tensor as T\n'), ((4945, 4978), 'theano.tensor.inc_subtensor', 'T.inc_subtensor', (['p', '(-lr * new_acc)'], {}), '(p, -lr * new_acc)\n', (4960, 4978), True, 'import theano.tensor as T\n'), ((5572, 5608), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['acc_slices', 'new_acc'], {}), '(acc_slices, new_acc)\n', (5587, 5608), True, 'import theano.tensor as T\n'), ((6520, 6558), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['gacc_slices', 'new_gacc'], {}), '(gacc_slices, new_gacc)\n', (6535, 6558), True, 'import theano.tensor as T\n'), ((6587, 6625), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['xacc_slices', 'new_xacc'], {}), '(xacc_slices, new_xacc)\n', (6602, 6625), True, 'import theano.tensor as T\n'), ((6656, 6677), 'theano.tensor.inc_subtensor', 'T.inc_subtensor', (['p', 'd'], {}), '(p, d)\n', (6671, 6677), True, 'import theano.tensor as T\n'), ((7244, 7258), 'theano.tensor.sqrt', 'T.sqrt', (['omb2_t'], {}), '(omb2_t)\n', (7250, 7258), True, 'import theano.tensor as T\n'), ((7645, 7672), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['m_sub', 'm_t'], {}), '(m_sub, m_t)\n', (7660, 7672), True, 'import theano.tensor as T\n'), ((7698, 7725), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['v_sub', 'v_t'], {}), '(v_sub, v_t)\n', (7713, 7725), True, 'import theano.tensor as T\n'), ((7756, 7787), 'theano.tensor.inc_subtensor', 'T.inc_subtensor', (['p', '(-lr_t * g_t)'], {}), '(p, -lr_t * g_t)\n', (7771, 7787), True, 'import theano.tensor as T\n'), ((854, 868), 'numpy.float64', 'np.float64', (['lr'], {}), '(lr)\n', (864, 868), True, 'import numpy as np\n'), ((923, 938), 'numpy.float64', 'np.float64', (['rho'], {}), '(rho)\n', (933, 938), True, 'import numpy as np\n'), ((995, 1012), 'numpy.float64', 'np.float64', (['beta1'], {}), '(beta1)\n', (1005, 1012), True, 'import numpy as np\n'), ((1069, 1086), 'numpy.float64', 'np.float64', (['beta2'], {}), '(beta2)\n', (1079, 1086), True, 'import numpy as np\n'), ((1146, 1166), 'numpy.float64', 'np.float64', (['momentum'], {}), '(momentum)\n', (1156, 1166), True, 'import numpy as np\n'), ((1223, 1240), 'numpy.float64', 'np.float64', (['gamma'], {}), '(gamma)\n', (1233, 1240), True, 'import numpy as np\n'), ((4832, 4868), 'theano.tensor.set_subtensor', 'T.set_subtensor', (['acc_slices', 'new_acc'], {}), '(acc_slices, new_acc)\n', (4847, 4868), True, 'import theano.tensor as T\n'), ((7103, 7118), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (7113, 7118), True, 'import numpy as np\n'), ((8385, 8400), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (8395, 8400), True, 'import numpy as np\n'), ((8881, 8906), 'theano.tensor.sqrt', 'T.sqrt', (['(D_t / omg_t + eps)'], 
{}), '(D_t / omg_t + eps)\n', (8887, 8906), True, 'import theano.tensor as T\n'), ((1744, 1764), 'numpy.float64', 'np.float64', (['max_norm'], {}), '(max_norm)\n', (1754, 1764), True, 'import numpy as np\n'), ((6383, 6429), 'theano.tensor.sqrt', 'T.sqrt', (['((xacc_slices + eps) / (new_gacc + eps))'], {}), '((xacc_slices + eps) / (new_gacc + eps))\n', (6389, 6429), True, 'import theano.tensor as T\n'), ((6762, 6801), 'theano.tensor.sqrt', 'T.sqrt', (['((xacc + eps) / (new_gacc + eps))'], {}), '((xacc + eps) / (new_gacc + eps))\n', (6768, 6801), True, 'import theano.tensor as T\n'), ((7567, 7575), 'theano.tensor.sqr', 'T.sqr', (['g'], {}), '(g)\n', (7572, 7575), True, 'import theano.tensor as T\n'), ((7601, 7612), 'theano.tensor.sqrt', 'T.sqrt', (['v_t'], {}), '(v_t)\n', (7607, 7612), True, 'import theano.tensor as T\n'), ((7882, 7890), 'theano.tensor.sqr', 'T.sqr', (['g'], {}), '(g)\n', (7887, 7890), True, 'import theano.tensor as T\n'), ((7916, 7927), 'theano.tensor.sqrt', 'T.sqrt', (['v_t'], {}), '(v_t)\n', (7922, 7927), True, 'import theano.tensor as T\n'), ((8690, 8699), 'theano.tensor.sqr', 'T.sqr', (['Hv'], {}), '(Hv)\n', (8695, 8699), True, 'import theano.tensor as T\n'), ((5692, 5713), 'theano.tensor.sqrt', 'T.sqrt', (['(new_acc + eps)'], {}), '(new_acc + eps)\n', (5698, 5713), True, 'import theano.tensor as T\n'), ((5837, 5858), 'theano.tensor.sqrt', 'T.sqrt', (['(new_acc + eps)'], {}), '(new_acc + eps)\n', (5843, 5858), True, 'import theano.tensor as T\n')]
|
import sys
from pathlib import Path
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.io import export_png
from bokeh.plotting import figure
def plot_lifetime(df, type, path):
df = df.copy()
palette = ["#c9d9d3", "#718dbf", "#e84d60", "#648450"]
ylist = []
list0 = []
list1 = []
list2 = []
list3 = []
interv = np.sort(df["age_real"].unique())
for a in interv:
df_rel = df[df["age_real"]==a]
n = len(df_rel)
status0 = sum(df_rel["employment_status_" + type] == 0)/n
status1 = sum(df_rel["employment_status_" + type] == 1)/n
status2 = sum(df_rel["employment_status_" + type] == 2)/n
status3 = sum(df_rel["employment_status_" + type] == 3)/n
ylist.append(str(a))
list0.append(status0)
list1.append(status1)
list2.append(status2)
list3.append(status3)
dici = {"age": ylist,
"0": list0,
"1": list1,
"2": list2,
"3": list3}
#alllist = ["0", "1", "2", "3"]
#labels = ["N.E.", "Rente", "Teilzeit", "Vollzeit"]
alllist = ["3", "2", "0", "1"]
labels = ["Vollzeit", "Teilzeit", "N.E.", "Rente"]
p = figure(x_range=ylist, plot_height=250, plot_width=1500, title="Employment Status by age: West Germany / type: " + type)
p.vbar_stack(alllist, x='age', width=0.9, color=palette, source=dici,
legend_label=labels)
p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "bottom_left"
p.legend.orientation = "horizontal"
str_path = "employment_" + type + ".png"
export_png(p, filename=str(path/ str_path))
def var_by_method(dataf, variable):
dataf_out = pd.DataFrame()
dataf_out["pid"] = dataf["pid"]
dataf_out["year"] = dataf["year"]
dataf_out["hid"] = dataf["hid_real"]
dataf_out["age"] = dataf["age_real"]
for m in ["real", "ext"]:
dataf_out[m] = dataf[variable + "_" + m]
return dataf_out
def plot_mean_by_age(dataf, m_list, variable, path):
m_list = ["real", "ext"]
dataf = dataf.copy()
df = var_by_method(dataf, variable)
df_plot = df.groupby("age")[m_list].mean()
fig_title = variable
file_title = variable + ".png"
# return df
plot_age(df_plot, fig_title, file_title, path)
def make_pretty(p):
p.xgrid.grid_line_color = None
p.yaxis.minor_tick_line_width=0
p.xaxis.minor_tick_line_width=0
# p.legend.location = "bottom_right"
return p
def plot_employment_status_by_age(dataf, employment_status, path, female=None, east=None):
dataf = dataf.copy()
dataf_rest = rest_dataf(dataf, female, east)
status_list = ["N_E", "Rente", "Teilzeit", "Vollzeit"]
status = status_list[employment_status]
df_tmp = var_by_method(dataf_rest, "employment_status")
tmp = df_tmp[["real", "ext"]] == employment_status
df_plot = pd.concat([df_tmp["age"], tmp], axis=1)
df_plot = df_plot.groupby("age").mean()
# Plotting
fig_title, file_title = get_titles(female, east, status)
plot_age(df_plot, fig_title, file_title, path, interv=1)
def plot_age(dataf, fig_title, file_title, path, interv=0):
source = ColumnDataSource(dataf)
if interv==1:
p = figure(title = fig_title, y_range=(0, 1))
else:
p = figure(title = fig_title)
p.line(x="age", y="real", source=source,
line_color="black", line_dash = "solid", line_width=2,
legend_label = "Real")
p.line(x="age", y="ext", source=source,
line_color="black", line_dash = "dotted", line_width=2,
legend_label = "Ext")
p.xaxis.axis_label = "Age"
p = make_pretty(p)
export_png(p, filename=str(path/ file_title))
def plot_year(dataf, fig_title, file_title, path, interv=0):
source = ColumnDataSource(dataf)
if interv==1:
p = figure(title = fig_title, y_range=(0, 1))
else:
p = figure(title = fig_title)
p.line(x="year", y="real", source=source,
line_color="black", line_dash = "solid", line_width=2,
legend_label = "Real")
p.line(x="year", y="ext", source=source,
line_color="black", line_dash = "dotted", line_width=2,
legend_label = "Ext")
p.xaxis.axis_label = "Year"
p = make_pretty(p)
export_png(p, filename=str(path/ file_title))
def plot_year_age(ploto, by="year"):
source = ColumnDataSource(ploto.df_plot)
if ploto.y_range is None:
p = figure(title = ploto.fig_title)
else:
p = figure(title = ploto.fig_title, y_range=ploto.y_range)
p.line(x=by, y="real", source=source,
line_color="black", line_dash = "solid", line_width=2,
legend_label = "Real")
p.line(x=by, y="ext", source=source,
line_color="black", line_dash = "dotted", line_width=2,
legend_label = "Ext")
if by == "year":
p.xaxis.axis_label = "Year"
elif by == "age":
p.xaxis.axis_label = "Age"
p = make_pretty(p)
export_png(p, filename=str(ploto.path/ ploto.file_title))
def rest_dataf(dataf, female, east):
dataf = dataf.copy()
method = "real" # Gender and East do not change during the simulation
# Including either all people, or only male and female
if female == 1:
condition_female = dataf["female_" + method] == 1
elif female == 0:
condition_female = dataf["female_" + method] == 0
else:
condition_female = np.ones(len(dataf))
# Including either all people, or only east or west germans
if east == 1:
condition_east = dataf["east_" + method] == 1
elif east == 0:
condition_east = dataf["east_" + method] == 0
else:
condition_east = np.ones(len(dataf))
# Output is then sum of both conditions
final_condition = (condition_female).astype(int) \
+ (condition_east).astype(int)
df_out = dataf[final_condition == 2]
return df_out
def get_titles(female, east, status):
title = ""
shorttitle = status
if (female==None) & (east==None):
title = "Employment status: " + status + "; all people"
shorttitle += "_mfew.png"
elif (female==None) & (east==0):
title = "Employment status: " + status + "; all genders, west Germany"
shorttitle += "_mfw.png"
elif (female==None) & (east==1):
title = "Employment status: " + status + "; all genders, east Germany"
shorttitle += "_mfe.png"
elif (female==0) & (east==None):
title = "Employment status: " + status + "; male, whole Germany"
shorttitle += "_mew.png"
elif (female==1) & (east==None):
title = "Employment status: " + status + "; female, whole Germany"
shorttitle += "_few.png"
elif (female==0) & (east==0):
title = "Employment status: " + status + "; male, west Germany"
shorttitle += "_mw.png"
elif (female==0) & (east==1):
title = "Employment status: " + status + "; male, east Germany"
shorttitle += "_me.png"
elif (female==1) & (east==0):
title = "Employment status: " + status + "; female, west Germany"
shorttitle += "_fw.png"
elif (female==1) & (east==1):
title = "Employment status: " + status + "; female, east Germany"
shorttitle += "_fe.png"
return title, shorttitle
def get_titles_incomes(suffix, variable, working, female, fulltime, measure):
w_string = ""
f_string = ""
t_string = ""
if working==1:
w_string = "_working"
else:
pass
if female==1:
f_string = "_female"
elif female==0:
f_string = "_male"
else:
pass
if fulltime==1:
t_string = "_fulltime"
elif fulltime==0:
t_string = "_parttime"
else:
pass
fig_title = suffix + measure + "_" + variable + w_string + f_string + t_string
file_title = fig_title + ".png"
return fig_title, file_title
def wrap_employment_plots(dataf, path):
dataf = dataf.copy()
for emp in np.arange(4):
# All people, all employment status
plot_employment_status_by_age(dataf, emp, path)
# Males, all employment status
plot_employment_status_by_age(dataf, emp, path, female=0)
# Females, all employment status
plot_employment_status_by_age(dataf, emp, path, female=1)
# All_people, east Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, east=1)
# All_people, west Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, east=0)
# Males, east Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=0, east=1)
# Males, west Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=0, east=0)
# Females, east Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=1, east=1)
# Females, west Germany, all employment status
plot_employment_status_by_age(dataf, emp, path, female=1, east=0)
def condition_by_type(dataf, method, working=False, female=None, fulltime=None):
dataf = dataf.copy()
# Condition to include all or only working people
if working:
condition_work = dataf["working_" + method] == 1
else:
condition_work = np.ones(len(dataf))
# Including either all people, or only male and female
if female == 1:
condition_female = dataf["female_" + method] == 1
elif female == 0:
condition_female = dataf["female_" + method] == 0
else:
condition_female = np.ones(len(dataf))
# Including either all people, or only male and female
if fulltime == 1:
condition_fulltime = dataf["fulltime_" + method] == 1
elif fulltime == 0:
condition_fulltime = dataf["parttimetime_" + method] == 1
else:
condition_fulltime = np.ones(len(dataf))
# Output is then sum of both conditions
final_condition = (condition_female).astype(int) \
+ (condition_work).astype(int) \
+ (condition_fulltime).astype(int)
df_out = dataf[final_condition == 3]
return df_out
def restrict(dataf, working=False, female=None, fulltime=None):
dataf = dataf.copy()
out_dici = {"real": condition_by_type(dataf, "real", working, female, fulltime),
"ext": condition_by_type(dataf, "ext", working, female, fulltime)}
return out_dici
def var_by_method_dici(dici, variable, group, measure):
tmp = {}
m_list = ["real", "ext"]
for m in m_list:
if measure == "mean":
tmp[m] = dici[m].groupby(group)[variable + "_" + m].mean()
elif measure == "median":
tmp[m] = dici[m].groupby(group)[variable + "_" + m].median()
elif measure == "p90p50":
p90 = dici[m].groupby(group)[variable + "_" + m].quantile(0.9)
p50 = dici[m].groupby(group)[variable + "_" + m].quantile(0.5)
tmp[m] = p90/p50
elif measure == "p90p10":
p90 = dici[m].groupby(group)[variable + "_" + m].quantile(0.9)
p10 = dici[m].groupby(group)[variable + "_" + m].quantile(0.1)
tmp[m] = p90/p10
elif measure == "p50p10":
p50 = dici[m].groupby(group)[variable + "_" + m].quantile(0.5)
p10 = dici[m].groupby(group)[variable + "_" + m].quantile(0.1)
tmp[m] = p50/p10
elif measure == "gini":
tmp[m] = dici[m].groupby(group)[variable + "_" + m].agg(gini_coefficient)
df_out = pd.DataFrame(tmp)
return df_out
def plot_income_age(dataf, variable, path, working=None, female=None, fulltime=None, measure="mean"):
dataf = dataf.copy()
dici = restrict(dataf, working, female, fulltime)
df_plot = var_by_method_dici(dici, variable, group="age_real", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
df_plot.rename(columns={"age_real": "age"}, inplace=True)
fig_title, file_title = get_titles_incomes("age_", variable, working, female, fulltime, measure)
plot_age(df_plot, fig_title, file_title, path)
def wrap_income_age_plots(dataf, path):
dataf = dataf.copy()
variables = ["gross_earnings", "hours"]
for var in variables:
for m in ["mean", "median"]:
# All people
plot_income_age(dataf, var, path=path, measure=m)
plot_income_age(dataf, var, path=path, female=0, measure=m)
plot_income_age(dataf, var, path=path, female=1, measure=m)
# Conditional on working
plot_income_age(dataf, var, path=path, working=1, measure=m)
plot_income_age(dataf, var, path=path, working=1, female=0, measure=m)
plot_income_age(dataf, var, path=path, working=1, female=1, measure=m)
# Conditional on fulltime
plot_income_age(dataf, var, path=path, fulltime=1, measure=m)
plot_income_age(dataf, var, path=path, fulltime=1, female=0, measure=m)
plot_income_age(dataf, var, path=path, fulltime=1, female=1, measure=m)
def plot_income_year(dataf, variable, path, working=None, female=None, fulltime=None, measure="mean"):
dataf = dataf.copy()
dici = restrict(dataf, working, female, fulltime)
df_plot = var_by_method_dici(dici, variable, group="year", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
fig_title, file_title = get_titles_incomes("year_", variable, working, female, fulltime, measure)
plot_year(df_plot, fig_title, file_title, path)
def plot_income_year2(ploto, measure="mean"):
dici = restrict(ploto.data, ploto.working, ploto.female, ploto.fulltime)
df_plot = var_by_method_dici(dici, ploto.var, group="year", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
fig_title, file_title = get_titles_incomes("year_", ploto.var, ploto.working, ploto.female, ploto.fulltime, measure)
plot_year2(df_plot, fig_title, file_title, ploto)
def wrap_income_year_plots(dataf, path):
dataf = dataf.copy()
variables = ["gross_earnings", "hours"]
for var in variables:
for m in ["mean", "median"]:
# All people
plot_income_year(dataf, var, path=path, measure=m)
plot_income_year(dataf, var, path=path, female=0, measure=m)
plot_income_year(dataf, var, path=path, female=1, measure=m)
# Conditional on working
plot_income_year(dataf, var, path=path, working=1, measure=m)
plot_income_year(dataf, var, path=path, working=1, female=0, measure=m)
plot_income_year(dataf, var, path=path, working=1, female=1, measure=m)
# Conditional on fulltime
plot_income_year(dataf, var, path=path, fulltime=1, measure=m)
plot_income_year(dataf, var, path=path, fulltime=1, female=0, measure=m)
plot_income_year(dataf, var, path=path, fulltime=1, female=1, measure=m)
def plot_inequality_year(dataf, variable, path, working=None, female=None, fulltime=None, measure="mean"):
dataf = dataf.copy()
dici = restrict(dataf, working, female, fulltime)
df_plot = var_by_method_dici(dici, variable, group="year", measure=measure)
df_plot = df_plot.fillna(0)
df_plot.reset_index(inplace=True)
fig_title, file_title = get_titles_incomes("ineq_", variable, working, female, fulltime, measure)
plot_year(df_plot, fig_title, file_title, path)
def wrap_inequality_year_plots(dataf, path):
dataf = dataf.copy()
var = ["gross_earnings", "hours"]
for v in var:
for m in ["p90p50", "p90p10", "p50p10", "gini"]:
plot_inequality_year(dataf, v, path, working=1, measure=m)
plot_inequality_year(dataf, v, path, working=1, female=0, measure=m)
plot_inequality_year(dataf, v, path, working=1, female=1, measure=m)
def gini_coefficient(x):
"""Compute Gini coefficient of array of values"""
diffsum = 0
for i, xi in enumerate(x[:-1], 1):
diffsum += np.sum(np.abs(xi - x[i:]))
return diffsum / (len(x)**2 * np.mean(x))
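# Worked example: for x = [1, 2, 3] the loop above counts each unordered pair
# once, so diffsum = |1-2| + |1-3| + |2-3| = 4 and
# gini = 4 / (3**2 * mean(x)) = 4 / 18 ≈ 0.222. This matches the usual
# definition G = sum_{i,j} |x_i - x_j| / (2 * n**2 * mean(x)), since the full
# double sum is exactly twice diffsum.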
def make_quantile(dataf, var, m_list, q):
dataf = dataf.copy()
for m in m_list:
variable = var + "_" + m
real_q = dataf.groupby(["year"])[variable].quantile(q).to_frame()
real_q.rename(columns={variable: "var"}, inplace=True)
dataf = pd.merge(dataf, real_q, how="left", on="year")
dataf.loc[dataf[variable]>dataf["var"], variable] = dataf["var"]
dataf.drop("var", axis=1, inplace=True)
return dataf
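# Note: make_quantile top-codes a variable at its year-specific q-quantile for
# every method in m_list, e.g. with q=0.95 any gross_earnings_real value above
# the 95th percentile of its year is replaced by that percentile (winsorisation
# from above); see cap_outliers below.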
def cap_outliers(dataf, m_list):
dataf = dataf.copy()
# # Hours
# dataf = make_quantile(dataf, "hours", m_list, 0.99)
# dataf = make_quantile(dataf, "hours_t1", m_list, 0.99)
# dataf = make_quantile(dataf, "hours_t2", m_list, 0.99)
# Gross earnings
dataf = make_quantile(dataf, "gross_earnings", m_list, 0.95)
dataf = make_quantile(dataf, "gross_earnings_t1", m_list, 0.95)
dataf = make_quantile(dataf, "gross_earnings_t2", m_list, 0.95)
return dataf
|
[
"numpy.abs",
"numpy.mean",
"bokeh.plotting.figure",
"pandas.merge",
"bokeh.models.ColumnDataSource",
"pandas.DataFrame",
"pandas.concat",
"numpy.arange"
] |
[((1241, 1365), 'bokeh.plotting.figure', 'figure', ([], {'x_range': 'ylist', 'plot_height': '(250)', 'plot_width': '(1500)', 'title': "('Employment Status by age: West Germany / type: ' + type)"}), "(x_range=ylist, plot_height=250, plot_width=1500, title=\n 'Employment Status by age: West Germany / type: ' + type)\n", (1247, 1365), False, 'from bokeh.plotting import figure\n'), ((1875, 1889), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1887, 1889), True, 'import pandas as pd\n'), ((3093, 3132), 'pandas.concat', 'pd.concat', (["[df_tmp['age'], tmp]"], {'axis': '(1)'}), "([df_tmp['age'], tmp], axis=1)\n", (3102, 3132), True, 'import pandas as pd\n'), ((3402, 3425), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['dataf'], {}), '(dataf)\n', (3418, 3425), False, 'from bokeh.models import ColumnDataSource\n'), ((4037, 4060), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['dataf'], {}), '(dataf)\n', (4053, 4060), False, 'from bokeh.models import ColumnDataSource\n'), ((4651, 4682), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['ploto.df_plot'], {}), '(ploto.df_plot)\n', (4667, 4682), False, 'from bokeh.models import ColumnDataSource\n'), ((8418, 8430), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (8427, 8430), True, 'import numpy as np\n'), ((12147, 12164), 'pandas.DataFrame', 'pd.DataFrame', (['tmp'], {}), '(tmp)\n', (12159, 12164), True, 'import pandas as pd\n'), ((3461, 3500), 'bokeh.plotting.figure', 'figure', ([], {'title': 'fig_title', 'y_range': '(0, 1)'}), '(title=fig_title, y_range=(0, 1))\n', (3467, 3500), False, 'from bokeh.plotting import figure\n'), ((3525, 3548), 'bokeh.plotting.figure', 'figure', ([], {'title': 'fig_title'}), '(title=fig_title)\n', (3531, 3548), False, 'from bokeh.plotting import figure\n'), ((4096, 4135), 'bokeh.plotting.figure', 'figure', ([], {'title': 'fig_title', 'y_range': '(0, 1)'}), '(title=fig_title, y_range=(0, 1))\n', (4102, 4135), False, 'from bokeh.plotting import figure\n'), ((4160, 4183), 'bokeh.plotting.figure', 'figure', ([], {'title': 'fig_title'}), '(title=fig_title)\n', (4166, 4183), False, 'from bokeh.plotting import figure\n'), ((4730, 4759), 'bokeh.plotting.figure', 'figure', ([], {'title': 'ploto.fig_title'}), '(title=ploto.fig_title)\n', (4736, 4759), False, 'from bokeh.plotting import figure\n'), ((4784, 4836), 'bokeh.plotting.figure', 'figure', ([], {'title': 'ploto.fig_title', 'y_range': 'ploto.y_range'}), '(title=ploto.fig_title, y_range=ploto.y_range)\n', (4790, 4836), False, 'from bokeh.plotting import figure\n'), ((17226, 17272), 'pandas.merge', 'pd.merge', (['dataf', 'real_q'], {'how': '"""left"""', 'on': '"""year"""'}), "(dataf, real_q, how='left', on='year')\n", (17234, 17272), True, 'import pandas as pd\n'), ((16871, 16889), 'numpy.abs', 'np.abs', (['(xi - x[i:])'], {}), '(xi - x[i:])\n', (16877, 16889), True, 'import numpy as np\n'), ((16925, 16935), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (16932, 16935), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
import openjij as oj
import cxxjij as cj
def calculate_ising_energy(h, J, spins):
energy = 0.0
for (i, j), Jij in J.items():
energy += Jij*spins[i]*spins[j]
for i, hi in h.items():
energy += hi * spins[i]
return energy
def calculate_qubo_energy(Q, binary):
energy = 0.0
for (i, j), Qij in Q.items():
energy += Qij*binary[i]*binary[j]
return energy
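# Conventions assumed by the helpers above: spins take values in {-1, +1},
# binaries in {0, 1}, and the energies are
#   E_Ising(s) = sum_{(i,j)} J_ij * s_i * s_j + sum_i h_i * s_i
#   E_QUBO(x)  = sum_{(i,j)} Q_ij * x_i * x_j
# with no constant offset term.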
class VariableTypeTest(unittest.TestCase):
def test_variable_type(self):
spin = oj.cast_var_type('SPIN')
self.assertEqual(spin, oj.SPIN)
binary = oj.cast_var_type('BINARY')
self.assertEqual(binary, oj.BINARY)
class ModelTest(unittest.TestCase):
def setUp(self):
self.h = {0: 1, 1: -2}
self.J = {(0, 1): -1, (1, 2): -3, (2, 3): 0.5}
self.spins = {0: 1, 1: -1, 2: 1, 3: 1}
self.Q = {(0, 0): 1, (1, 2): -1, (2, 0): -0.2, (1, 3): 3}
self.binaries = {0: 0, 1: 1, 2: 1, 3: 0}
def test_bqm_constructor(self):
# Test BinaryQuadraticModel constructor
bqm = oj.BinaryQuadraticModel(self.h, self.J)
self.assertEqual(type(bqm.interaction_matrix()), np.ndarray)
self.assertEqual(bqm.vartype, oj.SPIN)
dense_graph = bqm.get_cxxjij_ising_graph(sparse=False)
self.assertTrue(isinstance(dense_graph, cj.graph.Dense))
bqm_qubo = oj.BinaryQuadraticModel.from_qubo(Q=self.Q)
self.assertEqual(bqm_qubo.vartype, oj.BINARY)
def test_interaction_matrix(self):
bqm = oj.BinaryQuadraticModel(self.h, self.J)
ising_matrix = np.array([
[1, -1, 0, 0],
[-1, -2, -3, 0],
[0, -3, 0, 0.5],
[0, 0, 0.5, 0]
])
np.testing.assert_array_equal(
bqm.interaction_matrix(), ising_matrix
)
# check Hij = Jij + Jji
J = self.J.copy()
J[0, 1] /= 3
J[1, 0] = J[0, 1] * 2
bqm = oj.BinaryQuadraticModel(self.h, J)
np.testing.assert_array_equal(bqm.interaction_matrix(), ising_matrix)
def test_transfer_to_cxxjij(self):
bqm = oj.BinaryQuadraticModel(self.h, self.J)
# to Dense
ising_graph = bqm.get_cxxjij_ising_graph(sparse=False)
self.assertEqual(ising_graph.size(), len(bqm.indices))
for i in range(len(bqm.indices)):
for j in range(len(bqm.indices)):
if i != j:
self.assertAlmostEqual(bqm.interaction_matrix()[i,j], ising_graph.get_interactions()[i, j])
else:
# i == j
self.assertAlmostEqual(bqm.interaction_matrix()[i,j], ising_graph.get_interactions()[i, len(bqm.indices)])
self.assertAlmostEqual(bqm.interaction_matrix()[i,j], ising_graph.get_interactions()[len(bqm.indices), i])
self.assertEqual(ising_graph.get_interactions()[i,i], 0)
self.assertEqual(ising_graph.get_interactions()[len(bqm.indices),len(bqm.indices)], 1)
# to Sparse
ising_graph = bqm.get_cxxjij_ising_graph(sparse=True)
self.assertEqual(ising_graph.size(), len(bqm.indices))
for i in range(ising_graph.size()):
for j in ising_graph.adj_nodes(i):
self.assertEqual(bqm.interaction_matrix()[i,j], ising_graph[i,j])
def test_bqm_calc_energy(self):
# Test to calculate energy
# Test Ising energy
bqm = oj.BinaryQuadraticModel(self.h, self.J)
ising_energy_bqm = bqm.energy(self.spins)
true_ising_e = calculate_ising_energy(self.h, self.J, self.spins)
self.assertEqual(ising_energy_bqm, true_ising_e)
# Test QUBO energy
bqm = oj.BinaryQuadraticModel.from_qubo(Q=self.Q)
qubo_energy_bqm = bqm.energy(self.binaries)
true_qubo_e = calculate_qubo_energy(self.Q, self.binaries)
self.assertEqual(qubo_energy_bqm, true_qubo_e)
# QUBO == Ising
spins = {0: 1, 1: 1, 2: -1, 3: 1}
binary = {0: 1, 1: 1, 2: 0, 3: 1}
qubo_bqm = oj.BinaryQuadraticModel.from_qubo(Q=self.Q)
# ising_mat = qubo_bqm.ising_interactions()
# h, J = {}, {}
# for i in range(len(ising_mat)-1):
# for j in range(i, len(ising_mat)):
# if i == j:
# h[i] = ising_mat[i][i]
# else:
# J[(i, j)] = ising_mat[i][j]
qubo_energy = qubo_bqm.energy(binary)
self.assertEqual(qubo_energy, qubo_bqm.energy(spins, convert_sample=True))
def test_energy_consistency(self):
bqm = oj.BinaryQuadraticModel(self.h, self.J, var_type='SPIN')
dense_ising_graph = bqm.get_cxxjij_ising_graph(sparse=False)
sparse_ising_graph = bqm.get_cxxjij_ising_graph(sparse=True)
spins = {0: -1, 1: -1, 2: -1, 3: -1}
self.assertAlmostEqual(dense_ising_graph.calc_energy([spins[i] for i in range(len(spins))]), bqm.energy(spins))
self.assertAlmostEqual(sparse_ising_graph.calc_energy([spins[i] for i in range(len(spins))]), bqm.energy(spins))
def test_bqm(self):
h = {}
J = {(0, 1): -1.0, (1, 2): -3.0}
bqm = oj.BinaryQuadraticModel(h, J)
self.assertEqual(J, bqm.get_quadratic())
self.assertEqual(type(bqm.interaction_matrix()), np.ndarray)
correct_mat = np.array([[0, -1, 0, ], [-1, 0, -3], [0, -3, 0]])
np.testing.assert_array_equal(
            bqm.interaction_matrix(), correct_mat.astype(float))
def test_chimera_converter(self):
h = {}
J = {(0, 4): -1.0, (6, 2): -3.0, (16, 0): 4}
chimera = oj.ChimeraModel(h, J, offset=0, unit_num_L=2)
self.assertEqual(chimera.chimera_coordinate(
4, unit_num_L=2), (0, 0, 4))
self.assertEqual(chimera.chimera_coordinate(
12, unit_num_L=2), (0, 1, 4))
self.assertEqual(chimera.chimera_coordinate(
16, unit_num_L=2), (1, 0, 0))
def test_chimera(self):
h = {}
J = {(0, 4): -1.0, (6, 2): -3.0}
bqm = oj.ChimeraModel(h, J, offset=0, unit_num_L=3)
self.assertTrue(bqm.validate_chimera())
J = {(0, 1): -1}
bqm = oj.ChimeraModel(h, J, unit_num_L=3)
with self.assertRaises(ValueError):
bqm.validate_chimera()
J = {(4, 12): -1}
bqm = oj.ChimeraModel(h, J, unit_num_L=2)
self.assertTrue(bqm.validate_chimera())
J = {(0, 4): -1, (5, 13): 1, (24, 8): 2,
(18, 20): 1, (16, 0): 0.5, (19, 23): -2}
h = {13: 2}
chimera = oj.ChimeraModel(h, J, unit_num_L=2)
self.assertEqual(chimera.to_index(1, 1, 1, unit_num_L=2), 25)
self.assertTrue(chimera.validate_chimera())
def test_ising_dict(self):
Q = {(0, 4): -1.0, (6, 2): -3.0}
bqm = oj.ChimeraModel.from_qubo(Q=Q, unit_num_L=3)
def test_king_graph(self):
h = {}
J = {(0, 1): -1.0, (1, 2): -3.0}
king_interaction = [[0, 0, 1, 0, -1.0], [1, 0, 2, 0, -3.0]]
king_graph = oj.KingGraph(machine_type="ASIC", linear=h, quadratic=J)
correct_mat = np.array([[0, -1, 0, ], [-1, 0, -3], [0, -3, 0]])
np.testing.assert_array_equal(
            king_graph.interaction_matrix(), correct_mat.astype(float))
self.assertCountEqual(king_interaction, king_graph._ising_king_graph)
king_graph = oj.KingGraph(
machine_type="ASIC", king_graph=king_interaction)
np.testing.assert_array_equal(
king_interaction, king_graph._ising_king_graph)
king_graph = oj.KingGraph.from_qubo(Q={(0, 1): -1}, machine_type='ASIC')
king_interaction = [[0, 0, 0, 0, -0.25],
[0, 0, 1, 0, -0.25], [1, 0, 1, 0, -0.25]]
self.assertCountEqual(king_interaction, king_graph._ising_king_graph)
def test_get_chimera_graph(self):
c_model = oj.ChimeraModel.from_qubo(Q={(0, 4): -1, (1, 1): -1, (1, 5): 1}, unit_num_L=2)
chimera = c_model.get_cxxjij_ising_graph()
self.assertIsInstance(chimera, cj.graph.Chimera)
c_model = oj.ChimeraModel.from_qubo(Q={((0, 0, 1), (0, 0, 4)): -1, ((0, 0, 4), (0, 0, 2)): -1},
unit_num_L=2)
chimera = c_model.get_cxxjij_ising_graph()
self.assertIsInstance(chimera, cj.graph.Chimera)
if __name__ == '__main__':
unittest.main()
|
[
"openjij.cast_var_type",
"openjij.ChimeraModel",
"openjij.BinaryQuadraticModel",
"openjij.BinaryQuadraticModel.from_qubo",
"openjij.KingGraph",
"numpy.array",
"openjij.KingGraph.from_qubo",
"openjij.ChimeraModel.from_qubo",
"unittest.main",
"numpy.testing.assert_array_equal"
] |
[((8441, 8456), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8454, 8456), False, 'import unittest\n'), ((534, 558), 'openjij.cast_var_type', 'oj.cast_var_type', (['"""SPIN"""'], {}), "('SPIN')\n", (550, 558), True, 'import openjij as oj\n'), ((617, 643), 'openjij.cast_var_type', 'oj.cast_var_type', (['"""BINARY"""'], {}), "('BINARY')\n", (633, 643), True, 'import openjij as oj\n'), ((1096, 1135), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['self.h', 'self.J'], {}), '(self.h, self.J)\n', (1119, 1135), True, 'import openjij as oj\n'), ((1402, 1445), 'openjij.BinaryQuadraticModel.from_qubo', 'oj.BinaryQuadraticModel.from_qubo', ([], {'Q': 'self.Q'}), '(Q=self.Q)\n', (1435, 1445), True, 'import openjij as oj\n'), ((1554, 1593), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['self.h', 'self.J'], {}), '(self.h, self.J)\n', (1577, 1593), True, 'import openjij as oj\n'), ((1617, 1692), 'numpy.array', 'np.array', (['[[1, -1, 0, 0], [-1, -2, -3, 0], [0, -3, 0, 0.5], [0, 0, 0.5, 0]]'], {}), '([[1, -1, 0, 0], [-1, -2, -3, 0], [0, -3, 0, 0.5], [0, 0, 0.5, 0]])\n', (1625, 1692), True, 'import numpy as np\n'), ((1977, 2011), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['self.h', 'J'], {}), '(self.h, J)\n', (2000, 2011), True, 'import openjij as oj\n'), ((2144, 2183), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['self.h', 'self.J'], {}), '(self.h, self.J)\n', (2167, 2183), True, 'import openjij as oj\n'), ((3470, 3509), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['self.h', 'self.J'], {}), '(self.h, self.J)\n', (3493, 3509), True, 'import openjij as oj\n'), ((3733, 3776), 'openjij.BinaryQuadraticModel.from_qubo', 'oj.BinaryQuadraticModel.from_qubo', ([], {'Q': 'self.Q'}), '(Q=self.Q)\n', (3766, 3776), True, 'import openjij as oj\n'), ((4079, 4122), 'openjij.BinaryQuadraticModel.from_qubo', 'oj.BinaryQuadraticModel.from_qubo', ([], {'Q': 'self.Q'}), '(Q=self.Q)\n', (4112, 4122), True, 'import openjij as oj\n'), ((4625, 4681), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['self.h', 'self.J'], {'var_type': '"""SPIN"""'}), "(self.h, self.J, var_type='SPIN')\n", (4648, 4681), True, 'import openjij as oj\n'), ((5201, 5230), 'openjij.BinaryQuadraticModel', 'oj.BinaryQuadraticModel', (['h', 'J'], {}), '(h, J)\n', (5224, 5230), True, 'import openjij as oj\n'), ((5381, 5428), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, -3], [0, -3, 0]]'], {}), '([[0, -1, 0], [-1, 0, -3], [0, -3, 0]])\n', (5389, 5428), True, 'import numpy as np\n'), ((5663, 5708), 'openjij.ChimeraModel', 'oj.ChimeraModel', (['h', 'J'], {'offset': '(0)', 'unit_num_L': '(2)'}), '(h, J, offset=0, unit_num_L=2)\n', (5678, 5708), True, 'import openjij as oj\n'), ((6092, 6137), 'openjij.ChimeraModel', 'oj.ChimeraModel', (['h', 'J'], {'offset': '(0)', 'unit_num_L': '(3)'}), '(h, J, offset=0, unit_num_L=3)\n', (6107, 6137), True, 'import openjij as oj\n'), ((6226, 6261), 'openjij.ChimeraModel', 'oj.ChimeraModel', (['h', 'J'], {'unit_num_L': '(3)'}), '(h, J, unit_num_L=3)\n', (6241, 6261), True, 'import openjij as oj\n'), ((6382, 6417), 'openjij.ChimeraModel', 'oj.ChimeraModel', (['h', 'J'], {'unit_num_L': '(2)'}), '(h, J, unit_num_L=2)\n', (6397, 6417), True, 'import openjij as oj\n'), ((6608, 6643), 'openjij.ChimeraModel', 'oj.ChimeraModel', (['h', 'J'], {'unit_num_L': '(2)'}), '(h, J, unit_num_L=2)\n', (6623, 6643), True, 'import openjij as oj\n'), ((6854, 6898), 'openjij.ChimeraModel.from_qubo', 'oj.ChimeraModel.from_qubo', ([], {'Q': 'Q', 
'unit_num_L': '(3)'}), '(Q=Q, unit_num_L=3)\n', (6879, 6898), True, 'import openjij as oj\n'), ((7077, 7133), 'openjij.KingGraph', 'oj.KingGraph', ([], {'machine_type': '"""ASIC"""', 'linear': 'h', 'quadratic': 'J'}), "(machine_type='ASIC', linear=h, quadratic=J)\n", (7089, 7133), True, 'import openjij as oj\n'), ((7156, 7203), 'numpy.array', 'np.array', (['[[0, -1, 0], [-1, 0, -3], [0, -3, 0]]'], {}), '([[0, -1, 0], [-1, 0, -3], [0, -3, 0]])\n', (7164, 7203), True, 'import numpy as np\n'), ((7425, 7487), 'openjij.KingGraph', 'oj.KingGraph', ([], {'machine_type': '"""ASIC"""', 'king_graph': 'king_interaction'}), "(machine_type='ASIC', king_graph=king_interaction)\n", (7437, 7487), True, 'import openjij as oj\n'), ((7518, 7595), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['king_interaction', 'king_graph._ising_king_graph'], {}), '(king_interaction, king_graph._ising_king_graph)\n', (7547, 7595), True, 'import numpy as np\n'), ((7631, 7690), 'openjij.KingGraph.from_qubo', 'oj.KingGraph.from_qubo', ([], {'Q': '{(0, 1): -1}', 'machine_type': '"""ASIC"""'}), "(Q={(0, 1): -1}, machine_type='ASIC')\n", (7653, 7690), True, 'import openjij as oj\n'), ((7949, 8027), 'openjij.ChimeraModel.from_qubo', 'oj.ChimeraModel.from_qubo', ([], {'Q': '{(0, 4): -1, (1, 1): -1, (1, 5): 1}', 'unit_num_L': '(2)'}), '(Q={(0, 4): -1, (1, 1): -1, (1, 5): 1}, unit_num_L=2)\n', (7974, 8027), True, 'import openjij as oj\n'), ((8155, 8258), 'openjij.ChimeraModel.from_qubo', 'oj.ChimeraModel.from_qubo', ([], {'Q': '{((0, 0, 1), (0, 0, 4)): -1, ((0, 0, 4), (0, 0, 2)): -1}', 'unit_num_L': '(2)'}), '(Q={((0, 0, 1), (0, 0, 4)): -1, ((0, 0, 4), (0, 0,\n 2)): -1}, unit_num_L=2)\n', (8180, 8258), True, 'import openjij as oj\n')]
|
from __future__ import print_function
import os, sys
import pickle
import time
import glob
import numpy as np
import torch
from model import PVSE
from loss import cosine_sim, order_sim
from vocab import Vocabulary
from data import get_test_loader
from logger import AverageMeter
from option import parser, verify_input_args
ORDER_BATCH_SIZE = 100
def encode_data(model, data_loader, use_gpu=False):
"""Encode all images and sentences loadable by data_loader"""
# switch to evaluate mode
model.eval()
use_mil = model.module.mil if hasattr(model, 'module') else model.mil
# numpy array to keep all the embeddings
img_embs, txt_embs = None, None
for i, data in enumerate(data_loader):
img, txt, txt_len, ids = data
if torch.cuda.is_available():
img, txt, txt_len = img.cuda(), txt.cuda(), txt_len.cuda()
# compute the embeddings
img_emb, txt_emb, _, _, _, _ = model.forward(img, txt, txt_len)
del img, txt, txt_len
# initialize the output embeddings
if img_embs is None:
if use_gpu:
emb_sz = [len(data_loader.dataset), img_emb.size(1), img_emb.size(2)] \
if use_mil else [len(data_loader.dataset), img_emb.size(1)]
img_embs = torch.zeros(emb_sz, dtype=img_emb.dtype, requires_grad=False).cuda()
txt_embs = torch.zeros(emb_sz, dtype=txt_emb.dtype, requires_grad=False).cuda()
else:
emb_sz = (len(data_loader.dataset), img_emb.size(1), img_emb.size(2)) \
if use_mil else (len(data_loader.dataset), img_emb.size(1))
img_embs = np.zeros(emb_sz)
txt_embs = np.zeros(emb_sz)
    # preserve the embeddings (converted to numpy on CPU; kept as GPU tensors when use_gpu)
img_embs[ids] = img_emb if use_gpu else img_emb.data.cpu().numpy().copy()
txt_embs[ids] = txt_emb if use_gpu else txt_emb.data.cpu().numpy().copy()
return img_embs, txt_embs
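# Shape note: with multiple-instance learning enabled (use_mil) the returned
# embeddings are (num_samples, K, D); otherwise (num_samples, D). When use_gpu
# is True they remain torch tensors on the GPU, otherwise numpy arrays.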
def i2t(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Images->Text (Image Annotation)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
index_list = []
ranks, top1 = np.zeros(npts), np.zeros(npts)
for index in range(npts):
# Get query image
im = images[nreps * index]
im = im.reshape((1,) + im.shape)
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = im.mm(sentences.t()).view(-1)
else:
_, K, D = im.shape
sim_kk = im.view(-1, D).mm(sentences.view(-1, D).t())
sim_kk = sim_kk.view(im.size(0), K, sentences.size(0), K)
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(im.size(0), -1, sentences.size(0))
sim, _ = sim_kk.max(dim=1)
sim = sim.flatten()
else:
if order:
if index % ORDER_BATCH_SIZE == 0:
mx = min(images.shape[0], nreps * (index + ORDER_BATCH_SIZE))
im2 = images[nreps * index:mx:nreps]
sim_batch = order_sim(torch.Tensor(im2).cuda(), torch.Tensor(sentences).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[index % ORDER_BATCH_SIZE]
else:
sim = np.tensordot(im, sentences, axes=[2, 2]).max(axis=(0,1,3)).flatten() \
if len(sentences.shape) == 3 else np.dot(im, sentences.T).flatten()
if use_gpu:
_, inds_gpu = sim.sort()
inds = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds = np.argsort(sim)[::-1]
index_list.append(inds[0])
# Score
rank = 1e20
for i in range(nreps * index, nreps * (index + 1), 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
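# Metric note: r1/r5/r10 are Recall@K in percent -- the share of image queries
# whose best-ranked ground-truth caption appears in the top K -- while
# medr/meanr are the (1-indexed) median and mean rank of that caption. The
# same metrics are reported by t2i below for the text-to-image direction.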
def t2i(images, sentences, nreps=1, npts=None, return_ranks=False, order=False, use_gpu=False):
"""
Text->Images (Image Search)
Images: (nreps*N, K) matrix of images
Captions: (nreps*N, K) matrix of sentences
"""
if use_gpu:
assert not order, 'Order embedding not supported in GPU mode'
if npts is None:
npts = int(images.shape[0] / nreps)
if use_gpu:
ims = torch.stack([images[i] for i in range(0, len(images), nreps)])
else:
ims = np.array([images[i] for i in range(0, len(images), nreps)])
ranks, top1 = np.zeros(nreps * npts), np.zeros(nreps * npts)
for index in range(npts):
# Get query sentences
queries = sentences[nreps * index:nreps * (index + 1)]
# Compute scores
if use_gpu:
if len(sentences.shape) == 2:
sim = queries.mm(ims.t())
else:
sim_kk = queries.view(-1, queries.size(-1)).mm(ims.view(-1, ims.size(-1)).t())
sim_kk = sim_kk.view(queries.size(0), queries.size(1), ims.size(0), ims.size(1))
sim_kk = sim_kk.permute(0,1,3,2).contiguous()
sim_kk = sim_kk.view(queries.size(0), -1, ims.size(0))
sim, _ = sim_kk.max(dim=1)
else:
if order:
if nreps * index % ORDER_BATCH_SIZE == 0:
mx = min(sentences.shape[0], nreps * index + ORDER_BATCH_SIZE)
sentences_batch = sentences[nreps * index:mx]
sim_batch = order_sim(torch.Tensor(images).cuda(),
torch.Tensor(sentences_batch).cuda())
sim_batch = sim_batch.cpu().numpy()
sim = sim_batch[:, (nreps * index) % ORDER_BATCH_SIZE:(nreps * index) % ORDER_BATCH_SIZE + nreps].T
else:
sim = np.tensordot(queries, ims, axes=[2, 2]).max(axis=(1,3)) \
if len(sentences.shape) == 3 else np.dot(queries, ims.T)
inds = np.zeros(sim.shape)
for i in range(len(inds)):
if use_gpu:
_, inds_gpu = sim[i].sort()
inds[i] = inds_gpu.cpu().numpy().copy()[::-1]
else:
inds[i] = np.argsort(sim[i])[::-1]
ranks[nreps * index + i] = np.where(inds[i] == index)[0][0]
top1[nreps * index + i] = inds[i][0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def convert_old_state_dict(x, model, multi_gpu=False):
params = model.state_dict()
prefix = ['module.img_enc.', 'module.txt_enc.'] \
if multi_gpu else ['img_enc.', 'txt_enc.']
for i, old_params in enumerate(x):
for key, val in old_params.items():
key = prefix[i] + key.replace('module.','').replace('our_model', 'pie_net')
assert key in params, '{} not found in model state_dict'.format(key)
params[key] = val
return params
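# Key-mapping example (hypothetical parameter name): an old checkpoint key
# 'module.our_model.fc.weight' belonging to the text branch (i == 1) becomes
# 'txt_enc.pie_net.fc.weight' in single-GPU mode, or
# 'module.txt_enc.pie_net.fc.weight' when multi_gpu is True.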
def evalrank(model, args, split='test'):
print('Loading dataset')
data_loader = get_test_loader(args, vocab)
print('Computing results... (eval_on_gpu={})'.format(args.eval_on_gpu))
img_embs, txt_embs = encode_data(model, data_loader, args.eval_on_gpu)
n_samples = img_embs.shape[0]
nreps = 5 if args.data_name == 'coco' else 1
print('Images: %d, Sentences: %d' % (img_embs.shape[0] / nreps, txt_embs.shape[0]))
# 5fold cross-validation, only for MSCOCO
mean_metrics = None
if args.data_name == 'coco':
results = []
for i in range(5):
r, rt0 = i2t(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
print("Image to text: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % r)
ri, rti0 = t2i(img_embs[i*5000:(i + 1)*5000], txt_embs[i*5000:(i + 1)*5000],
nreps=nreps, return_ranks=True, order=args.order, use_gpu=args.eval_on_gpu)
if i == 0:
rt, rti = rt0, rti0
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("Text to image: %.2f, %.2f, %.2f, %.2f (%.2f), %.2f (%.2f)" % ri)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
print("rsum: %.2f ar: %.2f ari: %.2f" % (rsum, ar, ari))
results += [list(r) + list(ri) + [ar, ari, rsum]]
mean_metrics = tuple(np.array(results).mean(axis=0).flatten())
print("-----------------------------------")
print("Mean metrics from 5-fold evaluation: ")
print("rsum: %.2f" % (mean_metrics[-1] * 6))
print("Average i2t Recall: %.2f" % mean_metrics[-3])
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[:7])
print("Average t2i Recall: %.2f" % mean_metrics[-2])
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % mean_metrics[7:14])
# no cross-validation, full evaluation
r, rt = i2t(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ri, rti = t2i(img_embs, txt_embs, nreps=nreps, return_ranks=True, use_gpu=args.eval_on_gpu)
ar = (r[0] + r[1] + r[2]) / 3
ari = (ri[0] + ri[1] + ri[2]) / 3
rsum = r[0] + r[1] + r[2] + ri[0] + ri[1] + ri[2]
r = (r[0], r[1], r[2], r[3], r[3] / n_samples, r[4], r[4] / n_samples)
ri = (ri[0], ri[1], ri[2], ri[3], ri[3] / n_samples, ri[4], ri[4] / n_samples)
print("rsum: %.2f" % rsum)
print("Average i2t Recall: %.2f" % ar)
print("Image to text: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % r)
print("Average t2i Recall: %.2f" % ari)
print("Text to image: %.2f %.2f %.2f %.2f (%.2f) %.2f (%.2f)" % ri)
return mean_metrics
if __name__ == '__main__':
multi_gpu = torch.cuda.device_count() > 1
args = verify_input_args(parser.parse_args())
opt = verify_input_args(parser.parse_args())
# load vocabulary used by the model
with open('./vocab/%s_vocab.pkl' % args.data_name, 'rb') as f:
vocab = pickle.load(f)
args.vocab_size = len(vocab)
# load model and options
assert os.path.isfile(args.ckpt)
model = PVSE(vocab.word2idx, args)
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda() if multi_gpu else model
torch.backends.cudnn.benchmark = True
model.load_state_dict(torch.load(args.ckpt))
# evaluate
metrics = evalrank(model, args, split='test')
|
[
"model.PVSE",
"numpy.median",
"numpy.where",
"torch.load",
"numpy.tensordot",
"pickle.load",
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.Tensor",
"os.path.isfile",
"numpy.argsort",
"data.get_test_loader",
"torch.cuda.is_available",
"numpy.zeros",
"numpy.dot",
"numpy.array",
"torch.zeros",
"option.parser.parse_args"
] |
[((7270, 7298), 'data.get_test_loader', 'get_test_loader', (['args', 'vocab'], {}), '(args, vocab)\n', (7285, 7298), False, 'from data import get_test_loader\n'), ((10374, 10399), 'os.path.isfile', 'os.path.isfile', (['args.ckpt'], {}), '(args.ckpt)\n', (10388, 10399), False, 'import os, sys\n'), ((10410, 10436), 'model.PVSE', 'PVSE', (['vocab.word2idx', 'args'], {}), '(vocab.word2idx, args)\n', (10414, 10436), False, 'from model import PVSE\n'), ((10442, 10467), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10465, 10467), False, 'import torch\n'), ((743, 768), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (766, 768), False, 'import torch\n'), ((2278, 2292), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (2286, 2292), True, 'import numpy as np\n'), ((2294, 2308), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (2302, 2308), True, 'import numpy as np\n'), ((4757, 4779), 'numpy.zeros', 'np.zeros', (['(nreps * npts)'], {}), '(nreps * npts)\n', (4765, 4779), True, 'import numpy as np\n'), ((4781, 4803), 'numpy.zeros', 'np.zeros', (['(nreps * npts)'], {}), '(nreps * npts)\n', (4789, 4803), True, 'import numpy as np\n'), ((6021, 6040), 'numpy.zeros', 'np.zeros', (['sim.shape'], {}), '(sim.shape)\n', (6029, 6040), True, 'import numpy as np\n'), ((10049, 10074), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10072, 10074), False, 'import torch\n'), ((10107, 10126), 'option.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (10124, 10126), False, 'from option import parser, verify_input_args\n'), ((10154, 10173), 'option.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (10171, 10173), False, 'from option import parser, verify_input_args\n'), ((10291, 10305), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10302, 10305), False, 'import pickle\n'), ((10607, 10628), 'torch.load', 'torch.load', (['args.ckpt'], {}), '(args.ckpt)\n', (10617, 10628), False, 'import torch\n'), ((4042, 4058), 'numpy.median', 'np.median', (['ranks'], {}), '(ranks)\n', (4051, 4058), True, 'import numpy as np\n'), ((6553, 6569), 'numpy.median', 'np.median', (['ranks'], {}), '(ranks)\n', (6562, 6569), True, 'import numpy as np\n'), ((1561, 1577), 'numpy.zeros', 'np.zeros', (['emb_sz'], {}), '(emb_sz)\n', (1569, 1577), True, 'import numpy as np\n'), ((1597, 1613), 'numpy.zeros', 'np.zeros', (['emb_sz'], {}), '(emb_sz)\n', (1605, 1613), True, 'import numpy as np\n'), ((3564, 3579), 'numpy.argsort', 'np.argsort', (['sim'], {}), '(sim)\n', (3574, 3579), True, 'import numpy as np\n'), ((3717, 3736), 'numpy.where', 'np.where', (['(inds == i)'], {}), '(inds == i)\n', (3725, 3736), True, 'import numpy as np\n'), ((3873, 3892), 'numpy.where', 'np.where', (['(ranks < 1)'], {}), '(ranks < 1)\n', (3881, 3892), True, 'import numpy as np\n'), ((3929, 3948), 'numpy.where', 'np.where', (['(ranks < 5)'], {}), '(ranks < 5)\n', (3937, 3948), True, 'import numpy as np\n'), ((3986, 4006), 'numpy.where', 'np.where', (['(ranks < 10)'], {}), '(ranks < 10)\n', (3994, 4006), True, 'import numpy as np\n'), ((5986, 6008), 'numpy.dot', 'np.dot', (['queries', 'ims.T'], {}), '(queries, ims.T)\n', (5992, 6008), True, 'import numpy as np\n'), ((6210, 6228), 'numpy.argsort', 'np.argsort', (['sim[i]'], {}), '(sim[i])\n', (6220, 6228), True, 'import numpy as np\n'), ((6268, 6294), 'numpy.where', 'np.where', (['(inds[i] == index)'], {}), '(inds[i] == index)\n', (6276, 6294), True, 'import numpy as np\n'), ((6384, 6403), 'numpy.where', 'np.where', 
(['(ranks < 1)'], {}), '(ranks < 1)\n', (6392, 6403), True, 'import numpy as np\n'), ((6440, 6459), 'numpy.where', 'np.where', (['(ranks < 5)'], {}), '(ranks < 5)\n', (6448, 6459), True, 'import numpy as np\n'), ((6497, 6517), 'numpy.where', 'np.where', (['(ranks < 10)'], {}), '(ranks < 10)\n', (6505, 6517), True, 'import numpy as np\n'), ((10481, 10509), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (10502, 10509), False, 'import torch\n'), ((1217, 1278), 'torch.zeros', 'torch.zeros', (['emb_sz'], {'dtype': 'img_emb.dtype', 'requires_grad': '(False)'}), '(emb_sz, dtype=img_emb.dtype, requires_grad=False)\n', (1228, 1278), False, 'import torch\n'), ((1305, 1366), 'torch.zeros', 'torch.zeros', (['emb_sz'], {'dtype': 'txt_emb.dtype', 'requires_grad': '(False)'}), '(emb_sz, dtype=txt_emb.dtype, requires_grad=False)\n', (1316, 1366), False, 'import torch\n'), ((3410, 3433), 'numpy.dot', 'np.dot', (['im', 'sentences.T'], {}), '(im, sentences.T)\n', (3416, 3433), True, 'import numpy as np\n'), ((5882, 5921), 'numpy.tensordot', 'np.tensordot', (['queries', 'ims'], {'axes': '[2, 2]'}), '(queries, ims, axes=[2, 2])\n', (5894, 5921), True, 'import numpy as np\n'), ((8750, 8767), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (8758, 8767), True, 'import numpy as np\n'), ((3113, 3130), 'torch.Tensor', 'torch.Tensor', (['im2'], {}), '(im2)\n', (3125, 3130), False, 'import torch\n'), ((3139, 3162), 'torch.Tensor', 'torch.Tensor', (['sentences'], {}), '(sentences)\n', (3151, 3162), False, 'import torch\n'), ((5602, 5622), 'torch.Tensor', 'torch.Tensor', (['images'], {}), '(images)\n', (5614, 5622), False, 'import torch\n'), ((5664, 5693), 'torch.Tensor', 'torch.Tensor', (['sentences_batch'], {}), '(sentences_batch)\n', (5676, 5693), False, 'import torch\n'), ((3293, 3333), 'numpy.tensordot', 'np.tensordot', (['im', 'sentences'], {'axes': '[2, 2]'}), '(im, sentences, axes=[2, 2])\n', (3305, 3333), True, 'import numpy as np\n')]
|
"""
Model select class1 single allele models.
"""
import argparse
import os
import signal
import sys
import time
import traceback
import random
from functools import partial
from pprint import pprint
import numpy
import pandas
from scipy.stats import kendalltau, percentileofscore, pearsonr
from sklearn.metrics import roc_auc_score
import tqdm # progress bar
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
from .class1_affinity_predictor import Class1AffinityPredictor
from .common import normalize_allele_name
from .encodable_sequences import EncodableSequences
from .common import configure_logging, random_peptides
from .local_parallelism import worker_pool_with_gpu_assignments_from_args, add_local_parallelism_args
from .regression_target import from_ic50
# To avoid pickling large matrices to send to child processes when running in
# parallel, we use this global variable as a place to store data. Data that is
# stored here before creating the thread pool will be inherited to the child
# processes upon fork() call, allowing us to share large data with the workers
# via shared memory.
GLOBAL_DATA = {}
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--data",
metavar="FILE.csv",
required=False,
help=(
"Model selection data CSV. Expected columns: "
"allele, peptide, measurement_value"))
parser.add_argument(
"--exclude-data",
metavar="FILE.csv",
required=False,
help=(
"Data to EXCLUDE from model selection. Useful to specify the original "
"training data used"))
parser.add_argument(
"--models-dir",
metavar="DIR",
required=True,
help="Directory to read models")
parser.add_argument(
"--out-models-dir",
metavar="DIR",
required=True,
help="Directory to write selected models")
parser.add_argument(
"--out-unselected-predictions",
metavar="FILE.csv",
help="Write predictions for validation data using unselected predictor to "
"FILE.csv")
parser.add_argument(
"--unselected-accuracy-scorer",
metavar="SCORER",
default="combined:mass-spec,mse")
parser.add_argument(
"--unselected-accuracy-scorer-num-samples",
type=int,
default=1000)
parser.add_argument(
"--unselected-accuracy-percentile-threshold",
type=float,
metavar="X",
default=95)
parser.add_argument(
"--allele",
default=None,
nargs="+",
help="Alleles to select models for. If not specified, all alleles with "
"enough measurements will be used.")
parser.add_argument(
"--combined-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using combined selector")
parser.add_argument(
"--combined-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using combined selector")
parser.add_argument(
"--combined-min-contribution-percent",
type=float,
default=1.0,
metavar="X",
help="Use only model selectors that can contribute at least X %% to the "
"total score. Default: %(default)s")
parser.add_argument(
"--mass-spec-min-measurements",
type=int,
metavar="N",
default=1,
help="Min number of measurements required for an allele to use mass-spec model "
"selection")
parser.add_argument(
"--mass-spec-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using mass-spec selector")
parser.add_argument(
"--mass-spec-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using mass-spec selector")
parser.add_argument(
"--mse-min-measurements",
type=int,
metavar="N",
default=1,
help="Min number of measurements required for an allele to use MSE model "
"selection")
parser.add_argument(
"--mse-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using MSE selector")
parser.add_argument(
"--mse-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using MSE selector")
parser.add_argument(
"--scoring",
nargs="+",
default=["mse", "consensus"],
help="Scoring procedures to use in order")
parser.add_argument(
"--consensus-min-models",
type=int,
default=8,
metavar="N",
help="Min number of models to select per allele when using consensus selector")
parser.add_argument(
"--consensus-max-models",
type=int,
default=1000,
metavar="N",
help="Max number of models to select per allele when using consensus selector")
parser.add_argument(
"--consensus-num-peptides-per-length",
type=int,
default=10000,
help="Num peptides per length to use for consensus scoring")
parser.add_argument(
"--mass-spec-regex",
metavar="REGEX",
default="mass[- ]spec",
help="Regular expression for mass-spec data. Runs on measurement_source col."
"Default: %(default)s.")
parser.add_argument(
"--verbosity",
type=int,
help="Keras verbosity. Default: %(default)s",
default=0)
add_local_parallelism_args(parser)
def run(argv=sys.argv[1:]):
global GLOBAL_DATA
# On sigusr1 print stack trace
print("To show stack trace, run:\nkill -s USR1 %d" % os.getpid())
signal.signal(signal.SIGUSR1, lambda sig, frame: traceback.print_stack())
args = parser.parse_args(argv)
args.out_models_dir = os.path.abspath(args.out_models_dir)
configure_logging(verbose=args.verbosity > 1)
input_predictor = Class1AffinityPredictor.load(args.models_dir)
print("Loaded: %s" % input_predictor)
if args.allele:
alleles = [normalize_allele_name(a) for a in args.allele]
else:
alleles = input_predictor.supported_alleles
metadata_dfs = {}
if args.data:
df = pandas.read_csv(args.data)
print("Loaded data: %s" % (str(df.shape)))
df = df.loc[
(df.peptide.str.len() >= 8) & (df.peptide.str.len() <= 15)
]
print("Subselected to 8-15mers: %s" % (str(df.shape)))
# Allele names in data are assumed to be already normalized.
df = df.loc[df.allele.isin(alleles)].dropna()
print("Selected %d alleles: %s" % (len(alleles), ' '.join(alleles)))
if args.exclude_data:
exclude_df = pandas.read_csv(args.exclude_data)
metadata_dfs["model_selection_exclude"] = exclude_df
print("Loaded exclude data: %s" % (str(df.shape)))
df["_key"] = df.allele + "__" + df.peptide
exclude_df["_key"] = exclude_df.allele + "__" + exclude_df.peptide
df["_excluded"] = df._key.isin(exclude_df._key.unique())
print("Excluding measurements per allele (counts): ")
print(df.groupby("allele")._excluded.sum())
print("Excluding measurements per allele (fractions): ")
print(df.groupby("allele")._excluded.mean())
df = df.loc[~df._excluded]
del df["_excluded"]
del df["_key"]
print("Reduced data to: %s" % (str(df.shape)))
metadata_dfs["model_selection_data"] = df
df["mass_spec"] = df.measurement_source.str.contains(
args.mass_spec_regex)
else:
df = None
if args.out_unselected_predictions:
df["unselected_prediction"] = input_predictor.predict(
alleles=df.allele.values,
peptides=df.peptide.values)
df.to_csv(args.out_unselected_predictions)
print("Wrote: %s" % args.out_unselected_predictions)
selectors = {}
selector_to_model_selection_kwargs = {}
def make_selector(
scoring,
combined_min_contribution_percent=args.combined_min_contribution_percent):
if scoring in selectors:
return (
selectors[scoring], selector_to_model_selection_kwargs[scoring])
start = time.time()
if scoring.startswith("combined:"):
model_selection_kwargs = {
'min_models': args.combined_min_models,
'max_models': args.combined_max_models,
}
component_selectors = []
for component_selector in scoring.split(":", 1)[1].split(","):
component_selectors.append(
make_selector(
component_selector)[0])
selector = CombinedModelSelector(
component_selectors,
min_contribution_percent=combined_min_contribution_percent)
elif scoring == "mse":
model_selection_kwargs = {
'min_models': args.mse_min_models,
'max_models': args.mse_max_models,
}
min_measurements = args.mse_min_measurements
selector = MSEModelSelector(
df=df.loc[~df.mass_spec],
predictor=input_predictor,
min_measurements=min_measurements)
elif scoring == "mass-spec":
mass_spec_df = df.loc[df.mass_spec]
model_selection_kwargs = {
'min_models': args.mass_spec_min_models,
'max_models': args.mass_spec_max_models,
}
min_measurements = args.mass_spec_min_measurements
selector = MassSpecModelSelector(
df=mass_spec_df,
predictor=input_predictor,
min_measurements=min_measurements)
elif scoring == "consensus":
model_selection_kwargs = {
'min_models': args.consensus_min_models,
'max_models': args.consensus_max_models,
}
selector = ConsensusModelSelector(
predictor=input_predictor,
num_peptides_per_length=args.consensus_num_peptides_per_length)
else:
raise ValueError("Unsupported scoring method: %s" % scoring)
print("Instantiated model selector %s in %0.2f sec." % (
scoring, time.time() - start))
return (selector, model_selection_kwargs)
for scoring in args.scoring:
(selector, model_selection_kwargs) = make_selector(scoring)
selectors[scoring] = selector
selector_to_model_selection_kwargs[scoring] = model_selection_kwargs
unselected_accuracy_scorer = None
if args.unselected_accuracy_scorer:
# Force running all selectors by setting combined_min_contribution_percent=0.
unselected_accuracy_scorer = make_selector(
args.unselected_accuracy_scorer,
combined_min_contribution_percent=0.0)[0]
print("Using unselected accuracy scorer: %s" % unselected_accuracy_scorer)
GLOBAL_DATA["unselected_accuracy_scorer"] = unselected_accuracy_scorer
print("Selectors for alleles:")
allele_to_selector = {}
allele_to_model_selection_kwargs = {}
for allele in alleles:
selector = None
for possible_selector in args.scoring:
if selectors[possible_selector].usable_for_allele(allele=allele):
selector = selectors[possible_selector]
print("%20s %s" % (allele, selector.plan_summary(allele)))
break
if selector is None:
raise ValueError("No selectors usable for allele: %s" % allele)
allele_to_selector[allele] = selector
allele_to_model_selection_kwargs[allele] = (
selector_to_model_selection_kwargs[possible_selector])
GLOBAL_DATA["args"] = args
GLOBAL_DATA["input_predictor"] = input_predictor
GLOBAL_DATA["unselected_accuracy_scorer"] = unselected_accuracy_scorer
GLOBAL_DATA["allele_to_selector"] = allele_to_selector
GLOBAL_DATA["allele_to_model_selection_kwargs"] = allele_to_model_selection_kwargs
if not os.path.exists(args.out_models_dir):
print("Attempting to create directory: %s" % args.out_models_dir)
os.mkdir(args.out_models_dir)
print("Done.")
result_predictor = Class1AffinityPredictor(metadata_dataframes=metadata_dfs)
worker_pool = worker_pool_with_gpu_assignments_from_args(args)
start = time.time()
if worker_pool is None:
# Serial run
print("Running in serial.")
results = (
model_select(allele) for allele in alleles)
else:
# Parallel run
random.shuffle(alleles)
results = worker_pool.imap_unordered(
partial(model_select, constant_data=GLOBAL_DATA),
alleles,
chunksize=1)
unselected_summary = []
model_selection_dfs = []
for result in tqdm.tqdm(results, total=len(alleles)):
pprint(result)
summary_dict = dict(result)
summary_dict["retained"] = result["selected"] is not None
del summary_dict["selected"]
unselected_summary.append(summary_dict)
if result['selected'] is not None:
model_selection_dfs.append(
result['selected'].metadata_dataframes['model_selection'])
result_predictor.merge_in_place([result['selected']])
if model_selection_dfs:
model_selection_df = pandas.concat(
model_selection_dfs, ignore_index=True)
model_selection_df["selector"] = model_selection_df.allele.map(
allele_to_selector)
result_predictor.metadata_dataframes["model_selection"] = (
model_selection_df)
result_predictor.metadata_dataframes["unselected_summary"] = (
pandas.DataFrame(unselected_summary))
print("Done model selecting for %d alleles." % len(alleles))
result_predictor.save(args.out_models_dir)
model_selection_time = time.time() - start
if worker_pool:
worker_pool.close()
worker_pool.join()
print("Model selection time %0.2f min." % (model_selection_time / 60.0))
print("Predictor written to: %s" % args.out_models_dir)
class ScrambledPredictor(object):
def __init__(self, predictor):
self.predictor = predictor
self._predictions = {}
self._allele = None
def predict(self, peptides, allele):
if peptides not in self._predictions:
self._predictions[peptides] = pandas.Series(
self.predictor.predict(peptides=peptides, allele=allele))
self._allele = allele
assert allele == self._allele
return self._predictions[peptides].sample(frac=1.0).values
def model_select(allele, constant_data=GLOBAL_DATA):
unselected_accuracy_scorer = constant_data["unselected_accuracy_scorer"]
selector = constant_data["allele_to_selector"][allele]
model_selection_kwargs = constant_data[
"allele_to_model_selection_kwargs"
][allele]
predictor = constant_data["input_predictor"]
args = constant_data["args"]
unselected_accuracy_scorer_samples = constant_data["args"].unselected_accuracy_scorer_num_samples
result_dict = {
"allele": allele
}
unselected_score = None
unselected_score_percentile = None
unselected_score_scrambled_mean = None
if unselected_accuracy_scorer:
unselected_score_function = (
unselected_accuracy_scorer.score_function(allele))
additional_metadata = {}
unselected_score = unselected_score_function(
predictor, additional_metadata_out=additional_metadata)
scrambled_predictor = ScrambledPredictor(predictor)
scrambled_scores = numpy.array([
unselected_score_function(
scrambled_predictor)
for _ in range(unselected_accuracy_scorer_samples)
])
unselected_score_scrambled_mean = scrambled_scores.mean()
unselected_score_percentile = percentileofscore(
scrambled_scores, unselected_score)
print(
"Unselected score and percentile",
allele,
unselected_score,
unselected_score_percentile,
additional_metadata)
result_dict.update(
dict(("unselected_%s" % key, value)
for (key, value)
in additional_metadata.items()))
selected = None
threshold = args.unselected_accuracy_percentile_threshold
if unselected_score_percentile is None or unselected_score_percentile >= threshold:
selected = predictor.model_select(
score_function=selector.score_function(allele=allele),
alleles=[allele],
**model_selection_kwargs)
result_dict["unselected_score_plan"] = (
unselected_accuracy_scorer.plan_summary(allele)
if unselected_accuracy_scorer else None)
result_dict["selector_score_plan"] = selector.plan_summary(allele)
result_dict["unselected_accuracy_score_percentile"] = unselected_score_percentile
result_dict["unselected_score"] = unselected_score
result_dict["unselected_score_scrambled_mean"] = unselected_score_scrambled_mean
result_dict["selected"] = selected
result_dict["num_models"] = len(selected.neural_networks) if selected else None
return result_dict
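# Note on the percentile gate above: models are re-selected for an allele only
# when the unselected predictor's accuracy score reaches at least the
# --unselected-accuracy-percentile-threshold percentile of scores from a
# prediction-scrambled predictor (or when no unselected-accuracy scorer is
# configured); otherwise "selected" stays None and the allele is omitted from
# the output predictor.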
def cache_encoding(predictor, peptides):
# Encode the peptides for each neural network, so the encoding
# becomes cached.
for network in predictor.neural_networks:
network.peptides_to_network_input(peptides)
class ScoreFunction(object):
"""
Thin wrapper over a score function (Class1AffinityPredictor -> float).
Used to keep a summary string associated with the function.
"""
def __init__(self, function, summary=None):
self.function = function
self.summary = summary if summary else "(n/a)"
def __call__(self, *args, **kwargs):
return self.function(*args, **kwargs)
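# Minimal usage sketch (hypothetical values):
#   fn = ScoreFunction(lambda predictor: 1.0, summary="constant")
#   fn(some_predictor)  # -> 1.0
#   fn.summary          # -> "constant"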
class CombinedModelSelector(object):
"""
    Model selector that combines several other model selectors via a weighted sum of their scores.
"""
def __init__(self, model_selectors, weights=None, min_contribution_percent=1.0):
if weights is None:
weights = numpy.ones(shape=(len(model_selectors),))
self.model_selectors = model_selectors
self.selector_to_weight = dict(zip(self.model_selectors, weights))
self.min_contribution_percent = min_contribution_percent
def usable_for_allele(self, allele):
return any(
selector.usable_for_allele(allele)
for selector in self.model_selectors)
def plan_summary(self, allele):
return self.score_function(allele, dry_run=True).summary
def score_function(self, allele, dry_run=False):
selector_to_max_weighted_score = {}
for selector in self.model_selectors:
weight = self.selector_to_weight[selector]
if selector.usable_for_allele(allele):
max_weighted_score = selector.max_absolute_value(allele) * weight
else:
max_weighted_score = 0
selector_to_max_weighted_score[selector] = max_weighted_score
max_total_score = sum(selector_to_max_weighted_score.values())
        # Use only selectors that can contribute more than
        # min_contribution_percent percent of the total score
selectors_to_use = [
selector
for selector in self.model_selectors
if (
selector_to_max_weighted_score[selector] >
max_total_score * self.min_contribution_percent / 100.0)
]
summary = ", ".join([
"%s(|%.3f|)" % (
selector.plan_summary(allele),
selector_to_max_weighted_score[selector])
for selector in selectors_to_use
])
if dry_run:
score = None
else:
score_functions_and_weights = [
(selector.score_function(allele=allele),
self.selector_to_weight[selector])
for selector in selectors_to_use
]
def score(predictor, additional_metadata_out=None):
scores = numpy.array([
score_function(
predictor,
additional_metadata_out=additional_metadata_out) * weight
for (score_function, weight) in score_functions_and_weights
])
if additional_metadata_out is not None:
additional_metadata_out["combined_score_terms"] = str(
list(scores))
return scores.sum()
return ScoreFunction(score, summary=summary)
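# Note: the combined score is the sum of weight_i * selector_i(predictor) over
# the retained selectors; a selector is retained only if its maximum possible
# weighted score exceeds min_contribution_percent percent of the summed maxima.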
class ConsensusModelSelector(object):
"""
Model selector that scores sub-ensembles based on their Kendall tau
consistency with the full ensemble over a set of random peptides.
"""
def __init__(
self,
predictor,
num_peptides_per_length=10000,
multiply_score_by_value=10.0):
(min_length, max_length) = predictor.supported_peptide_lengths
peptides = []
for length in range(min_length, max_length + 1):
peptides.extend(
random_peptides(num_peptides_per_length, length=length))
self.peptides = EncodableSequences.create(peptides)
self.predictor = predictor
self.multiply_score_by_value = multiply_score_by_value
cache_encoding(self.predictor, self.peptides)
def usable_for_allele(self, allele):
return True
def max_absolute_value(self, allele):
return self.multiply_score_by_value
def plan_summary(self, allele):
return "consensus (%d points)" % len(self.peptides)
def score_function(self, allele):
full_ensemble_predictions = self.predictor.predict(
allele=allele,
peptides=self.peptides)
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=self.peptides,
)
tau = kendalltau(predictions, full_ensemble_predictions).correlation
if additional_metadata_out is not None:
additional_metadata_out["score_consensus_tau"] = tau
return tau * self.multiply_score_by_value
return ScoreFunction(
score, summary=self.plan_summary(allele))
class MSEModelSelector(object):
"""
Model selector that uses mean-squared error to score models. Inequalities
are supported.
"""
def __init__(
self,
df,
predictor,
min_measurements=1,
multiply_score_by_data_size=True):
self.df = df
self.predictor = predictor
self.min_measurements = min_measurements
self.multiply_score_by_data_size = multiply_score_by_data_size
def usable_for_allele(self, allele):
return (self.df.allele == allele).sum() >= self.min_measurements
def max_absolute_value(self, allele):
if self.multiply_score_by_data_size:
return (self.df.allele == allele).sum()
else:
return 1.0
def plan_summary(self, allele):
return self.score_function(allele).summary
def score_function(self, allele):
sub_df = self.df.loc[self.df.allele == allele].reset_index(drop=True)
peptides = EncodableSequences.create(sub_df.peptide.values)
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=peptides,
)
deviations = from_ic50(predictions) - from_ic50(
sub_df.measurement_value)
if 'measurement_inequality' in sub_df.columns:
# Must reverse meaning of inequality since we are working with
# transformed 0-1 values, which are anti-correlated with the ic50s.
# The measurement_inequality column is given in terms of ic50s.
deviations.loc[
(
(sub_df.measurement_inequality == "<") & (deviations > 0)) |
((sub_df.measurement_inequality == ">") & (deviations < 0))
] = 0.0
score_mse = (1 - (deviations ** 2).mean())
if additional_metadata_out is not None:
additional_metadata_out["score_MSE"] = 1 - score_mse
# We additionally include other scores on (=) measurements as
# a convenience
eq_df = sub_df
if 'measurement_inequality' in sub_df.columns:
eq_df = sub_df.loc[
sub_df.measurement_inequality == "="
]
additional_metadata_out["score_pearsonr"] = (
pearsonr(
numpy.log(eq_df.measurement_value.values),
numpy.log(predictions[eq_df.index.values]))[0])
for threshold in [500, 5000, 15000]:
if (eq_df.measurement_value < threshold).nunique() == 2:
additional_metadata_out["score_AUC@%d" % threshold] = (
roc_auc_score(
(eq_df.measurement_value < threshold).values,
-1 * predictions[eq_df.index.values]))
return score_mse * (
len(sub_df) if self.multiply_score_by_data_size else 1)
summary = "mse (%d points)" % (len(sub_df))
return ScoreFunction(score, summary=summary)
class MassSpecModelSelector(object):
"""
    Model selector that scores models by positive predictive value (PPV) at
    distinguishing mass-spec hits from decoy peptides.
"""
def __init__(
self,
df,
predictor,
decoys_per_length=0,
min_measurements=100,
multiply_score_by_data_size=True):
# Index is peptide, columns are alleles
hit_matrix = df.groupby(
["peptide", "allele"]).measurement_value.count().unstack().fillna(
0).astype(bool)
if decoys_per_length:
(min_length, max_length) = predictor.supported_peptide_lengths
decoys = []
for length in range(min_length, max_length + 1):
decoys.extend(
random_peptides(decoys_per_length, length=length))
decoy_matrix = pandas.DataFrame(
index=decoys, columns=hit_matrix.columns, dtype=bool)
decoy_matrix[:] = False
full_matrix = pandas.concat([hit_matrix, decoy_matrix])
else:
full_matrix = hit_matrix
if len(full_matrix) > 0:
full_matrix = full_matrix.sample(frac=1.0).astype(float)
self.df = full_matrix
self.predictor = predictor
self.min_measurements = min_measurements
self.multiply_score_by_data_size = multiply_score_by_data_size
self.peptides = EncodableSequences.create(full_matrix.index.values)
cache_encoding(self.predictor, self.peptides)
@staticmethod
def ppv(y_true, predictions):
df = pandas.DataFrame({"prediction": predictions, "y_true": y_true})
return df.sort_values("prediction", ascending=True)[
: int(y_true.sum())
].y_true.mean()
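    # Note: predictions are affinity-like values where lower means a stronger
    # binder, so sorting ascending and keeping the first n_hits rows gives the
    # fraction of true hits among the n_hits strongest predictions (positive
    # predictive value at that cutoff).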
def usable_for_allele(self, allele):
return allele in self.df.columns and (
self.df[allele].sum() >= self.min_measurements)
def max_absolute_value(self, allele):
if self.multiply_score_by_data_size:
return self.df[allele].sum()
else:
return 1.0
def plan_summary(self, allele):
return self.score_function(allele).summary
def score_function(self, allele):
total_hits = self.df[allele].sum()
total_decoys = (self.df[allele] == 0).sum()
multiplier = total_hits if self.multiply_score_by_data_size else 1
def score(predictor, additional_metadata_out=None):
predictions = predictor.predict(
allele=allele,
peptides=self.peptides,
)
ppv = self.ppv(self.df[allele], predictions)
if additional_metadata_out is not None:
additional_metadata_out["score_mass_spec_PPV"] = ppv
# We additionally compute AUC score.
additional_metadata_out["score_mass_spec_AUC"] = roc_auc_score(
self.df[allele].values, -1 * predictions)
return ppv * multiplier
summary = "mass-spec (%d hits / %d decoys)" % (total_hits, total_decoys)
return ScoreFunction(score, summary=summary)
if __name__ == '__main__':
run()
|
[
"os.path.exists",
"scipy.stats.percentileofscore",
"random.shuffle",
"pandas.DataFrame",
"argparse.ArgumentParser",
"pandas.read_csv",
"traceback.print_stack",
"numpy.log",
"sklearn.metrics.roc_auc_score",
"pandas.concat",
"os.mkdir",
"os.getpid",
"functools.partial",
"os.path.abspath",
"time.time",
"pprint.pprint",
"scipy.stats.kendalltau"
] |
[((1157, 1195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '__doc__'}), '(usage=__doc__)\n', (1180, 1195), False, 'import argparse\n'), ((5568, 5604), 'os.path.abspath', 'os.path.abspath', (['args.out_models_dir'], {}), '(args.out_models_dir)\n', (5583, 5604), False, 'import os\n'), ((12260, 12271), 'time.time', 'time.time', ([], {}), '()\n', (12269, 12271), False, 'import time\n'), ((13610, 13646), 'pandas.DataFrame', 'pandas.DataFrame', (['unselected_summary'], {}), '(unselected_summary)\n', (13626, 13646), False, 'import pandas\n'), ((5970, 5996), 'pandas.read_csv', 'pandas.read_csv', (['args.data'], {}), '(args.data)\n', (5985, 5996), False, 'import pandas\n'), ((8063, 8074), 'time.time', 'time.time', ([], {}), '()\n', (8072, 8074), False, 'import time\n'), ((11925, 11960), 'os.path.exists', 'os.path.exists', (['args.out_models_dir'], {}), '(args.out_models_dir)\n', (11939, 11960), False, 'import os\n'), ((12044, 12073), 'os.mkdir', 'os.mkdir', (['args.out_models_dir'], {}), '(args.out_models_dir)\n', (12052, 12073), False, 'import os\n'), ((12475, 12498), 'random.shuffle', 'random.shuffle', (['alleles'], {}), '(alleles)\n', (12489, 12498), False, 'import random\n'), ((12777, 12791), 'pprint.pprint', 'pprint', (['result'], {}), '(result)\n', (12783, 12791), False, 'from pprint import pprint\n'), ((13263, 13316), 'pandas.concat', 'pandas.concat', (['model_selection_dfs'], {'ignore_index': '(True)'}), '(model_selection_dfs, ignore_index=True)\n', (13276, 13316), False, 'import pandas\n'), ((13789, 13800), 'time.time', 'time.time', ([], {}), '()\n', (13798, 13800), False, 'import time\n'), ((15832, 15885), 'scipy.stats.percentileofscore', 'percentileofscore', (['scrambled_scores', 'unselected_score'], {}), '(scrambled_scores, unselected_score)\n', (15849, 15885), False, 'from scipy.stats import kendalltau, percentileofscore, pearsonr\n'), ((27131, 27194), 'pandas.DataFrame', 'pandas.DataFrame', (["{'prediction': predictions, 'y_true': y_true}"], {}), "({'prediction': predictions, 'y_true': y_true})\n", (27147, 27194), False, 'import pandas\n'), ((5414, 5425), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5423, 5425), False, 'import os\n'), ((5480, 5503), 'traceback.print_stack', 'traceback.print_stack', ([], {}), '()\n', (5501, 5503), False, 'import traceback\n'), ((6471, 6505), 'pandas.read_csv', 'pandas.read_csv', (['args.exclude_data'], {}), '(args.exclude_data)\n', (6486, 6505), False, 'import pandas\n'), ((12557, 12605), 'functools.partial', 'partial', (['model_select'], {'constant_data': 'GLOBAL_DATA'}), '(model_select, constant_data=GLOBAL_DATA)\n', (12564, 12605), False, 'from functools import partial\n'), ((26402, 26472), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'index': 'decoys', 'columns': 'hit_matrix.columns', 'dtype': 'bool'}), '(index=decoys, columns=hit_matrix.columns, dtype=bool)\n', (26418, 26472), False, 'import pandas\n'), ((26552, 26593), 'pandas.concat', 'pandas.concat', (['[hit_matrix, decoy_matrix]'], {}), '([hit_matrix, decoy_matrix])\n', (26565, 26593), False, 'import pandas\n'), ((21991, 22041), 'scipy.stats.kendalltau', 'kendalltau', (['predictions', 'full_ensemble_predictions'], {}), '(predictions, full_ensemble_predictions)\n', (22001, 22041), False, 'from scipy.stats import kendalltau, percentileofscore, pearsonr\n'), ((28411, 28466), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['self.df[allele].values', '(-1 * predictions)'], {}), '(self.df[allele].values, -1 * predictions)\n', (28424, 28466), False, 'from sklearn.metrics 
import roc_auc_score\n'), ((10137, 10148), 'time.time', 'time.time', ([], {}), '()\n', (10146, 10148), False, 'import time\n'), ((24817, 24858), 'numpy.log', 'numpy.log', (['eq_df.measurement_value.values'], {}), '(eq_df.measurement_value.values)\n', (24826, 24858), False, 'import numpy\n'), ((24884, 24926), 'numpy.log', 'numpy.log', (['predictions[eq_df.index.values]'], {}), '(predictions[eq_df.index.values])\n', (24893, 24926), False, 'import numpy\n'), ((25171, 25272), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['(eq_df.measurement_value < threshold).values', '(-1 * predictions[eq_df.index.values])'], {}), '((eq_df.measurement_value < threshold).values, -1 *\n predictions[eq_df.index.values])\n', (25184, 25272), False, 'from sklearn.metrics import roc_auc_score\n')]
|
############################################################
# Copyright 2019 <NAME>
# Licensed under the new BSD (3-clause) license:
#
# https://opensource.org/licenses/BSD-3-Clause
############################################################
############################################################
#
# Initial setup
#
############################################################
import matplotlib.pyplot as plot
import scipy.stats as stats
import numpy
import math
light = "#DCBCBC"
light_highlight = "#C79999"
mid = "#B97C7C"
mid_highlight = "#A25050"
dark = "#8F2727"
dark_highlight = "#7C0000"
green = "#00FF00"
# To facilitate the computation of Markov chain Monte Carlo estimators
# let's define a _Welford accumulator_ that computes empirical summaries
# of a sample in a single pass
def welford_summary(x, L = 100):
summary = [0] * (L + 1)
for n in range(len(x)):
delta = x[n] - summary[0]
summary[0] += delta / (n + 1)
for l in range(L):
if n > l:
summary[l + 1] += delta * (x[n - l] - summary[0])
norm = 1.0 / (len(x) - 1)
for l in range(L): summary[l + 1] *= norm
return summary
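# Usage sketch (illustrative only, not part of the original script): for a
# long i.i.d. sample the leading entries recover the usual estimates, e.g.
#   s = welford_summary(list(stats.norm.rvs(0, 1, size=5000)), L=5)
#   s[0]   approximately 0 (the sample mean)
#   s[1]   approximately 1 (the lag-0 autocovariance, i.e. the variance)
#   s[2:]  lag-1 through lag-4 autocovariances, all near 0 for i.i.d. draws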
# We can then use the Welford accumulator output to compute the
# Markov chain Monte Carlo estimators and their properties
def compute_mcmc_stats(x, L = 20):
summary = welford_summary(x, L)
mean = summary[0]
var = summary[1]
acov = summary[1:(L + 1)]
# Compute the effective sample size
rho_hat_s = [0] * L
rho_hat_s[1] = acov[1] / var
# First we transform our autocovariances into Geyer's initial positive sequence
max_s = 1
for s in [ 2 * i + 1 for i in range((L - 1) // 2) ]:
rho_hat_even = acov[s + 1] / var
rho_hat_odd = acov[s + 2] / var
max_s = s + 2
if rho_hat_even + rho_hat_odd > 0:
rho_hat_s[s + 1] = rho_hat_even
rho_hat_s[s + 2] = rho_hat_odd
else:
break
# Then we transform this output into Geyer's initial monotone sequence
for s in [ 2 * i + 3 for i in range((max_s - 2) // 2) ]:
if rho_hat_s[s + 1] + rho_hat_s[s + 2] > rho_hat_s[s - 1] + rho_hat_s[s]:
rho_hat_s[s + 1] = 0.5 * (rho_hat_s[s - 1] + rho_hat_s[s])
rho_hat_s[s + 2] = rho_hat_s[s + 1]
ess = len(x) / (1.0 + 2 * sum(rho_hat_s))
return [mean, math.sqrt(var / ess), math.sqrt(var), ess]
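# The returned list is [estimator mean, Markov chain Monte Carlo standard
# error, standard deviation estimate, effective sample size]. A usage
# sketch (illustrative; `chain_values` is a hypothetical list of samples):
#   mean, mcmc_se, sd, ess = compute_mcmc_stats(chain_values)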
# To generate our samples we'll use numpy's pseudo random number
# generator which needs to be seeded to achieve reproducible
# results
numpy.random.seed(seed=8675309)
# To ensure accurate results let's generate pretty large samples
N = 10000
# To see how results scale with dimension we'll consider
# behavior in one through ten dimensions
Ds = [ n + 1 for n in range(10) ]
idxs = [ idx for idx in range(Ds[-1]) for r in range(2) ]
plot_Ds = [ D + delta for D in Ds for delta in [-0.5, 0.5]]
############################################################
#
# How does the Random Walk Metropolis algorithm perform
# on a target distribution with a two-dimensional Gaussian
# density function?
#
############################################################
# Target density
def target_lpdf(x):
return - 0.5 * ( (x[0] - 1)**2 + (x[1] + 1)**2 ) \
- 0.5 * 2 * math.log(6.283185307179586)
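# This is the (normalized) log density of two independent unit-variance
# Gaussians centered at +1 and -1, so the estimators below should converge
# to those component means.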
# Tune proposal density
sigma = 1.4
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
D = 2
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Randomly seed the initial state
mcmc_samples[0][0] = stats.norm.rvs(0, 3)
mcmc_samples[0][1] = stats.norm.rvs(0, 3)
mcmc_samples[0][2] = 1
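# Each iteration below proposes componentwise x' ~ normal(x0, sigma) and
# accepts it with probability min(1, p(x') / p(x0)); this is the standard
# Random Walk Metropolis transition.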
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][0], mcmc_samples[n - 1][1]]
xp = [ stats.norm.rvs(x0[0], sigma), stats.norm.rvs(x0[1], sigma) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0] = xp[0]
mcmc_samples[n][1] = xp[1]
else:
mcmc_samples[n][0] = x0[0]
mcmc_samples[n][1] = x0[1]
# Compute MCMC estimator statistics, leaving
# out the first 100 samples as warmup
compute_mcmc_stats([ s[0] for s in mcmc_samples[100:] ])
compute_mcmc_stats([ s[1] for s in mcmc_samples[100:] ])
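# Note: these bare calls only show their results in an interactive session;
# in a plain script run one would wrap them, for example
#   print(compute_mcmc_stats([ s[0] for s in mcmc_samples[100:] ]))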
# Plot convergence of MCMC estimators for each parameter
stride = 250
M = N // stride
iters = [ stride * (i + 1) for i in range(N // stride) ]
x1_mean = [0] * M
x1_se = [0] * M
x2_mean = [0] * M
x2_se = [0] * M
for m in range(M):
running_samples = [ s[0] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
x1_mean[m] = mcmc_stats[0]
x1_se[m] = mcmc_stats[1]
running_samples = [ s[1] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
x2_mean[m] = mcmc_stats[0]
x2_se[m] = mcmc_stats[1]
plot.fill_between(iters,
[ x1_mean[m] - 2 * x1_se[m] for m in range(M) ],
[ x1_mean[m] + 2 * x1_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, x1_mean, color=dark)
plot.plot([iters[0], iters[-1]], [1, 1], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-2, 2])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
plot.fill_between(iters,
[ x2_mean[m] - 2 * x2_se[m] for m in range(M) ],
[ x2_mean[m] + 2 * x2_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, x2_mean, color=dark)
plot.plot([iters[0], iters[-1]], [-1, -1], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-2, 2])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
############################################################
#
# How does the Random Walk Metropolis algorithm perform
# on a target distribution with a funnel density function?
#
############################################################
# Target density
def target_lpdf(x):
return - 0.5 * ( x[0]**2 + x[1]**2 + ( (x[2] - x[0]) / math.exp(x[1]) )**2 ) \
- 0.5 * 3 * math.log(6.283185307179586) - x[1]
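# Here x[2] is coupled to x[0] with a scale of exp(x[1]), so the width of
# its conditional density varies over many orders of magnitude as x[1]
# moves; this is the funnel geometry that frustrates Random Walk Metropolis.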
# Tune proposal density
sigma = 1.4
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
D = 3
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Randomly seed the initial state
mcmc_samples[0][0] = stats.norm.rvs(0, 3)
mcmc_samples[0][1] = stats.norm.rvs(0, 3)
mcmc_samples[0][2] = stats.norm.rvs(0, 3)
mcmc_samples[0][3] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][0],
mcmc_samples[n - 1][1],
mcmc_samples[n - 1][2]]
xp = [ stats.norm.rvs(x0[0], sigma),
stats.norm.rvs(x0[1], sigma),
stats.norm.rvs(x0[2], sigma) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0] = xp[0]
mcmc_samples[n][1] = xp[1]
mcmc_samples[n][2] = xp[2]
else:
mcmc_samples[n][0] = x0[0]
mcmc_samples[n][1] = x0[1]
mcmc_samples[n][2] = x0[2]
# Compute MCMC estimator statistics, leaving
# out the first 100 samples as warmup
compute_mcmc_stats([ s[0] for s in mcmc_samples[100:] ])
compute_mcmc_stats([ s[1] for s in mcmc_samples[100:] ])
compute_mcmc_stats([ s[2] for s in mcmc_samples[100:] ])
# Plot convergence of MCMC estimators for each parameter
stride = 250
M = N // stride
iters = [ stride * (i + 1) for i in range(N // stride) ]
mu_mean = [0] * M
mu_se = [0] * M
log_tau_mean = [0] * M
log_tau_se = [0] * M
for m in range(M):
running_samples = [ s[0] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
mu_mean[m] = mcmc_stats[0]
mu_se[m] = mcmc_stats[1]
running_samples = [ s[1] for s in mcmc_samples[100:iters[m]] ]
mcmc_stats = compute_mcmc_stats(running_samples)
log_tau_mean[m] = mcmc_stats[0]
log_tau_se[m] = mcmc_stats[1]
plot.fill_between(iters,
[ mu_mean[m] - 2 * mu_se[m] for m in range(M) ],
[ mu_mean[m] + 2 * mu_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, mu_mean, color=dark)
plot.plot([iters[0], iters[-1]], [0, 0], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-1, 1])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
plot.fill_between(iters,
[ log_tau_mean[m] - 2 * log_tau_se[m] for m in range(M) ],
[ log_tau_mean[m] + 2 * log_tau_se[m] for m in range(M) ],
facecolor=light, color=light)
plot.plot(iters, log_tau_mean, color=dark)
plot.plot([iters[0], iters[-1]], [0, 0], color='grey', linestyle='--')
plot.gca().set_xlim([0, N])
plot.gca().set_xlabel("Iteration")
plot.gca().set_ylim([-1, 8])
plot.gca().set_ylabel("Monte Carlo Estimator")
plot.show()
############################################################
#
# How does the effective sample size of a Random Walk
# Metropolis Markov chain vary with the dimension of
# the target distribution?
#
############################################################
def target_lpdf(x):
return - 0.5 * sum([ x_n**2 for x_n in x ]) \
- 0.5 * len(x) * math.log(6.283185307179586)
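# The target here is D independent standard normal components, so the
# typical set concentrates on a shell of radius roughly sqrt(D).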
############################################################
# First let's use a constant Markov transition
############################################################
accept_prob_means = [0] * len(Ds)
accept_prob_ses = [0] * len(Ds)
ave_eff_sample_sizes = [0] * len(Ds)
# Tune proposal density
sigma = 1.4
for D in Ds:
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Seeding the initial state with an exact sample
# from the target distribution ensures that we
# start in the typical set and avoid having to
# worry about warmup.
for d in range(D):
mcmc_samples[0][d] = stats.norm.rvs(0, 1) # unit scale, matching the standard normal target
mcmc_samples[0][D] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][d] for d in range(D) ]
xp = [ stats.norm.rvs(x0[d], sigma) for d in range(D) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0:D] = xp
else:
mcmc_samples[n][0:D] = x0
# Estimate average acceptance probability
# Compute MCMC estimator statistics
mcmc_stats = compute_mcmc_stats([ s[D] for s in mcmc_samples])
accept_prob_means[D - 1] = mcmc_stats[0]
accept_prob_ses[D - 1] = mcmc_stats[1]
# Estimate effective sample size
eff_sample_sizes = [ compute_mcmc_stats([ s[d] for s in mcmc_samples])[3] \
for d in range(D) ]
ave_eff_sample_sizes[D - 1] = sum(eff_sample_sizes) / D
f, axarr = plot.subplots(1, 2)
axarr[0].set_title("")
axarr[0].fill_between(plot_Ds,
[ accept_prob_means[idx] - 2 * accept_prob_ses[idx] for idx in idxs ],
[ accept_prob_means[idx] + 2 * accept_prob_ses[idx] for idx in idxs ],
facecolor=dark, color=dark)
axarr[0].plot(plot_Ds, [ accept_prob_means[idx] for idx in idxs], color=dark_highlight)
axarr[0].set_xlim([Ds[0], Ds[-1]])
axarr[0].set_xlabel("Dimension")
axarr[0].set_ylim([0, 1])
axarr[0].set_ylabel("Average Acceptance Probability")
axarr[1].set_title("")
axarr[1].plot(plot_Ds, [ ave_eff_sample_sizes[idx] / N for idx in idxs],
color=dark_highlight)
axarr[1].set_xlim([Ds[0], Ds[-1]])
axarr[1].set_xlabel("Dimension")
axarr[1].set_ylim([0, 0.3])
axarr[1].set_ylabel("Average Effective Sample Size Per Iteration")
plot.show()
############################################################
# Now let's use an (approximately) optimally tuned Markov
# transition for each dimension
############################################################
accept_prob_means = [0] * len(Ds)
accept_prob_ses = [0] * len(Ds)
ave_eff_sample_sizes = [0] * len(Ds)
# Approximately optimal proposal tuning
opt_sigmas = [2.5, 1.75, 1.5, 1.2, 1.15, 1.0, 0.95, 0.85, 0.8, 0.75]
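# These values are close to the well-known 2.4 / sqrt(D) optimal-scaling
# guideline for Random Walk Metropolis on i.i.d. Gaussian targets, which
# targets an acceptance probability of roughly 0.23.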
# Tune proposal density
sigma = 1.4
for D in Ds:
# A place to store our Markov chain
# D columns for the parameters and one extra column
# for the Metropolis acceptance probability
mcmc_samples = [[0] * (D + 1) for _ in range(N)]
# Seeding the initial state with an exact sample
# from the target distribution ensures that we
# start in the typical set and avoid having to
# worry about warmup.
for d in range(D):
mcmc_samples[0][d] = stats.norm.rvs(0, 1) # unit scale, matching the standard normal target
mcmc_samples[0][D] = 1
for n in range(1, N):
x0 = [ mcmc_samples[n - 1][d] for d in range(D) ]
xp = [ stats.norm.rvs(x0[d], opt_sigmas[D - 1]) for d in range(D) ]
# Compute acceptance probability
accept_prob = 1
if target_lpdf(xp) < target_lpdf(x0):
accept_prob = math.exp(target_lpdf(xp) - target_lpdf(x0))
mcmc_samples[n][D] = accept_prob
# Apply Metropolis correction
u = stats.uniform.rvs(0, 1)
if accept_prob > u:
mcmc_samples[n][0:D] = xp
else:
mcmc_samples[n][0:D] = x0
# Estimate average acceptance probability
# Compute MCMC estimator statistics
mcmc_stats = compute_mcmc_stats([ s[D] for s in mcmc_samples])
accept_prob_means[D - 1] = mcmc_stats[0]
accept_prob_ses[D - 1] = mcmc_stats[1]
# Estimate effective sample size
eff_sample_sizes = [ compute_mcmc_stats([ s[d] for s in mcmc_samples])[3] \
for d in range(D) ]
ave_eff_sample_sizes[D - 1] = sum(eff_sample_sizes) / D
f, axarr = plot.subplots(1, 2)
axarr[0].set_title("")
axarr[0].fill_between(plot_Ds,
[ accept_prob_means[idx] - 2 * accept_prob_ses[idx] for idx in idxs ],
[ accept_prob_means[idx] + 2 * accept_prob_ses[idx] for idx in idxs ],
facecolor=dark, color=dark)
axarr[0].plot(plot_Ds, [ accept_prob_means[idx] for idx in idxs], color=dark_highlight)
axarr[0].set_xlim([Ds[0], Ds[-1]])
axarr[0].set_xlabel("Dimension")
axarr[0].set_ylim([0, 1])
axarr[0].set_ylabel("Average Acceptance Probability")
axarr[1].set_title("")
axarr[1].plot(plot_Ds, [ ave_eff_sample_sizes[idx] / N for idx in idxs],
color=dark_highlight)
axarr[1].set_xlim([Ds[0], Ds[-1]])
axarr[1].set_xlabel("Dimension")
axarr[1].set_ylim([0, 0.3])
axarr[1].set_ylabel("Average Effective Sample Size Per Iteration")
plot.show()
|
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"math.sqrt",
"scipy.stats.norm.rvs",
"math.log",
"numpy.random.seed",
"math.exp",
"scipy.stats.uniform.rvs",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((2453, 2484), 'numpy.random.seed', 'numpy.random.seed', ([], {'seed': '(8675309)'}), '(seed=8675309)\n', (2470, 2484), False, 'import numpy\n'), ((3493, 3513), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (3507, 3513), True, 'import scipy.stats as stats\n'), ((3535, 3555), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (3549, 3555), True, 'import scipy.stats as stats\n'), ((5127, 5164), 'matplotlib.pyplot.plot', 'plot.plot', (['iters', 'x1_mean'], {'color': 'dark'}), '(iters, x1_mean, color=dark)\n', (5136, 5164), True, 'import matplotlib.pyplot as plot\n'), ((5165, 5235), 'matplotlib.pyplot.plot', 'plot.plot', (['[iters[0], iters[-1]]', '[1, 1]'], {'color': '"""grey"""', 'linestyle': '"""--"""'}), "([iters[0], iters[-1]], [1, 1], color='grey', linestyle='--')\n", (5174, 5235), True, 'import matplotlib.pyplot as plot\n'), ((5377, 5388), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (5386, 5388), True, 'import matplotlib.pyplot as plot\n'), ((5598, 5635), 'matplotlib.pyplot.plot', 'plot.plot', (['iters', 'x2_mean'], {'color': 'dark'}), '(iters, x2_mean, color=dark)\n', (5607, 5635), True, 'import matplotlib.pyplot as plot\n'), ((5636, 5708), 'matplotlib.pyplot.plot', 'plot.plot', (['[iters[0], iters[-1]]', '[-1, -1]'], {'color': '"""grey"""', 'linestyle': '"""--"""'}), "([iters[0], iters[-1]], [-1, -1], color='grey', linestyle='--')\n", (5645, 5708), True, 'import matplotlib.pyplot as plot\n'), ((5850, 5861), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (5859, 5861), True, 'import matplotlib.pyplot as plot\n'), ((6566, 6586), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (6580, 6586), True, 'import scipy.stats as stats\n'), ((6608, 6628), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (6622, 6628), True, 'import scipy.stats as stats\n'), ((6650, 6670), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (6664, 6670), True, 'import scipy.stats as stats\n'), ((8474, 8511), 'matplotlib.pyplot.plot', 'plot.plot', (['iters', 'mu_mean'], {'color': 'dark'}), '(iters, mu_mean, color=dark)\n', (8483, 8511), True, 'import matplotlib.pyplot as plot\n'), ((8512, 8582), 'matplotlib.pyplot.plot', 'plot.plot', (['[iters[0], iters[-1]]', '[0, 0]'], {'color': '"""grey"""', 'linestyle': '"""--"""'}), "([iters[0], iters[-1]], [0, 0], color='grey', linestyle='--')\n", (8521, 8582), True, 'import matplotlib.pyplot as plot\n'), ((8724, 8735), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (8733, 8735), True, 'import matplotlib.pyplot as plot\n'), ((8965, 9007), 'matplotlib.pyplot.plot', 'plot.plot', (['iters', 'log_tau_mean'], {'color': 'dark'}), '(iters, log_tau_mean, color=dark)\n', (8974, 9007), True, 'import matplotlib.pyplot as plot\n'), ((9008, 9078), 'matplotlib.pyplot.plot', 'plot.plot', (['[iters[0], iters[-1]]', '[0, 0]'], {'color': '"""grey"""', 'linestyle': '"""--"""'}), "([iters[0], iters[-1]], [0, 0], color='grey', linestyle='--')\n", (9017, 9078), True, 'import matplotlib.pyplot as plot\n'), ((9220, 9231), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (9229, 9231), True, 'import matplotlib.pyplot as plot\n'), ((11377, 11396), 'matplotlib.pyplot.subplots', 'plot.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (11390, 11396), True, 'import matplotlib.pyplot as plot\n'), ((12226, 12237), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (12235, 12237), True, 'import matplotlib.pyplot as plot\n'), ((14164, 
14183), 'matplotlib.pyplot.subplots', 'plot.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (14177, 14183), True, 'import matplotlib.pyplot as plot\n'), ((15013, 15024), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (15022, 15024), True, 'import matplotlib.pyplot as plot\n'), ((3961, 3984), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', (['(0)', '(1)'], {}), '(0, 1)\n', (3978, 3984), True, 'import scipy.stats as stats\n'), ((7169, 7192), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', (['(0)', '(1)'], {}), '(0, 1)\n', (7186, 7192), True, 'import scipy.stats as stats\n'), ((2273, 2293), 'math.sqrt', 'math.sqrt', (['(var / ess)'], {}), '(var / ess)\n', (2282, 2293), False, 'import math\n'), ((2295, 2309), 'math.sqrt', 'math.sqrt', (['var'], {}), '(var)\n', (2304, 2309), False, 'import math\n'), ((3668, 3696), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[0]', 'sigma'], {}), '(x0[0], sigma)\n', (3682, 3696), True, 'import scipy.stats as stats\n'), ((3698, 3726), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[1]', 'sigma'], {}), '(x0[1], sigma)\n', (3712, 3726), True, 'import scipy.stats as stats\n'), ((5237, 5247), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5245, 5247), True, 'import matplotlib.pyplot as plot\n'), ((5265, 5275), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5273, 5275), True, 'import matplotlib.pyplot as plot\n'), ((5300, 5310), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5308, 5310), True, 'import matplotlib.pyplot as plot\n'), ((5329, 5339), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5337, 5339), True, 'import matplotlib.pyplot as plot\n'), ((5710, 5720), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5718, 5720), True, 'import matplotlib.pyplot as plot\n'), ((5738, 5748), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5746, 5748), True, 'import matplotlib.pyplot as plot\n'), ((5773, 5783), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5781, 5783), True, 'import matplotlib.pyplot as plot\n'), ((5802, 5812), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (5810, 5812), True, 'import matplotlib.pyplot as plot\n'), ((6827, 6855), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[0]', 'sigma'], {}), '(x0[0], sigma)\n', (6841, 6855), True, 'import scipy.stats as stats\n'), ((6867, 6895), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[1]', 'sigma'], {}), '(x0[1], sigma)\n', (6881, 6895), True, 'import scipy.stats as stats\n'), ((6906, 6934), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[2]', 'sigma'], {}), '(x0[2], sigma)\n', (6920, 6934), True, 'import scipy.stats as stats\n'), ((8584, 8594), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (8592, 8594), True, 'import matplotlib.pyplot as plot\n'), ((8612, 8622), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (8620, 8622), True, 'import matplotlib.pyplot as plot\n'), ((8647, 8657), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (8655, 8657), True, 'import matplotlib.pyplot as plot\n'), ((8676, 8686), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (8684, 8686), True, 'import matplotlib.pyplot as plot\n'), ((9080, 9090), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (9088, 9090), True, 'import matplotlib.pyplot as plot\n'), ((9108, 9118), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (9116, 9118), True, 'import matplotlib.pyplot as plot\n'), ((9143, 9153), 'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (9151, 9153), True, 'import matplotlib.pyplot as plot\n'), ((9172, 9182), 
'matplotlib.pyplot.gca', 'plot.gca', ([], {}), '()\n', (9180, 9182), True, 'import matplotlib.pyplot as plot\n'), ((10352, 10372), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (10366, 10372), True, 'import scipy.stats as stats\n'), ((10786, 10809), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', (['(0)', '(1)'], {}), '(0, 1)\n', (10803, 10809), True, 'import scipy.stats as stats\n'), ((13127, 13147), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['(0)', '(3)'], {}), '(0, 3)\n', (13141, 13147), True, 'import scipy.stats as stats\n'), ((13573, 13596), 'scipy.stats.uniform.rvs', 'stats.uniform.rvs', (['(0)', '(1)'], {}), '(0, 1)\n', (13590, 13596), True, 'import scipy.stats as stats\n'), ((3184, 3211), 'math.log', 'math.log', (['(6.283185307179586)'], {}), '(6.283185307179586)\n', (3192, 3211), False, 'import math\n'), ((9588, 9615), 'math.log', 'math.log', (['(6.283185307179586)'], {}), '(6.283185307179586)\n', (9596, 9615), False, 'import math\n'), ((10491, 10519), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[d]', 'sigma'], {}), '(x0[d], sigma)\n', (10505, 10519), True, 'import scipy.stats as stats\n'), ((13266, 13306), 'scipy.stats.norm.rvs', 'stats.norm.rvs', (['x0[d]', 'opt_sigmas[D - 1]'], {}), '(x0[d], opt_sigmas[D - 1])\n', (13280, 13306), True, 'import scipy.stats as stats\n'), ((6244, 6271), 'math.log', 'math.log', (['(6.283185307179586)'], {}), '(6.283185307179586)\n', (6252, 6271), False, 'import math\n'), ((6199, 6213), 'math.exp', 'math.exp', (['x[1]'], {}), '(x[1])\n', (6207, 6213), False, 'import math\n')]
|