#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.kullback_leiblers import gauss_kl
from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer
tf.keras.backend.set_floatx("float64")
############
# Utilities
############
def _zero_one_normal_prior(w_dim):
""" N(0, I) prior """
return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim))
def get_distributions_with_w_dim():
distributions = []
for d in [1, 5]:
mean = np.zeros(d)
scale_tri_l = np.eye(d)
mvn = tfp.distributions.MultivariateNormalTriL(mean, scale_tri_l)
std = np.ones(d)
mvn_diag = tfp.distributions.MultivariateNormalDiag(mean, std)
distributions.append((mvn, d))
distributions.append((mvn_diag, d))
return distributions
############
# Tests
############
@pytest.mark.parametrize("distribution, w_dim", get_distributions_with_w_dim())
def test_local_kls(distribution, w_dim):
lv = LatentVariableLayer(encoder=None, prior=distribution)
# test kl is 0 when posteriors == priors
posterior = distribution
assert lv._local_kls(posterior) == 0
# test kl > 0 when posteriors != priors
batch_size = 10
params = distribution.parameters
posterior_params = {
k: [v + 0.5 for _ in range(batch_size)]
for k, v in params.items()
if isinstance(v, np.ndarray)
}
posterior = lv.distribution_class(**posterior_params)
local_kls = lv._local_kls(posterior)
assert np.all(local_kls > 0)
assert local_kls.shape == (batch_size,)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_local_kl_gpflow_consistency(w_dim):
num_data = 400
means = np.random.randn(num_data, w_dim)
encoder = DirectlyParameterizedNormalDiag(num_data, w_dim, means)
lv = LatentVariableLayer(encoder=encoder, prior=_zero_one_normal_prior(w_dim))
posteriors = lv._inference_posteriors(
[np.random.randn(num_data, 3), np.random.randn(num_data, 2)]
)
q_mu = posteriors.parameters["loc"]
q_sqrt = posteriors.parameters["scale_diag"]
gpflow_local_kls = gauss_kl(q_mu, q_sqrt)
tfp_local_kls = tf.reduce_sum(lv._local_kls(posteriors))
np.testing.assert_allclose(tfp_local_kls, gpflow_local_kls, rtol=1e-10)
class ArrayMatcher:
def __init__(self, expected):
self.expected = expected
def __eq__(self, actual):
return np.allclose(actual, self.expected, equal_nan=True)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_latent_variable_layer_losses(mocker, w_dim):
num_data, x_dim, y_dim = 43, 3, 1
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(encoder=encoder, prior=prior)
inputs = np.full((num_data, x_dim), np.nan)
targets = np.full((num_data, y_dim), np.nan)
observations = [inputs, targets]
encoder_inputs = np.concatenate(observations, axis=-1)
_ = lv(inputs)
encoder.assert_not_called()
assert lv.losses == [0.0]
_ = lv(inputs, observations=observations, training=True)
# assert_called_once_with uses == for comparison which fails on arrays
encoder.assert_called_once_with(ArrayMatcher(encoder_inputs), training=True)
expected_loss = [tf.reduce_mean(posteriors.kl_divergence(prior))]
np.testing.assert_equal(lv.losses, expected_loss) # also checks shapes match
@pytest.mark.parametrize("w_dim", [1, 5])
@pytest.mark.parametrize("seed2", [None, 42])
def test_latent_variable_layer_samples(mocker, test_data, w_dim, seed2):
seed = 123
inputs, targets = test_data
num_data, x_dim = inputs.shape
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(prior=prior, encoder=encoder)
tf.random.set_seed(seed)
sample_prior = lv(inputs, seed=seed2)
tf.random.set_seed(seed)
prior_expected = np.concatenate([inputs, prior.sample(num_data, seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_prior, prior_expected)
tf.random.set_seed(seed)
sample_posterior = lv(inputs, observations=[inputs, targets], training=True, seed=seed2)
tf.random.set_seed(seed)
posterior_expected = np.concatenate([inputs, posteriors.sample(seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_posterior, posterior_expected)
def test_no_tensorflow_metaclass_overwritten():
"""
LayerWithObservations is a subclass of tf.keras.layers.Layer (via TrackableLayer);
this test ensures that TrackableLayer does not have a metaclass, and hence by adding
the ABCMeta to LayerWithObservations we are not accidentally removing some required
TensorFlow magic metaclass.
"""
assert LayerWithObservations.__bases__ == (TrackableLayer,)
assert type(TrackableLayer) is type
assert type(LayerWithObservations) is abc.ABCMeta
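# For reference, a minimal standalone use of the layer exercised above (a sketch
# distilled from these tests rather than part of the original suite; shapes follow
# test_local_kl_gpflow_consistency and arguments are passed positionally, as the
# tests do):
#
#     encoder = DirectlyParameterizedNormalDiag(400, 2)  # num_data=400, latent dim=2
#     prior = tfp.distributions.MultivariateNormalDiag(loc=np.zeros(2), scale_diag=np.ones(2))
#     lv = LatentVariableLayer(encoder=encoder, prior=prior)
#     out = lv(np.random.randn(400, 3))  # prior samples concatenated onto the inputs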
"""
This script will modulate the blinky lights using the following algorithm:
1) uses user-provided location to obtain row of pixel data from bathy image
2) samples a 'number of LEDs' number of pixels from that row
3) shifts the sampled row data to center it at the location specified by user
4) displays resulting pixels on Blinky Tape
5) shifts next row by a given latitude, also specified by user
6) sleeps for user-specified period of time
Uses the following arguments:
-l/--location: tuple
Location of the user in tuple(lat, lon). This represents the center of the LED strip. Defaults to (0, 0)
-u/--update-interval: int
Update interval of the script, in minutes. Defaults to 10.
-p/--port: str
Serial port of the BlinkyLight (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'.
-d/--delta_latitude: int
Vertical change in latitude applied at every update. May be 0, but then the LED display never changes.
-i/--image: str
Name of the PNG image that contains the color-coded bathymetric data.
The file currently named mapserv.png was obtained using the following API:
https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0
In lieu of providing command line arguments, you may alternatively edit the defaults in bathy_config.json.
NOTE: runs via:
runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/')
(C) 2021 <NAME> (https://joeycodes.dev)
MIT Licensed
"""
import optparse
import json
from blinkytape import BlinkyTape
from time import sleep
from PIL import Image
import numpy as np
import sys
MAX_ERRORS = 3
num_errors = 0
# Obtain default parameters
with open("./bathymetry_blink/bathy_config.json") as f:
config = json.load(f)
# Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="portname",
help="serial port (ex: /dev/ttyACM0)", default=config["port"])
parser.add_option("-l", "--location", dest="location",
help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"])
parser.add_option("-u", "--update-rate", dest="update_rate",
help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"])
parser.add_option("-d", "--delta-latitude", dest="delta_latitude",
help="Change in latitude during update (ex: 5)", default=config["delta_latitude"])
parser.add_option("-n", "--num-leds", dest="num_leds",
help="Number of LEDs in strip (ex: 60)", default=config["num_leds"])
parser.add_option("-i", "--image", dest="image_name",
help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"])
(options, args) = parser.parse_args()
if args:
print("Unknown parameters: " + args)
# grab the values provided by user (or defaults)
port = options.portname
loc = options.location
rate = options.update_rate
delta = options.delta_latitude
n_leds = options.num_leds
i_name = options.image_name
# Some visual indication that it works, for headless setups (green tape)
bt = BlinkyTape(port, n_leds)
bt.displayColor(0, 100, 0)
bt.show()
sleep(2)
while True:
try:
# first, load image
im = Image.open(i_name) # Can be many different formats.
cols, rows = im.size
a = np.asarray(im) # of shape (rows, cols, channels)
        # map the user's latitude/longitude to 0-based pixel indices in the image
        latitude_index = min(rows - 1, max(0, int((loc[0] + 90) / 180 * rows)))
        longitude_index = min(cols - 1, max(0, int((loc[1] + 180) / 360 * cols)))
# update the location of the next row of elevation data to take
loc[0] += delta
loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow
print("Lat index: " + str(latitude_index))
print("Lon index: " + str(longitude_index))
print("Next latitude: " + str(loc[0]))
# grab the applicable pixel indices
        indices = [int(x * (cols / n_leds)) for x in range(n_leds)]
# sample that row of pixel data
output_pixels = np.take(a[latitude_index], indices, axis=0)
# rotate the row to center around the specified longitude
output_pixels = np.roll(output_pixels, longitude_index, axis=0)
# send all pixel data to bt
for pixel in output_pixels:
print("Sending r: {}, g: {}, b: {}".format(*pixel))
bt.sendPixel(*pixel)
# finally, show the image
bt.show()
# delete variables for memory management
del a
del im
# Tape resets to stored pattern after a few seconds of inactivity
sleep(rate * 60) # Wait specified number of minutes
# sleep(10) # Wait specified number of minutes
except KeyboardInterrupt:
print("Keyboard interrupt, ending program.")
sys.exit()
except RuntimeError as e:
print("Encountered runtime error: " + e.args[0])
# flush any incomplete data
bt.show()
num_errors += 1
if num_errors > MAX_ERRORS:
sys.exit("Error count exceeds that allowed.")
"""
Basic usage
===========
This example presents the basic usage of brokenaxes
"""
import matplotlib.pyplot as plt
from brokenaxes import brokenaxes
import numpy as np
fig = plt.figure(figsize=(5,2))
bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05)
x = np.linspace(0, 1, 100)
bax.plot(x, np.sin(10 * x), label='sin')
bax.plot(x, np.cos(10 * x), label='cos')
bax.legend(loc=3)
bax.set_xlabel('time')
bax.set_ylabel('value')
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np
#from ... import config
#if not config.is_building_docs():
# from qiskit_metal import is_true
class TransmonInterdigitated(QComponent):
"""
The base "TransmonInterdigitated" inherits the "QComponent" class.
This creates a transmon pocket with two large pads connected by a Josephson
junction. Both pads have four interdigitated "fingers" which increase the
capacitance of the structure. There are three coupling capacitor pads with qpins
defined; these can be connected to other structures in a design using CPWs.
Default Options:
* pad_width: '1000um' -- width of the large rectangular pads on either side
of the junction
* pad_height: '300um' -- height of the large rectangular pads on either side
of the junction
* finger_width: '50um' -- width of the "finger" on either side of the junction
* finger_height: '100um' -- height of the "finger" on the side of the junction
* finger_space: '50um' -- height of the Josephson Junction (equivalently; space
between two fingers)
* pad_pos_x: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* pad_pos_y: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* comb_width: '50um' -- the width of the four interdigitated combs connected to
either pad
* comb_space_vert: '50um' -- the space between the edge of a comb and the edge of
the opposite rectangular pad
* comb_space_hor: '50um' -- the space between adjacent interdigitated comb structures
* jj_width: '20um' -- the width of the Josephson Junction located between the two
fingers of the device
* cc_space: '50um' -- the space between the lower rectangular pad and the coupling
capacitor below it
* cc_width: '100um' -- the width of the coupling capacitor located below the bottom
rectangular pad
* cc_height: '100um' -- the height of the coupling capacitor located below the bottom
rectangular pad
* cc_topleft_space: '50um' -- the space between the upper rectangular pad and the top
left coupling capacitor
* cc_topleft_width: '100um' -- the width of the top left coupling capacitor pad
* cc_topleft_height: '100um' -- the height of the top left coupling capacitor pad
* cc_topright_space: '50um' -- the space between the upper rectangular pad and the
top right coupling capacitor
* cc_topright_width: '100um' -- the width of the top right coupling capacitor pad
* cc_topright_height: '100um' -- the height of the top right coupling capacitor pad
* position_x: '0um' -- the x-coordinate defining the center of the transmon pocket
on the chip
* position_y: '0um' -- the y-coordinate defining the center of the transmon pocket
on the chip
* rotation: '0.0' -- the angle at which the entire structure is rotated
* rotation_top_pad: '180' -- internal coordinate defining the angle of rotation
between top and bottom pads
* layer: '1' -- all objects are drawn assuming they are part of the same layer on
the chip
"""
# Default drawing options
default_options = Dict(pad_width='1000um',
pad_height='300um',
finger_width='50um',
finger_height='100um',
finger_space='50um',
pad_pos_x='0um',
pad_pos_y='0um',
comb_width='50um',
comb_space_vert='50um',
comb_space_hor='50um',
jj_width='20um',
cc_space='50um',
cc_width='100um',
cc_height='100um',
cc_topleft_space='50um',
cc_topleft_width='100um',
cc_topleft_height='100um',
cc_topright_space='50um',
cc_topright_width='100um',
cc_topright_height='100um',
position_x='0um',
position_y='0um',
rotation='0.0',
rotation_top_pad='180',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# draw the lower pad as a rectangle
pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x,
p.pad_pos_y)
# draw the lower finger as a rectangle
finger_lower = draw.rectangle(
p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y +
0.49999 * (p.pad_height) + 0.49999 * (p.finger_height))
# draw the Josephson Junction
rect_jj = draw.rectangle(
p.jj_width, p.finger_space, p.pad_pos_x,
0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space))
# draw the first comb to the right of the lower finger as a rectangle
comb1_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the right of the lower finger by translating the first comb
comb2_lower = draw.translate(comb1_lower,
2.0 * (p.comb_space_hor + p.comb_width),
0.0)
# draw the first comb to the left of the lower finger
comb3_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(-0.5 * p.finger_width - 2.0 * p.comb_space_hor -
1.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the left of the lower finger
comb4_lower = draw.translate(comb3_lower,
-2.0 * (p.comb_space_hor + p.comb_width),
0.0)
coupling_capacitor = draw.rectangle(
p.cc_width, p.cc_height, p.pad_pos_x,
p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height)
cc_topleft = draw.rectangle(
p.cc_topleft_width, p.cc_topleft_height,
p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height)
cc_topright = draw.translate(
cc_topleft,
p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width,
0.0)
# merge the bottom elements
bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower,
comb3_lower, comb4_lower)
# create the top portion of the comb by translating and rotating
# the bottom portion of the comb
top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space)
top = draw.rotate(top, p.rotation_top_pad)
# merge everything into a single design
design = draw.union(bottom, top, rect_jj, coupling_capacitor,
cc_topleft, cc_topright)
# draw the transmon pocket bounding box
pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height)
# the origin is originally set to the middle of the lower pad.
# Let's move it to the center of the JJ.
design = draw.translate(
design, 0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# now translate the final structure according to the user input
design = draw.rotate(design, p.rotation, origin=(0, 0))
design = draw.translate(design, p.position_x, p.position_y)
pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
pocket = draw.translate(pocket, p.position_x, p.position_y)
geom = {'design': design}
geom_pocket = {'pocket': pocket}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)
###################################################################
# Add Qpin connections for coupling capacitors
# define a function that both rotates and translates the
# qpin coordinates
def qpin_rotate_translate(x):
""" This function rotates the coordinates of the three qpins
according to the user inputs for "position_x", "position_y"
and "rotation".
"""
y = list(x)
z = [0.0, 0.0]
z[0] = y[0] * cos(p.rotation * 3.14159 / 180) - y[1] * sin(
p.rotation * 3.14159 / 180)
z[1] = y[0] * sin(p.rotation * 3.14159 / 180) + y[1] * cos(
p.rotation * 3.14159 / 180)
z[0] = z[0] + p.position_x
z[1] = z[1] + p.position_y
x = (z[0], z[1])
return x
# Add Qpin connections for the bottom coupling capacitor
qp1a = (0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# rotate and translate the qpin coordinates
qp1a = qpin_rotate_translate(qp1a)
qp1b = qpin_rotate_translate(qp1b)
self.add_pin('pin1',
points=np.array([qp1a, qp1b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top left coupling capacitor
qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp2b = (p.pad_pos_x - 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp2a = qpin_rotate_translate(qp2a)
qp2b = qpin_rotate_translate(qp2b)
self.add_pin('pin2',
points=np.array([qp2a, qp2b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top right coupling capacitor
qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp3b = (p.pad_pos_x + 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp3a = qpin_rotate_translate(qp3a)
qp3b = qpin_rotate_translate(qp3b)
self.add_pin('pin3',
points=np.array([qp3a, qp3b]),
width=0.01,
input_as_norm=True)
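# Example usage (a sketch, not part of the original module; it assumes the standard
# qiskit-metal workflow with a planar design -- adjust the component name and option
# values to suit your design):
#
#     from qiskit_metal import designs
#     design = designs.DesignPlanar()
#     q1 = TransmonInterdigitated(design, 'Q1',
#                                 options=dict(position_x='2.0mm', rotation='90'))
#     design.rebuild()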
import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._data.apply("apply", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, copy=False
)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([]))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_apply
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_apply_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.append(v.name)
successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super().apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super().apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
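# Rough sketch of how this module is driven (not part of the original file; the real
# call site lives in DataFrame.apply and may differ between pandas versions):
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
#     op = frame_apply(df, func=np.sum, axis=0)
#     result = op.get_result()   # equivalent to df.apply(np.sum)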
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Test for the piezo tensor class
"""
__author__ = "<NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "4/1/16"
import os
import unittest
import numpy as np
from pymatgen.analysis.piezo import PiezoTensor
from pymatgen.util.testing import PymatgenTest
class PiezoTest(PymatgenTest):
def setUp(self):
self.piezo_struc = self.get_structure("BaNiO3")
self.voigt_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.vasp_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839],
                [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.full_tensor_array = [
[[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
[[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
]
def test_new(self):
pt = PiezoTensor(self.full_tensor_array)
self.assertArrayAlmostEqual(pt, self.full_tensor_array)
bad_dim_array = np.zeros((3, 3))
self.assertRaises(ValueError, PiezoTensor, bad_dim_array)
def test_from_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_voigt(self.voigt_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
def test_from_vasp_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_vasp_voigt(self.vasp_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
if __name__ == "__main__":
unittest.main()
import argparse
import json
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
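# The value returned above is the batch-wise harmonic mean F1 = 2*P*R / (P + R),
# built from the nested precision and recall helpers; K.epsilon() guards the
# division when both precision and recall are zero.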
def get_embeddings(sentences_list,layer_json):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:return: Dictionary mapping each sentence of the sentences_list to its embedding
'''
sentences = dict()  # key: the index of each line of sentences_list.txt, value: the sentence
embeddings = dict()  # key: the index of each sentence, value: its embedding
sentence_emb = dict()  # key: sentence, value: its embedding
with open(sentences_list,'r') as file:
for index,line in enumerate(file):
sentences[index] = line.strip()
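# Each line of layer_json is expected to hold one JSON object with a 'linex_index'
# field (the position of the sentence) and a 'features' field (its embedding vector).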
with open(layer_json, 'r',encoding='utf-8') as f:
for line in f:
embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features'])
for key,value in sentences.items():
sentence_emb[value] = embeddings[key]
return sentence_emb
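# Usage sketch, mirroring the (commented-out) calls in __main__ below:
# bert_dict = get_embeddings('sentences_list.txt', 'output_layer_-1.json')
# returns a {sentence: embedding} dictionary that the training functions reuse.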
def train_classifier(sentences_list,layer_json,dataset_csv,filename):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:param filename: the path of the pickle file in which the model will be stored
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length]) # np.append(features,length,axis=1)
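# Each sentence is represented by four concatenated BERT embeddings (sentence, previous,
# next, section), 768 values each as the np.zeros(768) fallback assumes, plus its length.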
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)
log.fit(X_train, y_train)
#save the model
_ = joblib.dump(log, filename, compress=9)
predictions = log.predict(X_val)
print("###########################################")
print("Results using embeddings from the",layer_json,"file")
print(classification_report(y_val, predictions))
print("F1 score using Logistic Regression:",f1_score(y_val, predictions))
print("###########################################")
#train a DNN
f1_results = list()
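# Train the feed-forward network three times (fresh weights each run) and report
# the mean F1 over the runs at the end.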
for i in range(3):
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
# compile network
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
# fit network
model.fit(X_train, y_train, epochs=100, batch_size=64)
loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
print('\nTest F1: %f' % (f_1 * 100))
f1_results.append(f_1)
model = None
print("###########################################")
print("Results using embeddings from the", layer_json, "file")
# evaluate
print(np.mean(f1_results))
print("###########################################")
def parameter_tuning_LR(sentences_list,layer_json,dataset_csv):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
C = [0.1,1,2,5,10]
solver = ['newton-cg','saga','sag']
best_params = dict()
best_score = 0.0
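# Exhaustive grid over (C, solver): fit, score F1 on the validation split, keep the
# best-scoring pair in best_params, and time every combination.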
for c in C:
for s in solver:
start = time.time()
log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c)
log.fit(X_train, y_train)
predictions = log.predict(X_val)
print("###########################################")
print("LR with C =",c,'and solver = ',s)
print("Results using embeddings from the", layer_json, "file")
print(classification_report(y_val, predictions))
f1 = f1_score(y_val, predictions)
if f1 > best_score:
best_score = f1
best_params['c'] = c
best_params['solver'] = s
print("F1 score using Logistic Regression:",f1)
print("###########################################")
end = time.time()
running_time = end - start
print("Running time:"+str(running_time))
def visualize_DNN(file_to_save):
'''
Save the DNN architecture to a png file. It is better to use the Visulize_DNN.ipynd notebook for this.
:param file_to_save: the png file in which the architecture of the DNN will be saved.
:return: None
'''
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
plot_model(model, to_file=file_to_save, show_shapes=True)
def save_model(sentences_list,layer_json,dataset_csv,pkl):
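'''
Fit a Logistic Regression model (saga solver, C=1) on the whole dataset, without a
train/validation split, and store it as a pickle file.
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:param pkl: the path of the pickle file that the model will be stored in
:return: None
'''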
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list, layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
log = LogisticRegression(random_state=0, solver='saga', max_iter=1000, C=1)
log.fit(features, label)
_ = joblib.dump(log, pkl, compress=9)
if __name__ == '__main__':
#save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','summarizer1.pkl')
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--sentences", required=True, help="sentences list")
ap.add_argument("-o", "--output", required=True, help="output")
ap.add_argument("-ts", "--train set", required=True, help="path to train set")
ap.add_argument("-sp", "--summarizer path", required=True, help="path to save summarizer")
args = vars(ap.parse_args())
layer = train_classifier(args['sentences'], args['output'], args['train set'],args['summarizer path'])
#layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl')
#layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl')
#layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl')
#layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl')
#tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv')
#layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
|
[
"pandas.read_csv",
"sklearn.metrics.classification_report",
"keras.utils.vis_utils.plot_model",
"numpy.column_stack",
"keras.layers.Dense",
"numpy.mean",
"argparse.ArgumentParser",
"keras.backend.clip",
"numpy.asarray",
"numpy.concatenate",
"keras.backend.epsilon",
"json.loads",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"sklearn.externals.joblib.dump",
"time.time",
"keras.layers.Dropout",
"sklearn.metrics.f1_score",
"sklearn.linear_model.LogisticRegression",
"numpy.zeros"
] |
[((2973, 2997), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (2984, 2997), True, 'import pandas as pd\n'), ((4112, 4136), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (4122, 4136), True, 'import numpy as np\n'), ((4182, 4203), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (4192, 4203), True, 'import numpy as np\n'), ((4249, 4273), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (4259, 4273), True, 'import numpy as np\n'), ((4322, 4346), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (4332, 4346), True, 'import numpy as np\n'), ((4390, 4408), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (4400, 4408), True, 'import numpy as np\n'), ((4445, 4462), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (4455, 4462), True, 'import numpy as np\n'), ((4496, 4571), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (4510, 4571), True, 'import numpy as np\n'), ((4586, 4621), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (4601, 4621), True, 'import numpy as np\n'), ((4723, 4789), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'label'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(features, label, test_size=0.33, random_state=42)\n', (4739, 4789), False, 'from sklearn.model_selection import train_test_split\n'), ((4801, 4877), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""newton-cg"""', 'max_iter': '(1000)', 'C': '(0.1)'}), "(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)\n", (4819, 4877), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4937, 4975), 'sklearn.externals.joblib.dump', 'joblib.dump', (['log', 'filename'], {'compress': '(9)'}), '(log, filename, compress=9)\n', (4948, 4975), False, 'from sklearn.externals import joblib\n'), ((6715, 6739), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (6726, 6739), True, 'import pandas as pd\n'), ((7854, 7878), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (7864, 7878), True, 'import numpy as np\n'), ((7924, 7945), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (7934, 7945), True, 'import numpy as np\n'), ((7991, 8015), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (8001, 8015), True, 'import numpy as np\n'), ((8064, 8088), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (8074, 8088), True, 'import numpy as np\n'), ((8132, 8150), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (8142, 8150), True, 'import numpy as np\n'), ((8187, 8204), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (8197, 8204), True, 'import numpy as np\n'), ((8238, 8313), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (8252, 8313), True, 'import numpy as np\n'), ((8328, 8363), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (8343, 8363), True, 'import numpy as np\n'), ((8428, 8494), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'label'], 
{'test_size': '(0.33)', 'random_state': '(42)'}), '(features, label, test_size=0.33, random_state=42)\n', (8444, 8494), False, 'from sklearn.model_selection import train_test_split\n'), ((9783, 9795), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9793, 9795), False, 'from keras.models import Sequential\n'), ((10175, 10232), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': 'file_to_save', 'show_shapes': '(True)'}), '(model, to_file=file_to_save, show_shapes=True)\n', (10185, 10232), False, 'from keras.utils.vis_utils import plot_model\n'), ((10309, 10333), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (10320, 10333), True, 'import pandas as pd\n'), ((11448, 11472), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (11458, 11472), True, 'import numpy as np\n'), ((11518, 11539), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (11528, 11539), True, 'import numpy as np\n'), ((11585, 11609), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (11595, 11609), True, 'import numpy as np\n'), ((11658, 11682), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (11668, 11682), True, 'import numpy as np\n'), ((11726, 11744), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (11736, 11744), True, 'import numpy as np\n'), ((11781, 11798), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (11791, 11798), True, 'import numpy as np\n'), ((11832, 11907), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (11846, 11907), True, 'import numpy as np\n'), ((11923, 11958), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (11938, 11958), True, 'import numpy as np\n'), ((11996, 12065), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""saga"""', 'max_iter': '(1000)', 'C': '(1)'}), "(random_state=0, solver='saga', max_iter=1000, C=1)\n", (12014, 12065), False, 'from sklearn.linear_model import LogisticRegression\n'), ((12104, 12137), 'sklearn.externals.joblib.dump', 'joblib.dump', (['log', 'pkl'], {'compress': '(9)'}), '(log, pkl, compress=9)\n', (12115, 12137), False, 'from sklearn.externals import joblib\n'), ((12286, 12311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12309, 12311), False, 'import argparse\n'), ((5146, 5187), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (5167, 5187), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((5237, 5265), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (5245, 5265), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((5406, 5418), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5416, 5418), False, 'from keras.models import Sequential\n'), ((6327, 6346), 'numpy.mean', 'np.mean', (['f1_results'], {}), '(f1_results)\n', (6334, 6346), True, 'import numpy as np\n'), ((9810, 9854), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (9815, 9854), False, 'from keras.layers import Dense, Dropout\n'), ((9870, 9915), 'keras.layers.Dense', 'Dense', (['(128)'], 
{'activation': '"""relu"""', 'trainable': '(True)'}), "(128, activation='relu', trainable=True)\n", (9875, 9915), False, 'from keras.layers import Dense, Dropout\n'), ((9931, 9943), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (9938, 9943), False, 'from keras.layers import Dense, Dropout\n'), ((9960, 10004), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (9965, 10004), False, 'from keras.layers import Dense, Dropout\n'), ((10020, 10033), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (10027, 10033), False, 'from keras.layers import Dense, Dropout\n'), ((10049, 10093), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (10054, 10093), False, 'from keras.layers import Dense, Dropout\n'), ((10109, 10122), 'keras.layers.Dropout', 'Dropout', (['(0.35)'], {}), '(0.35)\n', (10116, 10122), False, 'from keras.layers import Dense, Dropout\n'), ((10138, 10168), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (10143, 10168), False, 'from keras.layers import Dense, Dropout\n'), ((5437, 5481), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5442, 5481), False, 'from keras.layers import Dense, Dropout\n'), ((5501, 5546), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(128, activation='relu', trainable=True)\n", (5506, 5546), False, 'from keras.layers import Dense, Dropout\n'), ((5566, 5578), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (5573, 5578), False, 'from keras.layers import Dense, Dropout\n'), ((5599, 5643), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5604, 5643), False, 'from keras.layers import Dense, Dropout\n'), ((5663, 5676), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5670, 5676), False, 'from keras.layers import Dense, Dropout\n'), ((5696, 5740), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5701, 5740), False, 'from keras.layers import Dense, Dropout\n'), ((5760, 5773), 'keras.layers.Dropout', 'Dropout', (['(0.35)'], {}), '(0.35)\n', (5767, 5773), False, 'from keras.layers import Dense, Dropout\n'), ((5793, 5823), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5798, 5823), False, 'from keras.layers import Dense, Dropout\n'), ((8666, 8677), 'time.time', 'time.time', ([], {}), '()\n', (8675, 8677), False, 'import time\n'), ((8696, 8760), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': 's', 'max_iter': '(1000)', 'C': 'c'}), '(random_state=0, solver=s, max_iter=1000, C=c)\n', (8714, 8760), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9116, 9144), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (9124, 9144), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((9431, 9442), 'time.time', 'time.time', ([], {}), '()\n', (9440, 9442), False, 'import time\n'), ((751, 780), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), 
'(y_true * y_pred, 0, 1)\n', (757, 780), True, 'from keras import backend as K\n'), ((826, 846), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (832, 846), True, 'from keras import backend as K\n'), ((905, 916), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (914, 916), True, 'from keras import backend as K\n'), ((1236, 1265), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1242, 1265), True, 'from keras import backend as K\n'), ((1312, 1332), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (1318, 1332), True, 'from keras import backend as K\n'), ((1395, 1406), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1404, 1406), True, 'from keras import backend as K\n'), ((1562, 1573), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1571, 1573), True, 'from keras import backend as K\n'), ((3526, 3539), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3534, 3539), True, 'import numpy as np\n'), ((3727, 3740), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3735, 3740), True, 'import numpy as np\n'), ((3864, 3877), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3872, 3877), True, 'import numpy as np\n'), ((4011, 4024), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (4019, 4024), True, 'import numpy as np\n'), ((7268, 7281), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7276, 7281), True, 'import numpy as np\n'), ((7469, 7482), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7477, 7482), True, 'import numpy as np\n'), ((7606, 7619), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7614, 7619), True, 'import numpy as np\n'), ((7753, 7766), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7761, 7766), True, 'import numpy as np\n'), ((9056, 9097), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (9077, 9097), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((10862, 10875), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (10870, 10875), True, 'import numpy as np\n'), ((11063, 11076), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11071, 11076), True, 'import numpy as np\n'), ((11200, 11213), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11208, 11213), True, 'import numpy as np\n'), ((11347, 11360), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11355, 11360), True, 'import numpy as np\n'), ((2388, 2404), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2398, 2404), False, 'import json\n'), ((2434, 2450), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2444, 2450), False, 'import json\n')]
|
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm
__author__ = "<NAME>"
def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
''' Estimate the collision condition. '''
(veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
if -1e-6 < delta_angle_2 < 1e-6:
delta_angle_2 = 1e-6
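# Clamping the angle away from zero keeps the sin(delta_angle_2) terms below, and hence
# the relative closing speed veh_RDS, from vanishing and causing a division by zero
# when veh_e is computed.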
delta_v1_list = []
delta_v2_list = []
# Estimate the collision condition (delta-v) according to the principal impact direction.
for veh_striking in veh_striking_list:
if veh_striking[0] == 1:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 2:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V1_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
elif veh_striking[0] == 3:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgs[0] - veh_striking[3])
veh_RDS = np.abs(V2_v * np.cos(delta_angle) - V1_v)
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 4:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgf[0] - veh_striking[3])
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V2_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
# Obtain delta-v based on the planar 2-DOF rigid-body collision model with momentum conservation.
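# veh_y1 and veh_y2 are the moment-arm reduction factors k_i**2 / (a_i**2 + k_i**2);
# each delta-v is (1 + e) * m_other * y1 * y2 * RDS / (m1 * y1 + m2 * y2), so the closing
# speed is split between the vehicles in inverse proportion to their effective masses.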
veh_y1 = veh_k[0] ** 2 / (veh_a1 ** 2 + veh_k[0] ** 2)
veh_y2 = veh_k[1] ** 2 / (veh_a2 ** 2 + veh_k[1] ** 2)
delta_v1 = (1 + veh_e) * veh_m[1] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v2 = (1 + veh_e) * veh_m[0] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v1_list.append(delta_v1)
delta_v2_list.append(delta_v2)
delta_v1_ = max(delta_v1_list)
delta_v2_ = max(delta_v2_list)
index = delta_v1_list.index(max(delta_v1_list))
return delta_v1_, delta_v2_, index
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.arctan"
] |
[((723, 742), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (729, 742), True, 'import numpy as np\n'), ((1050, 1084), 'numpy.arctan', 'np.arctan', (['(veh_cgf[0] / veh_cgs[0])'], {}), '(veh_cgf[0] / veh_cgs[0])\n', (1059, 1084), True, 'import numpy as np\n'), ((1106, 1142), 'numpy.abs', 'np.abs', (['(veh_cgs[1] - veh_striking[3])'], {}), '(veh_cgs[1] - veh_striking[3])\n', (1112, 1142), True, 'import numpy as np\n'), ((1579, 1613), 'numpy.arctan', 'np.arctan', (['(veh_cgf[0] / veh_cgs[0])'], {}), '(veh_cgf[0] / veh_cgs[0])\n', (1588, 1613), True, 'import numpy as np\n'), ((1635, 1671), 'numpy.abs', 'np.abs', (['(veh_cgf[1] - veh_striking[3])'], {}), '(veh_cgf[1] - veh_striking[3])\n', (1641, 1671), True, 'import numpy as np\n'), ((1235, 1277), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)'], {}), '(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)\n', (1242, 1277), True, 'import numpy as np\n'), ((1280, 1310), 'numpy.cos', 'np.cos', (['(veh_ca + delta_angle_2)'], {}), '(veh_ca + delta_angle_2)\n', (1286, 1310), True, 'import numpy as np\n'), ((1818, 1839), 'numpy.sin', 'np.sin', (['delta_angle_2'], {}), '(delta_angle_2)\n', (1824, 1839), True, 'import numpy as np\n'), ((1931, 1965), 'numpy.arctan', 'np.arctan', (['(veh_cgf[1] / veh_cgs[1])'], {}), '(veh_cgf[1] / veh_cgs[1])\n', (1940, 1965), True, 'import numpy as np\n'), ((1987, 2023), 'numpy.abs', 'np.abs', (['(veh_cgs[0] - veh_striking[3])'], {}), '(veh_cgs[0] - veh_striking[3])\n', (1993, 2023), True, 'import numpy as np\n'), ((1179, 1198), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (1185, 1198), True, 'import numpy as np\n'), ((1700, 1742), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)'], {}), '(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)\n', (1707, 1742), True, 'import numpy as np\n'), ((1745, 1787), 'numpy.cos', 'np.cos', (['(delta_angle_2 - veh_ca + np.pi / 2)'], {}), '(delta_angle_2 - veh_ca + np.pi / 2)\n', (1751, 1787), True, 'import numpy as np\n'), ((2460, 2494), 'numpy.arctan', 'np.arctan', (['(veh_cgf[1] / veh_cgs[1])'], {}), '(veh_cgf[1] / veh_cgs[1])\n', (2469, 2494), True, 'import numpy as np\n'), ((2516, 2552), 'numpy.abs', 'np.abs', (['(veh_cgf[0] - veh_striking[3])'], {}), '(veh_cgf[0] - veh_striking[3])\n', (2522, 2552), True, 'import numpy as np\n'), ((2116, 2158), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)'], {}), '(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)\n', (2123, 2158), True, 'import numpy as np\n'), ((2161, 2191), 'numpy.cos', 'np.cos', (['(veh_ca + delta_angle_2)'], {}), '(veh_ca + delta_angle_2)\n', (2167, 2191), True, 'import numpy as np\n'), ((2699, 2720), 'numpy.sin', 'np.sin', (['delta_angle_2'], {}), '(delta_angle_2)\n', (2705, 2720), True, 'import numpy as np\n'), ((2060, 2079), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (2066, 2079), True, 'import numpy as np\n'), ((2581, 2623), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)'], {}), '(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)\n', (2588, 2623), True, 'import numpy as np\n'), ((2626, 2668), 'numpy.cos', 'np.cos', (['(delta_angle_2 - veh_ca + np.pi / 2)'], {}), '(delta_angle_2 - veh_ca + np.pi / 2)\n', (2632, 2668), True, 'import numpy as np\n')]
|
"""Test the search module"""
from collections.abc import Iterable, Sized
from io import StringIO
from itertools import chain, product
from functools import partial
import pickle
import sys
from types import GeneratorType
import re
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.utils.fixes import sp_version
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.base import clone
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import fit_grid_point
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier:
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert len(X) == len(Y)
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
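# Any foo_param greater than 1 scores 1.0, so a grid over [1, 2, 3] ties between 2 and 3;
# the tests below rely on the first (smallest) tied candidate being picked as best.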
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert list(grid) == [grid[i] for i in range(len(grid))]
@pytest.mark.parametrize("klass", [ParameterGrid,
partial(ParameterSampler, n_iter=10)])
@pytest.mark.parametrize(
"input, error_type, error_message",
[(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'),
([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'),
({'foo': 0}, TypeError, "Parameter.* value is not iterable .*"
r"\(key='foo', value=0\)")]
)
def test_validate_parameter_input(klass, input, error_type, error_message):
with pytest.raises(error_type, match=error_message):
klass(input)
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert isinstance(grid1, Iterable)
assert isinstance(grid1, Sized)
assert len(grid1) == 3
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert len(grid2) == 6
# loop to assert we can iterate over the grid multiple times
for i in range(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert (points ==
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert len(empty) == 1
assert list(empty) == [{}]
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert len(has_empty) == 4
assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}]
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert grid_search.best_estimator_.foo_param == 2
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def test_grid_search_pipeline_steps():
# check that parameters that are estimators are cloned before fitting
pipe = Pipeline([('regressor', LinearRegression())])
param_grid = {'regressor': [LinearRegression(), Ridge()]}
grid_search = GridSearchCV(pipe, param_grid, cv=2)
grid_search.fit(X, y)
regressor_results = grid_search.cv_results_['param_regressor']
assert isinstance(regressor_results[0], LinearRegression)
assert isinstance(regressor_results[1], Ridge)
assert not hasattr(regressor_results[0], 'coef_')
assert not hasattr(regressor_results[1], 'coef_')
assert regressor_results[0] is not grid_search.best_estimator_
assert regressor_results[1] is not grid_search.best_estimator_
# check that we didn't modify the parameter grid that was passed
assert not hasattr(param_grid['regressor'][0], 'coef_')
assert not hasattr(param_grid['regressor'][1], 'coef_')
@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
def test_SearchCV_with_fit_params(SearchCV):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = SearchCV(
clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise"
)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen."
with pytest.raises(AssertionError, match=err_msg):
searcher.fit(X, y, spam=np.ones(10))
err_msg = "Fit parameter spam has length 1; expected"
with pytest.raises(AssertionError, match=err_msg):
searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert grid_search_no_score.best_params_ == grid_search.best_params_
# check that we can call score and that it gives the correct result
assert grid_search.score(X, y) == grid_search_no_score.score(X, y)
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc'
).fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert score_auc < 1.0
assert score_accuracy < 1.0
assert score_auc != score_accuracy
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2),
GroupKFold(n_splits=3), GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert not hasattr(grid_search, 'classes_')
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert not hasattr(grid_search, 'classes_')
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert not hasattr(grid_search, 'classes_')
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3)
grid_search.fit(X, y)
assert hasattr(grid_search, "cv_results_")
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3)
random_search.fit(X, y)
assert hasattr(grid_search, "cv_results_")
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
for scoring in [None, ['accuracy', 'precision']]:
grid_search = GridSearchCV(
clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3
)
grid_search.fit(X, y)
assert not hasattr(grid_search, "best_estimator_") and \
hasattr(grid_search, "best_index_") and \
hasattr(grid_search, "best_params_")
# Make sure the functions predict/transform etc raise meaningful
# error messages
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters'
% fn_name), getattr(grid_search, fn_name), X)
# Test that an invalid refit param raises appropriate error messages
for refit in ["", 5, True, 'recall', 'accuracy']:
assert_raise_message(ValueError, "For multi-metric scoring, the "
"parameter refit must be set to a scorer key",
GridSearchCV(clf, {}, refit=refit,
scoring={'acc': 'accuracy',
'prec': 'precision'}
).fit,
X, y)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC(gamma='auto')
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3)
grid_search.fit(X, y)
assert grid_search.best_estimator_.foo_param == 2
def test_grid_search_bad_param_grid():
param_dict = {"C": 1}
clf = SVC(gamma='auto')
assert_raise_message(
ValueError,
"Parameter grid for parameter (C) needs to"
" be a list or numpy array, but got (<class 'int'>)."
" Single values need to be wrapped in a list"
" with one element.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC(gamma='auto')
assert_raise_message(
ValueError,
"Parameter grid for parameter (C) needs to"
" be a list or numpy array, but got (<class 'str'>)."
" Single values need to be wrapped in a list"
" with one element.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones((3, 2))}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert np.mean(y_pred == y_pred2) >= .9
assert C == C2
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert C == C2
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert C == C3
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert cv.best_score_ >= 0
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert np.mean(y_pred == y_test) >= 0
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert not hasattr(self, 'has_been_fit_')
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_refit_callable():
"""
Test refit=callable, which adds flexibility in identifying the
"best" estimator.
"""
def refit_callable(cv_results):
"""
A dummy function tests `refit=callable` interface.
Return the index of a model that has the least
`mean_test_score`.
"""
# Fit a dummy clf with `refit=True` to get a list of keys in
# clf.cv_results_.
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring='precision', refit=True)
clf.fit(X, y)
# Ensure that `best_index_ != 0` for this dummy clf
assert clf.best_index_ != 0
# Assert every key matches those in `cv_results`
for key in clf.cv_results_.keys():
assert key in cv_results
return cv_results['mean_test_score'].argmin()
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring='precision', refit=refit_callable)
clf.fit(X, y)
assert clf.best_index_ == 0
# Ensure `best_score_` is disabled when using `refit=callable`
assert not hasattr(clf, 'best_score_')
def test_refit_callable_invalid_type():
"""
Test implementation catches the errors when 'best_index_' returns an
invalid result.
"""
def refit_callable_invalid_type(cv_results):
"""
A dummy function tests when returned 'best_index_' is not integer.
"""
return None
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]},
scoring='precision', refit=refit_callable_invalid_type)
with pytest.raises(TypeError,
match='best_index_ returned is not an integer'):
clf.fit(X, y)
@pytest.mark.parametrize('out_bound_value', [-1, 2])
@pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV])
def test_refit_callable_out_bound(out_bound_value, search_cv):
"""
Test implementation catches the errors when 'best_index_' returns an
out of bound result.
"""
def refit_callable_out_bound(cv_results):
"""
A dummy function tests when returned 'best_index_' is out of bounds.
"""
return out_bound_value
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]},
scoring='precision', refit=refit_callable_out_bound)
with pytest.raises(IndexError, match='best_index_ index out of range'):
clf.fit(X, y)
def test_refit_callable_multi_metric():
"""
Test refit=callable in multiple metric evaluation setting
"""
def refit_callable(cv_results):
"""
A dummy function tests `refit=callable` interface.
Return the index of a model that has the least
`mean_test_prec`.
"""
assert 'mean_test_prec' in cv_results
return cv_results['mean_test_prec'].argmin()
X, y = make_classification(n_samples=100, n_features=4,
random_state=42)
scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'}
clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
scoring=scoring, refit=refit_callable)
clf.fit(X, y)
assert clf.best_index_ == 0
# Ensure `best_score_` is disabled when using `refit=callable`
assert not hasattr(clf, 'best_score_')
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(
check_X=check_X, check_y=check_y, methods_to_check=["fit"],
)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert hasattr(grid_search, "cv_results_")
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(
check_X=lambda x: isinstance(x, list), methods_to_check=["fit"],
)
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert hasattr(grid_search, "cv_results_")
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(
check_y=lambda x: isinstance(x, list), methods_to_check=["fit"],
)
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert hasattr(grid_search, "cv_results_")
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
def check_df(x):
return isinstance(x, InputFeatureType)
def check_series(x):
return isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert hasattr(grid_search, "cv_results_")
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(n_samples=50, random_state=0)
km = KMeans(random_state=0, init="random", n_init=1)
# Multi-metric evaluation unsupervised
scoring = ['adjusted_rand_score', 'fowlkes_mallows_score']
for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']:
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring=scoring, refit=refit)
grid_search.fit(X, y)
# Both ARI and FMS can find the right number :)
assert grid_search.best_params_["n_clusters"] == 3
# Single metric evaluation unsupervised
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
assert grid_search.best_params_["n_clusters"] == 3
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert grid_search.best_params_["n_clusters"] == 4
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert search.best_params_['bandwidth'] == .1
assert search.best_score_ == 42
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert len(samples) == 10
for sample in samples:
assert sample["kernel"] in ["rbf", "linear"]
assert 0 <= sample["C"] <= 1
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert [x for x in sampler] == [x for x in sampler]
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert [x for x in sampler] == [x for x in sampler]
def check_cv_results_array_types(search, param_keys, score_keys):
    # Check that the arrays in search.cv_results_ are of the correct types
cv_results = search.cv_results_
assert all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys)
assert all(cv_results[key].dtype == object for key in param_keys)
assert not any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys)
assert all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank'))
scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']
for key in scorer_keys:
assert cv_results['rank_test_%s' % key].dtype == np.int32
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
    # Check that search.cv_results_ contains all the required result keys
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys)
def test_grid_search_cv_results():
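    # Check that cv_results_ from GridSearchCV over two parameter grids has
    # the expected keys, array types and masked entries.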
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
search = GridSearchCV(SVC(), cv=n_splits, param_grid=params,
return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert all(cv_results['rank_test_score'] >= 1)
    assert all(np.all(cv_results[k] >= 0) for k in score_keys
               if k != 'rank_test_score')
    assert all(np.all(cv_results[k] <= 1) for k in score_keys
               if 'time' not in k and k != 'rank_test_score')
# Check cv_results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = search.cv_results_
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'poly')
assert all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf')
def test_random_search_cv_results():
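    # Same structural checks as above, but for RandomizedSearchCV with a
    # list of parameter distributions.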
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
params = [{'kernel': ['rbf'], 'C': expon(scale=10),
'gamma': expon(scale=0.1)},
{'kernel': ['poly'], 'degree': [2, 3]}]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits,
param_distributions=params,
return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(search, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
n_candidates = len(search.cv_results_['params'])
assert all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'poly')
assert all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf')
@pytest.mark.parametrize(
"SearchCV, specialized_params",
[(GridSearchCV, {'param_grid': {'C': [1, 10]}}),
(RandomizedSearchCV,
{'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]
)
def test_search_default_iid(SearchCV, specialized_params):
    # Check that per-split scores are averaged without any sample-size
    # weighting (unweighted mean/std), even when the folds are not iid.
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
    mask = np.ones(X.shape[0], dtype=bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
common_params = {'estimator': SVC(), 'cv': cv,
'return_train_score': True}
search = SearchCV(**common_params, **specialized_params)
search.fit(X, y)
test_cv_scores = np.array(
[search.cv_results_['split%d_test_score' % s][0]
for s in range(search.n_splits_)]
)
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(
[search.cv_results_['split%d_train_score' % s][0]
for s in range(search.n_splits_)]
)
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert search.cv_results_['param_C'][0] == 1
# scores are the same as above
assert_allclose(test_cv_scores, [1, 1. / 3.])
assert_allclose(train_cv_scores, [1, 1])
# Unweighted mean/std is used
assert test_mean == pytest.approx(np.mean(test_cv_scores))
assert test_std == pytest.approx(np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert train_mean == pytest.approx(1)
assert train_std == pytest.approx(0)
def test_grid_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_searches = []
for scoring in ({'accuracy': make_scorer(accuracy_score),
'recall': make_scorer(recall_score)},
'accuracy', 'recall'):
grid_search = GridSearchCV(SVC(), cv=n_splits,
param_grid=params,
scoring=scoring, refit=False)
grid_search.fit(X, y)
grid_searches.append(grid_search)
compare_cv_results_multimetric_with_single(*grid_searches)
def test_random_search_cv_results_multimetric():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_splits = 3
n_search_iter = 30
# Scipy 0.12's stats dists do not accept seed, hence we use param grid
params = dict(C=np.logspace(-4, 1, 3),
gamma=np.logspace(-5, 0, 3, base=0.1))
for refit in (True, False):
random_searches = []
for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'):
# If True, for multi-metric pass refit='accuracy'
if refit:
probability = True
refit = 'accuracy' if isinstance(scoring, tuple) else refit
else:
probability = False
clf = SVC(probability=probability, random_state=42)
random_search = RandomizedSearchCV(clf, n_iter=n_search_iter,
cv=n_splits,
param_distributions=params,
scoring=scoring,
refit=refit, random_state=0)
random_search.fit(X, y)
random_searches.append(random_search)
compare_cv_results_multimetric_with_single(*random_searches)
compare_refit_methods_when_refit_with_acc(
random_searches[0], random_searches[1], refit)
def compare_cv_results_multimetric_with_single(
search_multi, search_acc, search_rec):
"""Compare multi-metric cv_results with the ensemble of multiple
single metric cv_results from single metric grid/random search"""
assert search_multi.multimetric_
assert_array_equal(sorted(search_multi.scorer_),
('accuracy', 'recall'))
cv_results_multi = search_multi.cv_results_
cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v
for k, v in search_acc.cv_results_.items()}
cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v
for k, v in search_rec.cv_results_.items()})
# Check if score and timing are reasonable, also checks if the keys
# are present
assert all((np.all(cv_results_multi[k] <= 1) for k in (
'mean_score_time', 'std_score_time', 'mean_fit_time',
'std_fit_time')))
# Compare the keys, other than time keys, among multi-metric and
# single metric grid search results. np.testing.assert_equal performs a
# deep nested comparison of the two cv_results dicts
np.testing.assert_equal({k: v for k, v in cv_results_multi.items()
if not k.endswith('_time')},
{k: v for k, v in cv_results_acc_rec.items()
if not k.endswith('_time')})
def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit):
"""Compare refit multi-metric search methods with single metric methods"""
assert search_acc.refit == refit
if refit:
assert search_multi.refit == 'accuracy'
else:
assert not search_multi.refit
return # search cannot predict/score without refit
X, y = make_blobs(n_samples=100, n_features=4, random_state=42)
for method in ('predict', 'predict_proba', 'predict_log_proba'):
assert_almost_equal(getattr(search_multi, method)(X),
getattr(search_acc, method)(X))
assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y))
for key in ('best_index_', 'best_score_', 'best_params_'):
assert getattr(search_multi, key) == getattr(search_acc, key)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
    # The first two C values are close enough to give similar models
    # and hence a tie in their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
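    # The third C value (0.001) gives a clearly different (worse) score and
    # should therefore end up with rank 3.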
grid_search = GridSearchCV(SVC(), param_grid=param_grid,
return_train_score=True)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid,
return_train_score=True)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
assert not np.allclose(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
assert not np.allclose(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold()
for est in estimators:
        grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time in windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert np.all(search.cv_results_[key] >= 0)
assert np.all(search.cv_results_[key] < 1)
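        # The first candidate (C=0) fails to fit; with error_score=0 the
        # score is imputed without calling the scorer, so its score time
        # stays exactly 0.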
for key in ['mean_score_time', 'std_score_time']:
assert search.cv_results_[key][1] >= 0
assert search.cv_results_[key][0] == 0.0
assert np.all(search.cv_results_[key] < 1)
assert hasattr(search, "refit_time_")
assert isinstance(search.refit_time_, float)
assert search.refit_time_ >= 0
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert all(np.in1d(expected_keys, result_keys))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
# FIXME remove test_fit_grid_point as the function will be removed in 0.25
@ignore_warnings(category=FutureWarning)
def test_fit_grid_point():
X, y = make_classification(random_state=0)
cv = StratifiedKFold()
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}):
for train, test in cv.split(X, y):
this_scores, this_params, n_test_samples = fit_grid_point(
X, y, clone(svc), params, train, test,
scorer, verbose=False)
est = clone(svc).set_params(**params)
est.fit(X[train], y[train])
expected_score = scorer(est, X[test], y[test])
# Test the return values of fit_grid_point
assert_almost_equal(this_scores, expected_score)
assert params == this_params
assert n_test_samples == test.size
# Should raise an error upon multimetric scorer
assert_raise_message(ValueError, "For evaluating multiple scores, use "
"sklearn.model_selection.cross_validate instead.",
fit_grid_point, X, y, svc, params, train, test,
{'score': scorer}, verbose=True)
# FIXME remove test_fit_grid_point_deprecated as
# fit_grid_point will be removed in 0.25
def test_fit_grid_point_deprecated():
X, y = make_classification(random_state=0)
svc = LinearSVC(random_state=0)
scorer = make_scorer(accuracy_score)
msg = ("fit_grid_point is deprecated in version 0.23 "
"and will be removed in version 0.25")
params = {'C': 0.1}
train, test = next(StratifiedKFold().split(X, y))
with pytest.warns(FutureWarning, match=msg):
fit_grid_point(X, y, svc, params, train, test, scorer, verbose=False)
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3, cv=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold()
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert not hasattr(gs, "predict_proba")
def test_grid_search_allows_nans():
# Test GridSearchCV with SimpleImputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', SimpleImputer(strategy='mean', missing_values=np.nan)),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
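    # Calling fit with parameter == FAILING_PARAMETER raises a ValueError.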
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def score(self, X=None, Y=None):
return 0.
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
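        # Collect the per-split test scores of candidate ``i``.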
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
ranks = gs.cv_results_['rank_test_score']
# Check that succeeded estimators have lower ranks
assert ranks[0] <= 2 and ranks[1] <= 2
# Check that failed estimator has the highest rank
assert ranks[clf.FAILING_PARAMETER] == 3
assert gs.best_index_ != clf.FAILING_PARAMETER
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise warning if n_iter is bigger than total parameter space
params = [{'first': [0, 1], 'second': ['a', 'b', 'c']},
{'third': ['two', 'values']}]
sampler = ParameterSampler(params, n_iter=9)
n_iter = 9
grid_size = 8
expected_warning = ('The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For '
'exhaustive searches, use GridSearchCV.'
% (grid_size, n_iter, grid_size))
assert_warns_message(UserWarning, expected_warning,
list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=8)
samples = list(sampler)
assert len(samples) == 8
for values in ParameterGrid(params):
assert values in samples
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert len(samples) == 99
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert len(set(hashable_samples)) == 99
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
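    # 'first' is a distribution (not a list), so sampling happens with
    # replacement and n_iter is not capped by the size of a finite grid.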
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert len(samples) == 7
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid, cv=3)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert not hasattr(clf, "predict_proba")
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid, cv=3)
assert not hasattr(clf, "predict_proba")
clf.fit(X, y)
assert not hasattr(clf, "predict_proba")
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]}, cv=3)
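    # return_train_score defaults to False; the search should fit fine
    # without computing train scores.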
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples),
return_train_score=True)
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits), return_train_score=True)
gs2.fit(X, y)
# Give generator as a cv parameter
assert isinstance(KFold(n_splits=n_splits,
shuffle=True, random_state=0).split(X, y),
GeneratorType)
gs3 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0).split(X, y),
return_train_score=True)
gs3.fit(X, y)
gs4 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits, shuffle=True,
random_state=0), return_train_score=True)
gs4.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
# Check if generators are supported as cv and
# that the splits are consistent
np.testing.assert_equal(_pop_time_keys(gs3.cv_results_),
_pop_time_keys(gs4.cv_results_))
    # OneTimeSplitter is a non-re-entrant cv whose split can be called only
    # once. If ``cv.split`` were called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd parameters would not be evaluated,
    # as no train/test indices would be generated for the 2nd and subsequent
    # cv.split calls. This is a check to make sure cv.split is not called
    # once per param setting.
np.testing.assert_equal({k: v for k, v in gs.cv_results_.items()
if not k.endswith('_time')},
{k: v for k, v in gs2.cv_results_.items()
if not k.endswith('_time')})
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True),
return_train_score=True)
gs.fit(X, y)
    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are the same, the test and train scores must also be
    # the same, as long as the same train/test indices are generated for
    # all the cv splits for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
def test_custom_run_search():
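    # A custom BaseSearchCV subclass driving the search via ``evaluate``
    # should produce the same cv_results_ and public attributes as an
    # equivalent GridSearchCV.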
def check_results(results, gscv):
exp_results = gscv.cv_results_
assert sorted(results.keys()) == sorted(exp_results)
for k in results:
if not k.endswith('_time'):
# XXX: results['params'] is a list :|
results[k] = np.asanyarray(results[k])
if results[k].dtype.kind == 'O':
assert_array_equal(exp_results[k], results[k],
err_msg='Checking ' + k)
else:
assert_allclose(exp_results[k], results[k],
err_msg='Checking ' + k)
def fit_grid(param_grid):
return GridSearchCV(clf, param_grid,
return_train_score=True).fit(X, y)
class CustomSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
def _run_search(self, evaluate):
results = evaluate([{'max_depth': 1}, {'max_depth': 2}])
check_results(results, fit_grid({'max_depth': [1, 2]}))
results = evaluate([{'min_samples_split': 5},
{'min_samples_split': 10}])
check_results(results, fit_grid([{'max_depth': [1, 2]},
{'min_samples_split': [5, 10]}]))
# Using regressor to make sure each score differs
clf = DecisionTreeRegressor(random_state=0)
X, y = make_classification(n_samples=100, n_informative=4,
random_state=0)
mycv = CustomSearchCV(clf, return_train_score=True).fit(X, y)
gscv = fit_grid([{'max_depth': [1, 2]},
{'min_samples_split': [5, 10]}])
results = mycv.cv_results_
check_results(results, gscv)
for attr in dir(gscv):
if (attr[0].islower() and attr[-1:] == '_' and
attr not in {'cv_results_', 'best_estimator_',
'refit_time_', 'classes_'}):
assert getattr(gscv, attr) == getattr(mycv, attr), \
"Attribute %s not equal" % attr
def test__custom_fit_no_run_search():
class NoRunSearchSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
def fit(self, X, y=None, groups=None, **fit_params):
return self
# this should not raise any exceptions
NoRunSearchSearchCV(SVC()).fit(X, y)
class BadSearchCV(BaseSearchCV):
def __init__(self, estimator, **kwargs):
super().__init__(estimator, **kwargs)
with pytest.raises(NotImplementedError,
match="_run_search not implemented."):
# this should raise a NotImplementedError
BadSearchCV(SVC()).fit(X, y)
def test_empty_cv_iterator_error():
# Use global X, y
# create cv
cv = KFold(n_splits=3).split(X)
    # consume all of it; this should cause the expected ValueError
[u for u in cv]
# cv is empty now
train_size = 100
ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
cv=cv, n_jobs=4)
# assert that this raises an error
with pytest.raises(ValueError,
match='No fits were performed. '
'Was the CV iterator empty\\? '
'Were there no candidates\\?'):
ridge.fit(X[:train_size], y[:train_size])
def test_random_search_bad_cv():
# Use global X, y
class BrokenKFold(KFold):
def get_n_splits(self, *args, **kw):
return 1
# create bad cv
cv = BrokenKFold(n_splits=3)
train_size = 100
ridge = RandomizedSearchCV(Ridge(), {'alpha': [1e-3, 1e-2, 1e-1]},
cv=cv, n_jobs=4)
# assert that this raises an error
with pytest.raises(ValueError,
match='cv.split and cv.get_n_splits returned '
'inconsistent results. Expected \\d+ '
'splits, got \\d+'):
ridge.fit(X[:train_size], y[:train_size])
def test_n_features_in():
# make sure grid search and random search delegate n_features_in to the
# best estimator
n_features = 4
X, y = make_classification(n_features=n_features)
gbdt = HistGradientBoostingClassifier()
param_grid = {'max_iter': [3, 4]}
gs = GridSearchCV(gbdt, param_grid)
rs = RandomizedSearchCV(gbdt, param_grid, n_iter=1)
assert not hasattr(gs, 'n_features_in_')
assert not hasattr(rs, 'n_features_in_')
gs.fit(X, y)
rs.fit(X, y)
assert gs.n_features_in_ == n_features
assert rs.n_features_in_ == n_features
def test_search_cv__pairwise_property_delegated_to_base_estimator():
"""
    Test that BaseSearchCV exposes a _pairwise property matching the
    _pairwise property of its estimator, i.e. that _pairwise is delegated
    to the base estimator.
Non-regression test for issue #13920.
"""
est = BaseEstimator()
attr_message = "BaseSearchCV _pairwise property must match estimator"
for _pairwise_setting in [True, False]:
setattr(est, '_pairwise', _pairwise_setting)
cv = GridSearchCV(est, {'n_neighbors': [10]})
assert _pairwise_setting == cv._pairwise, attr_message
def test_search_cv__pairwise_property_equivalence_of_precomputed():
"""
    Test that BaseSearchCV delegates the _pairwise property to its
    estimator by checking that predictions with a 'precomputed' metric
    match those obtained with the equivalent non-precomputed metric.
Non-regression test for issue #13920.
"""
n_samples = 50
n_splits = 2
X, y = make_classification(n_samples=n_samples, random_state=0)
grid_params = {'n_neighbors': [10]}
# defaults to euclidean metric (minkowski p = 2)
clf = KNeighborsClassifier()
cv = GridSearchCV(clf, grid_params, cv=n_splits)
cv.fit(X, y)
preds_original = cv.predict(X)
# precompute euclidean metric to validate _pairwise is working
X_precomputed = euclidean_distances(X)
clf = KNeighborsClassifier(metric='precomputed')
cv = GridSearchCV(clf, grid_params, cv=n_splits)
cv.fit(X_precomputed, y)
preds_precomputed = cv.predict(X_precomputed)
attr_message = "GridSearchCV not identical with precomputed metric"
assert (preds_original == preds_precomputed).all(), attr_message
@pytest.mark.parametrize(
"SearchCV, param_search",
[(GridSearchCV, {'a': [0.1, 0.01]}),
(RandomizedSearchCV, {'a': uniform(1, 3)})]
)
def test_scalar_fit_param(SearchCV, param_search):
# unofficially sanctioned tolerance for scalar values in fit_params
# non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15805
class TestEstimator(BaseEstimator, ClassifierMixin):
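        # Minimal estimator that records the scalar fit param ``r`` passed
        # through SearchCV.fit.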
def __init__(self, a=None):
self.a = a
def fit(self, X, y, r=None):
self.r_ = r
def predict(self, X):
return np.zeros(shape=(len(X)))
model = SearchCV(TestEstimator(), param_search)
X, y = make_classification(random_state=42)
model.fit(X, y, r=42)
assert model.best_estimator_.r_ == 42
@pytest.mark.parametrize(
"SearchCV, param_search",
[(GridSearchCV, {'alpha': [0.1, 0.01]}),
(RandomizedSearchCV, {'alpha': uniform(0.01, 0.1)})]
)
def test_scalar_fit_param_compat(SearchCV, param_search):
    # Check support for scalar values in fit_params, as used for instance by
    # LightGBM. Such estimators do not exactly respect the scikit-learn API
    # contract, but we do not want to break them without an explicit
    # deprecation cycle and API recommendations for implementing early
    # stopping with a user-provided validation set.
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/15805
X_train, X_valid, y_train, y_valid = train_test_split(
*make_classification(random_state=42), random_state=42
)
class _FitParamClassifier(SGDClassifier):
def fit(self, X, y, sample_weight=None, tuple_of_arrays=None,
scalar_param=None, callable_param=None):
super().fit(X, y, sample_weight=sample_weight)
assert scalar_param > 0
assert callable(callable_param)
# The tuple of arrays should be preserved as tuple.
assert isinstance(tuple_of_arrays, tuple)
assert tuple_of_arrays[0].ndim == 2
assert tuple_of_arrays[1].ndim == 1
return self
def _fit_param_callable():
pass
model = SearchCV(
_FitParamClassifier(), param_search
)
# NOTE: `fit_params` should be data dependent (e.g. `sample_weight`) which
# is not the case for the following parameters. But this abuse is common in
# popular third-party libraries and we should tolerate this behavior for
# now and be careful not to break support for those without following
# proper deprecation cycle.
fit_params = {
'tuple_of_arrays': (X_valid, y_valid),
'callable_param': _fit_param_callable,
'scalar_param': 42,
}
model.fit(X_train, y_train, **fit_params)
'error_score': '(0.0)'}), "(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=\n False, error_score=0.0)\n", (52145, 52233), False, 'from sklearn.model_selection import GridSearchCV\n'), ((52255, 52299), 'sklearn.utils._testing.assert_warns', 'assert_warns', (['FitFailedWarning', 'gs.fit', 'X', 'y'], {}), '(FitFailedWarning, gs.fit, X, y)\n', (52267, 52299), False, 'from sklearn.utils._testing import assert_warns\n'), ((52983, 53027), 'sklearn.utils._testing.assert_warns', 'assert_warns', (['FitFailedWarning', 'gs.fit', 'X', 'y'], {}), '(FitFailedWarning, gs.fit, X, y)\n', (52995, 53027), False, 'from sklearn.utils._testing import assert_warns\n'), ((53719, 53783), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(20)', 'n_features': '(10)', 'random_state': '(0)'}), '(n_samples=20, n_features=10, random_state=0)\n', (53738, 53783), False, 'from sklearn.datasets import make_classification\n'), ((53905, 54009), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "[{'parameter': [0, 1, 2]}]"], {'scoring': '"""accuracy"""', 'refit': '(False)', 'error_score': '"""raise"""'}), "(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=\n False, error_score='raise')\n", (53917, 54009), False, 'from sklearn.model_selection import GridSearchCV\n'), ((54105, 54144), 'sklearn.utils._testing.assert_raises', 'assert_raises', (['ValueError', 'gs.fit', 'X', 'y'], {}), '(ValueError, gs.fit, X, y)\n', (54118, 54144), False, 'from sklearn.utils._testing import assert_raises\n'), ((54375, 54409), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params'], {'n_iter': '(9)'}), '(params, n_iter=9)\n', (54391, 54409), False, 'from sklearn.model_selection import ParameterSampler\n'), ((54711, 54777), 'sklearn.utils._testing.assert_warns_message', 'assert_warns_message', (['UserWarning', 'expected_warning', 'list', 'sampler'], {}), '(UserWarning, expected_warning, list, sampler)\n', (54731, 54777), False, 'from sklearn.utils._testing import assert_warns_message\n'), ((54884, 54918), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params'], {'n_iter': '(8)'}), '(params, n_iter=8)\n', (54900, 54918), False, 'from sklearn.model_selection import ParameterSampler\n'), ((54994, 55015), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['params'], {}), '(params)\n', (55007, 55015), False, 'from sklearn.model_selection import ParameterGrid\n'), ((55183, 55235), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params'], {'n_iter': '(99)', 'random_state': '(42)'}), '(params, n_iter=99, random_state=42)\n', (55199, 55235), False, 'from sklearn.model_selection import ParameterSampler\n'), ((55573, 55620), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['params_distribution'], {'n_iter': '(7)'}), '(params_distribution, n_iter=7)\n', (55589, 55620), False, 'from sklearn.model_selection import ParameterSampler\n'), ((56777, 56802), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (56786, 56802), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((56813, 56866), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf'], {'param_grid': "{'C': [0.1, 0.2]}", 'cv': '(3)'}), "(clf, param_grid={'C': [0.1, 0.2]}, cv=3)\n", (56825, 56866), False, 'from sklearn.model_selection import GridSearchCV\n'), ((57046, 57102), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'n_samples', 'random_state': '(0)'}), 
'(n_samples=n_samples, random_state=0)\n', (57065, 57102), False, 'from sklearn.datasets import make_classification\n'), ((60560, 60620), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'cv': '(3)', 'verbose': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3)\n", (60572, 60620), False, 'from sklearn.model_selection import GridSearchCV\n'), ((60727, 60762), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['X', 'X_round_trip'], {}), '(X, X_round_trip)\n', (60745, 60762), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((62223, 62260), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (62244, 62260), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((62272, 62339), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_informative': '(4)', 'random_state': '(0)'}), '(n_samples=100, n_informative=4, random_state=0)\n', (62291, 62339), False, 'from sklearn.datasets import make_classification\n'), ((65082, 65124), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_features': 'n_features'}), '(n_features=n_features)\n', (65101, 65124), False, 'from sklearn.datasets import make_classification\n'), ((65136, 65168), 'sklearn.ensemble.HistGradientBoostingClassifier', 'HistGradientBoostingClassifier', ([], {}), '()\n', (65166, 65168), False, 'from sklearn.ensemble import HistGradientBoostingClassifier\n'), ((65216, 65246), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['gbdt', 'param_grid'], {}), '(gbdt, param_grid)\n', (65228, 65246), False, 'from sklearn.model_selection import GridSearchCV\n'), ((65256, 65302), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['gbdt', 'param_grid'], {'n_iter': '(1)'}), '(gbdt, param_grid, n_iter=1)\n', (65274, 65302), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((65849, 65864), 'sklearn.base.BaseEstimator', 'BaseEstimator', ([], {}), '()\n', (65862, 65864), False, 'from sklearn.base import BaseEstimator, ClassifierMixin\n'), ((66512, 66568), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': 'n_samples', 'random_state': '(0)'}), '(n_samples=n_samples, random_state=0)\n', (66531, 66568), False, 'from sklearn.datasets import make_classification\n'), ((66673, 66695), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (66693, 66695), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((66705, 66748), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid_params'], {'cv': 'n_splits'}), '(clf, grid_params, cv=n_splits)\n', (66717, 66748), False, 'from sklearn.model_selection import GridSearchCV\n'), ((66889, 66911), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['X'], {}), '(X)\n', (66908, 66911), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((66922, 66964), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'metric': '"""precomputed"""'}), "(metric='precomputed')\n", (66942, 66964), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((66974, 67017), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid_params'], {'cv': 'n_splits'}), '(clf, grid_params, cv=n_splits)\n', (66986, 67017), False, 'from sklearn.model_selection import GridSearchCV\n'), ((67924, 67960), 
'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(42)'}), '(random_state=42)\n', (67943, 67960), False, 'from sklearn.datasets import make_classification\n'), ((3263, 3275), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (3272, 3275), True, 'import numpy as np\n'), ((4781, 4827), 'pytest.raises', 'pytest.raises', (['error_type'], {'match': 'error_message'}), '(error_type, match=error_message)\n', (4794, 4827), False, 'import pytest\n'), ((4345, 4381), 'functools.partial', 'partial', (['ParameterSampler'], {'n_iter': '(10)'}), '(ParameterSampler, n_iter=10)\n', (4352, 4381), False, 'from functools import partial\n'), ((8480, 8524), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': 'err_msg'}), '(AssertionError, match=err_msg)\n', (8493, 8524), False, 'import pytest\n'), ((8639, 8683), 'pytest.raises', 'pytest.raises', (['AssertionError'], {'match': 'err_msg'}), '(AssertionError, match=err_msg)\n', (8652, 8683), False, 'import pytest\n'), ((11460, 11478), 'sklearn.model_selection.LeaveOneGroupOut', 'LeaveOneGroupOut', ([], {}), '()\n', (11476, 11478), False, 'from sklearn.model_selection import LeaveOneGroupOut\n'), ((11480, 11498), 'sklearn.model_selection.LeavePGroupsOut', 'LeavePGroupsOut', (['(2)'], {}), '(2)\n', (11495, 11498), False, 'from sklearn.model_selection import LeavePGroupsOut\n'), ((11517, 11539), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (11527, 11539), False, 'from sklearn.model_selection import GroupKFold\n'), ((11541, 11560), 'sklearn.model_selection.GroupShuffleSplit', 'GroupShuffleSplit', ([], {}), '()\n', (11558, 11560), False, 'from sklearn.model_selection import GroupShuffleSplit\n'), ((11600, 11630), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'cv': 'cv'}), '(clf, grid, cv=cv)\n', (11612, 11630), False, 'from sklearn.model_selection import GridSearchCV\n'), ((11639, 11735), 'sklearn.utils._testing.assert_raise_message', 'assert_raise_message', (['ValueError', '"""The \'groups\' parameter should not be None."""', 'gs.fit', 'X', 'y'], {}), '(ValueError,\n "The \'groups\' parameter should not be None.", gs.fit, X, y)\n', (11659, 11735), False, 'from sklearn.utils._testing import assert_raise_message\n'), ((11848, 11865), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (11863, 11865), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((11867, 11891), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {}), '()\n', (11889, 11891), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((11935, 11965), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'cv': 'cv'}), '(clf, grid, cv=cv)\n', (11947, 11965), False, 'from sklearn.model_selection import GridSearchCV\n'), ((12250, 12275), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12259, 12275), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((12512, 12519), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (12517, 12519), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((12726, 12751), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12735, 12751), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((12920, 12945), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (12929, 12945), False, 'from 
sklearn.svm import LinearSVC, SVC\n'), ((13688, 13750), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {'refit': '(False)', 'cv': '(3)'}), "(clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3)\n", (13700, 13750), False, 'from sklearn.model_selection import GridSearchCV\n'), ((17052, 17067), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (17059, 17067), True, 'import numpy as np\n'), ((17704, 17730), 'numpy.mean', 'np.mean', (['(y_pred == y_pred2)'], {}), '(y_pred == y_pred2)\n', (17711, 17730), True, 'import numpy as np\n'), ((19554, 19579), 'numpy.mean', 'np.mean', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (19561, 19579), True, 'import numpy as np\n'), ((20428, 20448), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (20436, 20448), True, 'import numpy as np\n'), ((21283, 21348), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(100)', 'n_features': '(4)', 'random_state': '(42)'}), '(n_samples=100, n_features=4, random_state=42)\n', (21302, 21348), False, 'from sklearn.datasets import make_classification\n'), ((21965, 21991), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (21974, 21991), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((22695, 22721), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (22704, 22721), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((22828, 22900), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""best_index_ returned is not an integer"""'}), "(TypeError, match='best_index_ returned is not an integer')\n", (22841, 22900), False, 'import pytest\n'), ((23561, 23587), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (23570, 23587), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((23688, 23753), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': '"""best_index_ index out of range"""'}), "(IndexError, match='best_index_ index out of range')\n", (23701, 23753), False, 'import pytest\n'), ((24332, 24359), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (24343, 24359), False, 'from sklearn.metrics import make_scorer\n'), ((24405, 24431), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (24414, 24431), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((26743, 26801), 'sklearn.utils._mocking.CheckingClassifier', 'CheckingClassifier', ([], {'check_X': 'check_df', 'check_y': 'check_series'}), '(check_X=check_df, check_y=check_series)\n', (26761, 26801), False, 'from sklearn.utils._mocking import CheckingClassifier, MockDataFrame\n'), ((26825, 26868), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'foo_param': [1, 2, 3]}"], {}), "(clf, {'foo_param': [1, 2, 3]})\n", (26837, 26868), False, 'from sklearn.model_selection import GridSearchCV\n'), ((28495, 28510), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (28508, 28510), False, 'from sklearn.neighbors import KernelDensity\n'), ((28894, 28907), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (28901, 28907), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((29633, 29721), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', ([], {'param_distributions': 'param_distributions', 'n_iter': '(10)', 'random_state': '(0)'}), '(param_distributions=param_distributions, 
n_iter=10,\n random_state=0)\n', (29649, 29721), False, 'from sklearn.model_selection import ParameterSampler\n'), ((31818, 31823), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (31821, 31823), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((34001, 34006), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (34004, 34006), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((35961, 35966), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (35964, 35966), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((37074, 37090), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (37087, 37090), False, 'import pytest\n'), ((37115, 37131), 'pytest.approx', 'pytest.approx', (['(0)'], {}), '(0)\n', (37128, 37131), False, 'import pytest\n'), ((39745, 39778), 're.sub', 're.sub', (['"""_score$"""', '"""_accuracy"""', 'k'], {}), "('_score$', '_accuracy', k)\n", (39751, 39778), False, 'import re\n'), ((41854, 41859), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (41857, 41859), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((41979, 41984), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (41982, 41984), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((42391, 42483), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (["cv_results['mean_test_score'][0]", "cv_results['mean_test_score'][1]"], {}), "(cv_results['mean_test_score'][0], cv_results[\n 'mean_test_score'][1])\n", (42410, 42483), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((42515, 42609), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (["cv_results['mean_train_score'][0]", "cv_results['mean_train_score'][1]"], {}), "(cv_results['mean_train_score'][0], cv_results[\n 'mean_train_score'][1])\n", (42534, 42609), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((42966, 43035), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (["search.cv_results_['rank_test_score']", '[1, 1, 3]'], {}), "(search.cv_results_['rank_test_score'], [1, 1, 3])\n", (42985, 43035), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((43151, 43174), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (43172, 43174), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((43176, 43200), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (43198, 43200), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((43414, 43490), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (["grid_search.cv_results_['param_random_state']", '[0, None]'], {}), "(grid_search.cv_results_['param_random_state'], [0, None])\n", (43432, 43490), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((44800, 44856), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', "{'C': Cs}"], {'scoring': 'score', 'cv': 'n_splits'}), "(clf, {'C': Cs}, scoring=score, cv=n_splits)\n", (44812, 44856), False, 'from sklearn.model_selection import GridSearchCV\n'), ((45243, 45277), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (45258, 45277), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((47757, 47795), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {'match': 'msg'}), '(FutureWarning, match=msg)\n', (47769, 47795), False, 'import pytest\n'), ((47805, 47874), 'sklearn.model_selection.fit_grid_point', 'fit_grid_point', (['X', 'y', 'svc', 'params', 'train', 'test', 'scorer'], {'verbose': 
'(False)'}), '(X, y, svc, params, train, test, scorer, verbose=False)\n', (47819, 47874), False, 'from sklearn.model_selection import fit_grid_point\n'), ((48112, 48137), 'pickle.dumps', 'pickle.dumps', (['grid_search'], {}), '(grid_search)\n', (48124, 48137), False, 'import pickle\n'), ((48462, 48489), 'pickle.dumps', 'pickle.dumps', (['random_search'], {}), '(random_search)\n', (48474, 48489), False, 'import pickle\n'), ((48915, 48952), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {'random_state': '(0)'}), '(random_state=0)\n', (48936, 48952), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((48972, 49010), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (48994, 49010), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((49093, 49133), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['est', 'est_parameters'], {'cv': 'cv'}), '(est, est_parameters, cv=cv)\n', (49105, 49133), False, 'from sklearn.model_selection import GridSearchCV\n'), ((49721, 49777), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['est', 'est_parameters'], {'cv': 'cv', 'n_iter': '(3)'}), '(est, est_parameters, cv=cv, n_iter=3)\n', (49739, 49777), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((51455, 51475), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (51463, 51475), True, 'import numpy as np\n'), ((55517, 55531), 'scipy.stats.bernoulli', 'bernoulli', (['(0.5)'], {}), '(0.5)\n', (55526, 55531), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((57126, 57151), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (57135, 57151), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((57421, 57446), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (57430, 57446), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((57819, 57844), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (57828, 57844), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((58120, 58145), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (58129, 58145), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((59502, 59527), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(0)'}), '(random_state=0)\n', (59511, 59527), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((60250, 60317), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['per_param_scores[0]', 'per_param_scores[1]'], {}), '(per_param_scores[0], per_param_scores[1])\n', (60275, 60317), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((60360, 60427), 'sklearn.utils._testing.assert_array_almost_equal', 'assert_array_almost_equal', (['per_param_scores[2]', 'per_param_scores[3]'], {}), '(per_param_scores[2], per_param_scores[3])\n', (60385, 60427), False, 'from sklearn.utils._testing import assert_array_almost_equal\n'), ((63418, 63490), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""_run_search not implemented."""'}), "(NotImplementedError, match='_run_search not implemented.')\n", (63431, 63490), False, 'import pytest\n'), ((63874, 63881), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (63879, 63881), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((64011, 64136), 'pytest.raises', 
'pytest.raises', (['ValueError'], {'match': '"""No fits were performed. Was the CV iterator empty\\\\? Were there no candidates\\\\?"""'}), "(ValueError, match=\n 'No fits were performed. Was the CV iterator empty\\\\? Were there no candidates\\\\?'\n )\n", (64024, 64136), False, 'import pytest\n'), ((64526, 64533), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (64531, 64533), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((64663, 64798), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""cv.split and cv.get_n_splits returned inconsistent results. Expected \\\\d+ splits, got \\\\d+"""'}), "(ValueError, match=\n 'cv.split and cv.get_n_splits returned inconsistent results. Expected \\\\d+ splits, got \\\\d+'\n )\n", (64676, 64798), False, 'import pytest\n'), ((66050, 66090), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['est', "{'n_neighbors': [10]}"], {}), "(est, {'n_neighbors': [10]})\n", (66062, 66090), False, 'from sklearn.model_selection import GridSearchCV\n'), ((7209, 7227), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7225, 7227), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((7229, 7236), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (7234, 7236), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((8059, 8073), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (8068, 8073), True, 'import numpy as np\n'), ((8776, 8787), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (8783, 8787), True, 'import numpy as np\n'), ((8794, 8806), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (8802, 8806), True, 'import numpy as np\n'), ((10084, 10121), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'scoring': 'None'}), '(clf, grid, scoring=None)\n', (10096, 10121), False, 'from sklearn.model_selection import GridSearchCV\n'), ((10154, 10197), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'scoring': '"""accuracy"""'}), "(clf, grid, scoring='accuracy')\n", (10166, 10197), False, 'from sklearn.model_selection import GridSearchCV\n'), ((10419, 10461), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'grid'], {'scoring': '"""roc_auc"""'}), "(clf, grid, scoring='roc_auc')\n", (10431, 10461), False, 'from sklearn.model_selection import GridSearchCV\n'), ((12130, 12144), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (12139, 12144), True, 'import numpy as np\n'), ((18574, 18600), 'sklearn.metrics.f1_score', 'f1_score', (['y_true_', 'y_pred_'], {}), '(y_true_, y_pred_)\n', (18582, 18600), False, 'from sklearn.metrics import f1_score\n'), ((20627, 20641), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (20636, 20641), True, 'import numpy as np\n'), ((21411, 21437), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': '(42)'}), '(random_state=42)\n', (21420, 21437), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((24755, 24780), 'numpy.arange', 'np.arange', (['(10 * 5 * 3 * 2)'], {}), '(10 * 5 * 3 * 2)\n', (24764, 24780), True, 'import numpy as np\n'), ((24813, 24835), 'numpy.arange', 'np.arange', (['(10 * 7 * 11)'], {}), '(10 * 7 * 11)\n', (24822, 24835), True, 'import numpy as np\n'), ((25277, 25291), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (25286, 25291), True, 'import numpy as np\n'), ((25713, 25727), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (25722, 25727), True, 
'import numpy as np\n'), ((26372, 26386), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (26381, 26386), True, 'import numpy as np\n'), ((29600, 29613), 'scipy.stats.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (29607, 29613), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((33295, 33310), 'scipy.stats.expon', 'expon', ([], {'scale': '(10)'}), '(scale=10)\n', (33300, 33310), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((33336, 33352), 'scipy.stats.expon', 'expon', ([], {'scale': '(0.1)'}), '(scale=0.1)\n', (33341, 33352), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((36867, 36890), 'numpy.mean', 'np.mean', (['test_cv_scores'], {}), '(test_cv_scores)\n', (36874, 36890), True, 'import numpy as np\n'), ((36929, 36951), 'numpy.std', 'np.std', (['test_cv_scores'], {}), '(test_cv_scores)\n', (36935, 36951), True, 'import numpy as np\n'), ((37451, 37478), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (37462, 37478), False, 'from sklearn.metrics import make_scorer\n'), ((37511, 37536), 'sklearn.metrics.make_scorer', 'make_scorer', (['recall_score'], {}), '(recall_score)\n', (37522, 37536), False, 'from sklearn.metrics import make_scorer\n'), ((37617, 37622), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (37620, 37622), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((38156, 38177), 'numpy.logspace', 'np.logspace', (['(-4)', '(1)', '(3)'], {}), '(-4, 1, 3)\n', (38167, 38177), True, 'import numpy as np\n'), ((38203, 38234), 'numpy.logspace', 'np.logspace', (['(-5)', '(0)', '(3)'], {'base': '(0.1)'}), '(-5, 0, 3, base=0.1)\n', (38214, 38234), True, 'import numpy as np\n'), ((38635, 38680), 'sklearn.svm.SVC', 'SVC', ([], {'probability': 'probability', 'random_state': '(42)'}), '(probability=probability, random_state=42)\n', (38638, 38680), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((38709, 38845), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['clf'], {'n_iter': 'n_search_iter', 'cv': 'n_splits', 'param_distributions': 'params', 'scoring': 'scoring', 'refit': 'refit', 'random_state': '(0)'}), '(clf, n_iter=n_search_iter, cv=n_splits,\n param_distributions=params, scoring=scoring, refit=refit, random_state=0)\n', (38727, 38845), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((39883, 39914), 're.sub', 're.sub', (['"""_score$"""', '"""_recall"""', 'k'], {}), "('_score$', '_recall', k)\n", (39889, 39914), False, 'import re\n'), ((40101, 40133), 'numpy.all', 'np.all', (['(cv_results_multi[k] <= 1)'], {}), '(cv_results_multi[k] <= 1)\n', (40107, 40133), True, 'import numpy as np\n'), ((42652, 42731), 'numpy.allclose', 'np.allclose', (["cv_results['mean_test_score'][1]", "cv_results['mean_test_score'][2]"], {}), "(cv_results['mean_test_score'][1], cv_results['mean_test_score'][2])\n", (42663, 42731), True, 'import numpy as np\n'), ((42782, 42868), 'numpy.allclose', 'np.allclose', (["cv_results['mean_train_score'][1]", "cv_results['mean_train_score'][2]"], {}), "(cv_results['mean_train_score'][1], cv_results[\n 'mean_train_score'][2])\n", (42793, 42868), True, 'import numpy as np\n'), ((44082, 44118), 'numpy.all', 'np.all', (['(search.cv_results_[key] >= 0)'], {}), '(search.cv_results_[key] >= 0)\n', (44088, 44118), True, 'import numpy as np\n'), ((44138, 44173), 'numpy.all', 'np.all', (['(search.cv_results_[key] < 1)'], {}), '(search.cv_results_[key] < 1)\n', (44144, 44173), True, 'import numpy as np\n'), ((44356, 44391), 
'numpy.all', 'np.all', (['(search.cv_results_[key] < 1)'], {}), '(search.cv_results_[key] < 1)\n', (44362, 44391), True, 'import numpy as np\n'), ((45192, 45227), 'numpy.in1d', 'np.in1d', (['expected_keys', 'result_keys'], {}), '(expected_keys, result_keys)\n', (45199, 45227), True, 'import numpy as np\n'), ((46833, 46881), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['this_scores', 'expected_score'], {}), '(this_scores, expected_score)\n', (46852, 46881), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((50470, 50483), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (50479, 50483), True, 'import numpy as np\n'), ((50565, 50592), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', '{}'], {'cv': '(2)'}), '(clf, {}, cv=2)\n', (50577, 50592), False, 'from sklearn.model_selection import GridSearchCV\n'), ((50736, 50767), 'numpy.arange', 'np.arange', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (50745, 50767), True, 'import numpy as np\n'), ((50976, 51035), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['p', "{'classifier__foo_param': [1, 2, 3]}"], {'cv': '(2)'}), "(p, {'classifier__foo_param': [1, 2, 3]}, cv=2)\n", (50988, 51035), False, 'from sklearn.model_selection import GridSearchCV\n'), ((55894, 55907), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (55903, 55907), True, 'import numpy as np\n'), ((55983, 56010), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (55996, 56010), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((56468, 56495), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'loss': '"""hinge"""'}), "(loss='hinge')\n", (56481, 56495), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((56712, 56724), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (56721, 56724), True, 'import numpy as np\n'), ((57235, 57290), 'sklearn.model_selection.tests.common.OneTimeSplitter', 'OneTimeSplitter', ([], {'n_splits': 'n_splits', 'n_samples': 'n_samples'}), '(n_splits=n_splits, n_samples=n_samples)\n', (57250, 57290), False, 'from sklearn.model_selection.tests.common import OneTimeSplitter\n'), ((57532, 57556), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (57537, 57556), False, 'from sklearn.model_selection import KFold\n'), ((58231, 58285), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=n_splits, shuffle=True, random_state=0)\n', (58236, 58285), False, 'from sklearn.model_selection import KFold\n'), ((59616, 59654), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)'}), '(n_splits=n_splits, shuffle=True)\n', (59621, 59654), False, 'from sklearn.model_selection import KFold\n'), ((63688, 63705), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(3)'}), '(n_splits=3)\n', (63693, 63705), False, 'from sklearn.model_selection import KFold\n'), ((68731, 68767), 'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(42)'}), '(random_state=42)\n', (68750, 68767), False, 'from sklearn.datasets import make_classification\n'), ((7155, 7173), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7171, 7173), False, 'from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression\n'), ((8558, 8569), 
'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (8565, 8569), True, 'import numpy as np\n'), ((8717, 8727), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (8724, 8727), True, 'import numpy as np\n'), ((8734, 8746), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (8742, 8746), True, 'import numpy as np\n'), ((14784, 14872), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', '{}'], {'refit': 'refit', 'scoring': "{'acc': 'accuracy', 'prec': 'precision'}"}), "(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec':\n 'precision'})\n", (14796, 14872), False, 'from sklearn.model_selection import GridSearchCV\n'), ((35695, 35711), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (35703, 35711), True, 'import numpy as np\n'), ((35734, 35750), 'numpy.where', 'np.where', (['(y == 2)'], {}), '(y == 2)\n', (35742, 35750), True, 'import numpy as np\n'), ((43318, 43358), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['est', 'est_parameters'], {'cv': 'cv'}), '(est, est_parameters, cv=cv)\n', (43330, 43358), False, 'from sklearn.model_selection import GridSearchCV\n'), ((46002, 46050), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['correct_score', 'cv_scores[i]'], {}), '(correct_score, cv_scores[i])\n', (46021, 46050), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((46543, 46553), 'sklearn.base.clone', 'clone', (['svc'], {}), '(svc)\n', (46548, 46553), False, 'from sklearn.base import clone\n'), ((47716, 47733), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {}), '()\n', (47731, 47733), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((49498, 49596), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['correct_score', "grid_search.cv_results_['split%d_test_score' % i][cand_i]"], {}), "(correct_score, grid_search.cv_results_[\n 'split%d_test_score' % i][cand_i])\n", (49517, 49596), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((50189, 50289), 'sklearn.utils._testing.assert_almost_equal', 'assert_almost_equal', (['correct_score', "random_search.cv_results_['split%d_test_score' % i][cand_i]"], {}), "(correct_score, random_search.cv_results_[\n 'split%d_test_score' % i][cand_i])\n", (50208, 50289), False, 'from sklearn.utils._testing import assert_almost_equal\n'), ((50867, 50920), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""', 'missing_values': 'np.nan'}), "(strategy='mean', missing_values=np.nan)\n", (50880, 50920), False, 'from sklearn.impute import SimpleImputer\n'), ((57663, 57717), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=n_splits, shuffle=True, random_state=0)\n', (57668, 57717), False, 'from sklearn.model_selection import KFold\n'), ((61082, 61107), 'numpy.asanyarray', 'np.asanyarray', (['results[k]'], {}), '(results[k])\n', (61095, 61107), True, 'import numpy as np\n'), ((61481, 61535), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'param_grid'], {'return_train_score': '(True)'}), '(clf, param_grid, return_train_score=True)\n', (61493, 61535), False, 'from sklearn.model_selection import GridSearchCV\n'), ((63254, 63259), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (63257, 63259), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((67370, 67383), 'scipy.stats.uniform', 'uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (67377, 67383), False, 'from scipy.stats import bernoulli, expon, 
uniform\n'), ((68168, 68186), 'scipy.stats.uniform', 'uniform', (['(0.01)', '(0.1)'], {}), '(0.01, 0.1)\n', (68175, 68186), False, 'from scipy.stats import bernoulli, expon, uniform\n'), ((46634, 46644), 'sklearn.base.clone', 'clone', (['svc'], {}), '(svc)\n', (46639, 46644), False, 'from sklearn.base import clone\n'), ((57930, 57984), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_splits', 'shuffle': '(True)', 'random_state': '(0)'}), '(n_splits=n_splits, shuffle=True, random_state=0)\n', (57935, 57984), False, 'from sklearn.model_selection import KFold\n'), ((61177, 61248), 'sklearn.utils._testing.assert_array_equal', 'assert_array_equal', (['exp_results[k]', 'results[k]'], {'err_msg': "('Checking ' + k)"}), "(exp_results[k], results[k], err_msg='Checking ' + k)\n", (61195, 61248), False, 'from sklearn.utils._testing import assert_array_equal\n'), ((61330, 61398), 'sklearn.utils._testing.assert_allclose', 'assert_allclose', (['exp_results[k]', 'results[k]'], {'err_msg': "('Checking ' + k)"}), "(exp_results[k], results[k], err_msg='Checking ' + k)\n", (61345, 61398), False, 'from sklearn.utils._testing import assert_allclose\n'), ((63585, 63590), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (63588, 63590), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((5621, 5660), 'itertools.product', 'product', (["params2['bar']", "params2['foo']"], {}), "(params2['bar'], params2['foo'])\n", (5628, 5660), False, 'from itertools import chain, product\n'), ((45958, 45985), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y[test]', 'dec'], {}), '(y[test], dec)\n', (45971, 45985), False, 'from sklearn.metrics import roc_auc_score\n')]
|
# -*- encoding:utf-8 -*-
# @Time : 2021/1/3 15:15
# @Author : gfjiang
import os.path as osp
import mmcv
import numpy as np
import cvtools
import matplotlib.pyplot as plt
import cv2.cv2 as cv
from functools import partial
import torch
import math
from cvtools.utils.path import add_prefix_filename_suffix
from mmdet.ops import nms
from mmdet.apis import init_detector, inference_detector
def draw_features(module, input, output, work_dir='./'):
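    """Forward-hook callback that saves a grid of per-channel feature maps as a heat-map image.

    Intended to be registered with `module.register_forward_hook`; `output` is the feature
    tensor produced by the hooked layer, and the figure is written under `work_dir/feats`.
    """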
x = output.cpu().numpy()
out_channels = list(output.shape)[1]
height = int(math.sqrt(out_channels))
width = height
if list(output.shape)[2] < 128:
return
fig = plt.figure(figsize=(32, 32))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05)
for i in range(height * width):
plt.subplot(height, width, i + 1)
plt.axis('off')
img = x[0, i, :, :]
pmin = np.min(img)
pmax = np.max(img)
        img = ((img - pmin) / (pmax - pmin + 0.000001)) * 255  # scale float values in [0, 1] to the 0-255 range
        img = img.astype(np.uint8)  # convert to uint8
        img = cv.applyColorMap(img, cv.COLORMAP_JET)  # render as a JET heat map
        img = img[:, :, ::-1]  # cv2 returns BGR while matplotlib expects RGB, so reverse the channels
plt.imshow(img)
# print("{}/{}".format(i,width*height))
savename = get_image_name_for_hook(module, work_dir)
fig.savefig(savename, dpi=100)
fig.clf()
plt.close()
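# A minimal, hand-registered example of the hook above (an illustrative sketch, not part
# of the original pipeline; `some_module` is a placeholder for any torch.nn.Module).
# AerialDetectionOBB.vis_feats below performs the same registration automatically:
#
#     hook_fn = partial(draw_features, work_dir='work_dirs/attention_vis/')
#     handle = some_module.register_forward_hook(hook_fn)
#     # ... run a forward pass; feature-map figures are saved under work_dir/feats ...
#     handle.remove()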
def get_image_name_for_hook(module, work_dir='./'):
"""
    Generate an image filename for the feature-map hook function.

    Parameters:
    -----------
    module: the neural network module whose output is being visualized
    work_dir: directory under which the 'feats' images are saved
"""
# os.makedirs(work_dir, exist_ok=True)
module_name = str(module)
base_name = module_name.split('(')[0]
index = 0
    image_name = '.'  # '.' always exists, so the first loop iteration runs
while osp.exists(image_name):
index += 1
image_name = osp.join(
work_dir, 'feats', '%s_%d.png' % (base_name, index))
return image_name
class AerialDetectionOBB(object):
def __init__(self, config, pth):
self.imgs = []
self.cfg = mmcv.Config.fromfile(config)
self.pth = pth
print('loading model {} ...'.format(pth))
self.model = init_detector(self.cfg, self.pth, device='cuda:0')
self.results = []
self.img_detected = []
# self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d))
def __call__(self,
imgs_or_path,
det_thrs=0.5,
vis=False,
vis_thr=0.5,
save_root=''):
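        """Run detection over a directory path or an explicit list of image paths."""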
if isinstance(imgs_or_path, str):
self.imgs += cvtools.get_files_list(imgs_or_path)
else:
self.imgs += imgs_or_path
prog_bar = mmcv.ProgressBar(len(self.imgs))
for _, img in enumerate(self.imgs):
self.detect(img, det_thrs=det_thrs, vis=vis,
vis_thr=vis_thr, save_root=save_root)
prog_bar.update()
def detect(self,
img,
det_thrs=0.5,
vis=False,
vis_thr=0.5,
save_root=''):
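        """Run inference on a single image, optionally visualizing and thresholding the result."""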
result = inference_detector(self.model, img)
# result = self.nms(result)
if isinstance(det_thrs, float):
det_thrs = [det_thrs] * len(result)
if vis:
to_file = osp.join(save_root, osp.basename(img))
to_file = add_prefix_filename_suffix(to_file, suffix='_obb')
self.vis(img, result, vis_thr=vis_thr, to_file=to_file)
result = [det[det[..., -1] > det_thr] for det, det_thr
in zip(result, det_thrs)]
        if all(len(det) == 0 for det in result):
            print('detect: image {} has no object.'.format(img))
self.img_detected.append(img)
self.results.append(result)
return result
def nms(self, result, nms_th=0.3):
dets_num = [len(det_cls) for det_cls in result]
result = np.vstack(result)
_, ids = nms(result, nms_th)
total_num = 0
nms_result = []
for num in dets_num:
            # keep only the indices that fall within this class's slice of the stacked dets
            ids_cls = ids[np.where((total_num <= ids) & (ids < total_num + num))[0]]
nms_result.append(result[ids_cls])
total_num += num
return nms_result
def vis(self, img, bbox_result, vis_thr=0.5,
to_file='vis.jpg'):
bboxes = np.vstack(bbox_result)
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
inds = np.where(bboxes[:, -1] > vis_thr)[0]
bboxes = bboxes[inds]
labels = labels[inds]
texts = [self.model.CLASSES[index]+'|'+str(round(bbox[-1], 2))
for index, bbox in zip(labels, bboxes)]
img = cvtools.draw_boxes_texts(
img, bboxes[:, :-1], box_format='polygon', line_width=2)
cvtools.imwrite(img, to_file)
def vis_feats(self, modules_for_plot):
h, w = self.cfg.data.train.img_scale
for name, module in self.model.named_modules():
if isinstance(module, modules_for_plot):
draw_features_func = partial(
draw_features, work_dir=self.cfg.work_dir)
module.register_forward_hook(draw_features_func)
def save_results(self, save):
str_results = ''
for i, img in enumerate(self.img_detected):
result = self.results[i]
img = osp.basename(img)
for cls_index, dets in enumerate(result):
cls = self.model.CLASSES[cls_index]
for box in dets:
bbox_str = ','.join(map(str, map(int, box[:4])))
str_results += ' '.join([img, cls, bbox_str]) + '\n'
with open(save, 'w') as f:
f.write(str_results)
if __name__ == '__main__':
config_file = 'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2.py'
pth_file = 'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/epoch_12.pth'
detector = AerialDetectionOBB(config_file, pth_file)
detector('/media/data/DOTA/crop/P2701_2926_1597_3949_2620.png', vis=True,
save_root='work_dirs/attention_vis/')
detector.save_results('work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota_1gpus_mdanet2/detect_result.txt')
|
[
"math.sqrt",
"cvtools.imwrite",
"mmdet.ops.nms",
"matplotlib.pyplot.imshow",
"os.path.exists",
"cvtools.utils.path.add_prefix_filename_suffix",
"cvtools.draw_boxes_texts",
"numpy.where",
"mmdet.apis.init_detector",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.vstack",
"numpy.concatenate",
"numpy.min",
"matplotlib.pyplot.axis",
"mmdet.apis.inference_detector",
"cvtools.get_files_list",
"cv2.cv2.applyColorMap",
"os.path.join",
"matplotlib.pyplot.figure",
"functools.partial",
"os.path.basename",
"mmcv.Config.fromfile",
"numpy.full",
"matplotlib.pyplot.subplot"
] |
[((643, 671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(32, 32)'}), '(figsize=(32, 32))\n', (653, 671), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1410), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1408, 1410), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1837), 'os.path.exists', 'osp.exists', (['image_name'], {}), '(image_name)\n', (1825, 1837), True, 'import os.path as osp\n'), ((538, 561), 'math.sqrt', 'math.sqrt', (['out_channels'], {}), '(out_channels)\n', (547, 561), False, 'import math\n'), ((812, 845), 'matplotlib.pyplot.subplot', 'plt.subplot', (['height', 'width', '(i + 1)'], {}), '(height, width, i + 1)\n', (823, 845), True, 'import matplotlib.pyplot as plt\n'), ((854, 869), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (862, 869), True, 'import matplotlib.pyplot as plt\n'), ((913, 924), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (919, 924), True, 'import numpy as np\n'), ((940, 951), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (946, 951), True, 'import numpy as np\n'), ((1098, 1136), 'cv2.cv2.applyColorMap', 'cv.applyColorMap', (['img', 'cv.COLORMAP_JET'], {}), '(img, cv.COLORMAP_JET)\n', (1114, 1136), True, 'import cv2.cv2 as cv\n'), ((1225, 1240), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1235, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1940), 'os.path.join', 'osp.join', (['work_dir', '"""feats"""', "('%s_%d.png' % (base_name, index))"], {}), "(work_dir, 'feats', '%s_%d.png' % (base_name, index))\n", (1887, 1940), True, 'import os.path as osp\n'), ((2092, 2120), 'mmcv.Config.fromfile', 'mmcv.Config.fromfile', (['config'], {}), '(config)\n', (2112, 2120), False, 'import mmcv\n'), ((2215, 2265), 'mmdet.apis.init_detector', 'init_detector', (['self.cfg', 'self.pth'], {'device': '"""cuda:0"""'}), "(self.cfg, self.pth, device='cuda:0')\n", (2228, 2265), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((3136, 3171), 'mmdet.apis.inference_detector', 'inference_detector', (['self.model', 'img'], {}), '(self.model, img)\n', (3154, 3171), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((3924, 3941), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (3933, 3941), True, 'import numpy as np\n'), ((3959, 3978), 'mmdet.ops.nms', 'nms', (['result', 'nms_th'], {}), '(result, nms_th)\n', (3962, 3978), False, 'from mmdet.ops import nms\n'), ((4328, 4350), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (4337, 4350), True, 'import numpy as np\n'), ((4501, 4523), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (4515, 4523), True, 'import numpy as np\n'), ((4778, 4863), 'cvtools.draw_boxes_texts', 'cvtools.draw_boxes_texts', (['img', 'bboxes[:, :-1]'], {'box_format': '"""polygon"""', 'line_width': '(2)'}), "(img, bboxes[:, :-1], box_format='polygon',\n line_width=2)\n", (4802, 4863), False, 'import cvtools\n'), ((4881, 4910), 'cvtools.imwrite', 'cvtools.imwrite', (['img', 'to_file'], {}), '(img, to_file)\n', (4896, 4910), False, 'import cvtools\n'), ((2630, 2666), 'cvtools.get_files_list', 'cvtools.get_files_list', (['imgs_or_path'], {}), '(imgs_or_path)\n', (2652, 2666), False, 'import cvtools\n'), ((3395, 3445), 'cvtools.utils.path.add_prefix_filename_suffix', 'add_prefix_filename_suffix', (['to_file'], {'suffix': '"""_obb"""'}), "(to_file, suffix='_obb')\n", (3421, 3445), False, 'from cvtools.utils.path import add_prefix_filename_suffix\n'), ((4382, 4423), 
'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (4389, 4423), True, 'import numpy as np\n'), ((4539, 4572), 'numpy.where', 'np.where', (['(bboxes[:, -1] > vis_thr)'], {}), '(bboxes[:, -1] > vis_thr)\n', (4547, 4572), True, 'import numpy as np\n'), ((5454, 5471), 'os.path.basename', 'osp.basename', (['img'], {}), '(img)\n', (5466, 5471), True, 'import os.path as osp\n'), ((3354, 3371), 'os.path.basename', 'osp.basename', (['img'], {}), '(img)\n', (3366, 3371), True, 'import os.path as osp\n'), ((5150, 5200), 'functools.partial', 'partial', (['draw_features'], {'work_dir': 'self.cfg.work_dir'}), '(draw_features, work_dir=self.cfg.work_dir)\n', (5157, 5200), False, 'from functools import partial\n'), ((4080, 4122), 'numpy.where', 'np.where', (['((total_num <= ids) & (ids < num))'], {}), '((total_num <= ids) & (ids < num))\n', (4088, 4122), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import horovod.tensorflow as hvd
import time
from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags
from utils.gpu_affinity import set_affinity
import utils.dllogger_class
from dllogger import Verbosity
from utils.create_glue_data import *
import numpy as np
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer type : adam or lamb")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss from estimator")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
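  # Illustrative note: for token-level tasks one would instead take
  # model.get_sequence_output(), shaped [batch_size, seq_length, hidden_size],
  # and apply a classification layer per token rather than to the pooled
  # [CLS] representation used below.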
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias, name='cls_logits')
probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities')
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')
loss = tf.reduce_mean(per_example_loss, name='cls_loss')
return (loss, per_example_loss, logits, probabilities)
def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
label_ids = tf.placeholder(tf.int32, (None), 'label_ids')
create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
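  # Converter settings below: nodes listed in output_node_names are kept out
  # of TensorRT engine segments so they remain fetchable, the per-engine
  # workspace is capped just under 4 GiB, and is_dynamic_op=True defers engine
  # construction to runtime, when the actual input shapes are known.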
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.amp else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
if task_name == "cola":
FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions)
FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions)
TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions)
TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions)
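        # Matthews correlation coefficient computed from the streamed
        # confusion-matrix counts above. Note the denominator is zero (and MCC
        # becomes NaN) whenever a row or column of the confusion matrix is
        # empty, a case the original formula leaves undefined.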
MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC"))
return {"MCC": (MCC, MCC_op)}
elif task_name == "mrpc":
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1])
return {
"eval_accuracy": accuracy,
"eval_f1": f1,
"eval_loss": loss,
}
else:
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
tf.compat.v1.logging.info("*** Features ***")
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if not is_training and FLAGS.use_trt:
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint)
(total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids},
return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'],
name='')
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"probabilities": probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
return output_spec
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
dummy_op = tf.no_op()
      # When AMP (fp16) is enabled, the mixed precision graph rewrite must
      # still be applied so the eval graph uses the same rewritten ops as training.
if FLAGS.amp:
loss_scaler = tf.train.experimental.FixedLossScale(1)
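        # A constant scale of 1 suffices here: loss scaling only matters for
        # gradient computation during training, and a fixed scale avoids
        # creating dynamic loss-scale variables in the evaluation graph.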
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler)
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
dummy_op = tf.no_op()
      # When AMP (fp16) is enabled, the mixed precision graph rewrite must
      # still be applied so the prediction graph uses the same rewritten ops as training.
if FLAGS.amp:
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0))
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=probabilities)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn():
"""The actual input function."""
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
setup_xla_flags()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path)
if FLAGS.horovod:
hvd.init()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
set_affinity(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
if FLAGS.amp:
tf.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None,
log_step_count_steps=FLAGS.display_loss_steps,
keep_checkpoint_max=1)
if master_process:
tf.compat.v1.logging.info("***** Configuaration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
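  # The training hook below ignores the first 25 steps when timing throughput,
  # presumably so that one-off XLA compilation and other warm-up cost does not
  # skew the reported sentences/sec numbers.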
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25))
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
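    # For example, MRPC has 3,668 training examples, so with a global batch
    # size of 32 and 3 epochs this yields int(3668 / 32 * 3) = 343 optimizer
    # steps, the first int(343 * 0.1) = 34 of which run with a linearly
    # increasing ("warmup") learning rate.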
start_index = 0
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
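  # With Horovod, each rank converts and reads only its own slice of the
  # training examples (start_index/end_index above), and the learning rate
  # passed to model_fn_builder below is scaled linearly by hvd.size(), a
  # common heuristic for synchronous data-parallel training.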
model_fn = model_fn_builder(
task_name=task_name,
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(),
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
file_based_convert_examples_to_features(
train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank])
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.do_eval and master_process:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
batch_size=FLAGS.eval_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
time_list = eval_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size
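    # The cf_XX values below are percentile latencies, taken as the slowest
    # iteration within the fastest XX% of the sorted per-iteration timings.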
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
dllogging.logger.log(step=(), data={key: float(result[key])}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict and master_process:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.compat.v1.logging.info("***** Running prediction*****")
tf.compat.v1.logging.info(" Num examples = %d", len(predict_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
batch_size=FLAGS.predict_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
predict_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)]
predict_start_time = time.time()
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.io.gfile.GFile(output_predict_file, "w") as writer:
tf.compat.v1.logging.info("***** Predict results *****")
for prediction in estimator.predict(input_fn=predict_input_fn, hooks=predict_hooks,
yield_single_examples=False):
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
writer.write(output_line)
predict_time_elapsed = time.time() - predict_start_time
time_list = predict_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
predict_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.predict_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / predict_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", predict_time_elapsed,
predict_hooks[-1].count * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", predict_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on TEST SET")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
|
[
"utils.utils.setup_xla_flags",
"horovod.tensorflow.init",
"tensorflow.reduce_sum",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.truncated_normal_initializer",
"tensorflow.metrics.mean",
"tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverter",
"tensorflow.io.FixedLenFeature",
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"tensorflow.zeros_initializer",
"tensorflow.reduce_mean",
"horovod.tensorflow.local_rank",
"numpy.mean",
"tensorflow.io.gfile.GFile",
"tensorflow.train.init_from_checkpoint",
"utils.utils.LogTrainRunHook",
"tensorflow.Session",
"tensorflow.estimator.Estimator",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.app.run",
"tensorflow.placeholder",
"tensorflow.metrics.accuracy",
"modeling.BertModel",
"tensorflow.matmul",
"horovod.tensorflow.size",
"tensorflow.trainable_variables",
"tokenization.FullTokenizer",
"modeling.get_assignment_map_from_checkpoint",
"tensorflow.metrics.true_positives",
"horovod.tensorflow.BroadcastGlobalVariablesHook",
"tensorflow.one_hot",
"tensorflow.variable_scope",
"tensorflow.train.experimental.FixedLossScale",
"optimization.create_optimizer",
"tensorflow.parse_single_example",
"optimization.LAMBOptimizer",
"tensorflow.nn.log_softmax",
"tensorflow.to_int32",
"tensorflow.metrics.false_positives",
"tf_metrics.f1",
"modeling.BertConfig.from_json_file",
"tensorflow.import_graph_def",
"time.time",
"tensorflow.enable_resource_variables",
"tensorflow.nn.bias_add",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.data.TFRecordDataset",
"tensorflow.metrics.false_negatives",
"tensorflow.estimator.RunConfig",
"tensorflow.metrics.true_negatives",
"tensorflow.no_op",
"os.path.join",
"tensorflow.io.gfile.makedirs",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.constant",
"utils.utils.LogEvalRunHook",
"tensorflow.identity",
"horovod.tensorflow.rank"
] |
[((6344, 6560), 'modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings', 'compute_type': 'tf.float32'}), '(config=bert_config, is_training=is_training, input_ids=\n input_ids, input_mask=input_mask, token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings, compute_type=tf.float32)\n', (6362, 6560), False, 'import modeling\n'), ((7956, 7982), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (7980, 7982), True, 'import tensorflow as tf\n'), ((17775, 17792), 'utils.utils.setup_xla_flags', 'setup_xla_flags', ([], {}), '()\n', (17790, 17792), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((17796, 17857), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.INFO'], {}), '(tf.compat.v1.logging.INFO)\n', (17830, 17857), True, 'import tensorflow as tf\n'), ((18291, 18349), 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), '(FLAGS.bert_config_file)\n', (18325, 18349), False, 'import modeling\n'), ((18628, 18666), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (18648, 18666), True, 'import tensorflow as tf\n'), ((18891, 18986), 'tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'FLAGS.vocab_file', 'do_lower_case': 'FLAGS.do_lower_case'}), '(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS\n .do_lower_case)\n', (18917, 18986), False, 'import tokenization\n'), ((19139, 19165), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (19163, 19165), True, 'import tensorflow as tf\n'), ((19898, 20245), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {'model_dir': '(FLAGS.output_dir if master_process else None)', 'session_config': 'config', 'save_checkpoints_steps': '(FLAGS.save_checkpoints_steps if master_process else None)', 'save_summary_steps': '(FLAGS.save_checkpoints_steps if master_process else None)', 'log_step_count_steps': 'FLAGS.display_loss_steps', 'keep_checkpoint_max': '(1)'}), '(model_dir=FLAGS.output_dir if master_process else\n None, session_config=config, save_checkpoints_steps=FLAGS.\n save_checkpoints_steps if master_process else None, save_summary_steps=\n FLAGS.save_checkpoints_steps if master_process else None,\n log_step_count_steps=FLAGS.display_loss_steps, keep_checkpoint_max=1)\n', (19920, 20245), True, 'import tensorflow as tf\n'), ((22110, 22170), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'model_fn', 'config': 'run_config'}), '(model_fn=model_fn, config=run_config)\n', (22132, 22170), True, 'import tensorflow as tf\n'), ((31757, 31779), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (31777, 31779), True, 'import tensorflow as tf\n'), ((4847, 4892), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (4868, 4892), True, 'import tensorflow as tf\n'), ((4914, 4959), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 'tf.int64'], {}), '([seq_length], tf.int64)\n', (4935, 4959), True, 'import tensorflow as tf\n'), ((4982, 5027), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[seq_length]', 
'tf.int64'], {}), '([seq_length], tf.int64)\n', (5003, 5027), True, 'import tensorflow as tf\n'), ((5048, 5083), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (5069, 5083), True, 'import tensorflow as tf\n'), ((5204, 5253), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), '(record, name_to_features)\n', (5227, 5253), True, 'import tensorflow as tf\n'), ((5737, 5772), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_file'], {}), '(input_file)\n', (5760, 5772), True, 'import tensorflow as tf\n'), ((7132, 7157), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (7149, 7157), True, 'import tensorflow as tf\n'), ((7283, 7340), 'tensorflow.matmul', 'tf.matmul', (['output_layer', 'output_weights'], {'transpose_b': '(True)'}), '(output_layer, output_weights, transpose_b=True)\n', (7292, 7340), True, 'import tensorflow as tf\n'), ((7354, 7408), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['logits', 'output_bias'], {'name': '"""cls_logits"""'}), "(logits, output_bias, name='cls_logits')\n", (7368, 7408), True, 'import tensorflow as tf\n'), ((7429, 7485), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'axis': '(-1)', 'name': '"""cls_probabilities"""'}), "(logits, axis=-1, name='cls_probabilities')\n", (7442, 7485), True, 'import tensorflow as tf\n'), ((7502, 7536), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (7519, 7536), True, 'import tensorflow as tf\n'), ((7559, 7613), 'tensorflow.one_hot', 'tf.one_hot', (['labels'], {'depth': 'num_labels', 'dtype': 'tf.float32'}), '(labels, depth=num_labels, dtype=tf.float32)\n', (7569, 7613), True, 'import tensorflow as tf\n'), ((7730, 7779), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['per_example_loss'], {'name': '"""cls_loss"""'}), "(per_example_loss, name='cls_loss')\n", (7744, 7779), True, 'import tensorflow as tf\n'), ((8149, 8177), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (8159, 8177), True, 'import tensorflow as tf\n'), ((8206, 8250), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'shape', '"""input_ids"""'], {}), "(tf.int32, shape, 'input_ids')\n", (8220, 8250), True, 'import tensorflow as tf\n'), ((8268, 8313), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'shape', '"""input_mask"""'], {}), "(tf.int32, shape, 'input_mask')\n", (8282, 8313), True, 'import tensorflow as tf\n'), ((8332, 8378), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'shape', '"""segment_ids"""'], {}), "(tf.int32, shape, 'segment_ids')\n", (8346, 8378), True, 'import tensorflow as tf\n'), ((8395, 8438), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'None', '"""label_ids"""'], {}), "(tf.int32, None, 'label_ids')\n", (8409, 8438), True, 'import tensorflow as tf\n'), ((8587, 8611), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (8609, 8611), True, 'import tensorflow as tf\n'), ((8663, 8730), 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (8706, 8730), False, 'import modeling\n'), ((8735, 8797), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (8764, 8797), True, 'import tensorflow as tf\n'), ((8874, 8932), 
'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""**** Trainable Variables ****"""'], {}), "('**** Trainable Variables ****')\n", (8899, 8932), True, 'import tensorflow as tf\n'), ((9552, 9818), 'tensorflow.python.compiler.tensorrt.trt_convert.TrtGraphConverter', 'trt.TrtGraphConverter', ([], {'input_graph_def': 'frozen_graph', 'nodes_blacklist': 'output_node_names', 'max_workspace_size_bytes': '((4096 << 20) - 1000)', 'precision_mode': "('FP16' if FLAGS.amp else 'FP32')", 'minimum_segment_size': '(4)', 'is_dynamic_op': '(True)', 'maximum_cached_engines': '(1000)'}), "(input_graph_def=frozen_graph, nodes_blacklist=\n output_node_names, max_workspace_size_bytes=(4096 << 20) - 1000,\n precision_mode='FP16' if FLAGS.amp else 'FP32', minimum_segment_size=4,\n is_dynamic_op=True, maximum_cached_engines=1000)\n", (9573, 9818), True, 'from tensorflow.python.compiler.tensorrt import trt_convert as trt\n'), ((12163, 12208), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""*** Features ***"""'], {}), "('*** Features ***')\n", (12188, 12208), True, 'import tensorflow as tf\n'), ((12213, 12258), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""*** Features ***"""'], {}), "('*** Features ***')\n", (12238, 12258), True, 'import tensorflow as tf\n'), ((13898, 13922), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (13920, 13922), True, 'import tensorflow as tf\n'), ((17951, 17961), 'horovod.tensorflow.init', 'hvd.init', ([], {}), '()\n', (17959, 17961), True, 'import horovod.tensorflow as hvd\n'), ((19193, 19256), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Multi-GPU training with TF Horovod"""'], {}), "('Multi-GPU training with TF Horovod')\n", (19218, 19256), True, 'import tensorflow as tf\n'), ((19499, 19509), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (19507, 19509), True, 'import horovod.tensorflow as hvd\n'), ((20293, 20348), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Configuaration *****"""'], {}), "('***** Configuaration *****')\n", (20318, 20348), True, 'import tensorflow as tf\n'), ((20475, 20530), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""**************************"""'], {}), "('**************************')\n", (20500, 20530), True, 'import tensorflow as tf\n'), ((20631, 20734), 'utils.utils.LogTrainRunHook', 'LogTrainRunHook', (['global_batch_size', 'hvd_rank', 'FLAGS.save_checkpoints_steps'], {'num_steps_ignore_xla': '(25)'}), '(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps,\n num_steps_ignore_xla=25)\n', (20646, 20734), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((22374, 22431), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Running training *****"""'], {}), "('***** Running training *****')\n", (22399, 22431), True, 'import tensorflow as tf\n'), ((22510, 22580), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), "(' Batch size = %d', FLAGS.train_batch_size)\n", (22535, 22580), True, 'import tensorflow as tf\n'), ((22585, 22647), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), "(' Num steps = %d', num_train_steps)\n", (22610, 22647), True, 'import tensorflow as tf\n'), ((22943, 22954), 'time.time', 'time.time', ([], {}), '()\n', (22952, 22954), False, 'import 
time\n'), ((24284, 24332), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval.tf_record"""'], {}), "(FLAGS.output_dir, 'eval.tf_record')\n", (24296, 24332), False, 'import os\n'), ((24462, 24521), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Running evaluation *****"""'], {}), "('***** Running evaluation *****')\n", (24487, 24521), True, 'import tensorflow as tf\n'), ((24599, 24668), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), "(' Batch size = %d', FLAGS.eval_batch_size)\n", (24624, 24668), True, 'import tensorflow as tf\n'), ((25015, 25026), 'time.time', 'time.time', ([], {}), '()\n', (25024, 25026), False, 'import time\n'), ((25438, 25456), 'numpy.mean', 'np.mean', (['time_list'], {}), '(time_list)\n', (25445, 25456), True, 'import numpy as np\n'), ((25814, 25872), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (25839, 25872), True, 'import tensorflow as tf\n'), ((25877, 26022), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time = %0.2f for Sentences = %d"""', 'eval_time_elapsed', '(eval_hooks[-1].count * FLAGS.eval_batch_size)'], {}), "('Total Inference Time = %0.2f for Sentences = %d',\n eval_time_elapsed, eval_hooks[-1].count * FLAGS.eval_batch_size)\n", (25902, 26022), True, 'import tensorflow as tf\n'), ((26043, 26179), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time W/O Overhead = %0.2f for Sentences = %d"""', 'eval_time_wo_overhead', 'num_sentences'], {}), "(\n 'Total Inference Time W/O Overhead = %0.2f for Sentences = %d',\n eval_time_wo_overhead, num_sentences)\n", (26068, 26179), True, 'import tensorflow as tf\n'), ((26195, 26264), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Summary Inference Statistics on EVAL set"""'], {}), "('Summary Inference Statistics on EVAL set')\n", (26220, 26264), True, 'import tensorflow as tf\n'), ((26269, 26336), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Batch size = %d"""', 'FLAGS.eval_batch_size'], {}), "('Batch size = %d', FLAGS.eval_batch_size)\n", (26294, 26336), True, 'import tensorflow as tf\n'), ((26341, 26412), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Sequence Length = %d"""', 'FLAGS.max_seq_length'], {}), "('Sequence Length = %d', FLAGS.max_seq_length)\n", (26366, 26412), True, 'import tensorflow as tf\n'), ((26417, 26493), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Precision = %s"""', "('fp16' if FLAGS.amp else 'fp32')"], {}), "('Precision = %s', 'fp16' if FLAGS.amp else 'fp32')\n", (26442, 26493), True, 'import tensorflow as tf\n'), ((26498, 26585), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 50 (ms) = %0.2f"""', '(cf_50 * 1000)'], {}), "('Latency Confidence Level 50 (ms) = %0.2f', cf_50 *\n 1000)\n", (26523, 26585), True, 'import tensorflow as tf\n'), ((26586, 26673), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 90 (ms) = %0.2f"""', '(cf_90 * 1000)'], {}), "('Latency Confidence Level 90 (ms) = %0.2f', cf_90 *\n 1000)\n", (26611, 26673), True, 'import tensorflow as tf\n'), ((26674, 26761), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 95 (ms) = %0.2f"""', 
'(cf_95 * 1000)'], {}), "('Latency Confidence Level 95 (ms) = %0.2f', cf_95 *\n 1000)\n", (26699, 26761), True, 'import tensorflow as tf\n'), ((26762, 26849), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 99 (ms) = %0.2f"""', '(cf_99 * 1000)'], {}), "('Latency Confidence Level 99 (ms) = %0.2f', cf_99 *\n 1000)\n", (26787, 26849), True, 'import tensorflow as tf\n'), ((26850, 26940), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 100 (ms) = %0.2f"""', '(cf_100 * 1000)'], {}), "('Latency Confidence Level 100 (ms) = %0.2f', \n cf_100 * 1000)\n", (26875, 26940), True, 'import tensorflow as tf\n'), ((26940, 27009), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Average (ms) = %0.2f"""', '(avg * 1000)'], {}), "('Latency Average (ms) = %0.2f', avg * 1000)\n", (26965, 27009), True, 'import tensorflow as tf\n'), ((27014, 27114), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) = %0.2f"""', 'ss_sentences_per_second'], {}), "('Throughput Average (sentences/sec) = %0.2f',\n ss_sentences_per_second)\n", (27039, 27114), True, 'import tensorflow as tf\n'), ((27228, 27286), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (27253, 27286), True, 'import tensorflow as tf\n'), ((27312, 27362), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""eval_results.txt"""'], {}), "(FLAGS.output_dir, 'eval_results.txt')\n", (27324, 27362), False, 'import os\n'), ((27882, 27933), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""predict.tf_record"""'], {}), "(FLAGS.output_dir, 'predict.tf_record')\n", (27894, 27933), False, 'import os\n'), ((28148, 28206), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Running prediction*****"""'], {}), "('***** Running prediction*****')\n", (28173, 28206), True, 'import tensorflow as tf\n'), ((28287, 28359), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), "(' Batch size = %d', FLAGS.predict_batch_size)\n", (28312, 28359), True, 'import tensorflow as tf\n'), ((28730, 28741), 'time.time', 'time.time', ([], {}), '()\n', (28739, 28741), False, 'import time\n'), ((28769, 28819), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""test_results.tsv"""'], {}), "(FLAGS.output_dir, 'test_results.tsv')\n", (28781, 28819), False, 'import os\n'), ((29626, 29644), 'numpy.mean', 'np.mean', (['time_list'], {}), '(time_list)\n', (29633, 29644), True, 'import numpy as np\n'), ((30005, 30063), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (30030, 30063), True, 'import tensorflow as tf\n'), ((30068, 30222), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time = %0.2f for Sentences = %d"""', 'predict_time_elapsed', '(predict_hooks[-1].count * FLAGS.predict_batch_size)'], {}), "('Total Inference Time = %0.2f for Sentences = %d',\n predict_time_elapsed, predict_hooks[-1].count * FLAGS.predict_batch_size)\n", (30093, 30222), True, 'import tensorflow as tf\n'), ((30243, 30382), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Inference Time W/O Overhead = %0.2f for Sentences = %d"""', 'predict_time_wo_overhead', 
'num_sentences'], {}), "(\n 'Total Inference Time W/O Overhead = %0.2f for Sentences = %d',\n predict_time_wo_overhead, num_sentences)\n", (30268, 30382), True, 'import tensorflow as tf\n'), ((30408, 30477), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Summary Inference Statistics on TEST SET"""'], {}), "('Summary Inference Statistics on TEST SET')\n", (30433, 30477), True, 'import tensorflow as tf\n'), ((30482, 30552), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Batch size = %d"""', 'FLAGS.predict_batch_size'], {}), "('Batch size = %d', FLAGS.predict_batch_size)\n", (30507, 30552), True, 'import tensorflow as tf\n'), ((30557, 30628), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Sequence Length = %d"""', 'FLAGS.max_seq_length'], {}), "('Sequence Length = %d', FLAGS.max_seq_length)\n", (30582, 30628), True, 'import tensorflow as tf\n'), ((30633, 30709), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Precision = %s"""', "('fp16' if FLAGS.amp else 'fp32')"], {}), "('Precision = %s', 'fp16' if FLAGS.amp else 'fp32')\n", (30658, 30709), True, 'import tensorflow as tf\n'), ((30714, 30801), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 50 (ms) = %0.2f"""', '(cf_50 * 1000)'], {}), "('Latency Confidence Level 50 (ms) = %0.2f', cf_50 *\n 1000)\n", (30739, 30801), True, 'import tensorflow as tf\n'), ((30802, 30889), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 90 (ms) = %0.2f"""', '(cf_90 * 1000)'], {}), "('Latency Confidence Level 90 (ms) = %0.2f', cf_90 *\n 1000)\n", (30827, 30889), True, 'import tensorflow as tf\n'), ((30890, 30977), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 95 (ms) = %0.2f"""', '(cf_95 * 1000)'], {}), "('Latency Confidence Level 95 (ms) = %0.2f', cf_95 *\n 1000)\n", (30915, 30977), True, 'import tensorflow as tf\n'), ((30978, 31065), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 99 (ms) = %0.2f"""', '(cf_99 * 1000)'], {}), "('Latency Confidence Level 99 (ms) = %0.2f', cf_99 *\n 1000)\n", (31003, 31065), True, 'import tensorflow as tf\n'), ((31066, 31156), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Confidence Level 100 (ms) = %0.2f"""', '(cf_100 * 1000)'], {}), "('Latency Confidence Level 100 (ms) = %0.2f', \n cf_100 * 1000)\n", (31091, 31156), True, 'import tensorflow as tf\n'), ((31156, 31225), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Latency Average (ms) = %0.2f"""', '(avg * 1000)'], {}), "('Latency Average (ms) = %0.2f', avg * 1000)\n", (31181, 31225), True, 'import tensorflow as tf\n'), ((31230, 31330), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) = %0.2f"""', 'ss_sentences_per_second'], {}), "('Throughput Average (sentences/sec) = %0.2f',\n ss_sentences_per_second)\n", (31255, 31330), True, 'import tensorflow as tf\n'), ((31444, 31502), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (31469, 31502), True, 'import tensorflow as tf\n'), ((6973, 7017), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (7004, 7017), True, 'import tensorflow as tf\n'), ((7100, 7122), 
'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (7120, 7122), True, 'import tensorflow as tf\n'), ((7226, 7268), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['output_layer'], {'keep_prob': '(0.9)'}), '(output_layer, keep_prob=0.9)\n', (7239, 7268), True, 'import tensorflow as tf\n'), ((7639, 7718), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(one_hot_labels * log_probs)'], {'axis': '(-1)', 'name': '"""cls_per_example_loss"""'}), "(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')\n", (7652, 7718), True, 'import tensorflow as tf\n'), ((8814, 8847), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8845, 8847), True, 'import tensorflow as tf\n'), ((10147, 10192), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['"""frozen_modelTRT.pb"""', '"""wb"""'], {}), "('frozen_modelTRT.pb', 'wb')\n", (10164, 10192), True, 'import tensorflow as tf\n'), ((10741, 10789), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), '(logits, axis=-1, output_type=tf.int32)\n', (10750, 10789), True, 'import tensorflow as tf\n'), ((12306, 12394), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), "(' name = %s, shape = %s' % (name, features[name]\n .shape))\n", (12331, 12394), True, 'import tensorflow as tf\n'), ((12840, 13126), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['trt_graph'], {'input_map': "{'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids':\n segment_ids, 'label_ids': label_ids}", 'return_elements': "['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0',\n 'loss/cls_probabilities:0']", 'name': '""""""'}), "(trt_graph, input_map={'input_ids': input_ids,\n 'input_mask': input_mask, 'segment_ids': segment_ids, 'label_ids':\n label_ids}, return_elements=['loss/cls_loss:0',\n 'loss/cls_per_example_loss:0', 'loss/cls_logits:0',\n 'loss/cls_probabilities:0'], name='')\n", (12859, 13126), True, 'import tensorflow as tf\n'), ((14080, 14147), 'modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (14123, 14147), False, 'import modeling\n'), ((14154, 14216), 'tensorflow.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (14183, 14216), True, 'import tensorflow as tf\n'), ((14256, 14314), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""**** Trainable Variables ****"""'], {}), "('**** Trainable Variables ****')\n", (14281, 14314), True, 'import tensorflow as tf\n'), ((14679, 14853), 'optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'hvd', '(False)', 'FLAGS.amp', 'FLAGS.num_accumulation_steps', 'FLAGS.optimizer_type'], {}), '(total_loss, learning_rate, num_train_steps,\n num_warmup_steps, hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps,\n FLAGS.optimizer_type)\n', (14708, 14853), False, 'import optimization\n'), ((14887, 14960), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op'}), '(mode=mode, loss=total_loss, train_op=train_op)\n', (14913, 14960), True, 'import tensorflow as tf\n'), ((19324, 19334), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (19332, 19334), 
True, 'import horovod.tensorflow as hvd\n'), ((19336, 19346), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (19344, 19346), True, 'import horovod.tensorflow as hvd\n'), ((19430, 19440), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (19438, 19440), True, 'import horovod.tensorflow as hvd\n'), ((19465, 19475), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (19473, 19475), True, 'import horovod.tensorflow as hvd\n'), ((19561, 19577), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (19575, 19577), True, 'import horovod.tensorflow as hvd\n'), ((19598, 19614), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (19612, 19614), True, 'import horovod.tensorflow as hvd\n'), ((19625, 19635), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (19633, 19635), True, 'import horovod.tensorflow as hvd\n'), ((19851, 19881), 'tensorflow.enable_resource_variables', 'tf.enable_resource_variables', ([], {}), '()\n', (19879, 19881), True, 'import tensorflow as tf\n'), ((21069, 21118), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""train.tf_record"""'], {}), "(FLAGS.output_dir, 'train.tf_record')\n", (21081, 21118), False, 'import os\n'), ((23074, 23085), 'time.time', 'time.time', ([], {}), '()\n', (23083, 23085), False, 'import time\n'), ((23427, 23485), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (23452, 23485), True, 'import tensorflow as tf\n'), ((23494, 23630), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Training Time = %0.2f for Sentences = %d"""', 'train_time_elapsed', '(num_train_steps * global_batch_size)'], {}), "('Total Training Time = %0.2f for Sentences = %d',\n train_time_elapsed, num_train_steps * global_batch_size)\n", (23519, 23630), True, 'import tensorflow as tf\n'), ((23659, 23862), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Total Training Time W/O Overhead = %0.2f for Sentences = %d"""', 'train_time_wo_overhead', '((training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)'], {}), "(\n 'Total Training Time W/O Overhead = %0.2f for Sentences = %d',\n train_time_wo_overhead, (training_hooks[-1].count - training_hooks[-1].\n skipped) * global_batch_size)\n", (23684, 23862), True, 'import tensorflow as tf\n'), ((23881, 24001), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) with overhead = %0.2f"""', 'avg_sentences_per_second'], {}), "(\n 'Throughput Average (sentences/sec) with overhead = %0.2f',\n avg_sentences_per_second)\n", (23906, 24001), True, 'import tensorflow as tf\n'), ((24001, 24101), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""Throughput Average (sentences/sec) = %0.2f"""', 'ss_sentences_per_second'], {}), "('Throughput Average (sentences/sec) = %0.2f',\n ss_sentences_per_second)\n", (24026, 24101), True, 'import tensorflow as tf\n'), ((24106, 24164), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""-----------------------------"""'], {}), "('-----------------------------')\n", (24131, 24164), True, 'import tensorflow as tf\n'), ((24954, 24991), 'utils.utils.LogEvalRunHook', 'LogEvalRunHook', (['FLAGS.eval_batch_size'], {}), '(FLAGS.eval_batch_size)\n', (24968, 24991), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((25126, 25137), 'time.time', 'time.time', ([], 
{}), '()\n', (25135, 25137), False, 'import time\n'), ((27372, 27412), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['output_eval_file', '"""w"""'], {}), "(output_eval_file, 'w')\n", (27389, 27412), True, 'import tensorflow as tf\n'), ((27430, 27483), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Eval results *****"""'], {}), "('***** Eval results *****')\n", (27455, 27483), True, 'import tensorflow as tf\n'), ((28663, 28703), 'utils.utils.LogEvalRunHook', 'LogEvalRunHook', (['FLAGS.predict_batch_size'], {}), '(FLAGS.predict_batch_size)\n', (28677, 28703), False, 'from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags\n'), ((28829, 28872), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['output_predict_file', '"""w"""'], {}), "(output_predict_file, 'w')\n", (28846, 28872), True, 'import tensorflow as tf\n'), ((28892, 28948), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['"""***** Predict results *****"""'], {}), "('***** Predict results *****')\n", (28917, 28948), True, 'import tensorflow as tf\n'), ((29302, 29313), 'time.time', 'time.time', ([], {}), '()\n', (29311, 29313), False, 'import time\n'), ((5470, 5484), 'tensorflow.to_int32', 'tf.to_int32', (['t'], {}), '(t)\n', (5481, 5484), True, 'import tensorflow as tf\n'), ((9141, 9233), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), "(' name = %s, shape = %s%s', var.name, var.shape,\n init_string)\n", (9166, 9233), True, 'import tensorflow as tf\n'), ((10846, 10915), 'tensorflow.metrics.false_negatives', 'tf.metrics.false_negatives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (10872, 10915), True, 'import tensorflow as tf\n'), ((10940, 11009), 'tensorflow.metrics.false_positives', 'tf.metrics.false_positives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (10966, 11009), True, 'import tensorflow as tf\n'), ((11034, 11102), 'tensorflow.metrics.true_positives', 'tf.metrics.true_positives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11059, 11102), True, 'import tensorflow as tf\n'), ((11127, 11195), 'tensorflow.metrics.true_negatives', 'tf.metrics.true_negatives', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11152, 11195), True, 'import tensorflow as tf\n'), ((13290, 13352), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions'}), '(mode=mode, predictions=predictions)\n', (13316, 13352), True, 'import tensorflow as tf\n'), ((14478, 14570), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), "(' name = %s, shape = %s%s', var.name, var.shape,\n init_string)\n", (14503, 14570), True, 'import tensorflow as tf\n'), ((15054, 15064), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15062, 15064), True, 'import tensorflow as tf\n'), ((15472, 15564), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=total_loss, eval_metric_ops=\n eval_metric_ops)\n', (15498, 15564), True, 'import tensorflow as tf\n'), ((15618, 15628), 
'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (15626, 15628), True, 'import tensorflow as tf\n'), ((15890, 15954), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'probabilities'}), '(mode=mode, predictions=probabilities)\n', (15916, 15954), True, 'import tensorflow as tf\n'), ((16970, 17046), 'tensorflow.constant', 'tf.constant', (['all_input_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), '(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32)\n', (16981, 17046), True, 'import tensorflow as tf\n'), ((17115, 17192), 'tensorflow.constant', 'tf.constant', (['all_input_mask'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), '(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32)\n', (17126, 17192), True, 'import tensorflow as tf\n'), ((17278, 17356), 'tensorflow.constant', 'tf.constant', (['all_segment_ids'], {'shape': '[num_examples, seq_length]', 'dtype': 'tf.int32'}), '(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32)\n', (17289, 17356), True, 'import tensorflow as tf\n'), ((17440, 17504), 'tensorflow.constant', 'tf.constant', (['all_label_ids'], {'shape': '[num_examples]', 'dtype': 'tf.int32'}), '(all_label_ids, shape=[num_examples], dtype=tf.int32)\n', (17451, 17504), True, 'import tensorflow as tf\n'), ((19673, 19708), 'horovod.tensorflow.BroadcastGlobalVariablesHook', 'hvd.BroadcastGlobalVariablesHook', (['(0)'], {}), '(0)\n', (19705, 19708), True, 'import horovod.tensorflow as hvd\n'), ((21309, 21319), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21317, 21319), True, 'import horovod.tensorflow as hvd\n'), ((21360, 21370), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21368, 21370), True, 'import horovod.tensorflow as hvd\n'), ((21380, 21390), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (21388, 21390), True, 'import horovod.tensorflow as hvd\n'), ((5831, 5841), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (5839, 5841), True, 'import horovod.tensorflow as hvd\n'), ((5843, 5853), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (5851, 5853), True, 'import horovod.tensorflow as hvd\n'), ((11350, 11378), 'tensorflow.identity', 'tf.identity', (['MCC'], {'name': '"""MCC"""'}), "(MCC, name='MCC')\n", (11361, 11378), True, 'import tensorflow as tf\n'), ((11479, 11541), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11498, 11541), True, 'import tensorflow as tf\n'), ((11578, 11618), 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss'}), '(values=per_example_loss)\n', (11593, 11618), True, 'import tensorflow as tf\n'), ((11636, 11728), 'tf_metrics.f1', 'tf_metrics.f1', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'num_classes': '(2)', 'pos_indices': '[1]'}), '(labels=label_ids, predictions=predictions, num_classes=2,\n pos_indices=[1])\n', (11649, 11728), False, 'import tf_metrics\n'), ((11906, 11968), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions'}), '(labels=label_ids, predictions=predictions)\n', (11925, 11968), True, 'import tensorflow as tf\n'), ((12005, 12045), 'tensorflow.metrics.mean', 'tf.metrics.mean', ([], {'values': 'per_example_loss'}), '(values=per_example_loss)\n', (12020, 12045), True, 'import tensorflow as tf\n'), ((13522, 13614), 
'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metric_ops': 'eval_metric_ops'}), '(mode=mode, loss=total_loss, eval_metric_ops=\n eval_metric_ops)\n', (13548, 13614), True, 'import tensorflow as tf\n'), ((14002, 14012), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (14010, 14012), True, 'import horovod.tensorflow as hvd\n'), ((15190, 15229), 'tensorflow.train.experimental.FixedLossScale', 'tf.train.experimental.FixedLossScale', (['(1)'], {}), '(1)\n', (15226, 15229), True, 'import tensorflow as tf\n'), ((17572, 17582), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (17580, 17582), True, 'import horovod.tensorflow as hvd\n'), ((17584, 17594), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (17592, 17594), True, 'import horovod.tensorflow as hvd\n'), ((21426, 21436), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (21434, 21436), True, 'import horovod.tensorflow as hvd\n'), ((21921, 21931), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21929, 21931), True, 'import horovod.tensorflow as hvd\n'), ((15321, 15366), 'optimization.LAMBOptimizer', 'optimization.LAMBOptimizer', ([], {'learning_rate': '(0.0)'}), '(learning_rate=0.0)\n', (15347, 15366), False, 'import optimization\n'), ((15823, 15868), 'optimization.LAMBOptimizer', 'optimization.LAMBOptimizer', ([], {'learning_rate': '(0.0)'}), '(learning_rate=0.0)\n', (15849, 15868), False, 'import optimization\n'), ((21243, 21253), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (21251, 21253), True, 'import horovod.tensorflow as hvd\n'), ((21559, 21569), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (21567, 21569), True, 'import horovod.tensorflow as hvd\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
import json
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from args import parse_args
import paddlenlp as ppnlp
from paddlenlp.data import Pad, Stack, Tuple, Dict
from paddlenlp.transformers import BertForQuestionAnswering, BertTokenizer
from paddlenlp.transformers import ErnieForQuestionAnswering, ErnieTokenizer
from paddlenlp.transformers import ErnieGramForQuestionAnswering, ErnieGramTokenizer
from paddlenlp.transformers import RobertaForQuestionAnswering, RobertaTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics.squad import squad_evaluate, compute_prediction
from paddlenlp.datasets import load_dataset
MODEL_CLASSES = {
"bert": (BertForQuestionAnswering, BertTokenizer),
"ernie": (ErnieForQuestionAnswering, ErnieTokenizer),
"ernie_gram": (ErnieGramForQuestionAnswering, ErnieGramTokenizer),
"roberta": (RobertaForQuestionAnswering, RobertaTokenizer)
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
@paddle.no_grad()
def evaluate(model, data_loader, args):
model.eval()
all_start_logits = []
all_end_logits = []
tic_eval = time.time()
for batch in data_loader:
input_ids, token_type_ids = batch
start_logits_tensor, end_logits_tensor = model(input_ids,
token_type_ids)
for idx in range(start_logits_tensor.shape[0]):
if len(all_start_logits) % 1000 == 0 and len(all_start_logits):
print("Processing example: %d" % len(all_start_logits))
print('time per 1000:', time.time() - tic_eval)
tic_eval = time.time()
all_start_logits.append(start_logits_tensor.numpy()[idx])
all_end_logits.append(end_logits_tensor.numpy()[idx])
all_predictions, _, _ = compute_prediction(
data_loader.dataset.data, data_loader.dataset.new_data,
(all_start_logits, all_end_logits), False, args.n_best_size,
args.max_answer_length)
# Can also write all_nbest_json and scores_diff_json files if needed
with open('prediction.json', "w", encoding='utf-8') as writer:
writer.write(
json.dumps(
all_predictions, ensure_ascii=False, indent=4) + "\n")
squad_evaluate(
examples=data_loader.dataset.data,
preds=all_predictions,
is_whitespace_splited=False)
model.train()
class CrossEntropyLossForSQuAD(paddle.nn.Layer):
def __init__(self):
super(CrossEntropyLossForSQuAD, self).__init__()
def forward(self, y, label):
start_logits, end_logits = y
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
start_loss = paddle.nn.functional.cross_entropy(
input=start_logits, label=start_position)
end_loss = paddle.nn.functional.cross_entropy(
input=end_logits, label=end_position)
loss = (start_loss + end_loss) / 2
return loss
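# Illustrative sketch (not part of the original script): how CrossEntropyLossForSQuAD
# above combines the start- and end-position losses. The shapes, numbers and the helper
# name `_demo_squad_loss` are assumptions for demonstration only; the function is never
# called by this module.
def _demo_squad_loss():
    batch_size, seq_len = 2, 8
    # Random start/end logits, as the question-answering head would produce them.
    start_logits = paddle.randn([batch_size, seq_len])
    end_logits = paddle.randn([batch_size, seq_len])
    # Ground-truth token indices of the answer span for each example.
    start_positions = paddle.to_tensor([1, 3], dtype='int64')
    end_positions = paddle.to_tensor([4, 5], dtype='int64')
    criterion = CrossEntropyLossForSQuAD()
    # loss = (cross_entropy(start) + cross_entropy(end)) / 2
    return criterion((start_logits, end_logits), (start_positions, end_positions))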
def run(args):
paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
rank = paddle.distributed.get_rank()
task_name = args.task_name.lower()
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
set_seed(args)
if rank == 0:
if os.path.exists(args.model_name_or_path):
print("init checkpoint from %s" % args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
# NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses ArrowTable as the basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# Let's label those examples!
for i, tokenized_example in enumerate(tokenized_examples):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_example["input_ids"]
cls_index = input_ids.index(tokenizer.cls_token_id)
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offsets = tokenized_example['offset_mapping']
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_example['token_type_ids']
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
answers = examples[sample_index]['answers']
answer_starts = examples[sample_index]['answer_starts']
# Start/end character index of the answer in the text.
start_char = answer_starts[0]
end_char = start_char + len(answers[0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Minus one more to reach actual text
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and
offsets[token_end_index][1] >= end_char):
tokenized_examples[i]["start_positions"] = cls_index
tokenized_examples[i]["end_positions"] = cls_index
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[
token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples[i]["start_positions"] = token_start_index - 1
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples[i]["end_positions"] = token_end_index + 1
return tokenized_examples
if args.do_train:
if args.train_file:
train_ds = load_dataset(task_name, data_files=args.train_file)
else:
train_ds = load_dataset(task_name, splits='train')
train_ds.map(prepare_train_features, batched=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=args.batch_size, shuffle=True)
train_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
"start_positions": Stack(dtype="int64"),
"end_positions": Stack(dtype="int64")
}): fn(samples)
train_data_loader = DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=train_batchify_fn,
return_list=True)
num_training_steps = args.max_steps if args.max_steps > 0 else len(
train_data_loader) * args.num_train_epochs
num_train_epochs = math.ceil(num_training_steps /
len(train_data_loader))
lr_scheduler = LinearDecayWithWarmup(
args.learning_rate, num_training_steps, args.warmup_proportion)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
criterion = CrossEntropyLossForSQuAD()
global_step = 0
tic_train = time.time()
for epoch in range(num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
input_ids, token_type_ids, start_positions, end_positions = batch
logits = model(
input_ids=input_ids, token_type_ids=token_type_ids)
loss = criterion(logits, (start_positions, end_positions))
if global_step % args.logging_steps == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch + 1, step + 1, loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if global_step % args.save_steps == 0 or global_step == num_training_steps:
if rank == 0:
output_dir = os.path.join(args.output_dir,
"model_%d" % global_step)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# need better way to get inner model of DataParallel
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print('Saving checkpoint to:', output_dir)
if global_step == num_training_steps:
break
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
        # NOTE: Almost the same functionality as HuggingFace's prepare_validation_features function. The main difference is
        # that HuggingFace uses ArrowTable as the basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# For validation, there is no need to compute start and end positions
for i, tokenized_example in enumerate(tokenized_examples):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_example['token_type_ids']
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
tokenized_examples[i]["example_id"] = examples[sample_index]['id']
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples[i]["offset_mapping"] = [
(o if sequence_ids[k] == 1 else None)
for k, o in enumerate(tokenized_example["offset_mapping"])
]
return tokenized_examples
if args.do_predict and rank == 0:
if args.predict_file:
dev_ds = load_dataset(task_name, data_files=args.predict_file)
else:
dev_ds = load_dataset(task_name, splits='dev')
dev_ds.map(prepare_validation_features, batched=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=args.batch_size, shuffle=False)
dev_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id)
}): fn(samples)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
return_list=True)
evaluate(model, dev_data_loader, args)
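# Illustrative sketch (not part of the original script): the offset-based span labelling
# used inside prepare_train_features above, reduced to plain Python lists so it can be
# checked by hand. The helper name `_span_labelling_demo`, the toy offsets and the answer
# span are assumptions for demonstration only.
def _span_labelling_demo():
    # Token -> character offsets for a toy context; the answer occupies characters [16, 19).
    offsets = [(0, 3), (4, 9), (10, 15), (16, 19), (20, 26)]
    start_char, end_char = 16, 19
    token_start_index, token_end_index = 0, len(offsets) - 1
    # Walk the boundaries inwards until they tightly cover the answer characters,
    # mirroring the while-loops in prepare_train_features.
    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
        token_start_index += 1
    start_position = token_start_index - 1
    while offsets[token_end_index][1] >= end_char:
        token_end_index -= 1
    end_position = token_end_index + 1
    return start_position, end_position  # -> (3, 3): the single answer token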
if __name__ == "__main__":
args = parse_args()
run(args)
|
[
"paddlenlp.data.Pad",
"paddlenlp.metrics.squad.compute_prediction",
"paddle.nn.functional.cross_entropy",
"paddle.seed",
"os.path.exists",
"paddle.no_grad",
"json.dumps",
"paddle.distributed.init_parallel_env",
"numpy.random.seed",
"paddle.io.DataLoader",
"paddle.set_device",
"paddle.io.BatchSampler",
"paddle.io.DistributedBatchSampler",
"paddle.distributed.get_world_size",
"paddlenlp.metrics.squad.squad_evaluate",
"args.parse_args",
"time.time",
"paddlenlp.transformers.LinearDecayWithWarmup",
"paddle.distributed.get_rank",
"os.makedirs",
"os.path.join",
"paddle.DataParallel",
"random.seed",
"paddlenlp.datasets.load_dataset",
"paddle.unsqueeze",
"paddlenlp.data.Stack"
] |
[((1787, 1803), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (1801, 1803), False, 'import paddle\n'), ((1704, 1726), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1715, 1726), False, 'import random\n'), ((1731, 1756), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1745, 1756), True, 'import numpy as np\n'), ((1761, 1783), 'paddle.seed', 'paddle.seed', (['args.seed'], {}), '(args.seed)\n', (1772, 1783), False, 'import paddle\n'), ((1927, 1938), 'time.time', 'time.time', ([], {}), '()\n', (1936, 1938), False, 'import time\n'), ((2623, 2791), 'paddlenlp.metrics.squad.compute_prediction', 'compute_prediction', (['data_loader.dataset.data', 'data_loader.dataset.new_data', '(all_start_logits, all_end_logits)', '(False)', 'args.n_best_size', 'args.max_answer_length'], {}), '(data_loader.dataset.data, data_loader.dataset.new_data,\n (all_start_logits, all_end_logits), False, args.n_best_size, args.\n max_answer_length)\n', (2641, 2791), False, 'from paddlenlp.metrics.squad import squad_evaluate, compute_prediction\n'), ((3071, 3176), 'paddlenlp.metrics.squad.squad_evaluate', 'squad_evaluate', ([], {'examples': 'data_loader.dataset.data', 'preds': 'all_predictions', 'is_whitespace_splited': '(False)'}), '(examples=data_loader.dataset.data, preds=all_predictions,\n is_whitespace_splited=False)\n', (3085, 3176), False, 'from paddlenlp.metrics.squad import squad_evaluate, compute_prediction\n'), ((3895, 3925), 'paddle.set_device', 'paddle.set_device', (['args.device'], {}), '(args.device)\n', (3912, 3925), False, 'import paddle\n'), ((4032, 4061), 'paddle.distributed.get_rank', 'paddle.distributed.get_rank', ([], {}), '()\n', (4059, 4061), False, 'import paddle\n'), ((14771, 14783), 'args.parse_args', 'parse_args', ([], {}), '()\n', (14781, 14783), False, 'from args import parse_args\n'), ((3490, 3531), 'paddle.unsqueeze', 'paddle.unsqueeze', (['start_position'], {'axis': '(-1)'}), '(start_position, axis=-1)\n', (3506, 3531), False, 'import paddle\n'), ((3555, 3594), 'paddle.unsqueeze', 'paddle.unsqueeze', (['end_position'], {'axis': '(-1)'}), '(end_position, axis=-1)\n', (3571, 3594), False, 'import paddle\n'), ((3616, 3692), 'paddle.nn.functional.cross_entropy', 'paddle.nn.functional.cross_entropy', ([], {'input': 'start_logits', 'label': 'start_position'}), '(input=start_logits, label=start_position)\n', (3650, 3692), False, 'import paddle\n'), ((3725, 3797), 'paddle.nn.functional.cross_entropy', 'paddle.nn.functional.cross_entropy', ([], {'input': 'end_logits', 'label': 'end_position'}), '(input=end_logits, label=end_position)\n', (3759, 3797), False, 'import paddle\n'), ((3933, 3968), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (3966, 3968), False, 'import paddle\n'), ((3982, 4020), 'paddle.distributed.init_parallel_env', 'paddle.distributed.init_parallel_env', ([], {}), '()\n', (4018, 4020), False, 'import paddle\n'), ((4335, 4374), 'os.path.exists', 'os.path.exists', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4349, 4374), False, 'import os\n'), ((4521, 4556), 'paddle.distributed.get_world_size', 'paddle.distributed.get_world_size', ([], {}), '()\n', (4554, 4556), False, 'import paddle\n'), ((4578, 4604), 'paddle.DataParallel', 'paddle.DataParallel', (['model'], {}), '(model)\n', (4597, 4604), False, 'import paddle\n'), ((8555, 8644), 'paddle.io.DistributedBatchSampler', 'paddle.io.DistributedBatchSampler', (['train_ds'], {'batch_size': 'args.batch_size', 
'shuffle': '(True)'}), '(train_ds, batch_size=args.batch_size,\n shuffle=True)\n', (8588, 8644), False, 'import paddle\n'), ((9014, 9130), 'paddle.io.DataLoader', 'DataLoader', ([], {'dataset': 'train_ds', 'batch_sampler': 'train_batch_sampler', 'collate_fn': 'train_batchify_fn', 'return_list': '(True)'}), '(dataset=train_ds, batch_sampler=train_batch_sampler, collate_fn=\n train_batchify_fn, return_list=True)\n', (9024, 9130), False, 'from paddle.io import DataLoader\n'), ((9450, 9540), 'paddlenlp.transformers.LinearDecayWithWarmup', 'LinearDecayWithWarmup', (['args.learning_rate', 'num_training_steps', 'args.warmup_proportion'], {}), '(args.learning_rate, num_training_steps, args.\n warmup_proportion)\n', (9471, 9540), False, 'from paddlenlp.transformers import LinearDecayWithWarmup\n'), ((10190, 10201), 'time.time', 'time.time', ([], {}), '()\n', (10199, 10201), False, 'import time\n'), ((14188, 14261), 'paddle.io.BatchSampler', 'paddle.io.BatchSampler', (['dev_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dev_ds, batch_size=args.batch_size, shuffle=False)\n', (14210, 14261), False, 'import paddle\n'), ((14528, 14638), 'paddle.io.DataLoader', 'DataLoader', ([], {'dataset': 'dev_ds', 'batch_sampler': 'dev_batch_sampler', 'collate_fn': 'dev_batchify_fn', 'return_list': '(True)'}), '(dataset=dev_ds, batch_sampler=dev_batch_sampler, collate_fn=\n dev_batchify_fn, return_list=True)\n', (14538, 14638), False, 'from paddle.io import DataLoader\n'), ((8337, 8388), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'data_files': 'args.train_file'}), '(task_name, data_files=args.train_file)\n', (8349, 8388), False, 'from paddlenlp.datasets import load_dataset\n'), ((8426, 8465), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'splits': '"""train"""'}), "(task_name, splits='train')\n", (8438, 8465), False, 'from paddlenlp.datasets import load_dataset\n'), ((13970, 14023), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'data_files': 'args.predict_file'}), '(task_name, data_files=args.predict_file)\n', (13982, 14023), False, 'from paddlenlp.datasets import load_dataset\n'), ((14059, 14096), 'paddlenlp.datasets.load_dataset', 'load_dataset', (['task_name'], {'splits': '"""dev"""'}), "(task_name, splits='dev')\n", (14071, 14096), False, 'from paddlenlp.datasets import load_dataset\n'), ((2445, 2456), 'time.time', 'time.time', ([], {}), '()\n', (2454, 2456), False, 'import time\n'), ((2983, 3040), 'json.dumps', 'json.dumps', (['all_predictions'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(all_predictions, ensure_ascii=False, indent=4)\n', (2993, 3040), False, 'import json\n'), ((10957, 10968), 'time.time', 'time.time', ([], {}), '()\n', (10966, 10968), False, 'import time\n'), ((2394, 2405), 'time.time', 'time.time', ([], {}), '()\n', (2403, 2405), False, 'import time\n'), ((8733, 8776), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (8736, 8776), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((8808, 8856), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id'}), '(axis=0, pad_val=tokenizer.pad_token_type_id)\n', (8811, 8856), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((8889, 8909), 'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (8894, 8909), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((8940, 8960), 
'paddlenlp.data.Stack', 'Stack', ([], {'dtype': '"""int64"""'}), "(dtype='int64')\n", (8945, 8960), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((11273, 11328), 'os.path.join', 'os.path.join', (['args.output_dir', "('model_%d' % global_step)"], {}), "(args.output_dir, 'model_%d' % global_step)\n", (11285, 11328), False, 'import os\n'), ((14353, 14396), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_id'}), '(axis=0, pad_val=tokenizer.pad_token_id)\n', (14356, 14396), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((14428, 14476), 'paddlenlp.data.Pad', 'Pad', ([], {'axis': '(0)', 'pad_val': 'tokenizer.pad_token_type_id'}), '(axis=0, pad_val=tokenizer.pad_token_type_id)\n', (14431, 14476), False, 'from paddlenlp.data import Pad, Stack, Tuple, Dict\n'), ((11410, 11436), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (11424, 11436), False, 'import os\n'), ((11466, 11489), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (11477, 11489), False, 'import os\n'), ((10898, 10909), 'time.time', 'time.time', ([], {}), '()\n', (10907, 10909), False, 'import time\n')]
|
# Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Weight matrix W initialised uniformly in the range 0 - 0.01 (similar to the usual *0.01 scaling of initial weights)
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Feature vector for next state representation
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Sum-of-squares loss between the target and predicted Q values
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99
e = 0.1
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
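# Illustrative sketch (added for clarity, not part of the original script): the update in
# the training loop above builds the one-step Q-learning target
#   targetQ[0, a] = r + y * max_a' Q(s1, a')
# The helper below reproduces that arithmetic with plain numpy; the name `q_target_demo`
# and the example numbers are assumptions for demonstration only.
def q_target_demo():
    allQ = np.array([[0.2, 0.5, 0.1, 0.3]])  # Q(s, a) for the 4 FrozenLake actions
    Q1 = np.array([[0.4, 0.1, 0.6, 0.0]])    # Q(s1, a') after taking the chosen action
    a, r, y = 1, 1.0, 0.99                   # chosen action, reward, discount factor
    targetQ = allQ.copy()
    targetQ[0, a] = r + y * np.max(Q1)       # 1.0 + 0.99 * 0.6 = 1.594
    return targetQ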
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
|
[
"numpy.identity",
"tensorflow.reset_default_graph",
"numpy.random.rand",
"tensorflow.placeholder",
"matplotlib.pyplot.plot",
"tensorflow.Session",
"numpy.max",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"matplotlib.pyplot.figure",
"tensorflow.random_uniform",
"tensorflow.matmul",
"tensorflow.square",
"gym.make",
"matplotlib.pyplot.show"
] |
[((251, 276), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {}), "('FrozenLake-v0')\n", (259, 276), False, 'import gym\n'), ((310, 334), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (332, 334), True, 'import tensorflow as tf\n'), ((395, 463), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, env.observation_space.n]', 'dtype': 'tf.float32'}), '(shape=[1, env.observation_space.n], dtype=tf.float32)\n', (409, 463), True, 'import tensorflow as tf\n'), ((895, 915), 'tensorflow.matmul', 'tf.matmul', (['input1', 'W'], {}), '(input1, W)\n', (904, 915), True, 'import tensorflow as tf\n'), ((954, 977), 'tensorflow.argmax', 'tf.argmax', (['Qout'], {'axis': '(1)'}), '(Qout, axis=1)\n', (963, 977), True, 'import tensorflow as tf\n'), ((1034, 1097), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[1, env.action_space.n]', 'dtype': 'tf.float32'}), '(shape=[1, env.action_space.n], dtype=tf.float32)\n', (1048, 1097), True, 'import tensorflow as tf\n'), ((1170, 1222), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (1203, 1222), True, 'import tensorflow as tf\n'), ((1288, 1321), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1319, 1321), True, 'import tensorflow as tf\n'), ((3132, 3144), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3142, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3189), 'matplotlib.pyplot.plot', 'plt.plot', (['rList'], {'label': '"""Return - Q Learning"""'}), "(rList, label='Return - Q Learning')\n", (3153, 3189), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3198, 3200), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3212, 3214), True, 'import matplotlib.pyplot as plt\n'), ((3215, 3258), 'matplotlib.pyplot.plot', 'plt.plot', (['jList'], {'label': '"""Steps - Q Learning"""'}), "(jList, label='Steps - Q Learning')\n", (3223, 3258), True, 'import matplotlib.pyplot as plt\n'), ((3259, 3269), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3267, 3269), True, 'import matplotlib.pyplot as plt\n'), ((703, 776), 'tensorflow.random_uniform', 'tf.random_uniform', (['[env.observation_space.n, env.action_space.n]', '(0)', '(0.01)'], {}), '([env.observation_space.n, env.action_space.n], 0, 0.01)\n', (720, 776), True, 'import tensorflow as tf\n'), ((1135, 1158), 'tensorflow.square', 'tf.square', (['(Qout - nextQ)'], {}), '(Qout - nextQ)\n', (1144, 1158), True, 'import tensorflow as tf\n'), ((1469, 1481), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1479, 1481), True, 'import tensorflow as tf\n'), ((2668, 2678), 'numpy.max', 'np.max', (['Q1'], {}), '(Q1)\n', (2674, 2678), True, 'import numpy as np\n'), ((2347, 2364), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2361, 2364), True, 'import numpy as np\n'), ((2285, 2321), 'numpy.identity', 'np.identity', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (2296, 2321), True, 'import numpy as np\n'), ((2600, 2636), 'numpy.identity', 'np.identity', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (2611, 2636), True, 'import numpy as np\n'), ((2886, 2922), 'numpy.identity', 'np.identity', (['env.observation_space.n'], {}), '(env.observation_space.n)\n', (2897, 2922), True, 'import numpy as np\n')]
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.autograd import Function, Variable
from torch.nn import Module
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
def _assert_no_grad(tensor):
assert not tensor.requires_grad, (
"gradients only computed for log_probs - please " "mark other tensors as not requiring gradients"
)
def forward_pass(log_probs, labels, blank):
"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""
T, U, _ = log_probs.shape
alphas = np.zeros((T, U), dtype='f')
for t in range(1, T):
alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank]
for u in range(1, U):
alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]]
for t in range(1, T):
for u in range(1, U):
no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank]
emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]]
alphas[t, u] = np.logaddexp(emit, no_emit)
loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]
return alphas, loglike
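# Illustrative sketch (not from the original file): a minimal, hand-checkable call of
# forward_pass on a tiny uniform lattice. The sizes (T=2, U=2, V=2) and the helper name
# `_toy_forward_example` are assumptions for demonstration; the function is never called
# by this module.
def _toy_forward_example():
    T, U, V = 2, 2, 2
    # Uniform log-probabilities over the V + 1 = 3 output symbols (blank = index 0).
    log_probs = np.full((T, U, V + 1), np.log(1.0 / (V + 1)), dtype='f')
    labels = np.array([1], dtype=np.int32)  # the U - 1 non-blank target labels
    alphas, loglike = forward_pass(log_probs, labels, blank=0)
    # alphas has shape (T, U); loglike is the total forward log-likelihood.
    return alphas, loglike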
def backward_pass(log_probs, labels, blank):
"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""
T, U, _ = log_probs.shape
betas = np.zeros((T, U), dtype='f')
betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
for t in reversed(range(T - 1)):
betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank]
for u in reversed(range(U - 1)):
betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]]
for t in reversed(range(T - 1)):
for u in reversed(range(U - 1)):
no_emit = betas[t + 1, u] + log_probs[t, u, blank]
emit = betas[t, u + 1] + log_probs[t, u, labels[u]]
betas[t, u] = np.logaddexp(emit, no_emit)
return betas, betas[0, 0]
def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda):
"""
    Computes the gradients of the log_probs with respect to the log probability of this step occurring.
    Args:
log_probs: Tensor of shape [T, U, V+1]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Tensor of shape [T, U] which represents the backward variable.
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
Gradients of shape [T, U, V+1] with respect to the forward log probability
"""
T, U, _ = log_probs.shape
grads = np.full(log_probs.shape, -float("inf"))
log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1]
grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :]
# // grad to label transition
for u, l in enumerate(labels):
grads[:, u, l] = alphas[:, u] + betas[:, u + 1]
grads = -np.exp(grads + log_probs - log_like)
if fastemit_lambda > 0.0:
for u, l in enumerate(labels):
grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l]
return grads
def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda):
"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""
# General calculation of the fastemit regularization alignments
T, U, _ = log_probs.shape
# alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank])
return -reg
def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0):
"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
3D array: Gradients with respect to the
            unnormalized input activations
        2D array: Alphas matrix (TxU)
        2D array: Betas matrix (TxU)
"""
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda)
return -ll_forward, grads, alphas, betas
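# Illustrative sketch (not from the original file): calling transduce on one random
# example. A log-softmax over the last axis turns random activations into a valid
# lattice; the helper name `_toy_transduce_example` is an assumption for demonstration
# and is never called by this module.
def _toy_transduce_example():
    T, U, V = 4, 3, 5
    acts = np.random.randn(T, U, V + 1).astype('f')
    log_probs = acts - np.log(np.sum(np.exp(acts), axis=-1, keepdims=True))
    labels = np.array([1, 2], dtype=np.int32)  # U - 1 target labels
    neg_ll, grads, alphas, betas = transduce(log_probs, labels, blank=0)
    # grads has the same shape as log_probs; alphas and betas are both (T, U).
    return neg_ll, grads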
def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0):
"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""
grads = np.zeros_like(log_probs)
costs = []
for b in range(log_probs.shape[0]):
t = int(flen[b])
u = int(glen[b]) + 1
ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda)
grads[b, :t, :u, :] = g
reg = fastemit_regularization(
log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda
)
ll += reg
costs.append(ll)
return costs, grads
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda):
costs, grads = transduce_batch(
acts.detach().cpu().numpy(),
labels.cpu().numpy(),
act_lens.cpu().numpy(),
label_lens.cpu().numpy(),
blank,
fastemit_lambda,
)
costs = torch.FloatTensor([sum(costs)])
grads = torch.Tensor(grads).to(acts)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
return ctx.grads, None, None, None, None, None
class RNNTLoss(Module):
"""
Parameters:
        `blank` (int): default 0 - label index of the blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""
def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0):
super(RNNTLoss, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.rnnt = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
assert len(labels.size()) == 2
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
certify_inputs(acts, labels, act_lens, label_lens)
acts = torch.nn.functional.log_softmax(acts, -1)
return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda)
if __name__ == '__main__':
loss = RNNTLoss(fastemit_lambda=0.01)
torch.manual_seed(0)
acts = torch.randn(1, 2, 5, 3)
labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32)
act_lens = torch.tensor([2], dtype=torch.int32)
label_lens = torch.tensor([len(labels[0])], dtype=torch.int32)
loss_val = loss(acts, labels, act_lens, label_lens)
|
[
"torch.manual_seed",
"torch.max",
"torch.Tensor",
"numpy.logaddexp",
"numpy.exp",
"torch.tensor",
"numpy.zeros",
"torch.nn.functional.log_softmax",
"numpy.zeros_like",
"torch.randn"
] |
[((2794, 2812), 'torch.max', 'torch.max', (['lengths'], {}), '(lengths)\n', (2803, 2812), False, 'import torch\n'), ((2825, 2849), 'torch.max', 'torch.max', (['label_lengths'], {}), '(label_lengths)\n', (2834, 2849), False, 'import torch\n'), ((3774, 3801), 'numpy.zeros', 'np.zeros', (['(T, U)'], {'dtype': '"""f"""'}), "((T, U), dtype='f')\n", (3782, 3801), True, 'import numpy as np\n'), ((4795, 4822), 'numpy.zeros', 'np.zeros', (['(T, U)'], {'dtype': '"""f"""'}), "((T, U), dtype='f')\n", (4803, 4822), True, 'import numpy as np\n'), ((9803, 9827), 'numpy.zeros_like', 'np.zeros_like', (['log_probs'], {}), '(log_probs)\n', (9816, 9827), True, 'import numpy as np\n'), ((11824, 11844), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (11841, 11844), False, 'import torch\n'), ((11857, 11880), 'torch.randn', 'torch.randn', (['(1)', '(2)', '(5)', '(3)'], {}), '(1, 2, 5, 3)\n', (11868, 11880), False, 'import torch\n'), ((11894, 11941), 'torch.tensor', 'torch.tensor', (['[[0, 2, 1, 2]]'], {'dtype': 'torch.int32'}), '([[0, 2, 1, 2]], dtype=torch.int32)\n', (11906, 11941), False, 'import torch\n'), ((11957, 11993), 'torch.tensor', 'torch.tensor', (['[2]'], {'dtype': 'torch.int32'}), '([2], dtype=torch.int32)\n', (11969, 11993), False, 'import torch\n'), ((6460, 6496), 'numpy.exp', 'np.exp', (['(grads + log_probs - log_like)'], {}), '(grads + log_probs - log_like)\n', (6466, 6496), True, 'import numpy as np\n'), ((11611, 11652), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['acts', '(-1)'], {}), '(acts, -1)\n', (11642, 11652), False, 'import torch\n'), ((4226, 4253), 'numpy.logaddexp', 'np.logaddexp', (['emit', 'no_emit'], {}), '(emit, no_emit)\n', (4238, 4253), True, 'import numpy as np\n'), ((5342, 5369), 'numpy.logaddexp', 'np.logaddexp', (['emit', 'no_emit'], {}), '(emit, no_emit)\n', (5354, 5369), True, 'import numpy as np\n'), ((10729, 10748), 'torch.Tensor', 'torch.Tensor', (['grads'], {}), '(grads)\n', (10741, 10748), False, 'import torch\n')]
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user1_name, user2_name, tweet_text):
"""
Determine and return which user is more likely to say a given Tweet.
Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
Returns 1 corresponding to 1st user passed in, or 0 for second.
"""
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_vect = np.array([tweet.vect for tweet in user1.tweets])
user2_vect = np.array([tweet.vect for tweet in user2.tweets])
vects = np.vstack([user1_vect, user2_vect])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(vects, labels)
# We've done the model fitting, now to predict...
hypo_tweet_vect = vectorize_tweet(tweet_text)
return log_reg.predict(np.array(hypo_tweet_vect).reshape(1,-1))
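# Illustrative sketch (added for clarity, not part of the original module): the same
# stack-vectors / fit / predict pattern as predict_user above, but on made-up
# 3-dimensional "tweet vectors" so it runs without the User model or vectorize_tweet.
# The helper name `_toy_predict_example` and all numbers are assumptions.
def _toy_predict_example():
    user1_vects = np.array([[1.0, 0.0, 0.2], [0.9, 0.1, 0.3]])  # labelled 1
    user2_vects = np.array([[0.0, 1.0, 0.8], [0.1, 0.9, 0.7]])  # labelled 0
    vects = np.vstack([user1_vects, user2_vects])
    labels = np.concatenate([np.ones(len(user1_vects)), np.zeros(len(user2_vects))])
    log_reg = LogisticRegression().fit(vects, labels)
    hypo_vect = np.array([0.8, 0.2, 0.25])
    return log_reg.predict(hypo_vect.reshape(1, -1))  # -> array([1.]) for this toy data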
|
[
"numpy.array",
"numpy.vstack",
"sklearn.linear_model.LogisticRegression"
] |
[((561, 609), 'numpy.array', 'np.array', (['[tweet.vect for tweet in user1.tweets]'], {}), '([tweet.vect for tweet in user1.tweets])\n', (569, 609), True, 'import numpy as np\n'), ((627, 675), 'numpy.array', 'np.array', (['[tweet.vect for tweet in user2.tweets]'], {}), '([tweet.vect for tweet in user2.tweets])\n', (635, 675), True, 'import numpy as np\n'), ((689, 724), 'numpy.vstack', 'np.vstack', (['[user1_vect, user2_vect]'], {}), '([user1_vect, user2_vect])\n', (698, 724), True, 'import numpy as np\n'), ((856, 876), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (874, 876), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1027, 1052), 'numpy.array', 'np.array', (['hypo_tweet_vect'], {}), '(hypo_tweet_vect)\n', (1035, 1052), True, 'import numpy as np\n')]
|
# sys
import os
import sys
import numpy as np
import random
import pickle
import json
# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms
# operation
from . import tools
class Feeder_UCF(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition in kinetics-skeleton dataset
Arguments:
data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
label_path: the path to label
random_choose: If true, randomly choose a portion of the input sequence
random_shift: If true, randomly pad zeros at the begining or end of sequence
random_move: If true, perform randomly but continuously changed transformation to input sequence
window_size: The length of the output sequence
        pose_matching: If true, match the pose between two frames
num_person_in: The number of people the feeder can observe in the input sequence
num_person_out: The number of people the feeder in the output sequence
debug: If true, only use the first 100 samples
"""
def __init__(self,
data_path,
label_path,
ignore_empty_sample=True,
random_choose=False,
random_shift=False,
random_move=False,
window_size=-1,
pose_matching=False,
num_person_in=5,
num_person_out=2,
debug=False):
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.num_person_in = num_person_in
self.num_person_out = num_person_out
self.pose_matching = pose_matching
self.ignore_empty_sample = ignore_empty_sample
self.load_data()
def load_data(self):
# load file list
self.sample_name = os.listdir(self.data_path)
if self.debug:
self.sample_name = self.sample_name[0:2]
# load label
label_path = self.label_path
with open(label_path) as f:
label_info = json.load(f)
sample_id = [name.split('.')[0] for name in self.sample_name]
self.label = np.array(
[label_info[id]['label_index'] for id in sample_id])
has_skeleton = np.array(
[label_info[id]['has_skeleton'] for id in sample_id])
        # ignore the samples which do not have a skeleton sequence
if self.ignore_empty_sample:
self.sample_name = [
s for h, s in zip(has_skeleton, self.sample_name) if h
]
self.label = self.label[has_skeleton]
# output data shape (N, C, T, V, M)
self.N = len(self.sample_name) #sample
self.C = 3 #channel
self.T = 90000 #frame
self.V = 18 #joint
self.M = self.num_person_out #person
def __len__(self):
return len(self.sample_name)
def __iter__(self):
return self
def __getitem__(self, index):
# output shape (C, T, V, M)
# get data
sample_name = self.sample_name[index]
sample_path = os.path.join(self.data_path, sample_name)
with open(sample_path, 'r') as f:
video_info = json.load(f)
# fill data_numpy
data_numpy = np.zeros((self.C, self.T, self.V, self.num_person_in))
count = 0
for frame_info in video_info['data']:
frame_index = frame_info['frame_index']
for m, skeleton_info in enumerate(frame_info["skeleton"]):
if m >= self.num_person_in:
break
pose = skeleton_info['pose']
score = skeleton_info['score']
frame_index = int(frame_index)
# print(frame_index)
data_numpy[0, frame_index, :, m] = pose[0::2]
data_numpy[1, frame_index, :, m] = pose[1::2]
data_numpy[2, frame_index, :, m] = score
# count += 1
# print(" ",count, " ")
# centralization
data_numpy[0:2] = data_numpy[0:2] - 0.5
data_numpy[0][data_numpy[2] == 0] = 0
data_numpy[1][data_numpy[2] == 0] = 0
# get & check label index
label = video_info['label_index']
assert (self.label[index] == label)
# data augmentation
if self.random_shift:
data_numpy = tools.random_shift(data_numpy)
if self.random_choose:
data_numpy = tools.random_choose(data_numpy, self.window_size)
elif self.window_size > 0:
data_numpy = tools.auto_pading(data_numpy, self.window_size)
if self.random_move:
data_numpy = tools.random_move(data_numpy)
# sort by score
sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1)
for t, s in enumerate(sort_index):
data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2,
0))
data_numpy = data_numpy[:, :, :, 0:self.num_person_out]
# match poses between 2 frames
if self.pose_matching:
data_numpy = tools.openpose_match(data_numpy)
return data_numpy, label
def top_k(self, score, top_k):
assert (all(self.label >= 0))
rank = score.argsort()
hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
return sum(hit_top_k) * 1.0 / len(hit_top_k)
def top_k_by_category(self, score, top_k):
assert (all(self.label >= 0))
return tools.top_k_by_category(self.label, score, top_k)
def calculate_recall_precision(self, score):
assert (all(self.label >= 0))
return tools.calculate_recall_precision(self.label, score)
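# Illustrative sketch (not part of the original module): wrapping Feeder_UCF in a torch
# DataLoader. The paths are caller-supplied and the function name `make_ucf_loader` is an
# assumption for demonstration only; each batch then has shape (batch, C, T, V, M) plus
# the matching labels, as described in the class docstring.
def make_ucf_loader(data_path, label_path, batch_size=4):
    dataset = Feeder_UCF(data_path=data_path, label_path=label_path)
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, num_workers=0)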
|
[
"os.listdir",
"os.path.join",
"numpy.array",
"numpy.zeros",
"json.load"
] |
[((2055, 2081), 'os.listdir', 'os.listdir', (['self.data_path'], {}), '(self.data_path)\n', (2065, 2081), False, 'import os\n'), ((2393, 2454), 'numpy.array', 'np.array', (["[label_info[id]['label_index'] for id in sample_id]"], {}), "([label_info[id]['label_index'] for id in sample_id])\n", (2401, 2454), True, 'import numpy as np\n'), ((2491, 2553), 'numpy.array', 'np.array', (["[label_info[id]['has_skeleton'] for id in sample_id]"], {}), "([label_info[id]['has_skeleton'] for id in sample_id])\n", (2499, 2553), True, 'import numpy as np\n'), ((3330, 3371), 'os.path.join', 'os.path.join', (['self.data_path', 'sample_name'], {}), '(self.data_path, sample_name)\n', (3342, 3371), False, 'import os\n'), ((3500, 3554), 'numpy.zeros', 'np.zeros', (['(self.C, self.T, self.V, self.num_person_in)'], {}), '((self.C, self.T, self.V, self.num_person_in))\n', (3508, 3554), True, 'import numpy as np\n'), ((2279, 2291), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2288, 2291), False, 'import json\n'), ((3439, 3451), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3448, 3451), False, 'import json\n')]
|
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum
def random_spherecial_vector():
# This method of calculating isotropic vectors is taken from GNU Scientific Library
LOOP = True
while LOOP:
x = -1. + 2. * np.random.uniform()
y = -1. + 2. * np.random.uniform()
s = x**2 + y**2
if s <= 1.0:
LOOP = False
z = -1. + 2. * s
a = 2 * np.sqrt(1 - s)
x = a * x
y = a * y
return np.array([x,y,z])
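# Illustrative sketch (added for clarity, not part of the original module): the rejection
# sampling above always yields unit-length vectors, which the helper below checks
# numerically. The name `_check_isotropic_sampling` is an assumption for demonstration
# and the function is never called by this module.
def _check_isotropic_sampling(n=1000):
    lengths = [np.linalg.norm(random_spherecial_vector()) for _ in range(n)]
    # Every sampled direction should have length ~1 up to floating-point error.
    return np.allclose(lengths, 1.0)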
class SimpleSource(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False):
super(SimpleSource, self).__init__()
self.position = position
self.direction = direction
self.wavelength = wavelength
self.use_random_polarisation = use_random_polarisation
self.throw = 0
self.source_id = "SimpleSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
# If use_polarisation is set generate a random polarisation vector of the photon
if self.use_random_polarisation:
            # Randomise the polarisation direction in the xy-plane, then transform from +z to the direction of the photon
vec = random_spherecial_vector()
vec[2] = 0.
vec = norm(vec)
R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1])
photon.polarisation = transform_direction(vec, R)
else:
photon.polarisation = None
photon.id = self.throw
self.throw = self.throw + 1
return photon
class Laser(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None):
super(Laser, self).__init__()
self.position = np.array(position)
self.direction = np.array(direction)
self.wavelength = wavelength
        assert polarisation is not None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation)
self.throw = 0
self.source_id = "LaserSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
photon.polarisation = self.polarisation
photon.id = self.throw
self.throw = self.throw + 1
return photon
class PlanarSource(object):
"""A box that emits photons from the top surface (normal), sampled from the spectrum."""
def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05):
super(PlanarSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.plane = FinitePlane(length=length, width=width)
self.length = length
self.width = width
# direction is the direction that photons are fired out of the plane in the GLOBAL FRAME.
        # i.e. this is passed directly to the photon to set its direction
self.direction = direction
self.throw = 0
self.source_id = "PlanarSource_" + str(id(self))
def translate(self, translation):
self.plane.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.plane.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
        # Create a point on the surface of the finite plane in its local frame
x = np.random.uniform(0., self.length)
y = np.random.uniform(0., self.width)
local_point = (x, y, 0.)
        # Transform the local point into the global frame
photon.position = transform_point(local_point, self.plane.transform)
photon.direction = self.direction
photon.active = True
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class LensSource(object):
"""
    A source whose photons are generated in a plane and focused onto a line, with a spatial tolerance given by the variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.throw = 0
self.source_id = "LensSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
z = np.random.uniform(self.planeorigin[2],self.planeextent[2])
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2]
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class LensSourceAngle(object):
"""
    A source whose photons are generated in a plane and focused onto a line, with a spatial tolerance given by the variable "focussize".
    The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    For this lens an additional z-boost is added (angle of incidence in the z-direction).
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSourceAngle, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.angle = angle
self.throw = 0
self.source_id = "LensSourceAngle_" + str(id(self))
def photon(self):
photon = Photon()
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
boost = y*np.tan(self.angle)
z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) - boost
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2] + boost
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class CylindricalSource(object):
"""
    A source for photons emitted in a random direction from a random position inside a cylinder (radius, length).
"""
def __init__(self, spectrum = None, wavelength = 555, radius = 1, length = 10):
super(CylindricalSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.shape = Cylinder(radius = radius, length = length)
self.radius = radius
self.length = length
self.throw = 0
self.source_id = "CylindricalSource_" + str(id(self))
def translate(self, translation):
self.shape.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.shape.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position of emission
phi = np.random.uniform(0., 2*np.pi)
r = np.random.uniform(0.,self.radius)
x = r*np.cos(phi)
y = r*np.sin(phi)
z = np.random.uniform(0.,self.length)
local_center = (x,y,z)
photon.position = transform_point(local_center, self.shape.transform)
# Direction of emission (no need to transform if meant to be isotropic)
phi = np.random.uniform(0.,2*np.pi)
theta = np.random.uniform(0.,np.pi)
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
local_direction = (x,y,z)
photon.direction = local_direction
# Set wavelength of photon
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
# Further initialisation
photon.active = True
return photon
class PointSource(object):
"""
    A point source that emits randomly into the solid angle specified by phimin, phimax, thetamin and thetamax.
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi):
super(PointSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.throw = 0
self.source_id = "PointSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
phi = np.random.uniform(self.phimin, self.phimax)
theta = np.random.uniform(self.thetamin, self.thetamax)
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
direction = (x,y,z)
transform = tf.translation_matrix((0,0,0))
point = transform_point(self.center, transform)
photon.direction = direction
photon.position = point
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
photon.active = True
return photon
class RadialSource(object):
"""
A point source that emits at discrete angles theta(i) and phi(i)
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi, spacing=20):
super(RadialSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.spacing = spacing
self.throw = 0
self.source_id = "RadialSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
intphi = np.random.randint(1, self.spacing+1)
inttheta = np.random.randint(1, self.spacing+1)
phi = intphi*(self.phimax-self.phimin)/self.spacing
if self.thetamin == self.thetamax:
theta = self.thetamin
else:
theta = inttheta*(self.thetamax-self.thetamin)/self.spacing
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
direction = (x,y,z)
transform = tf.translation_matrix((0,0,0))
point = transform_point(self.center, transform)
photon.direction = direction
photon.position = point
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
photon.active = True
return photon
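# Minimal usage sketch (illustrative only): it exercises only the classes
# defined above; the demo variable names are placeholders and nothing here is
# part of the original module's behaviour.
if __name__ == "__main__":
    demo_source = PlanarSource(wavelength=555, direction=(0, 0, 1), length=0.05, width=0.05)
    demo_source.translate((0, 0, -0.01))
    demo_photons = [demo_source.photon() for i in range(100)]
    print("Generated %d photons from %s" % (len(demo_photons), demo_source.source_id))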
|
[
"Geometry.Cylinder",
"external.transformations.translation_matrix",
"numpy.sqrt",
"numpy.tan",
"Geometry.FinitePlane",
"numpy.sin",
"Geometry.transform_direction",
"numpy.array",
"numpy.random.randint",
"Geometry.transform_point",
"external.transformations.rotation_matrix",
"numpy.cos",
"numpy.random.uniform",
"Geometry.rotation_matrix_from_vector_alignment",
"Trace.Photon",
"Geometry.norm"
] |
[((1353, 1372), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (1361, 1372), True, 'import numpy as np\n'), ((1299, 1313), 'numpy.sqrt', 'np.sqrt', (['(1 - s)'], {}), '(1 - s)\n', (1306, 1313), True, 'import numpy as np\n'), ((1971, 1979), 'Trace.Photon', 'Photon', ([], {}), '()\n', (1977, 1979), False, 'from Trace import Photon\n'), ((2045, 2068), 'numpy.array', 'np.array', (['self.position'], {}), '(self.position)\n', (2053, 2068), True, 'import numpy as np\n'), ((2096, 2120), 'numpy.array', 'np.array', (['self.direction'], {}), '(self.direction)\n', (2104, 2120), True, 'import numpy as np\n'), ((3139, 3157), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (3147, 3157), True, 'import numpy as np\n'), ((3183, 3202), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (3191, 3202), True, 'import numpy as np\n'), ((3345, 3367), 'numpy.array', 'np.array', (['polarisation'], {}), '(polarisation)\n', (3353, 3367), True, 'import numpy as np\n'), ((3513, 3521), 'Trace.Photon', 'Photon', ([], {}), '()\n', (3519, 3521), False, 'from Trace import Photon\n'), ((3587, 3610), 'numpy.array', 'np.array', (['self.position'], {}), '(self.position)\n', (3595, 3610), True, 'import numpy as np\n'), ((3638, 3662), 'numpy.array', 'np.array', (['self.direction'], {}), '(self.direction)\n', (3646, 3662), True, 'import numpy as np\n'), ((4239, 4278), 'Geometry.FinitePlane', 'FinitePlane', ([], {'length': 'length', 'width': 'width'}), '(length=length, width=width)\n', (4250, 4278), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((4891, 4899), 'Trace.Photon', 'Photon', ([], {}), '()\n', (4897, 4899), False, 'from Trace import Photon\n'), ((5108, 5143), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.length'], {}), '(0.0, self.length)\n', (5125, 5143), True, 'import numpy as np\n'), ((5155, 5189), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.width'], {}), '(0.0, self.width)\n', (5172, 5189), True, 'import numpy as np\n'), ((5291, 5341), 'Geometry.transform_point', 'transform_point', (['local_point', 'self.plane.transform'], {}), '(local_point, self.plane.transform)\n', (5306, 5341), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((6264, 6283), 'numpy.array', 'np.array', (['linepoint'], {}), '(linepoint)\n', (6272, 6283), True, 'import numpy as np\n'), ((6313, 6336), 'numpy.array', 'np.array', (['linedirection'], {}), '(linedirection)\n', (6321, 6336), True, 'import numpy as np\n'), ((6490, 6498), 'Trace.Photon', 'Photon', ([], {}), '()\n', (6496, 6498), False, 'from Trace import Photon\n'), ((6646, 6705), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[0]', 'self.planeextent[0]'], {}), '(self.planeorigin[0], self.planeextent[0])\n', (6663, 6705), True, 'import numpy as np\n'), ((6717, 6776), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[1]', 'self.planeextent[1]'], {}), '(self.planeorigin[1], self.planeextent[1])\n', (6734, 6776), True, 'import numpy as np\n'), ((6788, 6847), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[2]', 'self.planeextent[2]'], {}), '(self.planeorigin[2], self.planeextent[2])\n', (6805, 6847), True, 'import numpy as np\n'), ((6873, 6892), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (6881, 6892), True, 'import numpy as np\n'), ((6949, 6974), 
'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (6957, 6974), True, 'import numpy as np\n'), ((8392, 8411), 'numpy.array', 'np.array', (['linepoint'], {}), '(linepoint)\n', (8400, 8411), True, 'import numpy as np\n'), ((8441, 8464), 'numpy.array', 'np.array', (['linedirection'], {}), '(linedirection)\n', (8449, 8464), True, 'import numpy as np\n'), ((8658, 8666), 'Trace.Photon', 'Photon', ([], {}), '()\n', (8664, 8666), False, 'from Trace import Photon\n'), ((8784, 8843), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[0]', 'self.planeextent[0]'], {}), '(self.planeorigin[0], self.planeextent[0])\n', (8801, 8843), True, 'import numpy as np\n'), ((8855, 8914), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[1]', 'self.planeextent[1]'], {}), '(self.planeorigin[1], self.planeextent[1])\n', (8872, 8914), True, 'import numpy as np\n'), ((9056, 9075), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (9064, 9075), True, 'import numpy as np\n'), ((9132, 9157), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (9140, 9157), True, 'import numpy as np\n'), ((10215, 10253), 'Geometry.Cylinder', 'Cylinder', ([], {'radius': 'radius', 'length': 'length'}), '(radius=radius, length=length)\n', (10223, 10253), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((10673, 10681), 'Trace.Photon', 'Photon', ([], {}), '()\n', (10679, 10681), False, 'from Trace import Photon\n'), ((10843, 10876), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2 * np.pi)'], {}), '(0.0, 2 * np.pi)\n', (10860, 10876), True, 'import numpy as np\n'), ((10886, 10921), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.radius'], {}), '(0.0, self.radius)\n', (10903, 10921), True, 'import numpy as np\n'), ((11002, 11037), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.length'], {}), '(0.0, self.length)\n', (11019, 11037), True, 'import numpy as np\n'), ((11102, 11153), 'Geometry.transform_point', 'transform_point', (['local_center', 'self.shape.transform'], {}), '(local_center, self.shape.transform)\n', (11117, 11153), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((11266, 11299), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2 * np.pi)'], {}), '(0.0, 2 * np.pi)\n', (11283, 11299), True, 'import numpy as np\n'), ((11312, 11341), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'np.pi'], {}), '(0.0, np.pi)\n', (11329, 11341), True, 'import numpy as np\n'), ((11437, 11450), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (11443, 11450), True, 'import numpy as np\n'), ((12545, 12553), 'Trace.Photon', 'Photon', ([], {}), '()\n', (12551, 12553), False, 'from Trace import Photon\n'), ((12683, 12726), 'numpy.random.uniform', 'np.random.uniform', (['self.phimin', 'self.phimax'], {}), '(self.phimin, self.phimax)\n', (12700, 12726), True, 'import numpy as np\n'), ((12743, 12790), 'numpy.random.uniform', 'np.random.uniform', (['self.thetamin', 'self.thetamax'], {}), '(self.thetamin, self.thetamax)\n', (12760, 12790), True, 'import numpy as np\n'), ((12888, 12901), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (12894, 12901), True, 'import numpy as np\n'), ((12959, 12991), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (12980, 
12991), True, 'import external.transformations as tf\n'), ((13006, 13045), 'Geometry.transform_point', 'transform_point', (['self.center', 'transform'], {}), '(self.center, transform)\n', (13021, 13045), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((14082, 14090), 'Trace.Photon', 'Photon', ([], {}), '()\n', (14088, 14090), False, 'from Trace import Photon\n'), ((14232, 14270), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.spacing + 1)'], {}), '(1, self.spacing + 1)\n', (14249, 14270), True, 'import numpy as np\n'), ((14296, 14334), 'numpy.random.randint', 'np.random.randint', (['(1)', '(self.spacing + 1)'], {}), '(1, self.spacing + 1)\n', (14313, 14334), True, 'import numpy as np\n'), ((14669, 14682), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (14675, 14682), True, 'import numpy as np\n'), ((14740, 14772), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['(0, 0, 0)'], {}), '((0, 0, 0))\n', (14761, 14772), True, 'import external.transformations as tf\n'), ((14787, 14826), 'Geometry.transform_point', 'transform_point', (['self.center', 'transform'], {}), '(self.center, transform)\n', (14802, 14826), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((2542, 2551), 'Geometry.norm', 'norm', (['vec'], {}), '(vec)\n', (2546, 2551), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((2568, 2632), 'Geometry.rotation_matrix_from_vector_alignment', 'rotation_matrix_from_vector_alignment', (['self.direction', '[0, 0, 1]'], {}), '(self.direction, [0, 0, 1])\n', (2605, 2632), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((2665, 2692), 'Geometry.transform_direction', 'transform_direction', (['vec', 'R'], {}), '(vec, R)\n', (2684, 2692), False, 'from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm\n'), ((4702, 4736), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['translation'], {}), '(translation)\n', (4723, 4736), True, 'import external.transformations as tf\n'), ((4814, 4845), 'external.transformations.rotation_matrix', 'tf.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (4832, 4845), True, 'import external.transformations as tf\n'), ((7022, 7072), 'numpy.random.uniform', 'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (7039, 7072), True, 'import numpy as np\n'), ((7116, 7166), 'numpy.random.uniform', 'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (7133, 7166), True, 'import numpy as np\n'), ((8932, 8950), 'numpy.tan', 'np.tan', (['self.angle'], {}), '(self.angle)\n', (8938, 8950), True, 'import numpy as np\n'), ((8963, 9022), 'numpy.random.uniform', 'np.random.uniform', (['self.planeorigin[2]', 'self.planeextent[2]'], {}), '(self.planeorigin[2], self.planeextent[2])\n', (8980, 9022), True, 'import numpy as np\n'), ((9205, 9255), 'numpy.random.uniform', 'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (9222, 9255), True, 'import numpy as np\n'), ((9299, 9349), 'numpy.random.uniform', 
'np.random.uniform', (['(-self.focussize)', 'self.focussize'], {}), '(-self.focussize, self.focussize)\n', (9316, 9349), True, 'import numpy as np\n'), ((10492, 10526), 'external.transformations.translation_matrix', 'tf.translation_matrix', (['translation'], {}), '(translation)\n', (10513, 10526), True, 'import external.transformations as tf\n'), ((10600, 10631), 'external.transformations.rotation_matrix', 'tf.rotation_matrix', (['angle', 'axis'], {}), '(angle, axis)\n', (10618, 10631), True, 'import external.transformations as tf\n'), ((10951, 10962), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (10957, 10962), True, 'import numpy as np\n'), ((10977, 10988), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (10983, 10988), True, 'import numpy as np\n'), ((11361, 11372), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (11367, 11372), True, 'import numpy as np\n'), ((11373, 11386), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (11379, 11386), True, 'import numpy as np\n'), ((11399, 11410), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (11405, 11410), True, 'import numpy as np\n'), ((11411, 11424), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (11417, 11424), True, 'import numpy as np\n'), ((12812, 12823), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (12818, 12823), True, 'import numpy as np\n'), ((12824, 12837), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12830, 12837), True, 'import numpy as np\n'), ((12850, 12861), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (12856, 12861), True, 'import numpy as np\n'), ((12862, 12875), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (12868, 12875), True, 'import numpy as np\n'), ((14586, 14597), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (14592, 14597), True, 'import numpy as np\n'), ((14598, 14611), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14604, 14611), True, 'import numpy as np\n'), ((14630, 14641), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14636, 14641), True, 'import numpy as np\n'), ((14642, 14655), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14648, 14655), True, 'import numpy as np\n'), ((1120, 1139), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1137, 1139), True, 'import numpy as np\n'), ((1163, 1182), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1180, 1182), True, 'import numpy as np\n'), ((5519, 5538), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5536, 5538), True, 'import numpy as np\n'), ((7522, 7541), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (7539, 7541), True, 'import numpy as np\n'), ((9712, 9731), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9729, 9731), True, 'import numpy as np\n'), ((11687, 11706), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (11704, 11706), True, 'import numpy as np\n'), ((13239, 13258), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (13256, 13258), True, 'import numpy as np\n'), ((15020, 15039), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (15037, 15039), True, 'import numpy as np\n')]
|
import numpy as np
import sys
## ROCKSTAR ##
halostruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
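# Note on the 'dummy1'/'dummy2' fields above: they appear to mirror padding in
# the ROCKSTAR C struct (presumably keeping the 8-byte members aligned); in
# format revision 2 below, 'dummy1' is replaced by 'halfmass_radius'.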
halostruct2 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('halfmass_radius',np.float32),
#('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
## ROCKSTAR-GALAXIES ##
halogalaxystruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32),
('type',np.int32),
('sm',np.float32),
('gas',np.float32),
('bh',np.float32),
('peak_density',np.float32),
('av_density',np.float32),
])
def getRSformat(obj):
if obj.galaxies == 0:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halostruct1')
return halostruct1
elif obj.format_revision == 2:
if obj.debug: print('returning halostruct2')
return halostruct2
else:
print('found HALO_FORMAT_REVISION=%d, if this is >2 email me!' %
obj.format_revision)
sys.exit()
elif obj.galaxies == 1:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR-GALAXIES, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halogalaxystruct1')
return halogalaxystruct1
else:
print('found HALO_FORMAT_REVISION=%d, if this is >1 email me!' %
obj.format_revision)
sys.exit()
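# Minimal usage sketch (illustrative only; 'f' and 'num_halos' are placeholder
# names for an open binary catalogue and the halo count taken from its header):
#
#   halostruct = getRSformat(obj)
#   halos = np.fromfile(f, dtype=halostruct, count=num_halos)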
|
[
"numpy.dtype",
"sys.exit"
] |
[((60, 1145), 'numpy.dtype', 'np.dtype', (["[('id', np.int64), ('pos', np.float32, (6,)), ('corevel', np.float32, (3,)),\n ('bulkvel', np.float32, (3,)), ('m', np.float32), ('r', np.float32), (\n 'child_r', np.float32), ('vmax_r', np.float32), ('mgrav', np.float32),\n ('vmax', np.float32), ('rvmax', np.float32), ('rs', np.float32), (\n 'klypin_rs', np.float32), ('vrms', np.float32), ('J', np.float32, (3,)),\n ('energy', np.float32), ('spin', np.float32), ('alt_m', np.float32, (4,\n )), ('Xoff', np.float32), ('Voff', np.float32), ('b_to_a', np.float32),\n ('c_to_a', np.float32), ('A', np.float32, (3,)), ('b_to_a2', np.float32\n ), ('c_to_a2', np.float32), ('A2', np.float32, (3,)), ('bullock_spin',\n np.float32), ('kin_to_pot', np.float32), ('m_pe_b', np.float32), (\n 'm_pe_d', np.float32), ('dummy1', np.float32), ('num_p', np.int64), (\n 'num_child_particles', np.int64), ('p_start', np.int64), ('desc', np.\n int64), ('flags', np.int64), ('n_core', np.int64), ('dummy2', np.\n float32), ('min_pos_err', np.float32), ('min_vel_err', np.float32), (\n 'min_bulkvel_err', np.float32)]"], {}), "([('id', np.int64), ('pos', np.float32, (6,)), ('corevel', np.\n float32, (3,)), ('bulkvel', np.float32, (3,)), ('m', np.float32), ('r',\n np.float32), ('child_r', np.float32), ('vmax_r', np.float32), ('mgrav',\n np.float32), ('vmax', np.float32), ('rvmax', np.float32), ('rs', np.\n float32), ('klypin_rs', np.float32), ('vrms', np.float32), ('J', np.\n float32, (3,)), ('energy', np.float32), ('spin', np.float32), ('alt_m',\n np.float32, (4,)), ('Xoff', np.float32), ('Voff', np.float32), (\n 'b_to_a', np.float32), ('c_to_a', np.float32), ('A', np.float32, (3,)),\n ('b_to_a2', np.float32), ('c_to_a2', np.float32), ('A2', np.float32, (3\n ,)), ('bullock_spin', np.float32), ('kin_to_pot', np.float32), (\n 'm_pe_b', np.float32), ('m_pe_d', np.float32), ('dummy1', np.float32),\n ('num_p', np.int64), ('num_child_particles', np.int64), ('p_start', np.\n int64), ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),\n ('dummy2', np.float32), ('min_pos_err', np.float32), ('min_vel_err', np\n .float32), ('min_bulkvel_err', np.float32)])\n", (68, 1145), True, 'import numpy as np\n'), ((2082, 3178), 'numpy.dtype', 'np.dtype', (["[('id', np.int64), ('pos', np.float32, (6,)), ('corevel', np.float32, (3,)),\n ('bulkvel', np.float32, (3,)), ('m', np.float32), ('r', np.float32), (\n 'child_r', np.float32), ('vmax_r', np.float32), ('mgrav', np.float32),\n ('vmax', np.float32), ('rvmax', np.float32), ('rs', np.float32), (\n 'klypin_rs', np.float32), ('vrms', np.float32), ('J', np.float32, (3,)),\n ('energy', np.float32), ('spin', np.float32), ('alt_m', np.float32, (4,\n )), ('Xoff', np.float32), ('Voff', np.float32), ('b_to_a', np.float32),\n ('c_to_a', np.float32), ('A', np.float32, (3,)), ('b_to_a2', np.float32\n ), ('c_to_a2', np.float32), ('A2', np.float32, (3,)), ('bullock_spin',\n np.float32), ('kin_to_pot', np.float32), ('m_pe_b', np.float32), (\n 'm_pe_d', np.float32), ('halfmass_radius', np.float32), ('num_p', np.\n int64), ('num_child_particles', np.int64), ('p_start', np.int64), (\n 'desc', np.int64), ('flags', np.int64), ('n_core', np.int64), ('dummy2',\n np.float32), ('min_pos_err', np.float32), ('min_vel_err', np.float32),\n ('min_bulkvel_err', np.float32)]"], {}), "([('id', np.int64), ('pos', np.float32, (6,)), ('corevel', np.\n float32, (3,)), ('bulkvel', np.float32, (3,)), ('m', np.float32), ('r',\n np.float32), ('child_r', np.float32), ('vmax_r', np.float32), ('mgrav',\n np.float32), ('vmax', 
np.float32), ('rvmax', np.float32), ('rs', np.\n float32), ('klypin_rs', np.float32), ('vrms', np.float32), ('J', np.\n float32, (3,)), ('energy', np.float32), ('spin', np.float32), ('alt_m',\n np.float32, (4,)), ('Xoff', np.float32), ('Voff', np.float32), (\n 'b_to_a', np.float32), ('c_to_a', np.float32), ('A', np.float32, (3,)),\n ('b_to_a2', np.float32), ('c_to_a2', np.float32), ('A2', np.float32, (3\n ,)), ('bullock_spin', np.float32), ('kin_to_pot', np.float32), (\n 'm_pe_b', np.float32), ('m_pe_d', np.float32), ('halfmass_radius', np.\n float32), ('num_p', np.int64), ('num_child_particles', np.int64), (\n 'p_start', np.int64), ('desc', np.int64), ('flags', np.int64), (\n 'n_core', np.int64), ('dummy2', np.float32), ('min_pos_err', np.float32\n ), ('min_vel_err', np.float32), ('min_bulkvel_err', np.float32)])\n", (2090, 3178), True, 'import numpy as np\n'), ((4191, 5423), 'numpy.dtype', 'np.dtype', (["[('id', np.int64), ('pos', np.float32, (6,)), ('corevel', np.float32, (3,)),\n ('bulkvel', np.float32, (3,)), ('m', np.float32), ('r', np.float32), (\n 'child_r', np.float32), ('vmax_r', np.float32), ('mgrav', np.float32),\n ('vmax', np.float32), ('rvmax', np.float32), ('rs', np.float32), (\n 'klypin_rs', np.float32), ('vrms', np.float32), ('J', np.float32, (3,)),\n ('energy', np.float32), ('spin', np.float32), ('alt_m', np.float32, (4,\n )), ('Xoff', np.float32), ('Voff', np.float32), ('b_to_a', np.float32),\n ('c_to_a', np.float32), ('A', np.float32, (3,)), ('b_to_a2', np.float32\n ), ('c_to_a2', np.float32), ('A2', np.float32, (3,)), ('bullock_spin',\n np.float32), ('kin_to_pot', np.float32), ('m_pe_b', np.float32), (\n 'm_pe_d', np.float32), ('dummy1', np.float32), ('num_p', np.int64), (\n 'num_child_particles', np.int64), ('p_start', np.int64), ('desc', np.\n int64), ('flags', np.int64), ('n_core', np.int64), ('dummy2', np.\n float32), ('min_pos_err', np.float32), ('min_vel_err', np.float32), (\n 'min_bulkvel_err', np.float32), ('type', np.int32), ('sm', np.float32),\n ('gas', np.float32), ('bh', np.float32), ('peak_density', np.float32),\n ('av_density', np.float32)]"], {}), "([('id', np.int64), ('pos', np.float32, (6,)), ('corevel', np.\n float32, (3,)), ('bulkvel', np.float32, (3,)), ('m', np.float32), ('r',\n np.float32), ('child_r', np.float32), ('vmax_r', np.float32), ('mgrav',\n np.float32), ('vmax', np.float32), ('rvmax', np.float32), ('rs', np.\n float32), ('klypin_rs', np.float32), ('vrms', np.float32), ('J', np.\n float32, (3,)), ('energy', np.float32), ('spin', np.float32), ('alt_m',\n np.float32, (4,)), ('Xoff', np.float32), ('Voff', np.float32), (\n 'b_to_a', np.float32), ('c_to_a', np.float32), ('A', np.float32, (3,)),\n ('b_to_a2', np.float32), ('c_to_a2', np.float32), ('A2', np.float32, (3\n ,)), ('bullock_spin', np.float32), ('kin_to_pot', np.float32), (\n 'm_pe_b', np.float32), ('m_pe_d', np.float32), ('dummy1', np.float32),\n ('num_p', np.int64), ('num_child_particles', np.int64), ('p_start', np.\n int64), ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),\n ('dummy2', np.float32), ('min_pos_err', np.float32), ('min_vel_err', np\n .float32), ('min_bulkvel_err', np.float32), ('type', np.int32), ('sm',\n np.float32), ('gas', np.float32), ('bh', np.float32), ('peak_density',\n np.float32), ('av_density', np.float32)])\n", (4199, 5423), True, 'import numpy as np\n'), ((6913, 6923), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6921, 6923), False, 'import sys\n'), ((7489, 7499), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7497, 7499), False, 'import sys\n'), 
((7336, 7346), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7344, 7346), False, 'import sys\n'), ((7784, 7794), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7792, 7794), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
    # Number of data points (don't use too many, because of the exact search)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
    # Do a random query on the permutations engine
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
    # Do a random query on the permutation-mapper engine
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
    # Do a random query on the multiple-binary-hash engine
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
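# Run the demo when this file is executed directly (assumption: example2() is
# the only entry point defined in this file).
if __name__ == '__main__':
    example2()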
|
[
"nearpy.hashes.RandomBinaryProjections",
"nearpy.hashes.HashPermutationMapper",
"numpy.zeros",
"nearpy.distances.CosineDistance",
"time.time",
"numpy.random.randn",
"nearpy.hashes.HashPermutations"
] |
[((1613, 1624), 'time.time', 'time.time', ([], {}), '()\n', (1622, 1624), False, 'import time\n'), ((1681, 1707), 'nearpy.hashes.HashPermutations', 'HashPermutations', (['"""permut"""'], {}), "('permut')\n", (1697, 1707), False, 'from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper\n'), ((1763, 1802), 'nearpy.hashes.RandomBinaryProjections', 'RandomBinaryProjections', (['"""rbp_perm"""', '(14)'], {}), "('rbp_perm', 14)\n", (1786, 1802), False, 'from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper\n'), ((2133, 2159), 'numpy.zeros', 'numpy.zeros', (['(POINTS, DIM)'], {}), '((POINTS, DIM))\n', (2144, 2159), False, 'import numpy\n'), ((2365, 2376), 'time.time', 'time.time', ([], {}), '()\n', (2374, 2376), False, 'import time\n'), ((2468, 2491), 'numpy.random.randn', 'numpy.random.randn', (['DIM'], {}), '(DIM)\n', (2486, 2491), False, 'import numpy\n'), ((3129, 3140), 'time.time', 'time.time', ([], {}), '()\n', (3138, 3140), False, 'import time\n'), ((3198, 3230), 'nearpy.hashes.HashPermutationMapper', 'HashPermutationMapper', (['"""permut2"""'], {}), "('permut2')\n", (3219, 3230), False, 'from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper\n'), ((3287, 3327), 'nearpy.hashes.RandomBinaryProjections', 'RandomBinaryProjections', (['"""rbp_perm2"""', '(14)'], {}), "('rbp_perm2', 14)\n", (3310, 3327), False, 'from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper\n'), ((3579, 3605), 'numpy.zeros', 'numpy.zeros', (['(POINTS, DIM)'], {}), '((POINTS, DIM))\n', (3590, 3605), False, 'import numpy\n'), ((3738, 3749), 'time.time', 'time.time', ([], {}), '()\n', (3747, 3749), False, 'import time\n'), ((3841, 3864), 'numpy.random.randn', 'numpy.random.randn', (['DIM'], {}), '(DIM)\n', (3859, 3864), False, 'import numpy\n'), ((4509, 4520), 'time.time', 'time.time', ([], {}), '()\n', (4518, 4520), False, 'import time\n'), ((4774, 4800), 'numpy.zeros', 'numpy.zeros', (['(POINTS, DIM)'], {}), '((POINTS, DIM))\n', (4785, 4800), False, 'import numpy\n'), ((4932, 4943), 'time.time', 'time.time', ([], {}), '()\n', (4941, 4943), False, 'import time\n'), ((5035, 5058), 'numpy.random.randn', 'numpy.random.randn', (['DIM'], {}), '(DIM)\n', (5053, 5058), False, 'import numpy\n'), ((2199, 2222), 'numpy.random.randn', 'numpy.random.randn', (['DIM'], {}), '(DIM)\n', (2217, 2222), False, 'import numpy\n'), ((3645, 3668), 'numpy.random.randn', 'numpy.random.randn', (['DIM'], {}), '(DIM)\n', (3663, 3668), False, 'import numpy\n'), ((4840, 4863), 'numpy.random.randn', 'numpy.random.randn', (['DIM'], {}), '(DIM)\n', (4858, 4863), False, 'import numpy\n'), ((2063, 2079), 'nearpy.distances.CosineDistance', 'CosineDistance', ([], {}), '()\n', (2077, 2079), False, 'from nearpy.distances import CosineDistance\n'), ((2868, 2884), 'nearpy.distances.CosineDistance', 'CosineDistance', ([], {}), '()\n', (2882, 2884), False, 'from nearpy.distances import CosineDistance\n'), ((3509, 3525), 'nearpy.distances.CosineDistance', 'CosineDistance', ([], {}), '()\n', (3523, 3525), False, 'from nearpy.distances import CosineDistance\n'), ((4248, 4264), 'nearpy.distances.CosineDistance', 'CosineDistance', ([], {}), '()\n', (4262, 4264), False, 'from nearpy.distances import CosineDistance\n'), ((4584, 4625), 'nearpy.hashes.RandomBinaryProjections', 'RandomBinaryProjections', (["('rbp_%d' % k)", '(10)'], {}), "('rbp_%d' % k, 10)\n", (4607, 4625), False, 'from nearpy.hashes import 
RandomBinaryProjections, HashPermutations, HashPermutationMapper\n'), ((4704, 4720), 'nearpy.distances.CosineDistance', 'CosineDistance', ([], {}), '()\n', (4718, 4720), False, 'from nearpy.distances import CosineDistance\n'), ((5441, 5457), 'nearpy.distances.CosineDistance', 'CosineDistance', ([], {}), '()\n', (5455, 5457), False, 'from nearpy.distances import CosineDistance\n')]
|
import matplotlib.pyplot as plt
import numpy as np
from fears.utils import results_manager, plotter, dir_manager
import os
suffix = '07212021_0001'
data_folder = 'results_' + suffix
exp_info_file = 'experiment_info_' + suffix + '.p'
exp_folders,exp_info = results_manager.get_experiment_results(data_folder,
exp_info_file)
max_cells = exp_info.populations[0].max_cells
n_sims = exp_info.n_sims
k_abs = exp_info.slopes
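# reverse the folder list and flip k_abs together so the two stay aligned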
exp_folders.reverse()
k_abs = np.flip(k_abs)
fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(4,4))
pop = exp_info.populations[0]
ax = ax.reshape((len(k_abs),))
axnum = 0
tc_axes=[]
drug_axes=[]
for exp in exp_folders:
k_abs_t = exp[exp.find('=')+1:]
k_abs_t = float(k_abs_t)
num = np.argwhere(k_abs == k_abs_t)
num = num[0,0]
# generate timecourse axes
tcax = ax[axnum]
# da = tcax.twinx()
sim_files = os.listdir(path=exp)
sim_files = sorted(sim_files)
survive_count = 0
counts_total = None
k=0
while k < len(sim_files):
# for sim in sim_files:
sim = sim_files[k]
sim = exp + os.sep + sim
data = results_manager.get_data(sim)
dc = data[:,-1]
data = data[:,0:-1]
# data = data/np.max(data)
data_t = data[-1,:]
        # check whether any genotype still has at least one cell at the end of the simulation (i.e. the population survived)
if any(data_t >= 1):
survive_count += 1
if counts_total is None:
counts_total = data
else:
counts_total += data
# data = data/np.max(data)
# exp_info.populations[num].counts_log_scale = True
data = data/max_cells
if k==0:
drug_kwargs = {'alpha':0.7,
'color':'black',
'linewidth':2,
'label':'Drug Concentration ($\u03BC$M)'
}
tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
drug_curve=dc,
drug_ax_sci_notation=True,
drug_kwargs=drug_kwargs,
legend_labels=False,
grayscale=True,
color='gray',
linewidth=1,
labelsize=12,
alpha=0.7
)
drug_ax.set_ylabel('')
drug_axes.append( drug_ax )
else:
tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
grayscale=True,
color='gray',
legend_labels=False,
linewidth=2,
labelsize=12,
alpha=0.2
)
# drug_ax.set_ylim(0,10**4)
k+=1
if survive_count > 0:
counts_avg = counts_total/survive_count
# counts_avg = counts_avg/np.max(counts_avg)
# counts_avg = counts_total
counts_avg = counts_avg/np.max(counts_avg)
tcax,temp = plotter.plot_timecourse_to_axes(exp_info.populations[num],
counts_avg,
tcax,
labelsize=12)
# t = np.arange(len(dc))
# t = t*exp_info.populations[0].timestep_scale/24
# da.plot(t,dc)
tc_axes.append( tcax )
axnum+=1
|
[
"numpy.flip",
"fears.utils.results_manager.get_data",
"os.listdir",
"fears.utils.results_manager.get_experiment_results",
"numpy.max",
"fears.utils.plotter.plot_timecourse_to_axes",
"numpy.argwhere",
"matplotlib.pyplot.subplots"
] |
[((258, 324), 'fears.utils.results_manager.get_experiment_results', 'results_manager.get_experiment_results', (['data_folder', 'exp_info_file'], {}), '(data_folder, exp_info_file)\n', (296, 324), False, 'from fears.utils import results_manager, plotter, dir_manager\n'), ((512, 526), 'numpy.flip', 'np.flip', (['k_abs'], {}), '(k_abs)\n', (519, 526), True, 'import numpy as np\n'), ((537, 583), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(4, 4)'}), '(nrows=2, ncols=2, figsize=(4, 4))\n', (549, 583), True, 'import matplotlib.pyplot as plt\n'), ((779, 808), 'numpy.argwhere', 'np.argwhere', (['(k_abs == k_abs_t)'], {}), '(k_abs == k_abs_t)\n', (790, 808), True, 'import numpy as np\n'), ((935, 955), 'os.listdir', 'os.listdir', ([], {'path': 'exp'}), '(path=exp)\n', (945, 955), False, 'import os\n'), ((1182, 1211), 'fears.utils.results_manager.get_data', 'results_manager.get_data', (['sim'], {}), '(sim)\n', (1206, 1211), False, 'from fears.utils import results_manager, plotter, dir_manager\n'), ((3986, 4080), 'fears.utils.plotter.plot_timecourse_to_axes', 'plotter.plot_timecourse_to_axes', (['exp_info.populations[num]', 'counts_avg', 'tcax'], {'labelsize': '(12)'}), '(exp_info.populations[num], counts_avg, tcax,\n labelsize=12)\n', (4017, 4080), False, 'from fears.utils import results_manager, plotter, dir_manager\n'), ((2004, 2242), 'fears.utils.plotter.plot_timecourse_to_axes', 'plotter.plot_timecourse_to_axes', (['exp_info.populations[num]', 'data', 'tcax'], {'drug_curve': 'dc', 'drug_ax_sci_notation': '(True)', 'drug_kwargs': 'drug_kwargs', 'legend_labels': '(False)', 'grayscale': '(True)', 'color': '"""gray"""', 'linewidth': '(1)', 'labelsize': '(12)', 'alpha': '(0.7)'}), "(exp_info.populations[num], data, tcax,\n drug_curve=dc, drug_ax_sci_notation=True, drug_kwargs=drug_kwargs,\n legend_labels=False, grayscale=True, color='gray', linewidth=1,\n labelsize=12, alpha=0.7)\n", (2035, 2242), False, 'from fears.utils import results_manager, plotter, dir_manager\n'), ((3016, 3183), 'fears.utils.plotter.plot_timecourse_to_axes', 'plotter.plot_timecourse_to_axes', (['exp_info.populations[num]', 'data', 'tcax'], {'grayscale': '(True)', 'color': '"""gray"""', 'legend_labels': '(False)', 'linewidth': '(2)', 'labelsize': '(12)', 'alpha': '(0.2)'}), "(exp_info.populations[num], data, tcax,\n grayscale=True, color='gray', legend_labels=False, linewidth=2,\n labelsize=12, alpha=0.2)\n", (3047, 3183), False, 'from fears.utils import results_manager, plotter, dir_manager\n'), ((3947, 3965), 'numpy.max', 'np.max', (['counts_avg'], {}), '(counts_avg)\n', (3953, 3965), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import ray
from ray.rllib.ddpg2.models import DDPGModel
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.optimizers import PolicyEvaluator
from ray.rllib.utils.filter import NoFilter
from ray.rllib.utils.process_rollout import process_rollout
from ray.rllib.utils.sampler import SyncSampler
class DDPGEvaluator(PolicyEvaluator):
def __init__(self, registry, env_creator, config):
self.env = ModelCatalog.get_preprocessor_as_wrapper(
registry, env_creator(config["env_config"]))
# contains model, target_model
self.model = DDPGModel(registry, self.env, config)
self.sampler = SyncSampler(
self.env, self.model.model, NoFilter(),
config["num_local_steps"], horizon=config["horizon"])
def sample(self):
"""Returns a batch of samples."""
rollout = self.sampler.get_data()
rollout.data["weights"] = np.ones_like(rollout.data["rewards"])
# since each sample is one step, no discounting needs to be applied;
# this does not involve config["gamma"]
samples = process_rollout(
rollout, NoFilter(),
gamma=1.0, use_gae=False)
return samples
def update_target(self):
"""Updates target critic and target actor."""
self.model.update_target()
def compute_gradients(self, samples):
"""Returns critic, actor gradients."""
return self.model.compute_gradients(samples)
def apply_gradients(self, grads):
"""Applies gradients to evaluator weights."""
self.model.apply_gradients(grads)
def compute_apply(self, samples):
grads, _ = self.compute_gradients(samples)
self.apply_gradients(grads)
def get_weights(self):
"""Returns model weights."""
return self.model.get_weights()
def set_weights(self, weights):
"""Sets model weights."""
self.model.set_weights(weights)
def get_completed_rollout_metrics(self):
"""Returns metrics on previously completed rollouts.
Calling this clears the queue of completed rollout metrics.
"""
return self.sampler.get_metrics()
RemoteDDPGEvaluator = ray.remote(DDPGEvaluator)
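# Usage sketch (illustrative only; 'num_workers' is a placeholder): remote
# evaluators are created as Ray actors, e.g.
#
#   evaluators = [RemoteDDPGEvaluator.remote(registry, env_creator, config)
#                 for _ in range(num_workers)]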
|
[
"numpy.ones_like",
"ray.remote",
"ray.rllib.utils.filter.NoFilter",
"ray.rllib.ddpg2.models.DDPGModel"
] |
[((2374, 2399), 'ray.remote', 'ray.remote', (['DDPGEvaluator'], {}), '(DDPGEvaluator)\n', (2384, 2399), False, 'import ray\n'), ((712, 749), 'ray.rllib.ddpg2.models.DDPGModel', 'DDPGModel', (['registry', 'self.env', 'config'], {}), '(registry, self.env, config)\n', (721, 749), False, 'from ray.rllib.ddpg2.models import DDPGModel\n'), ((1071, 1108), 'numpy.ones_like', 'np.ones_like', (["rollout.data['rewards']"], {}), "(rollout.data['rewards'])\n", (1083, 1108), True, 'import numpy as np\n'), ((839, 849), 'ray.rllib.utils.filter.NoFilter', 'NoFilter', ([], {}), '()\n', (847, 849), False, 'from ray.rllib.utils.filter import NoFilter\n'), ((1299, 1309), 'ray.rllib.utils.filter.NoFilter', 'NoFilter', ([], {}), '()\n', (1307, 1309), False, 'from ray.rllib.utils.filter import NoFilter\n')]
|
import numpy as np
from defdap.quat import Quat
hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
hex_syms[0],
hex_syms[5],
hex_syms[4],
hex_syms[2],
hex_syms[10],
hex_syms[11]
]
cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
cubic_syms[0],
cubic_syms[7],
cubic_syms[9],
cubic_syms[1],
cubic_syms[22],
cubic_syms[16],
cubic_syms[12],
cubic_syms[15],
cubic_syms[4],
cubic_syms[8],
cubic_syms[21],
cubic_syms[20]
]
# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
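# burg_trans applies the conjugate of the Burgers-relation Euler angles above;
# together with the unq_* symmetry subsets it relates HCP and BCC frames
# without double-counting symmetry-equivalent variants.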
|
[
"numpy.array",
"defdap.quat.Quat.symEqv",
"defdap.quat.Quat.fromEulerAngles"
] |
[((60, 84), 'defdap.quat.Quat.symEqv', 'Quat.symEqv', (['"""hexagonal"""'], {}), "('hexagonal')\n", (71, 84), False, 'from defdap.quat import Quat\n'), ((329, 349), 'defdap.quat.Quat.symEqv', 'Quat.symEqv', (['"""cubic"""'], {}), "('cubic')\n", (340, 349), False, 'from defdap.quat import Quat\n'), ((789, 823), 'defdap.quat.Quat.fromEulerAngles', 'Quat.fromEulerAngles', (['*burg_eulers'], {}), '(*burg_eulers)\n', (809, 823), False, 'from defdap.quat import Quat\n'), ((734, 761), 'numpy.array', 'np.array', (['[135, 90, 354.74]'], {}), '([135, 90, 354.74])\n', (742, 761), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import CurveFit
import shutil
#find all DIRECTORIES containing non-hidden files ending in FILENAME
def getDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
directories=[]
for directory in os.scandir(DIRECTORY):
for item in os.scandir(directory):
if item.name.endswith(FILENAME) and not item.name.startswith("."):
directories.append(directory.path)
return directories
#get all non-hidden data files in DIRECTORY with extension EXT
def getDataFiles(DIRECTORY, EXT='txt'):
datafiles=[]
for item in os.scandir(DIRECTORY):
if item.name.endswith("."+EXT) and not item.name.startswith("."):
datafiles.append(item.path)
return datafiles
#check whether the loss ever fails to decrease for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
minLoss=np.inf
epochMin=0
for i in range(0,loss.size):
if loss[i] < minLoss:
minLoss=loss[i]
epochMin=epoch[i]
elif (epoch[i]-epochMin) >= numEpochs:
return i, minLoss
return i, minLoss
#SEARCHDIR is where the accuracy and loss files are stored; move the files into the directory layout expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
for item in os.scandir(SEARCHDIR):
name=str(item.name)
files=name.split('-')
SAVEFULLDIR=SAVEDIR+str(files[0])
if not os.path.exists(SAVEFULLDIR):
try:
os.makedirs(SAVEFULLDIR)
except FileExistsError:
#directory already exists--must have been created between the if statement & our attempt at making directory
pass
shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))
#a function to read in information (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header='infer', sep=' '):
    data = pd.read_csv(FILENAME, sep=sep, header=header)
if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
batch_size=np.array(sortedData['batch_size'])
learning_rate=np.array(sortedData['learning_rate'])
convKers=np.array(sortedData['convKernels'])
return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)
elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
        valAcc=np.array(sortedData['valAcc'])
        return(epoch, trainLoss, valLoss, valAcc)
else:
print("Missing a column in NN datafile")
raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')
#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
#we can slice the data to sample less often, but not more often. We verify that we're not being asked for a granularity that is smaller than the frequency of datapoints in the vectors.
if x[0] > xsize:
return x,y,z,w
else:
result=(1.0/x[0])*xsize
#result is how often we should take datapoints if we wish to consider values every xsize
x=x[int(result-1)::int(result)]
y=y[int(result-1)::int(result)]
if z is not None:
z=z[int(result-1)::int(result)]
if w is None:
return x,y,z
else:
return x,y
#if we get to this point in function, it means z and w are both not None.
w=w[int(result-1)::int(result)]
return x,y,z,w
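#Worked example (made-up spacing): for x=[0.5, 1.0, 1.5, 2.0] sampled every 0.5 and
#xsize=1.0, result=(1/0.5)*1.0=2, so every second point is kept starting from index 1,
#i.e. x becomes [1.0, 2.0] (y, z and w are thinned the same way when given).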
|
[
"os.path.exists",
"os.makedirs",
"pandas.read_csv",
"os.scandir",
"numpy.array"
] |
[((312, 333), 'os.scandir', 'os.scandir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (322, 333), False, 'import os\n'), ((668, 689), 'os.scandir', 'os.scandir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (678, 689), False, 'import os\n'), ((1388, 1409), 'os.scandir', 'os.scandir', (['SEARCHDIR'], {}), '(SEARCHDIR)\n', (1398, 1409), False, 'import os\n'), ((2001, 2042), 'pandas.read_csv', 'pd.read_csv', (['FILENAME', 'sep'], {'header': 'header'}), '(FILENAME, sep, header=header)\n', (2012, 2042), True, 'import pandas as pd\n'), ((355, 376), 'os.scandir', 'os.scandir', (['directory'], {}), '(directory)\n', (365, 376), False, 'import os\n'), ((2338, 2368), 'numpy.array', 'np.array', (["sortedData['epochs']"], {}), "(sortedData['epochs'])\n", (2346, 2368), True, 'import numpy as np\n'), ((2387, 2420), 'numpy.array', 'np.array', (["sortedData['trainLoss']"], {}), "(sortedData['trainLoss'])\n", (2395, 2420), True, 'import numpy as np\n'), ((2437, 2468), 'numpy.array', 'np.array', (["sortedData['valLoss']"], {}), "(sortedData['valLoss'])\n", (2445, 2468), True, 'import numpy as np\n'), ((2484, 2514), 'numpy.array', 'np.array', (["sortedData['valAcc']"], {}), "(sortedData['valAcc'])\n", (2492, 2514), True, 'import numpy as np\n'), ((2535, 2569), 'numpy.array', 'np.array', (["sortedData['batch_size']"], {}), "(sortedData['batch_size'])\n", (2543, 2569), True, 'import numpy as np\n'), ((2592, 2629), 'numpy.array', 'np.array', (["sortedData['learning_rate']"], {}), "(sortedData['learning_rate'])\n", (2600, 2629), True, 'import numpy as np\n'), ((2648, 2683), 'numpy.array', 'np.array', (["sortedData['convKernels']"], {}), "(sortedData['convKernels'])\n", (2656, 2683), True, 'import numpy as np\n'), ((1526, 1553), 'os.path.exists', 'os.path.exists', (['SAVEFULLDIR'], {}), '(SAVEFULLDIR)\n', (1540, 1553), False, 'import os\n'), ((3016, 3046), 'numpy.array', 'np.array', (["sortedData['epochs']"], {}), "(sortedData['epochs'])\n", (3024, 3046), True, 'import numpy as np\n'), ((3065, 3098), 'numpy.array', 'np.array', (["sortedData['trainLoss']"], {}), "(sortedData['trainLoss'])\n", (3073, 3098), True, 'import numpy as np\n'), ((3115, 3146), 'numpy.array', 'np.array', (["sortedData['valLoss']"], {}), "(sortedData['valLoss'])\n", (3123, 3146), True, 'import numpy as np\n'), ((3162, 3192), 'numpy.array', 'np.array', (["sortedData['valAcc']"], {}), "(sortedData['valAcc'])\n", (3170, 3192), True, 'import numpy as np\n'), ((1588, 1612), 'os.makedirs', 'os.makedirs', (['SAVEFULLDIR'], {}), '(SAVEFULLDIR)\n', (1599, 1612), False, 'import os\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
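# Worked example (illustrative): softmax(np.array([[1.0, 2.0]])) subtracts the row max,
# giving exp([-1, 0]) = [0.368, 1.0]; normalising by the row sum yields
# approximately [[0.269, 0.731]].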
def score_model(preds, test_pos, test_neg, weight, bias):
"""
Score the model on the test set edges in each epoch.
Args:
epoch (LongTensor): Training epochs.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
    # Stack positive and negative edge embeddings, then apply the regression layer (weight, bias).
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()
|
[
"numpy.fromfile",
"argparse.ArgumentParser",
"mindspore.context.set_context",
"os.path.join",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.array",
"mindspore.load_checkpoint",
"numpy.concatenate",
"src.ms_utils.calculate_auc"
] |
[((876, 908), 'numpy.max', 'np.max', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (882, 908), True, 'import numpy as np\n'), ((967, 984), 'numpy.exp', 'np.exp', (['(x - t_max)'], {}), '(x - t_max)\n', (973, 984), True, 'import numpy as np\n'), ((1039, 1073), 'numpy.sum', 'np.sum', (['e_x'], {'axis': '(1)', 'keepdims': '(True)'}), '(e_x, axis=1, keepdims=True)\n', (1045, 1073), True, 'import numpy as np\n'), ((1597, 1702), 'numpy.concatenate', 'np.concatenate', (['(preds[score_positive_edges[0, :], :], preds[score_positive_edges[1, :], :])'], {'axis': '(1)'}), '((preds[score_positive_edges[0, :], :], preds[\n score_positive_edges[1, :], :]), axis=1)\n', (1611, 1702), True, 'import numpy as np\n'), ((1760, 1865), 'numpy.concatenate', 'np.concatenate', (['(preds[score_negative_edges[0, :], :], preds[score_negative_edges[1, :], :])'], {'axis': '(1)'}), '((preds[score_negative_edges[0, :], :], preds[\n score_negative_edges[1, :], :]), axis=1)\n', (1774, 1865), True, 'import numpy as np\n'), ((2314, 2349), 'src.ms_utils.calculate_auc', 'calculate_auc', (['targets', 'predictions'], {}), '(targets, predictions)\n', (2327, 2349), False, 'from src.ms_utils import calculate_auc\n'), ((2433, 2483), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""postprocess"""'}), "(description='postprocess')\n", (2456, 2483), False, 'import argparse\n'), ((4083, 4168), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""', 'device_id': '(0)'}), "(mode=context.GRAPH_MODE, device_target='Ascend',\n device_id=0)\n", (4102, 4168), False, 'from mindspore import context, load_checkpoint\n'), ((4409, 4450), 'mindspore.load_checkpoint', 'load_checkpoint', (['args_opt.checkpoint_file'], {}), '(args_opt.checkpoint_file)\n', (4424, 4450), False, 'from mindspore import context, load_checkpoint\n'), ((4655, 4708), 'numpy.fromfile', 'np.fromfile', (['"""./result_Files/repos_0.bin"""', 'np.float32'], {}), "('./result_Files/repos_0.bin', np.float32)\n", (4666, 4708), True, 'import numpy as np\n'), ((1472, 1506), 'numpy.array', 'np.array', (['test_pos'], {'dtype': 'np.int32'}), '(test_pos, dtype=np.int32)\n', (1480, 1506), True, 'import numpy as np\n'), ((1537, 1571), 'numpy.array', 'np.array', (['test_neg'], {'dtype': 'np.int32'}), '(test_neg, dtype=np.int32)\n', (1545, 1571), True, 'import numpy as np\n'), ((4211, 4261), 'os.path.join', 'os.path.join', (['args_opt.result_path', '"""pos_test.npy"""'], {}), "(args_opt.result_path, 'pos_test.npy')\n", (4223, 4261), False, 'import os\n'), ((4287, 4337), 'os.path.join', 'os.path.join', (['args_opt.result_path', '"""neg_test.npy"""'], {}), "(args_opt.result_path, 'neg_test.npy')\n", (4299, 4337), False, 'import os\n'), ((2000, 2058), 'numpy.concatenate', 'np.concatenate', (['(test_positive_z, test_negative_z)'], {'axis': '(0)'}), '((test_positive_z, test_negative_z), axis=0)\n', (2014, 2058), True, 'import numpy as np\n')]
|
# This file is part of postcipes
# (c) <NAME>
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
from scipy.interpolate import interp1d
import numpy as np
import h5py
__all__ = ["HydraulicJump"]
class HydraulicJump(Postcipe):
def __init__(self, path):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.case['alphag'] = 1 - self.case['alpha.waterMean']
self.U = self.case.boundary_data("inlet", sort="y")[1]['UMean'][0, 0]
y_inlet = self.case.boundary_data("inlet", sort="y")[0][:, 1]
inlet_edge_length = tbl.edge_lengths(self.case, "inlet")
self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1]
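        # Upstream Froude number Fr1 = U/sqrt(g*d) and the conjugate (downstream)
        # depth d2 from the classical Belanger relation d2 = d*(sqrt(1 + 8*Fr1**2) - 1)/2.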
self.Fr1 = self.U/np.sqrt(9.81*self.d)
self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2
self.Fr2 = self.U/np.sqrt(9.81*self.d2)
iso05 = tbl.isoline(self.case, "alpha.waterMean", 0.5)
idx = iso05[:, 0].argsort()
self.xfs = iso05[idx, 0]
self.yfs = iso05[idx, 1]
idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)]))
self.xtoe = self.xfs[idx_toe]
|
[
"turbulucid.Case",
"turbulucid.edge_lengths",
"numpy.sqrt",
"turbulucid.isoline"
] |
[((548, 562), 'turbulucid.Case', 'tbl.Case', (['path'], {}), '(path)\n', (556, 562), True, 'import turbulucid as tbl\n'), ((803, 839), 'turbulucid.edge_lengths', 'tbl.edge_lengths', (['self.case', '"""inlet"""'], {}), "(self.case, 'inlet')\n", (819, 839), True, 'import turbulucid as tbl\n'), ((1069, 1115), 'turbulucid.isoline', 'tbl.isoline', (['self.case', '"""alpha.waterMean"""', '(0.5)'], {}), "(self.case, 'alpha.waterMean', 0.5)\n", (1080, 1115), True, 'import turbulucid as tbl\n'), ((923, 945), 'numpy.sqrt', 'np.sqrt', (['(9.81 * self.d)'], {}), '(9.81 * self.d)\n', (930, 945), True, 'import numpy as np\n'), ((1030, 1053), 'numpy.sqrt', 'np.sqrt', (['(9.81 * self.d2)'], {}), '(9.81 * self.d2)\n', (1037, 1053), True, 'import numpy as np\n'), ((970, 1000), 'numpy.sqrt', 'np.sqrt', (['(1 + 8 * self.Fr1 ** 2)'], {}), '(1 + 8 * self.Fr1 ** 2)\n', (977, 1000), True, 'import numpy as np\n')]
|
# pylint: disable=protected-access
"""
Test the wrappers for the C API.
"""
import os
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import xarray as xr
from packaging.version import Version
from pygmt import Figure, clib
from pygmt.clib.conversion import dataarray_to_matrix
from pygmt.clib.session import FAMILIES, VIAS
from pygmt.exceptions import (
GMTCLibError,
GMTCLibNoSessionError,
GMTInvalidInput,
GMTVersionError,
)
from pygmt.helpers import GMTTempFile
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
with clib.Session() as _lib:
gmt_version = Version(_lib.info["version"])
@contextmanager
def mock(session, func, returns=None, mock_func=None):
"""
Mock a GMT C API function to make it always return a given value.
Used to test that exceptions are raised when API functions fail by
producing a NULL pointer as output or non-zero status codes.
Needed because it's not easy to get some API functions to fail without
inducing a Segmentation Fault (which is a good thing because libgmt usually
only fails with errors).
"""
if mock_func is None:
def mock_api_function(*args): # pylint: disable=unused-argument
"""
A mock GMT API function that always returns a given value.
"""
return returns
mock_func = mock_api_function
get_libgmt_func = session.get_libgmt_func
def mock_get_libgmt_func(name, argtypes=None, restype=None):
"""
Return our mock function.
"""
if name == func:
return mock_func
return get_libgmt_func(name, argtypes, restype)
setattr(session, "get_libgmt_func", mock_get_libgmt_func)
yield
setattr(session, "get_libgmt_func", get_libgmt_func)
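# Illustrative use (mirrors the tests further below): patch a libgmt call so it
# reports failure, then assert that the wrapper raises GMTCLibError, e.g.
#   with mock(ses, "GMT_Create_Session", returns=None):
#       with pytest.raises(GMTCLibError):
#           ses.create("test-session-name")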
def test_getitem():
"""
Test that I can get correct constants from the C lib.
"""
ses = clib.Session()
assert ses["GMT_SESSION_EXTERNAL"] != -99999
assert ses["GMT_MODULE_CMD"] != -99999
assert ses["GMT_PAD_DEFAULT"] != -99999
assert ses["GMT_DOUBLE"] != -99999
with pytest.raises(GMTCLibError):
ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement
def test_create_destroy_session():
"""
Test that create and destroy session are called without errors.
"""
    # Create two sessions and make sure they are not pointing to the same memory
session1 = clib.Session()
session1.create(name="test_session1")
assert session1.session_pointer is not None
session2 = clib.Session()
session2.create(name="test_session2")
assert session2.session_pointer is not None
assert session2.session_pointer != session1.session_pointer
session1.destroy()
session2.destroy()
# Create and destroy a session twice
ses = clib.Session()
for __ in range(2):
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
ses.create("session1")
assert ses.session_pointer is not None
ses.destroy()
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
def test_create_session_fails():
"""
Check that an exception is raised when failing to create a session.
"""
ses = clib.Session()
with mock(ses, "GMT_Create_Session", returns=None):
with pytest.raises(GMTCLibError):
ses.create("test-session-name")
# Should fail if trying to create a session before destroying the old one.
ses.create("test1")
with pytest.raises(GMTCLibError):
ses.create("test2")
def test_destroy_session_fails():
"""
Fail to destroy session when given bad input.
"""
ses = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
ses.destroy()
ses.create("test-session")
with mock(ses, "GMT_Destroy_Session", returns=1):
with pytest.raises(GMTCLibError):
ses.destroy()
ses.destroy()
def test_call_module():
"""
Run a command to see if call_module works.
"""
data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
out_fname = "test_call_module.txt"
with clib.Session() as lib:
with GMTTempFile() as out_fname:
lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
assert os.path.exists(out_fname.name)
output = out_fname.read().strip()
assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"
def test_call_module_invalid_arguments():
"""
Fails for invalid module arguments.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("info", "bogus-data.bla")
def test_call_module_invalid_name():
"""
Fails when given bad input.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("meh", "")
def test_call_module_error_message():
"""
Check is the GMT error message was captured.
"""
with clib.Session() as lib:
try:
lib.call_module("info", "bogus-data.bla")
except GMTCLibError as error:
assert "Module 'info' failed with status code" in str(error)
assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)
def test_method_no_session():
"""
Fails when not in a session.
"""
# Create an instance of Session without "with" so no session is created.
lib = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
lib.call_module("gmtdefaults", "")
with pytest.raises(GMTCLibNoSessionError):
lib.session_pointer # pylint: disable=pointless-statement
def test_parse_constant_single():
"""
Parsing a single family argument correctly.
"""
lib = clib.Session()
for family in FAMILIES:
parsed = lib._parse_constant(family, valid=FAMILIES)
assert parsed == lib[family]
def test_parse_constant_composite():
"""
Parsing a composite constant argument (separated by |) correctly.
"""
lib = clib.Session()
test_cases = ((family, via) for family in FAMILIES for via in VIAS)
for family, via in test_cases:
composite = "|".join([family, via])
expected = lib[family] + lib[via]
parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS)
assert parsed == expected
def test_parse_constant_fails():
"""
Check if the function fails when given bad input.
"""
lib = clib.Session()
test_cases = [
"SOME_random_STRING",
"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR",
"GMT_IS_DATASET|NOT_A_PROPER_VIA",
"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX",
"NOT_A_PROPER_FAMILY|ALSO_INVALID",
]
for test_case in test_cases:
with pytest.raises(GMTInvalidInput):
lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)
# Should also fail if not given valid modifiers but is using them anyway.
# This should work...
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS
)
# But this shouldn't.
with pytest.raises(GMTInvalidInput):
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None
)
def test_create_data_dataset():
"""
Run the function to make sure it doesn't fail badly.
"""
with clib.Session() as lib:
# Dataset from vectors
data_vector = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0], # columns, rows, layers, dtype
)
# Dataset from matrices
data_matrix = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_MATRIX",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
assert data_vector != data_matrix
def test_create_data_grid_dim():
"""
Create a grid ignoring range and inc.
"""
with clib.Session() as lib:
# Grids from matrices using dim
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
def test_create_data_grid_range():
"""
Create a grid specifying range and inc instead of dim.
"""
with clib.Session() as lib:
        # Grids from matrices using range and inc
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
def test_create_data_fails():
"""
Check that create_data raises exceptions for invalid input and output.
"""
# Passing in invalid mode
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="Not_a_valid_mode",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# Passing in invalid geometry
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_GRID",
geometry="Not_a_valid_geometry",
mode="GMT_CONTAINER_ONLY",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# If the data pointer returned is None (NULL pointer)
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
with mock(lib, "GMT_Create_Data", returns=None):
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[11, 10, 2, 0],
)
def test_virtual_file():
"""
Test passing in data via a virtual file with a Dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (5, 3)
for dtype in dtypes:
with clib.Session() as lib:
family = "GMT_IS_DATASET|GMT_VIA_MATRIX"
geometry = "GMT_IS_POINT"
dataset = lib.create_data(
family=family,
geometry=geometry,
mode="GMT_CONTAINER_ONLY",
dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype
)
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
lib.put_matrix(dataset, matrix=data)
# Add the dataset to a virtual file and pass it along to gmt info
vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset)
with lib.open_virtual_file(*vfargs) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtual_file_fails():
"""
Check that opening and closing virtual files raises an exception for non-
zero return codes.
"""
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IN|GMT_IS_REFERENCE",
None,
)
# Mock Open_VirtualFile to test the status check when entering the context.
# If the exception is raised, the code won't get to the closing of the
# virtual file.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
print("Should not get to this code")
# Test the status check when closing the virtual file
# Mock the opening to return 0 (success) so that we don't open a file that
# we won't close later.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
lib, "GMT_Close_VirtualFile", returns=1
):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
pass
print("Shouldn't get to this code either")
def test_virtual_file_bad_direction():
"""
Test passing an invalid direction argument.
"""
with clib.Session() as lib:
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IS_GRID", # The invalid direction argument
0,
)
with pytest.raises(GMTInvalidInput):
with lib.open_virtual_file(*vfargs):
print("This should have failed")
def test_virtualfile_from_vectors():
"""
Test the automation for transforming vectors to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 10
for dtype in dtypes:
x = np.arange(size, dtype=dtype)
y = np.arange(size, size * 2, 1, dtype=dtype)
z = np.arange(size * 2, size * 3, 1, dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
"""
Test passing in one column with string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
"""
Test passing in two columns of string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(
f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
)
assert output == expected
def test_virtualfile_from_vectors_transpose():
"""
Test transforming matrix columns to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(*data.T) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
)
expected = "{}\n".format(bounds)
assert output == expected
def test_virtualfile_from_vectors_diff_size():
"""
Test the function fails for arrays of different sizes.
"""
x = np.arange(5)
y = np.arange(6)
with clib.Session() as lib:
with pytest.raises(GMTInvalidInput):
with lib.virtualfile_from_vectors(x, y):
print("This should have failed")
def test_virtualfile_from_matrix():
"""
Test transforming a matrix to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtualfile_from_matrix_slice():
"""
Test transforming a slice of a larger array to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (10, 6)
for dtype in dtypes:
full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
rows = 5
cols = 3
data = full_data[:rows, :cols]
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds)
assert output == expected
def test_virtualfile_from_vectors_pandas():
"""
Pass vectors to a dataset using pandas Series.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 13
for dtype in dtypes:
data = pd.DataFrame(
data=dict(
x=np.arange(size, dtype=dtype),
y=np.arange(size, size * 2, 1, dtype=dtype),
z=np.arange(size * 2, size * 3, 1, dtype=dtype),
)
)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
[
"<{:.0f}/{:.0f}>".format(i.min(), i.max())
for i in (data.x, data.y, data.z)
]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_virtualfile_from_vectors_arraylike():
"""
Pass array-like vectors to a dataset.
"""
size = 13
x = list(range(0, size, 1))
y = tuple(range(size, size * 2, 1))
z = range(size * 2, size * 3, 1)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_extract_region_fails():
"""
Check that extract region fails if nothing has been plotted.
"""
Figure()
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
lib.extract_region()
def test_extract_region_two_figures():
"""
Extract region should handle multiple figures existing at the same time.
"""
# Make two figures before calling extract_region to make sure that it's
# getting from the current figure, not the last figure.
fig1 = Figure()
region1 = np.array([0, 10, -20, -10])
fig1.coast(region=region1, projection="M6i", frame=True, land="black")
fig2 = Figure()
fig2.basemap(region="US.HI+r5", projection="M6i", frame=True)
# Activate the first figure and extract the region from it
# Use in a different session to avoid any memory problems.
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig1._name))
with clib.Session() as lib:
wesn1 = lib.extract_region()
npt.assert_allclose(wesn1, region1)
# Now try it with the second one
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig2._name))
with clib.Session() as lib:
wesn2 = lib.extract_region()
npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))
def test_write_data_fails():
"""
Check that write data raises an exception for non-zero return codes.
"""
# It's hard to make the C API function fail without causing a Segmentation
    # Fault. Can't test this by giving a bad file name because if
# output=='', GMT will just write to stdout and spaces are valid file
# names. Use a mock instead just to exercise this part of the code.
with clib.Session() as lib:
with mock(lib, "GMT_Write_Data", returns=1):
with pytest.raises(GMTCLibError):
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
[1] * 6,
"some-file-name",
None,
)
def test_dataarray_to_matrix_works():
"""
Check that dataarray_to_matrix returns correct output.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flipud(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])
def test_dataarray_to_matrix_negative_x_increment():
"""
Check if dataarray_to_matrix returns correct output with flipped x.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1)))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=data)
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_x_and_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped x/y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.fliplr(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_dims_fails():
"""
Check that it fails for > 2 dims.
"""
# Make a 3D regular grid
data = np.ones((10, 12, 11), dtype="float32")
x = np.arange(11)
y = np.arange(12)
z = np.arange(10)
grid = xr.DataArray(data, coords=[("z", z), ("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_dataarray_to_matrix_inc_fails():
"""
Check that it fails for variable increments.
"""
data = np.ones((4, 5), dtype="float64")
x = np.linspace(0, 1, 5)
y = np.logspace(2, 3, 4)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_get_default():
"""
Make sure get_default works without crashing and gives reasonable results.
"""
with clib.Session() as lib:
assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"]
assert int(lib.get_default("API_CORES")) >= 1
assert Version(lib.get_default("API_VERSION")) >= Version("6.2.0")
def test_get_default_fails():
"""
Make sure get_default raises an exception for invalid names.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.get_default("NOT_A_VALID_NAME")
def test_info_dict():
"""
Make sure the clib.Session.info dict is working.
"""
# Check if there are no errors or segfaults from getting all of the
# properties.
with clib.Session() as lib:
assert lib.info
# Mock GMT_Get_Default to return always the same string
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Put 'bla' in the value buffer.
"""
value.value = b"bla"
return 0
ses = clib.Session()
ses.create("test-session")
with mock(ses, "GMT_Get_Default", mock_func=mock_defaults):
# Check for an empty dictionary
assert ses.info
for key in ses.info:
assert ses.info[key] == "bla"
ses.destroy()
def test_fails_for_wrong_version():
"""
Make sure the clib.Session raises an exception if GMT is too old.
"""
# Mock GMT_Get_Default to return an old version
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Return an old version.
"""
if name == b"API_VERSION":
value.value = b"5.4.3"
else:
value.value = b"bla"
return 0
lib = clib.Session()
with mock(lib, "GMT_Get_Default", mock_func=mock_defaults):
with pytest.raises(GMTVersionError):
with lib:
assert lib.info["version"] != "5.4.3"
# Make sure the session is closed when the exception is raised.
with pytest.raises(GMTCLibNoSessionError):
assert lib.session_pointer
|
[
"pygmt.clib.Session",
"numpy.array",
"pygmt.Figure",
"numpy.arange",
"os.path.exists",
"numpy.flip",
"pygmt.helpers.GMTTempFile",
"numpy.testing.assert_allclose",
"pygmt.clib.conversion.dataarray_to_matrix",
"numpy.linspace",
"numpy.logspace",
"numpy.ones",
"numpy.flipud",
"numpy.fliplr",
"os.path.dirname",
"pytest.raises",
"os.path.join",
"pytest.mark.parametrize",
"packaging.version.Version",
"xarray.DataArray"
] |
[((14171, 14218), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[str, object]'], {}), "('dtype', [str, object])\n", (14194, 14218), False, 'import pytest\n'), ((14965, 15012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[str, object]'], {}), "('dtype', [str, object])\n", (14988, 15012), False, 'import pytest\n'), ((583, 608), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (598, 608), False, 'import os\n'), ((624, 638), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (636, 638), False, 'from pygmt import Figure, clib\n'), ((666, 695), 'packaging.version.Version', 'Version', (["_lib.info['version']"], {}), "(_lib.info['version'])\n", (673, 695), False, 'from packaging.version import Version\n'), ((1965, 1979), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (1977, 1979), False, 'from pygmt import Figure, clib\n'), ((2483, 2497), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (2495, 2497), False, 'from pygmt import Figure, clib\n'), ((2603, 2617), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (2615, 2617), False, 'from pygmt import Figure, clib\n'), ((2869, 2883), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (2881, 2883), False, 'from pygmt import Figure, clib\n'), ((3385, 3399), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (3397, 3399), False, 'from pygmt import Figure, clib\n'), ((3823, 3837), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (3835, 3837), False, 'from pygmt import Figure, clib\n'), ((4184, 4225), 'os.path.join', 'os.path.join', (['TEST_DATA_DIR', '"""points.txt"""'], {}), "(TEST_DATA_DIR, 'points.txt')\n", (4196, 4225), False, 'import os\n'), ((5590, 5604), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (5602, 5604), False, 'from pygmt import Figure, clib\n'), ((5919, 5933), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (5931, 5933), False, 'from pygmt import Figure, clib\n'), ((6195, 6209), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (6207, 6209), False, 'from pygmt import Figure, clib\n'), ((6638, 6652), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (6650, 6652), False, 'from pygmt import Figure, clib\n'), ((14416, 14447), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (14425, 14447), True, 'import numpy as np\n'), ((14456, 14500), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'np.int32'}), '(size, size * 2, 1, dtype=np.int32)\n', (14465, 14500), True, 'import numpy as np\n'), ((14515, 14578), 'numpy.array', 'np.array', (["['a', 'bc', 'defg', 'hijklmn', 'opqrst']"], {'dtype': 'dtype'}), "(['a', 'bc', 'defg', 'hijklmn', 'opqrst'], dtype=dtype)\n", (14523, 14578), True, 'import numpy as np\n'), ((15210, 15241), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'np.int32'}), '(size, dtype=np.int32)\n', (15219, 15241), True, 'import numpy as np\n'), ((15250, 15294), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'np.int32'}), '(size, size * 2, 1, dtype=np.int32)\n', (15259, 15294), True, 'import numpy as np\n'), ((15310, 15368), 'numpy.array', 'np.array', (["['a', 'bc', 'def', 'ghij', 'klmno']"], {'dtype': 'dtype'}), "(['a', 'bc', 'def', 'ghij', 'klmno'], dtype=dtype)\n", (15318, 15368), True, 'import numpy as np\n'), ((15384, 15442), 'numpy.array', 'np.array', (["['pqrst', 'uvwx', 'yz!', '@#', '$']"], {'dtype': 'dtype'}), "(['pqrst', 'uvwx', 'yz!', '@#', '$'], dtype=dtype)\n", (15392, 15442), True, 
'import numpy as np\n'), ((16822, 16834), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (16831, 16834), True, 'import numpy as np\n'), ((16843, 16855), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (16852, 16855), True, 'import numpy as np\n'), ((20704, 20712), 'pygmt.Figure', 'Figure', ([], {}), '()\n', (20710, 20712), False, 'from pygmt import Figure, clib\n'), ((21101, 21109), 'pygmt.Figure', 'Figure', ([], {}), '()\n', (21107, 21109), False, 'from pygmt import Figure, clib\n'), ((21124, 21151), 'numpy.array', 'np.array', (['[0, 10, -20, -10]'], {}), '([0, 10, -20, -10])\n', (21132, 21151), True, 'import numpy as np\n'), ((21239, 21247), 'pygmt.Figure', 'Figure', ([], {}), '()\n', (21245, 21247), False, 'from pygmt import Figure, clib\n'), ((22878, 22913), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(4)', 'num': '(3)'}), '(start=0, stop=4, num=3)\n', (22889, 22913), True, 'import numpy as np\n'), ((22922, 22957), 'numpy.linspace', 'np.linspace', ([], {'start': '(5)', 'stop': '(9)', 'num': '(3)'}), '(start=5, stop=9, num=3)\n', (22933, 22957), True, 'import numpy as np\n'), ((22969, 23016), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (22981, 23016), True, 'import xarray as xr\n'), ((23044, 23069), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (23063, 23069), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((23223, 23290), 'numpy.testing.assert_allclose', 'npt.assert_allclose', ([], {'actual': 'inc', 'desired': '[x[1] - x[0], y[1] - y[0]]'}), '(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])\n', (23242, 23290), True, 'import numpy.testing as npt\n'), ((23477, 23512), 'numpy.linspace', 'np.linspace', ([], {'start': '(4)', 'stop': '(0)', 'num': '(3)'}), '(start=4, stop=0, num=3)\n', (23488, 23512), True, 'import numpy as np\n'), ((23521, 23556), 'numpy.linspace', 'np.linspace', ([], {'start': '(5)', 'stop': '(9)', 'num': '(3)'}), '(start=5, stop=9, num=3)\n', (23532, 23556), True, 'import numpy as np\n'), ((23568, 23615), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (23580, 23615), True, 'import xarray as xr\n'), ((23643, 23668), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (23662, 23668), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((24099, 24134), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(4)', 'num': '(3)'}), '(start=0, stop=4, num=3)\n', (24110, 24134), True, 'import numpy as np\n'), ((24143, 24178), 'numpy.linspace', 'np.linspace', ([], {'start': '(9)', 'stop': '(5)', 'num': '(3)'}), '(start=9, stop=5, num=3)\n', (24154, 24178), True, 'import numpy as np\n'), ((24190, 24237), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (24202, 24237), True, 'import xarray as xr\n'), ((24265, 24290), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (24284, 24290), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((24295, 24343), 'numpy.testing.assert_allclose', 'npt.assert_allclose', ([], {'actual': 'matrix', 'desired': 'data'}), '(actual=matrix, desired=data)\n', (24314, 24343), True, 'import numpy.testing as npt\n'), ((24707, 24742), 'numpy.linspace', 'np.linspace', ([], {'start': '(4)', 'stop': '(0)', 
'num': '(3)'}), '(start=4, stop=0, num=3)\n', (24718, 24742), True, 'import numpy as np\n'), ((24751, 24786), 'numpy.linspace', 'np.linspace', ([], {'start': '(9)', 'stop': '(5)', 'num': '(3)'}), '(start=9, stop=5, num=3)\n', (24762, 24786), True, 'import numpy as np\n'), ((24798, 24845), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (24810, 24845), True, 'import xarray as xr\n'), ((24873, 24898), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (24892, 24898), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((25269, 25307), 'numpy.ones', 'np.ones', (['(10, 12, 11)'], {'dtype': '"""float32"""'}), "((10, 12, 11), dtype='float32')\n", (25276, 25307), True, 'import numpy as np\n'), ((25316, 25329), 'numpy.arange', 'np.arange', (['(11)'], {}), '(11)\n', (25325, 25329), True, 'import numpy as np\n'), ((25338, 25351), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (25347, 25351), True, 'import numpy as np\n'), ((25360, 25373), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (25369, 25373), True, 'import numpy as np\n'), ((25385, 25442), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('z', z), ('y', y), ('x', x)]"}), "(data, coords=[('z', z), ('y', y), ('x', x)])\n", (25397, 25442), True, 'import xarray as xr\n'), ((25638, 25670), 'numpy.ones', 'np.ones', (['(4, 5)'], {'dtype': '"""float64"""'}), "((4, 5), dtype='float64')\n", (25645, 25670), True, 'import numpy as np\n'), ((25679, 25699), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (25690, 25699), True, 'import numpy as np\n'), ((25708, 25728), 'numpy.logspace', 'np.logspace', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (25719, 25728), True, 'import numpy as np\n'), ((25740, 25787), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "[('y', y), ('x', x)]"}), "(data, coords=[('y', y), ('x', x)])\n", (25752, 25787), True, 'import xarray as xr\n'), ((26949, 26963), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26961, 26963), False, 'from pygmt import Figure, clib\n'), ((27665, 27679), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (27677, 27679), False, 'from pygmt import Figure, clib\n'), ((2164, 2191), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (2177, 2191), False, 'import pytest\n'), ((3654, 3681), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (3667, 3681), False, 'import pytest\n'), ((3847, 3883), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (3860, 3883), False, 'import pytest\n'), ((4274, 4288), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (4286, 4288), False, 'from pygmt import Figure, clib\n'), ((4704, 4718), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (4716, 4718), False, 'from pygmt import Figure, clib\n'), ((4919, 4933), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (4931, 4933), False, 'from pygmt import Figure, clib\n'), ((5137, 5151), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (5149, 5151), False, 'from pygmt import Figure, clib\n'), ((5614, 5650), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (5627, 5650), False, 'import pytest\n'), ((5704, 5740), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (5717, 5740), False, 'import pytest\n'), ((7305, 7335), 
'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (7318, 7335), False, 'import pytest\n'), ((7574, 7588), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (7586, 7588), False, 'from pygmt import Figure, clib\n'), ((8254, 8268), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (8266, 8268), False, 'from pygmt import Figure, clib\n'), ((8632, 8646), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (8644, 8646), False, 'from pygmt import Figure, clib\n'), ((9105, 9135), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (9118, 9135), False, 'import pytest\n'), ((9502, 9532), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (9515, 9532), False, 'import pytest\n'), ((9929, 9956), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (9942, 9956), False, 'import pytest\n'), ((12110, 12124), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (12122, 12124), False, 'from pygmt import Figure, clib\n'), ((12498, 12512), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (12510, 12512), False, 'from pygmt import Figure, clib\n'), ((12909, 12923), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (12921, 12923), False, 'from pygmt import Figure, clib\n'), ((13498, 13526), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (13507, 13526), True, 'import numpy as np\n'), ((13539, 13580), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'dtype'}), '(size, size * 2, 1, dtype=dtype)\n', (13548, 13580), True, 'import numpy as np\n'), ((13593, 13638), 'numpy.arange', 'np.arange', (['(size * 2)', '(size * 3)', '(1)'], {'dtype': 'dtype'}), '(size * 2, size * 3, 1, dtype=dtype)\n', (13602, 13638), True, 'import numpy as np\n'), ((14588, 14602), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (14600, 14602), False, 'from pygmt import Figure, clib\n'), ((15452, 15466), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (15464, 15466), False, 'from pygmt import Figure, clib\n'), ((16865, 16879), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (16877, 16879), False, 'from pygmt import Figure, clib\n'), ((20106, 20120), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (20118, 20120), False, 'from pygmt import Figure, clib\n'), ((20722, 20749), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (20735, 20749), False, 'import pytest\n'), ((21450, 21464), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21462, 21464), False, 'from pygmt import Figure, clib\n'), ((21543, 21557), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21555, 21557), False, 'from pygmt import Figure, clib\n'), ((21611, 21646), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['wesn1', 'region1'], {}), '(wesn1, region1)\n', (21630, 21646), True, 'import numpy.testing as npt\n'), ((21694, 21708), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21706, 21708), False, 'from pygmt import Figure, clib\n'), ((21787, 21801), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (21799, 21801), False, 'from pygmt import Figure, clib\n'), ((22345, 22359), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (22357, 22359), False, 'from pygmt import Figure, clib\n'), ((25452, 25482), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (25465, 25482), False, 'import pytest\n'), ((25492, 25517), 
'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (25511, 25517), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((25797, 25827), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (25810, 25827), False, 'import pytest\n'), ((25837, 25862), 'pygmt.clib.conversion.dataarray_to_matrix', 'dataarray_to_matrix', (['grid'], {}), '(grid)\n', (25856, 25862), False, 'from pygmt.clib.conversion import dataarray_to_matrix\n'), ((25993, 26007), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26005, 26007), False, 'from pygmt import Figure, clib\n'), ((26340, 26354), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26352, 26354), False, 'from pygmt import Figure, clib\n'), ((26645, 26659), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (26657, 26659), False, 'from pygmt import Figure, clib\n'), ((27942, 27978), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (27955, 27978), False, 'import pytest\n'), ((2921, 2957), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (2934, 2957), False, 'import pytest\n'), ((3143, 3179), 'pytest.raises', 'pytest.raises', (['GMTCLibNoSessionError'], {}), '(GMTCLibNoSessionError)\n', (3156, 3179), False, 'import pytest\n'), ((3469, 3496), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (3482, 3496), False, 'import pytest\n'), ((4005, 4032), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (4018, 4032), False, 'import pytest\n'), ((4310, 4323), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (4321, 4323), False, 'from pygmt.helpers import GMTTempFile\n'), ((4442, 4472), 'os.path.exists', 'os.path.exists', (['out_fname.name'], {}), '(out_fname.name)\n', (4456, 4472), False, 'import os\n'), ((4740, 4767), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (4753, 4767), False, 'import pytest\n'), ((4955, 4982), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (4968, 4982), False, 'import pytest\n'), ((6943, 6973), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (6956, 6973), False, 'import pytest\n'), ((9150, 9164), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (9162, 9164), False, 'from pygmt import Figure, clib\n'), ((9547, 9561), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (9559, 9561), False, 'from pygmt import Figure, clib\n'), ((9971, 9985), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (9983, 9985), False, 'from pygmt import Figure, clib\n'), ((10510, 10524), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (10522, 10524), False, 'from pygmt import Figure, clib\n'), ((12192, 12219), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (12205, 12219), False, 'import pytest\n'), ((12641, 12668), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (12654, 12668), False, 'import pytest\n'), ((13123, 13153), 'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (13136, 13153), False, 'import pytest\n'), ((13652, 13666), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (13664, 13666), False, 'from pygmt import Figure, clib\n'), ((16200, 16214), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (16212, 16214), False, 'from pygmt import Figure, clib\n'), ((16901, 16931), 
'pytest.raises', 'pytest.raises', (['GMTInvalidInput'], {}), '(GMTInvalidInput)\n', (16914, 16931), False, 'import pytest\n'), ((17341, 17355), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (17353, 17355), False, 'from pygmt import Figure, clib\n'), ((18268, 18282), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (18280, 18282), False, 'from pygmt import Figure, clib\n'), ((19263, 19277), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (19275, 19277), False, 'from pygmt import Figure, clib\n'), ((20764, 20778), 'pygmt.clib.Session', 'clib.Session', ([], {}), '()\n', (20776, 20778), False, 'from pygmt import Figure, clib\n'), ((21882, 21920), 'numpy.array', 'np.array', (['[-165.0, -150.0, 15.0, 25.0]'], {}), '([-165.0, -150.0, 15.0, 25.0])\n', (21890, 21920), True, 'import numpy as np\n'), ((22856, 22868), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (22865, 22868), True, 'import numpy as np\n'), ((23117, 23132), 'numpy.flipud', 'np.flipud', (['data'], {}), '(data)\n', (23126, 23132), True, 'import numpy as np\n'), ((23455, 23467), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (23464, 23467), True, 'import numpy as np\n'), ((23716, 23742), 'numpy.flip', 'np.flip', (['data'], {'axis': '(0, 1)'}), '(data, axis=(0, 1))\n', (23723, 23742), True, 'import numpy as np\n'), ((24077, 24089), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (24086, 24089), True, 'import numpy as np\n'), ((24685, 24697), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (24694, 24697), True, 'import numpy as np\n'), ((24946, 24961), 'numpy.fliplr', 'np.fliplr', (['data'], {}), '(data)\n', (24955, 24961), True, 'import numpy as np\n'), ((26201, 26217), 'packaging.version.Version', 'Version', (['"""6.2.0"""'], {}), "('6.2.0')\n", (26208, 26217), False, 'from packaging.version import Version\n'), ((26376, 26403), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (26389, 26403), False, 'import pytest\n'), ((27757, 27787), 'pytest.raises', 'pytest.raises', (['GMTVersionError'], {}), '(GMTVersionError)\n', (27770, 27787), False, 'import pytest\n'), ((14695, 14708), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (14706, 14708), False, 'from pygmt.helpers import GMTTempFile\n'), ((15570, 15583), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (15581, 15583), False, 'from pygmt.helpers import GMTTempFile\n'), ((16128, 16171), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (16137, 16171), True, 'import numpy as np\n'), ((17269, 17312), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (17278, 17312), True, 'import numpy as np\n'), ((18123, 18166), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (18132, 18166), True, 'import numpy as np\n'), ((20207, 20220), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (20218, 20220), False, 'from pygmt.helpers import GMTTempFile\n'), ((22438, 22465), 'pytest.raises', 'pytest.raises', (['GMTCLibError'], {}), '(GMTCLibError)\n', (22451, 22465), False, 'import pytest\n'), ((10885, 10928), 'numpy.arange', 'np.arange', (['(shape[0] * shape[1])'], {'dtype': 'dtype'}), '(shape[0] * shape[1], dtype=dtype)\n', (10894, 10928), True, 'import numpy as np\n'), ((11226, 11239), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (11237, 11239), False, 'from pygmt.helpers 
import GMTTempFile\n'), ((13761, 13774), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (13772, 13774), False, 'from pygmt.helpers import GMTTempFile\n'), ((16309, 16322), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (16320, 16322), False, 'from pygmt.helpers import GMTTempFile\n'), ((17446, 17459), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (17457, 17459), False, 'from pygmt.helpers import GMTTempFile\n'), ((18373, 18386), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (18384, 18386), False, 'from pygmt.helpers import GMTTempFile\n'), ((19387, 19400), 'pygmt.helpers.GMTTempFile', 'GMTTempFile', ([], {}), '()\n', (19398, 19400), False, 'from pygmt.helpers import GMTTempFile\n'), ((19070, 19098), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (19079, 19098), True, 'import numpy as np\n'), ((19118, 19159), 'numpy.arange', 'np.arange', (['size', '(size * 2)', '(1)'], {'dtype': 'dtype'}), '(size, size * 2, 1, dtype=dtype)\n', (19127, 19159), True, 'import numpy as np\n'), ((19179, 19224), 'numpy.arange', 'np.arange', (['(size * 2)', '(size * 3)', '(1)'], {'dtype': 'dtype'}), '(size * 2, size * 3, 1, dtype=dtype)\n', (19188, 19224), True, 'import numpy as np\n')]
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
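    # Worked example (illustrative): for RangeIndex(0, 20, 3), whose elements are
    # 0, 3, ..., 18, _min_fitting_element(7) returns 9 and
    # _max_fitting_element(7) returns 6.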
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
        Extended Euclidean algorithm to solve Bezout's identity:
           a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
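    # Worked example (illustrative): _extended_gcd(9, 6) returns (3, 1, -1),
    # since 9 * 1 + 6 * (-1) == 3 == gcd(9, 6).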
def _union(self, other: Index, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self.rename(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
if not isinstance(overlap, RangeIndex):
# We won't end up with RangeIndex, so fall back
return super()._difference(other, sort=sort)
if overlap.step != first.step:
# In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super()._difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
elif len(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
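    # Worked example (illustrative): RangeIndex(0, 10, 2) // 2 keeps the lazy
    # representation and equals RangeIndex(start=0, stop=5, step=1), while
    # RangeIndex(0, 10, 2) // 3 falls back to the materialized Int64Index path
    # because 2 % 3 != 0.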
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return op(self._int64index, other)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
other = extract_array(other, extract_numpy=True, extract_range=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
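# --------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of pandas): shows the
# memory saving of RangeIndex over a materialized integer index and a set
# operation that stays a RangeIndex.
if __name__ == "__main__":
    import pandas as pd

    lazy = pd.RangeIndex(0, 1_000_000, 2, name="even")
    dense = pd.Index(list(range(0, 1_000_000, 2)))
    # Only start/stop/step are stored, so nbytes is independent of the length.
    print(lazy.nbytes, dense.nbytes)
    # A union of step-compatible, overlapping ranges is again a RangeIndex.
    print(lazy.union(pd.RangeIndex(1_000_000, 2_000_000, 2)))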
|
[
"pandas.core.construction.extract_array",
"pandas.compat.numpy.function.validate_argsort",
"pandas.core.dtypes.common.is_scalar",
"numpy.arange",
"pandas.compat.numpy.function.validate_min",
"pandas.core.indexes.numeric.Int64Index",
"pandas.core.common.any_not_none",
"sys.getsizeof",
"numpy.asarray",
"pandas.core.indexes.base.maybe_extract_name",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.common.ensure_platform_int",
"pandas.compat.numpy.function.validate_max",
"numpy.concatenate",
"warnings.warn",
"numpy.dtype",
"pandas.core.indexes.base.default_pprint",
"pandas.core.dtypes.common.is_signed_integer_dtype",
"pandas.core.common.all_none",
"pandas.compat.numpy.function.validate_minmax_axis",
"pandas.core.dtypes.common.is_float",
"pandas.util._decorators.doc",
"pandas.core.dtypes.common.ensure_python_int",
"typing.cast",
"pandas.core.ops.get_op_result_name",
"pandas.core.ops.common.unpack_zerodim_and_defer",
"pandas.core.indexes.numeric.Float64Index",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"numpy.errstate",
"pandas.core.indexes.numeric.Int64Index._simple_new"
] |
[((10265, 10288), 'pandas.util._decorators.doc', 'doc', (['Int64Index.get_loc'], {}), '(Int64Index.get_loc)\n', (10268, 10288), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((12844, 12868), 'pandas.util._decorators.doc', 'doc', (['Int64Index.__iter__'], {}), '(Int64Index.__iter__)\n', (12847, 12868), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((12930, 12959), 'pandas.util._decorators.doc', 'doc', (['Int64Index._shallow_copy'], {}), '(Int64Index._shallow_copy)\n', (12933, 12959), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((13411, 13431), 'pandas.util._decorators.doc', 'doc', (['Int64Index.copy'], {}), '(Int64Index.copy)\n', (13414, 13431), False, 'from pandas.util._decorators import cache_readonly, doc\n'), ((27681, 27721), 'pandas.core.ops.common.unpack_zerodim_and_defer', 'unpack_zerodim_and_defer', (['"""__floordiv__"""'], {}), "('__floordiv__')\n", (27705, 27721), False, 'from pandas.core.ops.common import unpack_zerodim_and_defer\n'), ((2834, 2870), 'pandas.core.indexes.base.maybe_extract_name', 'maybe_extract_name', (['name', 'start', 'cls'], {}), '(name, start, cls)\n', (2852, 2870), False, 'from pandas.core.indexes.base import maybe_extract_name\n'), ((3113, 3144), 'pandas.core.common.all_none', 'com.all_none', (['start', 'stop', 'step'], {}), '(start, stop, step)\n', (3125, 3144), True, 'import pandas.core.common as com\n'), ((4958, 5017), 'numpy.arange', 'np.arange', (['self.start', 'self.stop', 'self.step'], {'dtype': 'np.int64'}), '(self.start, self.stop, self.step, dtype=np.int64)\n', (4967, 5017), True, 'import numpy as np\n'), ((5102, 5152), 'pandas.core.indexes.numeric.Int64Index._simple_new', 'Int64Index._simple_new', (['self._data'], {'name': 'self.name'}), '(self._data, name=self.name)\n', (5124, 5152), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((9495, 9513), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (9503, 9513), True, 'import numpy as np\n'), ((10990, 11032), 'pandas.core.common.any_not_none', 'com.any_not_none', (['method', 'tolerance', 'limit'], {}), '(method, tolerance, limit)\n', (11006, 11032), True, 'import pandas.core.common as com\n'), ((11675, 11693), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (11685, 11693), True, 'import numpy as np\n'), ((12035, 12060), 'pandas.core.dtypes.common.ensure_platform_int', 'ensure_platform_int', (['locs'], {}), '(locs)\n', (12054, 12060), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((13187, 13228), 'pandas.core.indexes.numeric.Int64Index._simple_new', 'Int64Index._simple_new', (['values'], {'name': 'name'}), '(values, name=name)\n', (13209, 13228), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((14454, 14483), 'pandas.compat.numpy.function.validate_minmax_axis', 'nv.validate_minmax_axis', (['axis'], {}), '(axis)\n', (14477, 14483), True, 'from pandas.compat.numpy import function as nv\n'), ((14492, 14521), 'pandas.compat.numpy.function.validate_min', 'nv.validate_min', (['args', 'kwargs'], {}), '(args, kwargs)\n', (14507, 14521), True, 'from pandas.compat.numpy import function as nv\n'), ((14691, 14720), 'pandas.compat.numpy.function.validate_minmax_axis', 'nv.validate_minmax_axis', (['axis'], {}), '(axis)\n', (14714, 14720), True, 'from pandas.compat.numpy import function as nv\n'), ((14729, 
14758), 'pandas.compat.numpy.function.validate_max', 'nv.validate_max', (['args', 'kwargs'], {}), '(args, kwargs)\n', (14744, 14758), True, 'from pandas.compat.numpy import function as nv\n'), ((15157, 15190), 'pandas.compat.numpy.function.validate_argsort', 'nv.validate_argsort', (['args', 'kwargs'], {}), '(args, kwargs)\n', (15176, 15190), True, 'from pandas.compat.numpy import function as nv\n'), ((22152, 22187), 'pandas.core.ops.get_op_result_name', 'ops.get_op_result_name', (['self', 'other'], {}), '(self, other)\n', (22174, 22187), False, 'from pandas.core import ops\n'), ((24717, 24748), 'typing.cast', 'cast', (['List[RangeIndex]', 'indexes'], {}), '(List[RangeIndex], indexes)\n', (24721, 24748), False, 'from typing import TYPE_CHECKING, Any, Callable, Hashable, List, cast\n'), ((30176, 30236), 'pandas.core.construction.extract_array', 'extract_array', (['other'], {'extract_numpy': '(True)', 'extract_range': '(True)'}), '(other, extract_numpy=True, extract_range=True)\n', (30189, 30236), False, 'from pandas.core.construction import extract_array\n'), ((3239, 3263), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['start'], {}), '(start)\n', (3256, 3263), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((3386, 3409), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['stop'], {}), '(stop)\n', (3403, 3409), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((3426, 3449), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['step'], {}), '(step)\n', (3443, 3449), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((8712, 8726), 'sys.getsizeof', 'getsizeof', (['rng'], {}), '(rng)\n', (8721, 8726), False, 'from sys import getsizeof\n'), ((9976, 9998), 'pandas.core.dtypes.common.ensure_python_int', 'ensure_python_int', (['key'], {}), '(key)\n', (9993, 9998), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((11459, 11490), 'pandas.core.dtypes.common.is_signed_integer_dtype', 'is_signed_integer_dtype', (['target'], {}), '(target)\n', (11482, 11490), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((13140, 13171), 'pandas.core.indexes.numeric.Float64Index', 'Float64Index', (['values'], {'name': 'name'}), '(values, name=name)\n', (13152, 13171), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((13731, 13888), 'warnings.warn', 'warnings.warn', (['"""parameter dtype is deprecated and will be removed in a future version. Use the astype method instead."""', 'FutureWarning'], {'stacklevel': '(2)'}), "(\n 'parameter dtype is deprecated and will be removed in a future version. 
Use the astype method instead.'\n , FutureWarning, stacklevel=2)\n", (13744, 13888), False, 'import warnings\n'), ((26791, 26806), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['key'], {}), '(key)\n', (26801, 26806), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((27769, 27786), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['other'], {}), '(other)\n', (27779, 27786), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((10410, 10425), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['key'], {}), '(key)\n', (10420, 10425), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((27097, 27111), 'pandas.core.dtypes.common.is_scalar', 'is_scalar', (['key'], {}), '(key)\n', (27106, 27111), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((29580, 29607), 'pandas.core.dtypes.common.is_timedelta64_dtype', 'is_timedelta64_dtype', (['other'], {}), '(other)\n', (29600, 29607), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((30749, 30774), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (30760, 30774), True, 'import numpy as np\n'), ((6075, 6106), 'pandas.core.indexes.base.default_pprint', 'ibase.default_pprint', (['self.name'], {}), '(self.name)\n', (6095, 6106), True, 'import pandas.core.indexes.base as ibase\n'), ((10430, 10443), 'pandas.core.dtypes.common.is_float', 'is_float', (['key'], {}), '(key)\n', (10438, 10443), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((25726, 25774), 'numpy.concatenate', 'np.concatenate', (['[x._values for x in rng_indexes]'], {}), '([x._values for x in rng_indexes])\n', (25740, 25774), True, 'import numpy as np\n'), ((30415, 30440), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (30426, 30440), True, 'import numpy as np\n'), ((25329, 25377), 'numpy.concatenate', 'np.concatenate', (['[x._values for x in rng_indexes]'], {}), '([x._values for x in rng_indexes])\n', (25343, 25377), True, 'import numpy as np\n'), ((25407, 25425), 'pandas.core.indexes.numeric.Int64Index', 'Int64Index', (['values'], {}), '(values)\n', (25417, 25425), False, 'from pandas.core.indexes.numeric import Float64Index, Int64Index, NumericIndex\n'), ((30609, 30626), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['rstep'], {}), '(rstep)\n', (30619, 30626), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n'), ((31134, 31147), 'pandas.core.dtypes.common.is_integer', 'is_integer', (['x'], {}), '(x)\n', (31144, 31147), False, 'from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype\n')]
|
import cv2, time
import numpy as np
import Tkinter
from PIL import Image, ImageTk  # needed by imshow() below
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""
def resize(*args, **kwargs):
return cv2.resize(*args, **kwargs)
def moveWindow(*args,**kwargs):
return
def imshow(root, args, kwargs):
    # args is expected to be the BGR frame to display; render it in a Tk label.
    image = cv2.cvtColor(args, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    image = ImageTk.PhotoImage(image)
    return Tkinter.Label(root, image=image).pack()
#return cv2.imshow(*args,**kwargs)
def destroyWindow(*args,**kwargs):
return cv2.destroyWindow(*args,**kwargs)
def waitKey(*args,**kwargs):
return cv2.waitKey(*args,**kwargs)
"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
"""Stack images horizontally.
"""
h = max(left.shape[0], right.shape[0])
w = left.shape[1] + right.shape[1]
hoff = left.shape[0]
shape = list(left.shape)
shape[0] = h
shape[1] = w
comb = np.zeros(tuple(shape),left.dtype)
# left will be on left, aligned top, with right on right
comb[:left.shape[0],:left.shape[1]] = left
comb[:right.shape[0],left.shape[1]:] = right
return comb
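# Worked example (illustrative): combining a (100, 50) left image with an
# (80, 60) right image yields a (100, 110) canvas; the shorter image is
# zero-padded at the bottom.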
def plotXY(data,size = (280,640),margin = 25,name = "data",labels=[], skip = [],
showmax = [], bg = None,label_ndigits = [], showmax_digits=[]):
for x,y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0]/float(n_plots)
z = np.zeros((size[0],size[1],3))
if isinstance(bg,np.ndarray):
wd = int(bg.shape[1]/bg.shape[0]*h )
bg = cv2.resize(bg,(wd,int(h)))
if len(bg.shape) == 3:
r = combine(bg[:,:,0],z[:,:,0])
g = combine(bg[:,:,1],z[:,:,1])
b = combine(bg[:,:,2],z[:,:,2])
else:
r = combine(bg,z[:,:,0])
g = combine(bg,z[:,:,1])
b = combine(bg,z[:,:,2])
z = cv2.merge([r,g,b])[:,:-wd,]
i = 0
P = []
for x,y in data:
x = np.array(x)
y = -np.array(y)
xx = (w-2*margin)*(x - x.min()) / (x.max() - x.min())+margin
yy = (h-2*margin)*(y - y.min()) / (y.max() - y.min())+margin + i*h
mx = max(yy)
if labels:
if labels[i]:
for ii in range(len(x)):
if ii%skip[i] == 0:
col = (255,255,255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z,ss,(int(xx[ii]),int((i+1)*h)),
cv2.FONT_HERSHEY_PLAIN,1,col)
if showmax:
if showmax[i]:
col = (0,255,0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
#"%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z,ss,(int(xx[ii]),int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN,2,col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx,yy)],np.int32)
i+=1
P.append(pts)
except ValueError:
pass #temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
#hack-y alternative:
for p in P:
for i in range(len(p)-1):
cv2.line(z,tuple(p[i]),tuple(p[i+1]), (255,255,255),1)
return z
#cv2.imshow(name,z)
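# Minimal usage sketch (illustrative only; the sine wave is synthetic data):
# plotXY returns a BGR image that the caller is responsible for displaying.
if __name__ == "__main__":
    ts = list(np.arange(0.0, 10.0, 0.05))
    vals = list(np.sin(ts))
    canvas = plotXY([[ts, vals]], size=(280, 640), labels=[True], skip=[20],
                    showmax=["peak"], label_ndigits=[1], showmax_digits=[2])
    cv2.imshow("plotXY demo", canvas.astype(np.uint8))
    cv2.waitKey(0)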
|
[
"Tkinter.Label",
"cv2.merge",
"cv2.destroyWindow",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"cv2.cvtColor",
"cv2.resize",
"cv2.waitKey"
] |
[((468, 495), 'cv2.resize', 'cv2.resize', (['*args'], {}), '(*args, **kwargs)\n', (478, 495), False, 'import cv2, time\n'), ((583, 628), 'cv2.cvtColor', 'cv2.cvtColor', (['output_frame', 'cv2.COLOR_BGR2RGB'], {}), '(output_frame, cv2.COLOR_BGR2RGB)\n', (595, 628), False, 'import cv2, time\n'), ((845, 879), 'cv2.destroyWindow', 'cv2.destroyWindow', (['*args'], {}), '(*args, **kwargs)\n', (862, 879), False, 'import cv2, time\n'), ((920, 948), 'cv2.waitKey', 'cv2.waitKey', (['*args'], {}), '(*args, **kwargs)\n', (931, 948), False, 'import cv2, time\n'), ((2127, 2158), 'numpy.zeros', 'np.zeros', (['(size[0], size[1], 3)'], {}), '((size[0], size[1], 3))\n', (2135, 2158), True, 'import numpy as np\n'), ((2672, 2683), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2680, 2683), True, 'import numpy as np\n'), ((713, 746), 'Tkinter.Label', 'Tkinter.Label', (['root'], {'image': 'kwargs'}), '(root, image=kwargs)\n', (726, 746), False, 'import Tkinter\n'), ((2581, 2601), 'cv2.merge', 'cv2.merge', (['[r, g, b]'], {}), '([r, g, b])\n', (2590, 2601), False, 'import cv2, time\n'), ((2697, 2708), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2705, 2708), True, 'import numpy as np\n'), ((3409, 3422), 'numpy.argmax', 'np.argmax', (['(-y)'], {}), '(-y)\n', (3418, 3422), True, 'import numpy as np\n')]
|
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WSC"
def get_char_index(text, span_text, span_index):
tokens = text.replace("\n", " ").lower().split(" ")
span_tokens = span_text.replace("\n", " ").lower().split(" ")
# Token exact match
if tokens[span_index : span_index + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(span_text)
return st, ed
if span_index < len(tokens):
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_text.startswith(char_in_span):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
# ed = st + len(char_in_span)
ed = st + len(char_in_text)
return st, ed
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_span.startswith(char_in_text):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(char_in_text)
return st, ed
# Index out of range
if span_index >= len(tokens):
span_index -= 10
# Token fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx : idx + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(span_text)
return st, ed
# Token best fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx] == span_tokens[0]:
for length in range(1, len(span_tokens)):
if tokens[idx : idx + length] != span_tokens[:length]:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(" ".join(span_tokens[: length - 1]))
return st, ed
return None
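# Worked example (illustrative):
# get_char_index("The city council refused", "city council", 1) returns
# (4, 16), the character span of "city council" inside the text.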
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence text
sentences = []
# span1
span1s = []
# span2
span2s = []
# span1 idx
span1_idxs = []
# span2 idx
span2_idxs = []
# label
labels = []
token1_idxs = []
token2_idxs = []
xlnet_tokens = []
xlnet_token_ids = []
xlnet_token_masks = []
xlnet_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
text = row["text"]
span1_text = row["target"]["span1_text"]
span2_text = row["target"]["span2_text"]
span1_index = row["target"]["span1_index"]
span2_index = row["target"]["span2_index"]
label = row["label"] if "label" in row else True
span1_char_index = get_char_index(text, span1_text, span1_index)
span2_char_index = get_char_index(text, span2_text, span2_index)
        assert span1_char_index is not None, f"Check example {index} in {jsonl_path}"
        assert span2_char_index is not None, f"Check example {index} in {jsonl_path}"
# Tokenize sentences
xlnet_tokens_sub1 = tokenizer.tokenize(
text[: min(span1_char_index[0], span2_char_index[0])]
)
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
token1_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
else:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
token2_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
sub3_st = (
span1_char_index[1]
if span1_char_index[0] < span2_char_index[0]
else span2_char_index[1]
)
sub3_ed = (
span1_char_index[0]
if span1_char_index[0] > span2_char_index[0]
else span2_char_index[0]
)
xlnet_tokens_sub3 = tokenizer.tokenize(text[sub3_st:sub3_ed])
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token2_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
else:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token1_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span2_char_index[1] :])
else:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span1_char_index[1] :])
tokens = (
["[CLS]"]
+ xlnet_tokens_sub1
+ xlnet_tokens_sub2
+ xlnet_tokens_sub3
+ xlnet_tokens_sub4
+ xlnet_tokens_sub5
+ ["[SEP]"]
)
if len(tokens) > max_len:
max_len = len(tokens)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_segments = [0] * len(token_ids)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
token1_idxs.append(token1_idx)
token2_idxs.append(token2_idx)
sentences.append(text)
span1s.append(span1_text)
span2s.append(span2_text)
span1_idxs.append(span1_index)
span2_idxs.append(span2_index)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
xlnet_tokens.append(tokens)
xlnet_token_ids.append(torch.LongTensor(token_ids))
xlnet_token_masks.append(torch.LongTensor(token_masks))
xlnet_token_segments.append(torch.LongTensor(token_segments))
token1_idxs = torch.from_numpy(np.array(token1_idxs))
token2_idxs = torch.from_numpy(np.array(token2_idxs))
labels = torch.from_numpy(np.array(labels))
logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence": sentences,
"span1": span1s,
"span2": span2s,
"span1_idx": span1_idxs,
"span2_idx": span2_idxs,
"token1_idx": token1_idxs,
"token2_idx": token2_idxs,
"tokens": xlnet_tokens,
"token_ids": xlnet_token_ids,
"token_masks": xlnet_token_masks,
"token_segments": xlnet_token_segments,
},
Y_dict={"labels": labels},
)
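# Minimal usage sketch (illustrative only): the tokenizer class, checkpoint name
# and jsonl path below are assumptions, not taken from this repository.
if __name__ == "__main__":
    from pytorch_transformers import XLNetTokenizer  # assumed dependency

    tokenizer = XLNetTokenizer.from_pretrained("xlnet-large-cased")
    wsc_dataset = parse(
        "WSC/val.jsonl", tokenizer, max_data_samples=8, max_sequence_length=128
    )
    print(wsc_dataset)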
|
[
"logging.getLogger",
"json.loads",
"snorkel.mtl.data.MultitaskDataset",
"torch.LongTensor",
"numpy.array",
"sys.path.append"
] |
[((167, 188), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (182, 188), False, 'import sys\n'), ((249, 276), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (266, 276), False, 'import logging\n'), ((7278, 7658), 'snorkel.mtl.data.MultitaskDataset', 'MultitaskDataset', ([], {'name': '"""SuperGLUE"""', 'X_dict': "{'sentence': sentences, 'span1': span1s, 'span2': span2s, 'span1_idx':\n span1_idxs, 'span2_idx': span2_idxs, 'token1_idx': token1_idxs,\n 'token2_idx': token2_idxs, 'tokens': xlnet_tokens, 'token_ids':\n xlnet_token_ids, 'token_masks': xlnet_token_masks, 'token_segments':\n xlnet_token_segments}", 'Y_dict': "{'labels': labels}"}), "(name='SuperGLUE', X_dict={'sentence': sentences, 'span1':\n span1s, 'span2': span2s, 'span1_idx': span1_idxs, 'span2_idx':\n span2_idxs, 'token1_idx': token1_idxs, 'token2_idx': token2_idxs,\n 'tokens': xlnet_tokens, 'token_ids': xlnet_token_ids, 'token_masks':\n xlnet_token_masks, 'token_segments': xlnet_token_segments}, Y_dict={\n 'labels': labels})\n", (7294, 7658), False, 'from snorkel.mtl.data import MultitaskDataset\n'), ((2503, 2518), 'json.loads', 'json.loads', (['row'], {}), '(row)\n', (2513, 2518), False, 'import json\n'), ((7091, 7112), 'numpy.array', 'np.array', (['token1_idxs'], {}), '(token1_idxs)\n', (7099, 7112), True, 'import numpy as np\n'), ((7149, 7170), 'numpy.array', 'np.array', (['token2_idxs'], {}), '(token2_idxs)\n', (7157, 7170), True, 'import numpy as np\n'), ((7203, 7219), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7211, 7219), True, 'import numpy as np\n'), ((6892, 6919), 'torch.LongTensor', 'torch.LongTensor', (['token_ids'], {}), '(token_ids)\n', (6908, 6919), False, 'import torch\n'), ((6954, 6983), 'torch.LongTensor', 'torch.LongTensor', (['token_masks'], {}), '(token_masks)\n', (6970, 6983), False, 'import torch\n'), ((7021, 7053), 'torch.LongTensor', 'torch.LongTensor', (['token_segments'], {}), '(token_segments)\n', (7037, 7053), False, 'import torch\n')]
|
__all__ = ['imread', 'imsave']
import numpy as np
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str or file
File name or file-like-object.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
        Additional keyword arguments to pass through.
Notes
-----
Files are read using the Python Imaging Library.
See PIL docs [1]_ for a list of supported formats.
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if isinstance(fname, str):
with open(fname, 'rb') as f:
im = Image.open(f)
            return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
else:
im = Image.open(fname)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
def pil_to_ndarray(image, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
try:
# this will raise an IOError if the file is not readable
image.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s'
% (image.filename, pillow_error_message, site))
raise ValueError(error_message)
frames = []
grayscale = None
i = 0
while 1:
try:
image.seek(i)
except EOFError:
break
frame = image
if img_num is not None and img_num != i:
image.getdata()[0]
i += 1
continue
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
dtype = 'uint16'
if image.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(image)
if grayscale:
frame = image.convert('L')
else:
if image.format == 'PNG' and 'transparency' in image.info:
frame = image.convert('RGBA')
else:
frame = image.convert('RGB')
elif image.mode == '1':
frame = image.convert('L')
elif 'A' in image.mode:
frame = image.convert('RGBA')
elif image.mode == 'CMYK':
frame = image.convert('RGB')
if image.mode.startswith('I;16'):
shape = image.size
dtype = '>u2' if image.mode.endswith('B') else '<u2'
if 'S' in image.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(image, 'fp') and image.fp:
image.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
elif img_num:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop + 1]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
format_str: str
Format to save as, this is defaulted to PNG if using a file-like
object; this will be derived from the extension if fname is a string
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
Use the Python Imaging Library.
See PIL docs [1]_ for a list of other supported formats.
    All images besides single channel PNGs are converted using `img_as_ubyte`.
    Single Channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> img_as_ubyte
    - Floating point and other integers -> img_as_uint
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, str) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, str)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr)
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
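# Illustrative round trip (doctest-style sketch; 'demo.png' is a scratch file,
# not part of this plugin):
#     >>> rgb = (np.random.rand(32, 32, 3) * 255).astype(np.uint8)
#     >>> imsave('demo.png', rgb)
#     >>> np.array_equal(imread('demo.png'), rgb)
#     True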
|
[
"PIL.Image.open",
"PIL.Image.new",
"numpy.diff",
"numpy.asanyarray",
"numpy.array",
"PIL.Image.fromstring",
"PIL.Image.frombytes"
] |
[((7329, 7347), 'numpy.asanyarray', 'np.asanyarray', (['arr'], {}), '(arr)\n', (7342, 7347), True, 'import numpy as np\n'), ((1049, 1066), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (1059, 1066), False, 'from PIL import Image\n'), ((3457, 3473), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (3465, 3473), True, 'import numpy as np\n'), ((4327, 4349), 'numpy.diff', 'np.diff', (['valid_palette'], {}), '(valid_palette)\n', (4334, 4349), True, 'import numpy as np\n'), ((5189, 5222), 'PIL.Image.new', 'Image.new', (['mode_base', 'arr.T.shape'], {}), '(mode_base, arr.T.shape)\n', (5198, 5222), False, 'from PIL import Image\n'), ((944, 957), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (954, 957), False, 'from PIL import Image\n'), ((3204, 3232), 'numpy.array', 'np.array', (['frame'], {'dtype': 'dtype'}), '(frame, dtype=dtype)\n', (3212, 3232), True, 'import numpy as np\n'), ((5476, 5524), 'PIL.Image.frombytes', 'Image.frombytes', (['mode', 'image_shape', 'array_buffer'], {}), '(mode, image_shape, array_buffer)\n', (5491, 5524), False, 'from PIL import Image\n'), ((5573, 5622), 'PIL.Image.fromstring', 'Image.fromstring', (['mode', 'image_shape', 'array_buffer'], {}), '(mode, image_shape, array_buffer)\n', (5589, 5622), False, 'from PIL import Image\n')]
|
# This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline
# The point of this script is to do link prediction
# Imports and aliases
import pickle
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import cProfile
import pandas as pd
import datetime
from scipy.sparse import csr_matrix
import os.path
import embedding_help_functions as ehf
import scipy.io as sio
unsq = t.unsqueeze
sq = t.squeeze
# Settings
alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95]
no_layers = 1
dataset = "OTC" # OTC or Alpha
no_epochs = 1000
mat_f_name = "saved_content_bitcoin_otc.mat"
no_trials = 1
beta1 = 19
beta2 = 19
cutoff = 95
eval_type = "MAP-MRR" # "MAP-MRR" or "F1"
data_loc = "data/Bitcoin_" + dataset + "/"
S_train, S_val, S_test = 95, 20, 20
lr = 0.01
momentum = 0.9
# Load and return relevant data
A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)
# Create features for the nodes
X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False)
# Extract edges and labels from A_labels, and augment with nonexisting edges
# edges, beta
edges = A_labels._indices()
edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff)
# Divide adjacency matrices and labels into training, validation and testing sets
edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False)
if no_trials > 1:
ep_acc_loss_vec = []
for tr in range(no_trials):
for alpha in alpha_vec:
class_weights = t.tensor([alpha, 1.0-alpha])
save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction"
# Create gcn for training
if no_layers == 2:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu")
elif no_layers == 1:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2])
# Train
optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target)
if eval_type == "F1":
ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test)
elif eval_type == "MAP-MRR":
ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test)
for ep in range(no_epochs):
# Compute loss and take step
optimizer.zero_grad()
output_train = gcn()
loss_train = criterion(output_train, target_train[edges_train[0]!=0])
loss_train.backward()
optimizer.step()
# Things that don't require gradient
with t.no_grad():
if ep % 100 == 0:
# Compute stats for training data; no point in doing more often than this
guess_train = t.argmax(output_train, dim=1)
if eval_type == "F1":
precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0])
elif eval_type == "MAP-MRR":
MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0])
# Compute stats for validation data
output_val = gcn(C_val[:-1], X_val[:-1], e_val)
guess_val = t.argmax(output_val, dim=1)
if eval_type == "F1":
precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0])
elif eval_type == "MAP-MRR":
MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0])
loss_val = criterion(output_val, target_val[edges_val[0]!=0])
# Compute stats for test data
output_test = gcn(C_test[:-1], X_test[:-1], e_test)
guess_test = t.argmax(output_test, dim=1)
if eval_type == "F1":
precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0])
elif eval_type == "MAP-MRR":
MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0])
loss_test = criterion(output_test, target_test[edges_test[0]!=0])
# Print
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep)
elif eval_type == "MAP-MRR":
print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train))
print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val))
print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test))
# Store values with results
if eval_type == "F1":
ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test]
elif eval_type == "MAP-MRR":
ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test]
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True)
elif eval_type == "MAP-MRR":
print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train))
print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val))
print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test))
if no_trials == 1:
pickle.dump(ep_acc_loss, open(save_res_fname, "wb"))
print("Results saved for single trial")
else:
ep_acc_loss_vec.append(ep_acc_loss)
if no_trials > 1:
pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb"))
print("Results saved for all trials")
|
[
"embedding_help_functions.load_data",
"embedding_help_functions.split_data",
"embedding_help_functions.compute_f1",
"embedding_help_functions.compute_MAP_MRR",
"torch.nn.CrossEntropyLoss",
"embedding_help_functions.create_node_features",
"torch.argmax",
"torch.tensor",
"numpy.zeros",
"embedding_help_functions.EmbeddingKWGCN",
"embedding_help_functions.print_f1",
"torch.no_grad",
"embedding_help_functions.augment_edges"
] |
[((1080, 1158), 'embedding_help_functions.load_data', 'ehf.load_data', (['data_loc', 'mat_f_name', 'S_train', 'S_val', 'S_test'], {'transformed': '(False)'}), '(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)\n', (1093, 1158), True, 'import embedding_help_functions as ehf\n'), ((1217, 1291), 'embedding_help_functions.create_node_features', 'ehf.create_node_features', (['A', 'S_train', 'S_val', 'S_test'], {'same_block_size': '(False)'}), '(A, S_train, S_val, S_test, same_block_size=False)\n', (1241, 1291), True, 'import embedding_help_functions as ehf\n'), ((1432, 1481), 'embedding_help_functions.augment_edges', 'ehf.augment_edges', (['edges', 'N', 'beta1', 'beta2', 'cutoff'], {}), '(edges, N, beta1, beta2, cutoff)\n', (1449, 1481), True, 'import embedding_help_functions as ehf\n'), ((1665, 1750), 'embedding_help_functions.split_data', 'ehf.split_data', (['edges_aug', 'labels', 'S_train', 'S_val', 'S_test'], {'same_block_size': '(False)'}), '(edges_aug, labels, S_train, S_val, S_test, same_block_size=False\n )\n', (1679, 1750), True, 'import embedding_help_functions as ehf\n'), ((1861, 1891), 'torch.tensor', 't.tensor', (['[alpha, 1.0 - alpha]'], {}), '([alpha, 1.0 - alpha])\n', (1869, 1891), True, 'import torch as t\n'), ((2368, 2409), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'class_weights'}), '(weight=class_weights)\n', (2387, 2409), True, 'import torch.nn as nn\n'), ((2097, 2184), 'embedding_help_functions.EmbeddingKWGCN', 'ehf.EmbeddingKWGCN', (['C_train[:-1]', 'X_train[:-1]', 'e_train', '[6, 6, 2]'], {'nonlin2': '"""selu"""'}), "(C_train[:-1], X_train[:-1], e_train, [6, 6, 2], nonlin2=\n 'selu')\n", (2115, 2184), True, 'import embedding_help_functions as ehf\n'), ((2486, 2511), 'numpy.zeros', 'np.zeros', (['(no_epochs, 12)'], {}), '((no_epochs, 12))\n', (2494, 2511), True, 'import numpy as np\n'), ((5624, 5806), 'embedding_help_functions.print_f1', 'ehf.print_f1', (['precision_train', 'recall_train', 'f1_train', 'loss_train', 'precision_val', 'recall_val', 'f1_val', 'loss_val', 'precision_test', 'recall_test', 'f1_test', 'loss_test'], {'is_final': '(True)'}), '(precision_train, recall_train, f1_train, loss_train,\n precision_val, recall_val, f1_val, loss_val, precision_test,\n recall_test, f1_test, loss_test, is_final=True)\n', (5636, 5806), True, 'import embedding_help_functions as ehf\n'), ((2210, 2273), 'embedding_help_functions.EmbeddingKWGCN', 'ehf.EmbeddingKWGCN', (['C_train[:-1]', 'X_train[:-1]', 'e_train', '[6, 2]'], {}), '(C_train[:-1], X_train[:-1], e_train, [6, 2])\n', (2228, 2273), True, 'import embedding_help_functions as ehf\n'), ((2709, 2733), 'numpy.zeros', 'np.zeros', (['(no_epochs, 9)'], {}), '((no_epochs, 9))\n', (2717, 2733), True, 'import numpy as np\n'), ((3108, 3119), 'torch.no_grad', 't.no_grad', ([], {}), '()\n', (3117, 3119), True, 'import torch as t\n'), ((3241, 3270), 'torch.argmax', 't.argmax', (['output_train'], {'dim': '(1)'}), '(output_train, dim=1)\n', (3249, 3270), True, 'import torch as t\n'), ((3684, 3711), 'torch.argmax', 't.argmax', (['output_val'], {'dim': '(1)'}), '(output_val, dim=1)\n', (3692, 3711), True, 'import torch as t\n'), ((4170, 4198), 'torch.argmax', 't.argmax', (['output_test'], {'dim': '(1)'}), '(output_test, dim=1)\n', (4178, 4198), True, 'import torch as t\n'), ((3346, 3408), 'embedding_help_functions.compute_f1', 'ehf.compute_f1', (['guess_train', 'target_train[edges_train[0] != 0]'], {}), '(guess_train, target_train[edges_train[0] != 0])\n', (3360, 3408), True, 'import 
embedding_help_functions as ehf\n'), ((3781, 3837), 'embedding_help_functions.compute_f1', 'ehf.compute_f1', (['guess_val', 'target_val[edges_val[0] != 0]'], {}), '(guess_val, target_val[edges_val[0] != 0])\n', (3795, 3837), True, 'import embedding_help_functions as ehf\n'), ((4271, 4330), 'embedding_help_functions.compute_f1', 'ehf.compute_f1', (['guess_test', 'target_test[edges_test[0] != 0]'], {}), '(guess_test, target_test[edges_test[0] != 0])\n', (4285, 4330), True, 'import embedding_help_functions as ehf\n'), ((4605, 4787), 'embedding_help_functions.print_f1', 'ehf.print_f1', (['precision_train', 'recall_train', 'f1_train', 'loss_train', 'precision_val', 'recall_val', 'f1_val', 'loss_val', 'precision_test', 'recall_test', 'f1_test', 'loss_test', 'alpha', 'tr', 'ep'], {}), '(precision_train, recall_train, f1_train, loss_train,\n precision_val, recall_val, f1_val, loss_val, precision_test,\n recall_test, f1_test, loss_test, alpha, tr, ep)\n', (4617, 4787), True, 'import embedding_help_functions as ehf\n'), ((3470, 3579), 'embedding_help_functions.compute_MAP_MRR', 'ehf.compute_MAP_MRR', (['output_train', 'target_train[edges_train[0] != 0]', 'edges_train[:, edges_train[0] != 0]'], {}), '(output_train, target_train[edges_train[0] != 0],\n edges_train[:, edges_train[0] != 0])\n', (3489, 3579), True, 'import embedding_help_functions as ehf\n'), ((3895, 3994), 'embedding_help_functions.compute_MAP_MRR', 'ehf.compute_MAP_MRR', (['output_val', 'target_val[edges_val[0] != 0]', 'edges_val[:, edges_val[0] != 0]'], {}), '(output_val, target_val[edges_val[0] != 0], edges_val[:,\n edges_val[0] != 0])\n', (3914, 3994), True, 'import embedding_help_functions as ehf\n'), ((4390, 4494), 'embedding_help_functions.compute_MAP_MRR', 'ehf.compute_MAP_MRR', (['output_test', 'target_test[edges_test[0] != 0]', 'edges_test[:, edges_test[0] != 0]'], {}), '(output_test, target_test[edges_test[0] != 0],\n edges_test[:, edges_test[0] != 0])\n', (4409, 4494), True, 'import embedding_help_functions as ehf\n')]
|
from __future__ import division
from timeit import default_timer as timer
import csv
import numpy as np
import itertools
from munkres import Munkres, print_matrix, make_cost_matrix
import sys
from classes import *
from functions import *
from math import sqrt
import Tkinter as tk
import tkFileDialog as filedialog
root = tk.Tk()
root.withdraw()
p_file = filedialog.askopenfilename(title='Please select the posting file')
c_file = filedialog.askopenfilename(title='Please select the candidate file')
"""for use with /users/java_jonathan/postings_lge.csv and
/Users/java_jonathan/candidates_lge.csv"""
# p_file = raw_input("Please enter the path for the postings file: ")
# p_file = p_file.strip()
# c_file = raw_input("Please enter the path for the candidate file: ")
# c_file = c_file.strip()
start = timer()
with open(p_file,'r') as f:
#with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f:
reader = csv.reader(f)
postingsAll = list(reader)
with open(c_file,'r') as f:
reader = csv.reader(f)
candidatesAll = list(reader)
"""create empty lists to fill with lists of lists output by iterating function
below"""
names = []
totalMatrix = []
for candidate_row in candidatesAll:
    candidate = Candidate(*candidate_row)
    names.append(candidate.name)
    n = 0
    for posting_row in postingsAll:
        posting = Posting(*posting_row)
        totalMatrix.append(matchDept(posting, candidate) + matchAnchor(posting, candidate)
                           + matchLocation(posting, candidate) + matchCompetency(posting, candidate)
                           + matchSkill(posting, candidate) + matchCohort(posting, candidate))
        n += 1
l = len(names)
names.extend([0] * (n-l))
totalMatrix.extend([0] * (n**2 - len(totalMatrix)))
totalMatrix = np.asarray(totalMatrix)
totalMatrix = np.reshape(totalMatrix,(n,-1))
#at this point the matrix is structured as candidates down and jobs across
totalMatrix = np.transpose(totalMatrix)
#now it's switched!
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix)
totalMatrix = np.array(totalMatrix)
minSuitability = 18
check = []
result = []
m = Munkres()
indexes = m.compute(totalMatrix)
#print_matrix(totalMatrix, msg='Lowest cost through this matrix:')
total = 0.0
unhappy_candidates = 0
medium_candidates = 0
tenpc_candidates = 0
qs_candidates = 0
vs_candidates = 0
f = open('output.txt', 'w')
for row, column in indexes:
if column < l:
value = totalMatrix[row][column]
if value > minSuitability*0.9:
tenpc_candidates += 1
elif value > minSuitability*0.75:
medium_candidates += 1
elif value > minSuitability/2:
unhappy_candidates += 1
elif value > minSuitability*0.25:
qs_candidates += 1
elif value > minSuitability*0.1:
vs_candidates += 1
total += value
check.append(column+1)
result.append((row,column))
f.write('For candidate %s: \nOptimal position: %d (score %s)\n'
% (names[column], column+1, value))
else:
pass
globalSatisfaction = 100*(1-(total/(l*minSuitability)))
print('Global satisfaction: %.2f%%' % globalSatisfaction)
print('Candidates who are more than 90%% suitable: %d' % vs_candidates)
print('Candidates who are more than 75%% suitable: %d' % qs_candidates)
print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates))
print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates)
print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates)
#output from excel:
correct = [1,3,5,9,10,2,4,8,6,7]
#this function tests output above against Excel:
#test(correct,check)
topMatrix = topFive(names,totalMatrix)
#print(topMatrix)
np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
end = timer()
print(end-start)
"""
#posting = [Posting(*postingsAll)]
#print(posting[0].anchor)
#print(posting)
#print(candidatesAll)
#print(postingsAll)
#print(postingsAll[0].name)
#print(preferences)
#print(postings)
#split up files into relative blocks
postCode = [lists[0] for lists in postings]
postDept = [lists[1] for lists in postings]
postAnchor = [lists[2] for lists in postings]
postSkills = [lists[3:5] for lists in postings]
postLocation = [lists[5] for lists in postings]
postCompetencies = [lists[7:10] for lists in postings]
postSecurity = [lists[10] for lists in postings]
#with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f:
#gives first column ie candidate a
a=totalMatrix[:,[0]]
#b = totalMatrix[:,[0]]
#print(a)
#converts 1D matrix to list for ease
a = np.array(a).tolist()
#print(a)
#creates list called output containing rank of score
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
#creates tuples of rank, job and appends to list
jobRank = []
# for rank, b in zip(output, postCode):
# jobScore = (rank,b)
# list(jobScore)
# jobRank.append(jobScore)
# print(jobRank)
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
# #print(a)
# jobRank = sorted(jobRank, reverse=False)
# print(jobRank)
# print('For candidate a, the best position is %s') % (jobRank[0][1])
# print(candidate[0].skills)
"""
|
[
"numpy.reshape",
"numpy.amax",
"timeit.default_timer",
"Tkinter.Tk",
"numpy.asarray",
"numpy.array",
"munkres.Munkres",
"tkFileDialog.askopenfilename",
"numpy.savetxt",
"numpy.transpose",
"csv.reader"
] |
[((323, 330), 'Tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (328, 330), True, 'import Tkinter as tk\n'), ((356, 422), 'tkFileDialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Please select the posting file"""'}), "(title='Please select the posting file')\n", (382, 422), True, 'import tkFileDialog as filedialog\n'), ((432, 500), 'tkFileDialog.askopenfilename', 'filedialog.askopenfilename', ([], {'title': '"""Please select the candidate file"""'}), "(title='Please select the candidate file')\n", (458, 500), True, 'import tkFileDialog as filedialog\n'), ((806, 813), 'timeit.default_timer', 'timer', ([], {}), '()\n', (811, 813), True, 'from timeit import default_timer as timer\n'), ((1712, 1735), 'numpy.asarray', 'np.asarray', (['totalMatrix'], {}), '(totalMatrix)\n', (1722, 1735), True, 'import numpy as np\n'), ((1751, 1783), 'numpy.reshape', 'np.reshape', (['totalMatrix', '(n, -1)'], {}), '(totalMatrix, (n, -1))\n', (1761, 1783), True, 'import numpy as np\n'), ((1871, 1896), 'numpy.transpose', 'np.transpose', (['totalMatrix'], {}), '(totalMatrix)\n', (1883, 1896), True, 'import numpy as np\n'), ((1991, 2012), 'numpy.array', 'np.array', (['totalMatrix'], {}), '(totalMatrix)\n', (1999, 2012), True, 'import numpy as np\n'), ((2060, 2069), 'munkres.Munkres', 'Munkres', ([], {}), '()\n', (2067, 2069), False, 'from munkres import Munkres, print_matrix, make_cost_matrix\n'), ((3680, 3815), 'numpy.savetxt', 'np.savetxt', (['"""/Users/java_jonathan/test.csv"""', 'topMatrix'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': '""""""', 'footer': '""""""', 'comments': '"""# """'}), "('/Users/java_jonathan/test.csv', topMatrix, fmt='%s', delimiter=\n ',', newline='\\n', header='', footer='', comments='# ')\n", (3690, 3815), True, 'import numpy as np\n'), ((3810, 3947), 'numpy.savetxt', 'np.savetxt', (['"""/Users/java_jonathan/test2.csv"""', 'totalMatrix'], {'fmt': '"""%s"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': '""""""', 'footer': '""""""', 'comments': '"""# """'}), "('/Users/java_jonathan/test2.csv', totalMatrix, fmt='%s',\n delimiter=',', newline='\\n', header='', footer='', comments='# ')\n", (3820, 3947), True, 'import numpy as np\n'), ((3949, 3956), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3954, 3956), True, 'from timeit import default_timer as timer\n'), ((932, 945), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (942, 945), False, 'import csv\n'), ((1019, 1032), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1029, 1032), False, 'import csv\n'), ((1943, 1963), 'numpy.amax', 'np.amax', (['totalMatrix'], {}), '(totalMatrix)\n', (1950, 1963), True, 'import numpy as np\n')]
|
import logging
import numpy
from ..Fragments import Fragments
from ..typing import SpectrumType
logger = logging.getLogger("matchms")
def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType:
"""Derive losses based on precursor mass.
Parameters
----------
spectrum_in:
Input spectrum.
loss_mz_from:
Minimum allowed m/z value for losses. Default is 0.0.
loss_mz_to:
Maximum allowed m/z value for losses. Default is 1000.0.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
precursor_mz = spectrum.get("precursor_mz", None)
if precursor_mz:
assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.",
"Consider applying 'add_precursor_mz' filter first.")
peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities
losses_mz = (precursor_mz - peaks_mz)[::-1]
losses_intensities = peaks_intensities[::-1]
# Add losses which are within given boundaries
mask = numpy.where((losses_mz >= loss_mz_from)
& (losses_mz <= loss_mz_to))
spectrum.losses = Fragments(mz=losses_mz[mask],
intensities=losses_intensities[mask])
else:
logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
return spectrum
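# --- Editor's usage sketch (not part of the original module) ---
# Illustrative, hedged call of add_losses; the Spectrum values are made up.
#
#   spectrum = Spectrum(mz=numpy.array([100.0, 150.0, 200.0]),
#                       intensities=numpy.array([0.7, 0.2, 0.1]),
#                       metadata={"precursor_mz": 445.0})
#   spectrum = add_losses(spectrum, loss_mz_from=10.0, loss_mz_to=400.0)
#   # spectrum.losses.mz is precursor_mz - peaks.mz in ascending order:
#   # array([245., 295., 345.])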
|
[
"logging.getLogger",
"numpy.where"
] |
[((107, 135), 'logging.getLogger', 'logging.getLogger', (['"""matchms"""'], {}), "('matchms')\n", (124, 135), False, 'import logging\n'), ((1156, 1224), 'numpy.where', 'numpy.where', (['((losses_mz >= loss_mz_from) & (losses_mz <= loss_mz_to))'], {}), '((losses_mz >= loss_mz_from) & (losses_mz <= loss_mz_to))\n', (1167, 1224), False, 'import numpy\n')]
|
import argparse
import glob
import os
import pickle
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
from src.align.align_trans import get_reference_facial_points, warp_and_crop_face
# sys.path.append("../../")
from src.align.detector import detect_faces
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="face alignment")
parser.add_argument(
"-source_root",
"--source_root",
help="specify your source dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-dest_root",
"--dest_root",
help="specify your destination dir",
default="../../data/fiw-videos/new-processed/",
type=str,
)
parser.add_argument(
"-crop_size",
"--crop_size",
help="specify size of aligned faces, align and crop with padding",
default=112,
type=int,
)
args = parser.parse_args()
source_root = args.source_root # specify your source dir
dest_root = args.dest_root # specify your destination dir
crop_size = (
args.crop_size
) # specify size of aligned faces, align and crop with padding
scale = crop_size / 112.0
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd() # delete '.DS_Store' existed in the source_root
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
imfiles = [
f
for f in glob.glob(f"{source_root}F????/MID*/faces/msceleb*")
if Path(f).is_file()
]
# images = {imfile.replace(source_root, ''): Image.open(imfile) for imfile in imfiles}
meta = {}
# for subfolder in tqdm(os.listdir(source_root)):
for imfile in tqdm(imfiles):
ref = imfile.replace(source_root, "")
print("Processing\t{}".format(imfile))
img = Image.open(imfile)
try: # Handle exception
bbs, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(imfile))
continue
ref = imfile.replace(source_root, "")
ndetections = len(landmarks)
if (
ndetections == 0
): # If the landmarks cannot be detected, the img will be discarded
print("{} is discarded due to non-detected landmarks!".format(imfile))
meta[ref] = []
continue
li_meta = []
for i in range(ndetections):
im_meta = {}
im_meta["face"] = i
im_meta["landmarks"] = landmarks[i]
im_meta["bb"] = bbs[i]
facial5points = [[landmarks[i][j], landmarks[i][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(
np.array(img),
facial5points,
reference,
crop_size=(crop_size, crop_size),
)
img_warped = Image.fromarray(warped_face)
image_name = imfile.replace("images", "cropped").replace(
".jpg", "-{:02d}.jpg".format(i)
)
# im_meta['ref'] = "/".join(image_name.split('/')[-5:])
img_warped.save(image_name)
li_meta.append(im_meta)
meta[ref] = li_meta
with open(source_root + "cropped-meta.pkl", "wb") as f:
pickle.dump(meta, f)
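    # --- Editor's note (not part of the original script) ---
    # The indexing landmarks[i][j], landmarks[i][j + 5] above implies that each
    # detected face's five landmarks are flattened as [x1..x5, y1..y5]; for a
    # made-up vector [30, 70, 50, 35, 65, 40, 40, 55, 75, 75] the resulting
    # facial5points would be [[30, 40], [70, 40], [50, 55], [35, 75], [65, 75]].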
|
[
"PIL.Image.fromarray",
"PIL.Image.open",
"pickle.dump",
"argparse.ArgumentParser",
"src.align.align_trans.get_reference_facial_points",
"pathlib.Path",
"src.align.detector.detect_faces",
"tqdm.tqdm",
"os.getcwd",
"os.chdir",
"numpy.array",
"os.system",
"glob.glob"
] |
[((338, 391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""face alignment"""'}), "(description='face alignment')\n", (361, 391), False, 'import argparse\n'), ((1351, 1362), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1360, 1362), False, 'import os\n'), ((1416, 1437), 'os.chdir', 'os.chdir', (['source_root'], {}), '(source_root)\n', (1424, 1437), False, 'import os\n'), ((1442, 1496), 'os.system', 'os.system', (['"""find . -name \'*.DS_Store\' -type f -delete"""'], {}), '("find . -name \'*.DS_Store\' -type f -delete")\n', (1451, 1496), False, 'import os\n'), ((1501, 1514), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (1509, 1514), False, 'import os\n'), ((1825, 1838), 'tqdm.tqdm', 'tqdm', (['imfiles'], {}), '(imfiles)\n', (1829, 1838), False, 'from tqdm import tqdm\n'), ((1283, 1331), 'src.align.align_trans.get_reference_facial_points', 'get_reference_facial_points', ([], {'default_square': '(True)'}), '(default_square=True)\n', (1310, 1331), False, 'from src.align.align_trans import get_reference_facial_points, warp_and_crop_face\n'), ((1947, 1965), 'PIL.Image.open', 'Image.open', (['imfile'], {}), '(imfile)\n', (1957, 1965), False, 'from PIL import Image\n'), ((3408, 3428), 'pickle.dump', 'pickle.dump', (['meta', 'f'], {}), '(meta, f)\n', (3419, 3428), False, 'import pickle\n'), ((1559, 1611), 'glob.glob', 'glob.glob', (['f"""{source_root}F????/MID*/faces/msceleb*"""'], {}), "(f'{source_root}F????/MID*/faces/msceleb*')\n", (1568, 1611), False, 'import glob\n'), ((2028, 2045), 'src.align.detector.detect_faces', 'detect_faces', (['img'], {}), '(img)\n', (2040, 2045), False, 'from src.align.detector import detect_faces\n'), ((3007, 3035), 'PIL.Image.fromarray', 'Image.fromarray', (['warped_face'], {}), '(warped_face)\n', (3022, 3035), False, 'from PIL import Image\n'), ((2845, 2858), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2853, 2858), True, 'import numpy as np\n'), ((1623, 1630), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (1627, 1630), False, 'from pathlib import Path\n')]
|
import numpy as np
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomNormal
from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D,
Cropping2D, Dense, Flatten, GlobalAveragePooling2D,
Input, Lambda, MaxPooling2D, Reshape, UpSampling2D,
ZeroPadding2D, ZeroPadding3D, add, concatenate)
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.models import Model
# Parameterized 2D Block Model
def BlockModel2D(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block CED model for segmentation problems
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
numBlocks: number of processing blocks. The larger the number the deeper the model
output_chan: number of output channels. Set if doing multi-class segmentation
regression: Whether to have a continuous output with linear activation
Returns:
        An uninitialized Keras model
    Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_clean_{}'.format(rr))(x)
skip_list.append(x)
# expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
# Parameterized 2D Block Model
def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3):
"""Creates a Block model for pretraining on classification task
Args:
input shape: a list or tuple of [rows,cols,channels] of input images
filt_num: the number of filters in the first and last layers
        This number is linearly increased and decreased throughout the model
numBlocks: number of processing blocks. The larger the number the deeper the model
output_chan: number of output channels. Set if doing multi-class segmentation
regression: Whether to have a continuous output with linear activation
Returns:
        An uninitialized Keras model
    Example usage: ClassModel = BlockModel_Classifier([256,256,1],filt_num=8)
Notes: Using rows/cols that are powers of 2 is recommended. Otherwise,
the rows/cols must be divisible by 2^numBlocks for skip connections
to match up properly
"""
use_bn = True
# check for input shape compatibility
rows, cols = input_shape[0:2]
assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible"
assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible"
# calculate size reduction
startsize = np.max(input_shape[0:2])
minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks
assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input"
# input layer
lay_input = Input(shape=input_shape, name='input_layer')
# contracting blocks
x = lay_input
skip_list = []
for rr in range(1, numBlocks+1):
x1 = Conv2D(filt_num*rr, (1, 1), padding='same',
name='Conv1_{}'.format(rr))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_x1_{}'.format(rr))(x1)
x3 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv3_{}'.format(rr))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_x3_{}'.format(rr))(x3)
x51 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv51_{}'.format(rr))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_x51_{}'.format(rr))(x51)
x52 = Conv2D(filt_num*rr, (3, 3), padding='same',
name='Conv52_{}'.format(rr))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_x52_{}'.format(rr))(x52)
x = concatenate([x1, x3, x52], name='merge_{}'.format(rr))
x = Conv2D(filt_num*rr, (1, 1), padding='valid',
name='ConvAll_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_all_{}'.format(rr))(x)
x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (4, 4), padding='valid',
strides=(2, 2), name='DownSample_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_downsample_{}'.format(rr))(x)
x = Conv2D(filt_num*rr, (3, 3), padding='same',
name='ConvClean_{}'.format(rr))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_skip_{}'.format(rr))(x)
# average pooling
x = GlobalAveragePooling2D()(x)
# classifier
lay_out = Dense(1, activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def ConvertEncoderToCED(model):
# Returns a model with frozen encoder layers
# and complimentary, unfrozen decoder layers
# get input layer
# model must be compiled again after using this function
lay_input = model.input
# get skip connection layer outputs
skip_list = [l.output for l in model.layers if 'skip' in l.name]
numBlocks = len(skip_list)
filt_num = int(skip_list[0].shape[-1])
x = model.layers[-3].output
# freeze encoder layers
for layer in model.layers:
layer.trainable = False
use_bn = True
# make expanding blocks
expnums = list(range(1, numBlocks+1))
expnums.reverse()
for dd in expnums:
if dd < len(skip_list):
x = concatenate([skip_list[dd-1], x],
name='skip_connect_{}'.format(dd))
x1 = Conv2D(filt_num*dd, (1, 1), padding='same',
name='DeConv1_{}'.format(dd))(x)
if use_bn:
x1 = BatchNormalization()(x1)
x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1)
x3 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv3_{}'.format(dd))(x)
if use_bn:
x3 = BatchNormalization()(x3)
x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3)
x51 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv51_{}'.format(dd))(x)
if use_bn:
x51 = BatchNormalization()(x51)
x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51)
x52 = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConv52_{}'.format(dd))(x51)
if use_bn:
x52 = BatchNormalization()(x52)
x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52)
x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd))
x = Conv2D(filt_num*dd, (1, 1), padding='valid',
name='DeConvAll_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dall_{}'.format(dd))(x)
x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean1_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean1_{}'.format(dd))(x)
x = Conv2D(filt_num*dd, (3, 3), padding='same',
name='DeConvClean2_{}'.format(dd))(x)
if use_bn:
x = BatchNormalization()(x)
x = ELU(name='elu_Dclean2_{}'.format(dd))(x)
# classifier
lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x)
return Model(lay_input, lay_out)
def Inception_model(input_shape=(299, 299, 3)):
incep_model = InceptionV3(
include_top=False, weights=None, input_shape=input_shape, pooling='avg')
input_layer = incep_model.input
incep_output = incep_model.output
# x = Conv2D(16, (3, 3), activation='relu')(incep_output)
# x = Flatten()(x)
x = Dense(1, activation='sigmoid')(incep_output)
return Model(inputs=input_layer, outputs=x)
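# --- Editor's usage sketch (not part of the original module) ---
# Minimal, hedged example of building and compiling the segmentation model
# defined above; the optimizer and loss are placeholders, not the authors'
# training configuration.
if __name__ == "__main__":
    demo_model = BlockModel2D((256, 256, 1), filt_num=8, numBlocks=3)
    demo_model.compile(optimizer='adam', loss='binary_crossentropy')
    demo_model.summary()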
|
[
"keras.layers.Conv2D",
"numpy.max",
"keras.layers.Dense",
"keras.layers.Input",
"keras.models.Model",
"keras.applications.inception_v3.InceptionV3",
"keras.layers.GlobalAveragePooling2D",
"keras.layers.BatchNormalization",
"numpy.arange"
] |
[((1786, 1810), 'numpy.max', 'np.max', (['input_shape[0:2]'], {}), '(input_shape[0:2])\n', (1792, 1810), True, 'import numpy as np\n'), ((2022, 2066), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input_layer"""'}), "(shape=input_shape, name='input_layer')\n", (2027, 2066), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5990, 6015), 'keras.models.Model', 'Model', (['lay_input', 'lay_out'], {}), '(lay_input, lay_out)\n', (5995, 6015), False, 'from keras.models import Model\n'), ((7306, 7330), 'numpy.max', 'np.max', (['input_shape[0:2]'], {}), '(input_shape[0:2])\n', (7312, 7330), True, 'import numpy as np\n'), ((7542, 7586), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""input_layer"""'}), "(shape=input_shape, name='input_layer')\n", (7547, 7586), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9558, 9583), 'keras.models.Model', 'Model', (['lay_input', 'lay_out'], {}), '(lay_input, lay_out)\n', (9563, 9583), False, 'from keras.models import Model\n'), ((12241, 12266), 'keras.models.Model', 'Model', (['lay_input', 'lay_out'], {}), '(lay_input, lay_out)\n', (12246, 12266), False, 'from keras.models import Model\n'), ((12335, 12423), 'keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'include_top': '(False)', 'weights': 'None', 'input_shape': 'input_shape', 'pooling': '"""avg"""'}), "(include_top=False, weights=None, input_shape=input_shape,\n pooling='avg')\n", (12346, 12423), False, 'from keras.applications.inception_v3 import InceptionV3\n'), ((12652, 12688), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'x'}), '(inputs=input_layer, outputs=x)\n', (12657, 12688), False, 'from keras.models import Model\n'), ((5914, 5974), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'activation': '"""sigmoid"""', 'name': '"""output_layer"""'}), "(1, (1, 1), activation='sigmoid', name='output_layer')\n", (5920, 5974), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9432, 9456), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (9454, 9456), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9491, 9542), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""output_layer"""'}), "(1, activation='sigmoid', name='output_layer')\n", (9496, 9542), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((12165, 12225), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1, 1)'], {'activation': '"""sigmoid"""', 'name': '"""output_layer"""'}), "(1, (1, 1), activation='sigmoid', name='output_layer')\n", (12171, 
12225), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((12596, 12626), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (12601, 12626), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2311, 2331), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2329, 2331), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2530, 2550), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2548, 2550), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2753, 2773), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2771, 2773), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((2982, 3002), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3000, 3002), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((3272, 3292), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3290, 3292), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((3582, 3602), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3600, 3602), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((3807, 3827), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3825, 3827), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4312, 4332), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4330, 4332), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4534, 4554), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4552, 4554), False, 'from keras.layers import 
BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4760, 4780), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4778, 4780), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((4992, 5012), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5010, 5012), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5286, 5306), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5304, 5306), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5580, 5600), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5598, 5600), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((5805, 5825), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5823, 5825), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((7831, 7851), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7849, 7851), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8050, 8070), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8068, 8070), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8273, 8293), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8291, 8293), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8502, 8522), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8520, 8522), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((8792, 8812), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (8810, 8812), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, 
GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9102, 9122), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9120, 9122), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((9327, 9347), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (9345, 9347), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((10563, 10583), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10581, 10583), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((10785, 10805), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (10803, 10805), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11011, 11031), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11029, 11031), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11243, 11263), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11261, 11263), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11537, 11557), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11555, 11557), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((11831, 11851), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (11849, 11851), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((12056, 12076), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (12074, 12076), False, 'from keras.layers import BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate\n'), ((1846, 1873), 'numpy.arange', 'np.arange', (['(1)', '(numBlocks + 1)'], {}), '(1, numBlocks + 1)\n', (1855, 1873), True, 'import numpy as np\n'), ((7366, 7393), 'numpy.arange', 'np.arange', (['(1)', '(numBlocks + 1)'], {}), '(1, numBlocks + 1)\n', (7375, 7393), True, 'import numpy 
as np\n')]
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import contorno
from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X
z_temp = contorno.p_3
TAMANHO_BARRA = 2
x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)
y = np.linspace(0.0, DELTA_T, PASSOS+1)
z = []
for k in range(PASSOS+1):
z_k = np.copy(z_temp)
z.append(z_k)
for i in range(1, INTERVALOS):
z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])
z = np.asarray(z)
x, y = np.meshgrid(x, y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('T(x,t)')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
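# --- Editor's note (not part of the original script) ---
# The update above is the explicit (FTCS) scheme for the 1-D heat equation with
# unit diffusivity: T[i] <- T[i] + r*(T[i+1] - 2*T[i] + T[i-1]), r = DELTA_T/DELTA_X**2.
# The scheme is numerically stable only for r <= 1/2; the check below just makes
# that assumption about the imported constants explicit.
stability_ratio = DELTA_T / (DELTA_X ** 2)
if stability_ratio > 0.5:
    print('Warning: DELTA_T/DELTA_X**2 = %.3f > 0.5; the explicit scheme may be unstable.' % stability_ratio)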
|
[
"numpy.copy",
"numpy.asarray",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"matplotlib.pyplot.show"
] |
[((264, 311), 'numpy.linspace', 'np.linspace', (['(0.0)', 'TAMANHO_BARRA', '(INTERVALOS + 1)'], {}), '(0.0, TAMANHO_BARRA, INTERVALOS + 1)\n', (275, 311), True, 'import numpy as np\n'), ((314, 351), 'numpy.linspace', 'np.linspace', (['(0.0)', 'DELTA_T', '(PASSOS + 1)'], {}), '(0.0, DELTA_T, PASSOS + 1)\n', (325, 351), True, 'import numpy as np\n'), ((551, 564), 'numpy.asarray', 'np.asarray', (['z'], {}), '(z)\n', (561, 564), True, 'import numpy as np\n'), ((572, 589), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (583, 589), True, 'import numpy as np\n'), ((597, 609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (607, 609), True, 'import matplotlib.pyplot as plt\n'), ((815, 825), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (823, 825), True, 'import matplotlib.pyplot as plt\n'), ((394, 409), 'numpy.copy', 'np.copy', (['z_temp'], {}), '(z_temp)\n', (401, 409), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
import theano
import theano.tensor as tt
# Don't import test classes otherwise they get tested as part of the file
from tests import unittest_tools as utt
from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name
from tests.tensor.test_basic import (
TestAlloc,
TestComparison,
TestJoinAndSplit,
TestReshape,
)
from tests.tensor.utils import rand, safe_make_node
from theano.gpuarray.basic_ops import (
GpuAlloc,
GpuAllocEmpty,
GpuContiguous,
GpuEye,
GpuFromHost,
GpuJoin,
GpuReshape,
GpuSplit,
GpuToGpu,
GpuTri,
HostFromGpu,
gpu_contiguous,
gpu_join,
host_from_gpu,
)
from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise
from theano.gpuarray.subtensor import GpuSubtensor
from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor
from theano.tensor import TensorType
from theano.tensor.basic import alloc
pygpu = pytest.importorskip("pygpu")
gpuarray = pygpu.gpuarray
utt.seed_rng()
rng = np.random.RandomState(seed=utt.fetch_seed())
def inplace_func(
inputs,
outputs,
mode=None,
allow_input_downcast=False,
on_unused_input="raise",
name=None,
):
if mode is None:
mode = mode_with_gpu
return theano.function(
inputs,
outputs,
mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input,
name=name,
)
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import scalar_constructor, tensor_constructor
for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor):
try:
return c(
value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs
)
except TypeError:
continue
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop("dtype", theano.config.floatX)
cls = kwargs.pop("cls", None)
if len(kwargs) != 0:
        raise TypeError("Unexpected argument %s" % list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name))
def makeTester(
name,
op,
gpu_op,
cases,
checks=None,
mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu,
skip=False,
eps=1e-10,
):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(utt.OptimizationTestMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setup_method(self):
eval(self.__class__.__module__ + "." + self.__class__.__name__)
def test_all(self):
if skip:
pytest.skip(skip)
for testname, inputs in cases.items():
for _ in range(len(inputs)):
if type(inputs[_]) is float:
inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX)
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while making " "a node with inputs %s"
) % (self.gpu_op, testname, inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = (
"Test %s::%s: Error occurred while trying to " "make a Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = (
"Test %s::%s: exception when calling the " "Function"
) % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = (
"Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)"
% (self.gpu_op, testname, type(exc), type(ref_e))
)
exc.args += (err_msg,)
raise
for i, (variable, expected) in enumerate(zip(variables, expecteds)):
condition = (
variable.dtype != expected.dtype
or variable.shape != expected.shape
or not TensorType.values_eq_approx(variable, expected)
)
assert not condition, (
"Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)."
% (
self.op,
testname,
i,
inputs,
expected,
expected.dtype,
variable,
variable.dtype,
)
)
for description, check in self.checks.items():
assert check(inputs, variables), (
"Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)"
) % (self.op, testname, description, inputs, variables)
Checker.__name__ = name
if hasattr(Checker, "__qualname__"):
Checker.__qualname__ = name
return Checker
def test_transfer_cpu_gpu():
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(
dtype="float32", broadcastable=(False, False), context_name=test_ctx_name
)()
av = np.asarray(rng.rand(5, 4), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding(
"cut_gpua_host_transfers", "local_cut_gpua_host_gpua"
)
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
# This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suite of tests to
# ensure correctness
a = tt.fmatrix("a")
g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g")
av = np.asarray(rng.rand(5, 8), dtype="float32")
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert np.all(fv == av)
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
TestGpuAlloc = makeTester(
name="GpuAllocTester",
# The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), np.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), np.int32(7)),
correct02=(rand(), np.int32(4), np.int32(7)),
correct12=(rand(7), np.int32(4), np.int32(7)),
correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)),
correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)),
bad_shape12=(rand(7), np.int32(7), np.int32(5)),
),
)
class TestGPUAlloc(TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()]
def test_alloc_empty():
for dt in ["float32", "int8"]:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function(
[],
[
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
GpuAllocEmpty("uint64", test_ctx_name)(3, 2),
],
)
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == "uint64"
assert out[1].shape == (3, 2)
assert out[1].dtype == "uint64"
assert (
len(
[
node
for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)
]
)
== 1
)
def test_shape():
x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
if theano.config.mode != "FAST_COMPILE":
assert len(topo) == 4
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.Shape_i)
assert isinstance(topo[3].op, tt.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert np.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, tt.Shape)
def test_gpu_contiguous():
a = tt.fmatrix("a")
i = tt.iscalar("i")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
    # The reshape is needed; otherwise the subtensor would be computed on the
    # CPU so that less data gets transferred.
f = theano.function(
[a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu
)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class TestGPUReshape(TestReshape):
def setup_method(self):
self.shared = gpuarray_shared_constructor
self.op = GpuReshape
self.mode = mode_with_gpu
self.ignore_topo = (
HostFromGpu,
GpuFromHost,
theano.compile.DeepCopyOp,
GpuDimShuffle,
GpuElemwise,
tt.opt.Shape_i,
tt.opt.MakeVector,
)
assert self.op == GpuReshape
class TestGPUComparison(TestComparison):
def setup_method(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ["float64", "float32"]
class TestGPUJoinAndSplit(TestJoinAndSplit):
def setup_method(self):
self.mode = mode_with_gpu.excluding("constant_folding")
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = "float32"
self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"]
def shared(x, **kwargs):
return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs)
self.shared = shared
def test_gpusplit_opt(self):
# Test that we move the node to the GPU
# Also test float16 computation at the same time.
rng = np.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype("float16"))
o = tt.Split(2)(m, 0, [2, 2])
assert o[0].dtype == "float16"
f = theano.function([], o, mode=self.mode)
assert any(
[
isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()
]
)
o1, o2 = f()
assert np.allclose(o1, m.get_value(borrow=True)[:2])
assert np.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = tt.fmatrix("a")
a_val = np.asarray(np.random.rand(4, 5), dtype="float32")
b = tt.fmatrix("b")
b_val = np.asarray(np.random.rand(3, 5), dtype="float32")
f = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu
)
f_gpu = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu
)
f_gpu2 = theano.function(
[a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu
)
assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1
assert (
sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()])
== 2
)
assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()])
for dtype in ["float32", "int32", "float16"]:
check(dtype, 3)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
def test_hostfromgpu_shape_i():
# Test that the shape is lifted over hostfromgpu
m = mode_with_gpu.including(
"local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize"
)
a = tt.fmatrix("a")
ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))()
av = np.asarray(np.random.rand(5, 4), dtype="float32")
cv = gpuarray.asarray(
np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name)
)
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, tt.opt.Shape_i)
assert isinstance(topo[1].op, tt.opt.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, tt.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
def test_Gpujoin_inplace():
# Test Gpujoin to work inplace.
#
# This function tests the case when several elements are passed to the
# Gpujoin function but all except one of them are empty. In this case
# Gpujoin should work inplace and the output should be the view of the
# non-empty element.
s = tt.lscalar()
data = np.array([3, 4, 5], dtype=theano.config.floatX)
x = gpuarray_shared_constructor(data, borrow=True)
z = tt.zeros((s,))
join = GpuJoin(view=0)
c = join(0, x, z)
f = theano.function([s], theano.Out(c, borrow=True))
if not isinstance(mode_with_gpu, theano.compile.DebugMode):
assert x.get_value(borrow=True, return_internal_type=True) is f(0)
assert np.allclose(f(0), [3, 4, 5])
def test_gpu_tril_triu():
def check_l(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.tril(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
def check_u(m, k=0):
m_symb = tt.matrix(dtype=m.dtype)
k_symb = tt.iscalar()
f = theano.function(
[m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu
)
result = f(m, k)
assert np.allclose(result, np.triu(m, k))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
utt.seed_rng()
test_rng = np.random.RandomState(seed=utt.fetch_seed())
for dtype in ["float64", "float32", "float16"]:
# try a big one
m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype)
check_l(m, 0)
check_l(m, 1)
check_l(m, -1)
check_u(m, 0)
check_u(m, 1)
check_u(m, -1)
def test_gputri():
def check(dtype, N, M_=None, k=0):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = tt.iscalar()
M_symb = tt.iscalar()
k_symb = tt.iscalar()
out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)
f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)
result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)
assert np.allclose(result, np.tri(N, M_, k, dtype=dtype))
assert result.dtype == np.dtype(dtype)
assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()])
for dtype in ["float64", "float32", "int32", "float16"]:
# try a big one
check(dtype, 1000, 1000, 0)
check(dtype, 1000, 1000, -400)
check(dtype, 1000, 1000, 400)
check(dtype, 5)
# M != N, k = 0
check(dtype, 3, 5)
check(dtype, 5, 3)
# N == M, k != 0
check(dtype, 3, 3, 1)
check(dtype, 3, 3, -1)
# N < M, k != 0
check(dtype, 3, 5, 1)
check(dtype, 3, 5, -1)
# N > M, k != 0
check(dtype, 5, 3, 1)
check(dtype, 5, 3, -1)
# k > M, -k > N, k > M, k > N
check(dtype, 5, 3, 3)
check(dtype, 3, 5, 3)
check(dtype, 5, 3, -3)
check(dtype, 3, 5, -3)
check(dtype, 5, 3, 6)
check(dtype, 3, 5, -6)
|
[
"tests.unittest_tools.seed_rng",
"theano.tensor.iscalar",
"theano.tensor.lscalar",
"numpy.random.rand",
"tests.gpuarray.config.mode_with_gpu.excluding",
"numpy.int32",
"theano.tensor.zeros_like",
"numpy.array",
"theano.gpuarray.type.gpuarray_shared_constructor",
"tests.unittest_tools.fetch_seed",
"theano.tensor.basic.alloc",
"theano.gpuarray.basic_ops.GpuAllocEmpty",
"theano.shared",
"theano.function",
"theano.gpuarray.basic_ops.GpuJoin",
"theano.gpuarray.type.GpuArrayType",
"numpy.asarray",
"theano.gpuarray.basic_ops.GpuToGpu",
"theano.tensor.zeros",
"theano.tensor.Split",
"theano.tensor.triu",
"numpy.dtype",
"numpy.triu",
"pytest.skip",
"theano.gpuarray.basic_ops.GpuFromHost",
"numpy.tri",
"theano.tensor.TensorType.values_eq_approx",
"numpy.eye",
"theano.tensor.tri",
"theano.gpuarray.basic_ops.host_from_gpu",
"theano.tensor.fmatrix",
"theano.gpuarray.basic_ops.GpuAlloc",
"theano.tensor.ones_like",
"theano.gpuarray.type.get_context",
"theano.tensor.tril",
"theano.tensor.matrix",
"tests.gpuarray.config.mode_with_gpu.including",
"tests.tensor.utils.safe_make_node",
"pytest.importorskip",
"theano.Out",
"theano.tensor.eye",
"numpy.tril",
"tests.tensor.utils.rand",
"numpy.all",
"theano.gpuarray.type.GpuArrayType.values_eq",
"theano.tensor.Alloc"
] |
[((987, 1015), 'pytest.importorskip', 'pytest.importorskip', (['"""pygpu"""'], {}), "('pygpu')\n", (1006, 1015), False, 'import pytest\n'), ((1043, 1057), 'tests.unittest_tools.seed_rng', 'utt.seed_rng', ([], {}), '()\n', (1055, 1057), True, 'from tests import unittest_tools as utt\n'), ((1309, 1470), 'theano.function', 'theano.function', (['inputs', 'outputs'], {'mode': 'mode', 'allow_input_downcast': 'allow_input_downcast', 'accept_inplace': '(True)', 'on_unused_input': 'on_unused_input', 'name': 'name'}), '(inputs, outputs, mode=mode, allow_input_downcast=\n allow_input_downcast, accept_inplace=True, on_unused_input=\n on_unused_input, name=name)\n', (1324, 1470), False, 'import theano\n'), ((6760, 6775), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (6770, 6775), True, 'import theano.tensor as tt\n'), ((7054, 7084), 'theano.gpuarray.type.GpuArrayType.values_eq', 'GpuArrayType.values_eq', (['fv', 'gv'], {}), '(fv, gv)\n', (7076, 7084), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7159, 7175), 'numpy.all', 'np.all', (['(fv == av)'], {}), '(fv == av)\n', (7165, 7175), True, 'import numpy as np\n'), ((7448, 7526), 'tests.gpuarray.config.mode_with_gpu.excluding', 'mode_with_gpu.excluding', (['"""cut_gpua_host_transfers"""', '"""local_cut_gpua_host_gpua"""'], {}), "('cut_gpua_host_transfers', 'local_cut_gpua_host_gpua')\n", (7471, 7526), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((7742, 7772), 'theano.gpuarray.type.GpuArrayType.values_eq', 'GpuArrayType.values_eq', (['fv', 'gv'], {}), '(fv, gv)\n', (7764, 7772), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7955, 7970), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (7965, 7970), True, 'import theano.tensor as tt\n'), ((8290, 8320), 'theano.gpuarray.type.GpuArrayType.values_eq', 'GpuArrayType.values_eq', (['fv', 'gv'], {}), '(fv, gv)\n', (8312, 8320), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((8395, 8411), 'numpy.all', 'np.all', (['(fv == av)'], {}), '(fv == av)\n', (8401, 8411), True, 'import numpy as np\n'), ((10462, 10491), 'theano.function', 'theano.function', (['[x]', 'x.shape'], {}), '([x], x.shape)\n', (10477, 10491), False, 'import theano\n'), ((10871, 10920), 'tests.gpuarray.config.mode_with_gpu.excluding', 'mode_with_gpu.excluding', (['"""local_shape_to_shape_i"""'], {}), "('local_shape_to_shape_i')\n", (10894, 10920), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((10929, 10969), 'theano.function', 'theano.function', (['[x]', 'x.shape'], {'mode': 'mode'}), '([x], x.shape, mode=mode)\n', (10944, 10969), False, 'import theano\n'), ((11151, 11166), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (11161, 11166), True, 'import theano.tensor as tt\n'), ((11175, 11190), 'theano.tensor.iscalar', 'tt.iscalar', (['"""i"""'], {}), "('i')\n", (11185, 11190), True, 'import theano.tensor as tt\n'), ((13815, 13830), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (13825, 13830), True, 'import theano.tensor as tt\n'), ((13901, 13916), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""b"""'], {}), "('b')\n", (13911, 13916), True, 'import theano.tensor as tt\n'), ((16526, 16619), 'tests.gpuarray.config.mode_with_gpu.including', 'mode_with_gpu.including', (['"""local_dot_to_dot22"""', 
'"""local_dot22_to_dot22scalar"""', '"""specialize"""'], {}), "('local_dot_to_dot22', 'local_dot22_to_dot22scalar',\n 'specialize')\n", (16549, 16619), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((16638, 16653), 'theano.tensor.fmatrix', 'tt.fmatrix', (['"""a"""'], {}), "('a')\n", (16648, 16653), True, 'import theano.tensor as tt\n'), ((18108, 18120), 'theano.tensor.lscalar', 'tt.lscalar', ([], {}), '()\n', (18118, 18120), True, 'import theano.tensor as tt\n'), ((18132, 18179), 'numpy.array', 'np.array', (['[3, 4, 5]'], {'dtype': 'theano.config.floatX'}), '([3, 4, 5], dtype=theano.config.floatX)\n', (18140, 18179), True, 'import numpy as np\n'), ((18188, 18234), 'theano.gpuarray.type.gpuarray_shared_constructor', 'gpuarray_shared_constructor', (['data'], {'borrow': '(True)'}), '(data, borrow=True)\n', (18215, 18234), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((18243, 18257), 'theano.tensor.zeros', 'tt.zeros', (['(s,)'], {}), '((s,))\n', (18251, 18257), True, 'import theano.tensor as tt\n'), ((18270, 18285), 'theano.gpuarray.basic_ops.GpuJoin', 'GpuJoin', ([], {'view': '(0)'}), '(view=0)\n', (18277, 18285), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((19420, 19434), 'tests.unittest_tools.seed_rng', 'utt.seed_rng', ([], {}), '()\n', (19432, 19434), True, 'from tests import unittest_tools as utt\n'), ((1091, 1107), 'tests.unittest_tools.fetch_seed', 'utt.fetch_seed', ([], {}), '()\n', (1105, 1107), True, 'from tests import unittest_tools as utt\n'), ((6784, 6843), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '(False, False)'}), "(dtype='float32', broadcastable=(False, False))\n", (6796, 6843), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7115, 7131), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['g'], {}), '(g)\n', (7128, 7131), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((7215, 7307), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '(False, False)', 'context_name': 'test_ctx_name'}), "(dtype='float32', broadcastable=(False, False), context_name=\n test_ctx_name)\n", (7227, 7307), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7979, 8038), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '(False, False)'}), "(dtype='float32', broadcastable=(False, False))\n", (7991, 8038), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((8351, 8367), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['g'], {}), '(g)\n', (8364, 8367), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8710, 8733), 'theano.gpuarray.basic_ops.GpuAlloc', 'GpuAlloc', (['test_ctx_name'], {}), '(test_ctx_name)\n', (8718, 8733), False, 'from theano.gpuarray.basic_ops import 
GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9378, 9401), 'theano.gpuarray.basic_ops.GpuAlloc', 'GpuAlloc', (['test_ctx_name'], {}), '(test_ctx_name)\n', (9386, 9401), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9403, 9426), 'theano.gpuarray.basic_ops.GpuAlloc', 'GpuAlloc', (['test_ctx_name'], {}), '(test_ctx_name)\n', (9411, 9426), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9428, 9438), 'theano.tensor.Alloc', 'tt.Alloc', ([], {}), '()\n', (9436, 9438), True, 'import theano.tensor as tt\n'), ((10298, 10364), 'theano.gpuarray.type.GpuArrayType', 'GpuArrayType', ([], {'dtype': '"""float32"""', 'broadcastable': '[False, False, False]'}), "(dtype='float32', broadcastable=[False, False, False])\n", (10310, 10364), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((11214, 11234), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (11228, 11234), True, 'import numpy as np\n'), ((12294, 12308), 'tests.unittest_tools.seed_rng', 'utt.seed_rng', ([], {}), '()\n', (12306, 12308), True, 'from tests import unittest_tools as utt\n'), ((12533, 12576), 'tests.gpuarray.config.mode_with_gpu.excluding', 'mode_with_gpu.excluding', (['"""constant_folding"""'], {}), "('constant_folding')\n", (12556, 12576), False, 'from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name\n'), ((12600, 12609), 'theano.gpuarray.basic_ops.GpuJoin', 'GpuJoin', ([], {}), '()\n', (12607, 12609), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((12756, 12765), 'theano.gpuarray.basic_ops.GpuJoin', 'GpuJoin', ([], {}), '()\n', (12763, 12765), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((13425, 13463), 'theano.function', 'theano.function', (['[]', 'o'], {'mode': 'self.mode'}), '([], o, mode=self.mode)\n', (13440, 13463), False, 'import theano\n'), ((13854, 13874), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (13868, 13874), True, 'import numpy as np\n'), ((13940, 13960), 'numpy.random.rand', 'np.random.rand', (['(3)', '(5)'], {}), '(3, 5)\n', (13954, 13960), True, 'import numpy as np\n'), ((15292, 15304), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (15302, 15304), True, 'import theano.tensor as tt\n'), ((15322, 15334), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (15332, 15334), True, 'import theano.tensor as tt\n'), ((15352, 15364), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (15362, 15364), True, 'import theano.tensor as tt\n'), ((15463, 15529), 'theano.function', 'theano.function', (['[N_symb, M_symb, k_symb]', 'out'], {'mode': 'mode_with_gpu'}), '([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)\n', (15478, 15529), False, 'import theano\n'), ((16663, 16723), 
'theano.gpuarray.type.GpuArrayType', 'theano.gpuarray.type.GpuArrayType', (['"""float32"""', '(False, False)'], {}), "('float32', (False, False))\n", (16696, 16723), False, 'import theano\n'), ((16746, 16766), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (16760, 16766), True, 'import numpy as np\n'), ((16820, 16840), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (16834, 16840), True, 'import numpy as np\n'), ((17379, 17396), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['ca'], {}), '(ca)\n', (17392, 17396), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((18338, 18364), 'theano.Out', 'theano.Out', (['c'], {'borrow': '(True)'}), '(c, borrow=True)\n', (18348, 18364), False, 'import theano\n'), ((18615, 18639), 'theano.tensor.matrix', 'tt.matrix', ([], {'dtype': 'm.dtype'}), '(dtype=m.dtype)\n', (18624, 18639), True, 'import theano.tensor as tt\n'), ((18657, 18669), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (18667, 18669), True, 'import theano.tensor as tt\n'), ((19037, 19061), 'theano.tensor.matrix', 'tt.matrix', ([], {'dtype': 'm.dtype'}), '(dtype=m.dtype)\n', (19046, 19061), True, 'import theano.tensor as tt\n'), ((19079, 19091), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (19089, 19091), True, 'import theano.tensor as tt\n'), ((20502, 20514), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (20512, 20514), True, 'import theano.tensor as tt\n'), ((20532, 20544), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (20542, 20544), True, 'import theano.tensor as tt\n'), ((20562, 20574), 'theano.tensor.iscalar', 'tt.iscalar', ([], {}), '()\n', (20572, 20574), True, 'import theano.tensor as tt\n'), ((20673, 20739), 'theano.function', 'theano.function', (['[N_symb, M_symb, k_symb]', 'out'], {'mode': 'mode_with_gpu'}), '([N_symb, M_symb, k_symb], out, mode=mode_with_gpu)\n', (20688, 20739), False, 'import theano\n'), ((2273, 2299), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (2284, 2299), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((6939, 6965), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (6950, 6965), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((6997, 7023), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (7008, 7023), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((7409, 7435), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (7420, 7435), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((7570, 7593), 'theano.gpuarray.basic_ops.GpuToGpu', 'GpuToGpu', (['test_ctx_name'], {}), '(test_ctx_name)\n', (7578, 7593), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8134, 8160), 'theano.gpuarray.type.get_context', 'get_context', 
(['test_ctx_name'], {}), '(test_ctx_name)\n', (8145, 8160), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((8233, 8259), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (8244, 8259), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8498, 8524), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (8509, 8524), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((10426, 10452), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (10437, 10452), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((12986, 13048), 'theano.gpuarray.type.gpuarray_shared_constructor', 'gpuarray_shared_constructor', (['x'], {'target': 'test_ctx_name'}), '(x, target=test_ctx_name, **kwargs)\n', (13013, 13048), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((13348, 13359), 'theano.tensor.Split', 'tt.Split', (['(2)'], {}), '(2)\n', (13356, 13359), True, 'import theano.tensor as tt\n'), ((14156, 14172), 'theano.tensor.zeros_like', 'tt.zeros_like', (['a'], {}), '(a)\n', (14169, 14172), True, 'import theano.tensor as tt\n'), ((14174, 14189), 'theano.tensor.ones_like', 'tt.ones_like', (['b'], {}), '(b)\n', (14186, 14189), True, 'import theano.tensor as tt\n'), ((15379, 15422), 'theano.tensor.eye', 'tt.eye', (['N_symb', 'M_symb', 'k_symb'], {'dtype': 'dtype'}), '(N_symb, M_symb, k_symb, dtype=dtype)\n', (15385, 15422), True, 'import theano.tensor as tt\n'), ((15634, 15663), 'numpy.eye', 'np.eye', (['N', 'M_', 'k'], {'dtype': 'dtype'}), '(N, M_, k, dtype=dtype)\n', (15640, 15663), True, 'import numpy as np\n'), ((15696, 15711), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (15704, 15711), True, 'import numpy as np\n'), ((16867, 16893), 'theano.gpuarray.type.get_context', 'get_context', (['test_ctx_name'], {}), '(test_ctx_name)\n', (16878, 16893), False, 'from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor\n'), ((16930, 16956), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (16941, 16956), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((17506, 17523), 'theano.gpuarray.basic_ops.host_from_gpu', 'host_from_gpu', (['ca'], {}), '(ca)\n', (17519, 17523), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((18730, 18753), 'theano.tensor.tril', 'tt.tril', (['m_symb', 'k_symb'], {}), '(m_symb, k_symb)\n', (18737, 18753), True, 'import theano.tensor as tt\n'), ((18844, 18857), 'numpy.tril', 'np.tril', (['m', 'k'], {}), '(m, k)\n', (18851, 18857), True, 'import numpy as np\n'), ((18890, 18905), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (18898, 18905), True, 'import numpy as np\n'), ((19151, 19174), 'theano.tensor.triu', 'tt.triu', (['m_symb', 'k_symb'], {}), '(m_symb, k_symb)\n', 
(19158, 19174), True, 'import theano.tensor as tt\n'), ((19265, 19278), 'numpy.triu', 'np.triu', (['m', 'k'], {}), '(m, k)\n', (19272, 19278), True, 'import numpy as np\n'), ((19311, 19326), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (19319, 19326), True, 'import numpy as np\n'), ((19477, 19493), 'tests.unittest_tools.fetch_seed', 'utt.fetch_seed', ([], {}), '()\n', (19491, 19493), True, 'from tests import unittest_tools as utt\n'), ((20589, 20632), 'theano.tensor.tri', 'tt.tri', (['N_symb', 'M_symb', 'k_symb'], {'dtype': 'dtype'}), '(N_symb, M_symb, k_symb, dtype=dtype)\n', (20595, 20632), True, 'import theano.tensor as tt\n'), ((20843, 20872), 'numpy.tri', 'np.tri', (['N', 'M_', 'k'], {'dtype': 'dtype'}), '(N, M_, k, dtype=dtype)\n', (20849, 20872), True, 'import numpy as np\n'), ((20905, 20920), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (20913, 20920), True, 'import numpy as np\n'), ((2968, 2985), 'pytest.skip', 'pytest.skip', (['skip'], {}), '(skip)\n', (2979, 2985), False, 'import pytest\n'), ((3339, 3357), 'theano.shared', 'theano.shared', (['inp'], {}), '(inp)\n', (3352, 3357), False, 'import theano\n'), ((3403, 3421), 'theano.shared', 'theano.shared', (['inp'], {}), '(inp)\n', (3416, 3421), False, 'import theano\n'), ((3486, 3522), 'tests.tensor.utils.safe_make_node', 'safe_make_node', (['self.op', '*inputs_ref'], {}), '(self.op, *inputs_ref)\n', (3500, 3522), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((3550, 3586), 'tests.tensor.utils.safe_make_node', 'safe_make_node', (['self.op', '*inputs_tst'], {}), '(self.op, *inputs_tst)\n', (3564, 3586), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8681, 8693), 'theano.tensor.basic.alloc', 'alloc', (['*args'], {}), '(*args)\n', (8686, 8693), False, 'from theano.tensor.basic import alloc\n'), ((9533, 9578), 'theano.gpuarray.basic_ops.GpuAllocEmpty', 'GpuAllocEmpty', (['dt'], {'context_name': 'test_ctx_name'}), '(dt, context_name=test_ctx_name)\n', (9546, 9578), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9782, 9820), 'theano.gpuarray.basic_ops.GpuAllocEmpty', 'GpuAllocEmpty', (['"""uint64"""', 'test_ctx_name'], {}), "('uint64', test_ctx_name)\n", (9795, 9820), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((9840, 9878), 'theano.gpuarray.basic_ops.GpuAllocEmpty', 'GpuAllocEmpty', (['"""uint64"""', 'test_ctx_name'], {}), "('uint64', test_ctx_name)\n", (9853, 9878), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((13260, 13276), 'tests.unittest_tools.fetch_seed', 'utt.fetch_seed', ([], {}), '()\n', (13274, 13276), True, 'from tests import unittest_tools as utt\n'), ((14032, 14048), 'theano.tensor.zeros_like', 'tt.zeros_like', (['a'], {}), '(a)\n', (14045, 14048), True, 'import theano.tensor as tt\n'), ((14050, 14065), 'theano.tensor.ones_like', 'tt.ones_like', (['b'], {}), '(b)\n', (14062, 14065), True, 'import theano.tensor as tt\n'), ((14274, 14290), 'theano.tensor.zeros_like', 'tt.zeros_like', (['a'], {}), '(a)\n', (14287, 14290), True, 'import theano.tensor as tt\n'), 
((14292, 14307), 'theano.tensor.ones_like', 'tt.ones_like', (['b'], {}), '(b)\n', (14304, 14307), True, 'import theano.tensor as tt\n'), ((17079, 17105), 'theano.gpuarray.basic_ops.GpuFromHost', 'GpuFromHost', (['test_ctx_name'], {}), '(test_ctx_name)\n', (17090, 17105), False, 'from theano.gpuarray.basic_ops import GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu\n'), ((8770, 8776), 'tests.tensor.utils.rand', 'rand', ([], {}), '()\n', (8774, 8776), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8778, 8789), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (8786, 8789), True, 'import numpy as np\n'), ((8934, 8940), 'tests.tensor.utils.rand', 'rand', ([], {}), '()\n', (8938, 8940), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8942, 8953), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (8950, 8953), True, 'import numpy as np\n'), ((8955, 8966), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (8963, 8966), True, 'import numpy as np\n'), ((8988, 8995), 'tests.tensor.utils.rand', 'rand', (['(7)'], {}), '(7)\n', (8992, 8995), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((8997, 9008), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (9005, 9008), True, 'import numpy as np\n'), ((9010, 9021), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9018, 9021), True, 'import numpy as np\n'), ((9043, 9050), 'tests.tensor.utils.rand', 'rand', (['(7)'], {}), '(7)\n', (9047, 9050), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((9052, 9063), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (9060, 9063), True, 'import numpy as np\n'), ((9065, 9076), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (9073, 9076), True, 'import numpy as np\n'), ((9078, 9089), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9086, 9089), True, 'import numpy as np\n'), ((9111, 9121), 'tests.tensor.utils.rand', 'rand', (['(4)', '(7)'], {}), '(4, 7)\n', (9115, 9121), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((9123, 9134), 'numpy.int32', 'np.int32', (['(2)'], {}), '(2)\n', (9131, 9134), True, 'import numpy as np\n'), ((9136, 9147), 'numpy.int32', 'np.int32', (['(4)'], {}), '(4)\n', (9144, 9147), True, 'import numpy as np\n'), ((9149, 9160), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9157, 9160), True, 'import numpy as np\n'), ((9184, 9191), 'tests.tensor.utils.rand', 'rand', (['(7)'], {}), '(7)\n', (9188, 9191), False, 'from tests.tensor.utils import rand, safe_make_node\n'), ((9193, 9204), 'numpy.int32', 'np.int32', (['(7)'], {}), '(7)\n', (9201, 9204), True, 'import numpy as np\n'), ((9206, 9217), 'numpy.int32', 'np.int32', (['(5)'], {}), '(5)\n', (9214, 9217), True, 'import numpy as np\n'), ((15425, 15436), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (15433, 15436), True, 'import numpy as np\n'), ((15573, 15584), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (15581, 15584), True, 'import numpy as np\n'), ((20635, 20646), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (20643, 20646), True, 'import numpy as np\n'), ((20782, 20793), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (20790, 20793), True, 'import numpy as np\n'), ((3168, 3217), 'numpy.asarray', 'np.asarray', (['inputs[_]'], {'dtype': 'theano.config.floatX'}), '(inputs[_], dtype=theano.config.floatX)\n', (3178, 3217), True, 'import numpy as np\n'), ((5709, 5756), 'theano.tensor.TensorType.values_eq_approx', 
'TensorType.values_eq_approx', (['variable', 'expected'], {}), '(variable, expected)\n', (5736, 5756), False, 'from theano.tensor import TensorType\n')]
|
import gym
import numpy as np
from itertools import product
import matplotlib.pyplot as plt
def print_policy(Q, env):
""" This is a helper function to print a nice policy from the Q function"""
moves = [u'←', u'↓',u'→', u'↑']
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
policy = np.chararray(dims, unicode=True)
policy[:] = ' '
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
policy[idx] = moves[np.argmax(Q[s])]
if env.desc[idx] in ['H', 'G']:
policy[idx] = u'·'
print('\n'.join([''.join([u'{:2}'.format(item) for item in row])
for row in policy]))
def plot_V(Q, env):
""" This is a helper function to plot the state values from the Q function"""
fig = plt.figure()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
V = np.zeros(dims)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
V[idx] = np.max(Q[s])
if env.desc[idx] in ['H', 'G']:
V[idx] = 0.
plt.imshow(V, origin='upper',
extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6,
cmap=plt.cm.RdYlGn, interpolation='none')
for x, y in product(range(dims[0]), range(dims[1])):
plt.text(y+0.5, dims[0]-x-0.5, '{:.3f}'.format(V[x,y]),
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
def plot_Q(Q, env):
""" This is a helper function to plot the Q function """
from matplotlib import colors, patches
fig = plt.figure()
ax = fig.gca()
if not hasattr(env, 'desc'):
env = env.env
dims = env.desc.shape
up = np.array([[0, 1], [0.5, 0.5], [1,1]])
down = np.array([[0, 0], [0.5, 0.5], [1,0]])
left = np.array([[0, 0], [0.5, 0.5], [0,1]])
right = np.array([[1, 0], [0.5, 0.5], [1,1]])
tri = [left, down, right, up]
pos = [[0.2, 0.5], [0.5, 0.2], [0.8, 0.5], [0.5, 0.8]]
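    # Each grid cell is split into four triangles (left, down, right, up), one per
    # action; `pos` gives the text anchor for each action's Q-value inside a cell.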
cmap = plt.cm.RdYlGn
norm = colors.Normalize(vmin=.0,vmax=.6)
ax.imshow(np.zeros(dims), origin='upper', extent=[0,dims[0],0,dims[1]], vmin=.0, vmax=.6, cmap=cmap)
ax.grid(which='major', color='black', linestyle='-', linewidth=2)
for s in range(len(Q)):
idx = np.unravel_index(s, dims)
x, y = idx
if env.desc[idx] in ['H', 'G']:
ax.add_patch(patches.Rectangle((y, 3-x), 1, 1, color=cmap(.0)))
plt.text(y+0.5, dims[0]-x-0.5, '{:.2f}'.format(.0),
horizontalalignment='center',
verticalalignment='center')
continue
for a in range(len(tri)):
ax.add_patch(patches.Polygon(tri[a] + np.array([y, 3-x]), color=cmap(Q[s][a])))
plt.text(y+pos[a][0], dims[0]-1-x+pos[a][1], '{:.2f}'.format(Q[s][a]),
horizontalalignment='center', verticalalignment='center',
fontsize=9, fontweight=('bold' if Q[s][a] == np.max(Q[s]) else 'normal'))
plt.xticks([])
plt.yticks([])
def choose_abs_greedy_action(state, Q, epsilon):
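    # Epsilon-greedy selection: explore uniformly with probability epsilon,
    # otherwise pick the greedy action. Note that this relies on the
    # module-level `env` for the size of the action space.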
action = None
if np.random.uniform(0, 1) < epsilon:
action = np.random.randint(env.action_space.n)
else:
action = np.argmax(Q[state,:])
return action
def max_action_state(state, Q):
action = np.argmax(Q[state,:])
return Q[state, action]
def sarsa(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
    # SARSA: on-policy TD control with an epsilon-greedy behaviour policy.
for i in range(num_ep):
s = env.reset()
done = False
a = choose_abs_greedy_action(s, Q, epsilon)
while not done:
s_, r, done, _ = env.step(a)
a_ = choose_abs_greedy_action(s_, Q, epsilon)
#update Q using sarsa
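            # On-policy TD target: r + gamma * Q[s', a'], where a' is the action
            # actually selected by the epsilon-greedy policy in the next state.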
Q[s, a] = Q[s, a] + alpha * (r + (gamma * Q[s_,a_]) - Q[s,a])
s = s_
a = a_
return Q
def qlearning(env, alpha=0.1, gamma=0.9, epsilon=0.1, num_ep=int(1e4)):
#Q = np.zeros((env.observation_space.n, env.action_space.n))
Q = np.random.rand(env.observation_space.n, env.action_space.n)
    # Q-learning: off-policy TD control with an epsilon-greedy behaviour policy.
for i in range(num_ep):
s = env.reset()
done = False
while not done:
a = choose_abs_greedy_action(s, Q, epsilon)
s_, r, done, _ = env.step(a)
#update Q using Q learning
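            # Off-policy TD target: r + gamma * max_a' Q[s', a'], independent of
            # the action the behaviour policy will actually take in s'.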
Q[s, a] = Q[s, a] + alpha * (r+ ( gamma * max_action_state(s_, Q)) - Q[s,a] )
s = s_
return Q
env=gym.make('FrozenLake-v0')
#env=gym.make('FrozenLake-v0', is_slippery=False)
#env=gym.make('FrozenLake-v0', map_name="8x8")
print("Running sarsa...")
Q = sarsa(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
print("Running qlearning")
Q = qlearning(env)
plot_V(Q, env)
plot_Q(Q, env)
print_policy(Q, env)
plt.show()
|
[
"matplotlib.pyplot.imshow",
"numpy.random.rand",
"matplotlib.pyplot.xticks",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.unravel_index",
"matplotlib.colors.Normalize",
"numpy.chararray",
"numpy.random.uniform",
"numpy.random.randint",
"gym.make",
"matplotlib.pyplot.show"
] |
[((4710, 4735), 'gym.make', 'gym.make', (['"""FrozenLake-v0"""'], {}), "('FrozenLake-v0')\n", (4718, 4735), False, 'import gym\n'), ((4926, 4936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4934, 4936), True, 'import matplotlib.pyplot as plt\n'), ((5035, 5045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5043, 5045), True, 'import matplotlib.pyplot as plt\n'), ((330, 362), 'numpy.chararray', 'np.chararray', (['dims'], {'unicode': '(True)'}), '(dims, unicode=True)\n', (342, 362), True, 'import numpy as np\n'), ((780, 792), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (790, 792), True, 'import matplotlib.pyplot as plt\n'), ((882, 896), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (890, 896), True, 'import numpy as np\n'), ((1063, 1191), 'matplotlib.pyplot.imshow', 'plt.imshow', (['V'], {'origin': '"""upper"""', 'extent': '[0, dims[0], 0, dims[1]]', 'vmin': '(0.0)', 'vmax': '(0.6)', 'cmap': 'plt.cm.RdYlGn', 'interpolation': '"""none"""'}), "(V, origin='upper', extent=[0, dims[0], 0, dims[1]], vmin=0.0,\n vmax=0.6, cmap=plt.cm.RdYlGn, interpolation='none')\n", (1073, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1431, 1445), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1441, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1464), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1460, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1613), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1611, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1763), 'numpy.array', 'np.array', (['[[0, 1], [0.5, 0.5], [1, 1]]'], {}), '([[0, 1], [0.5, 0.5], [1, 1]])\n', (1733, 1763), True, 'import numpy as np\n'), ((1774, 1812), 'numpy.array', 'np.array', (['[[0, 0], [0.5, 0.5], [1, 0]]'], {}), '([[0, 0], [0.5, 0.5], [1, 0]])\n', (1782, 1812), True, 'import numpy as np\n'), ((1823, 1861), 'numpy.array', 'np.array', (['[[0, 0], [0.5, 0.5], [0, 1]]'], {}), '([[0, 0], [0.5, 0.5], [0, 1]])\n', (1831, 1861), True, 'import numpy as np\n'), ((1873, 1911), 'numpy.array', 'np.array', (['[[1, 0], [0.5, 0.5], [1, 1]]'], {}), '([[1, 0], [0.5, 0.5], [1, 1]])\n', (1881, 1911), True, 'import numpy as np\n'), ((2045, 2081), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0.0)', 'vmax': '(0.6)'}), '(vmin=0.0, vmax=0.6)\n', (2061, 2081), False, 'from matplotlib import colors, patches\n'), ((3027, 3041), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3037, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3046, 3060), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3056, 3060), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3334), 'numpy.argmax', 'np.argmax', (['Q[state, :]'], {}), '(Q[state, :])\n', (3321, 3334), True, 'import numpy as np\n'), ((3503, 3562), 'numpy.random.rand', 'np.random.rand', (['env.observation_space.n', 'env.action_space.n'], {}), '(env.observation_space.n, env.action_space.n)\n', (3517, 3562), True, 'import numpy as np\n'), ((4240, 4299), 'numpy.random.rand', 'np.random.rand', (['env.observation_space.n', 'env.action_space.n'], {}), '(env.observation_space.n, env.action_space.n)\n', (4254, 4299), True, 'import numpy as np\n'), ((425, 450), 'numpy.unravel_index', 'np.unravel_index', (['s', 'dims'], {}), '(s, dims)\n', (441, 450), True, 'import numpy as np\n'), ((939, 964), 'numpy.unravel_index', 'np.unravel_index', (['s', 'dims'], {}), '(s, dims)\n', (955, 964), True, 'import numpy as np\n'), ((982, 994), 
'numpy.max', 'np.max', (['Q[s]'], {}), '(Q[s])\n', (988, 994), True, 'import numpy as np\n'), ((2098, 2112), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (2106, 2112), True, 'import numpy as np\n'), ((2302, 2327), 'numpy.unravel_index', 'np.unravel_index', (['s', 'dims'], {}), '(s, dims)\n', (2318, 2327), True, 'import numpy as np\n'), ((3130, 3153), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3147, 3153), True, 'import numpy as np\n'), ((3176, 3213), 'numpy.random.randint', 'np.random.randint', (['env.action_space.n'], {}), '(env.action_space.n)\n', (3193, 3213), True, 'import numpy as np\n'), ((3232, 3254), 'numpy.argmax', 'np.argmax', (['Q[state, :]'], {}), '(Q[state, :])\n', (3241, 3254), True, 'import numpy as np\n'), ((479, 494), 'numpy.argmax', 'np.argmax', (['Q[s]'], {}), '(Q[s])\n', (488, 494), True, 'import numpy as np\n'), ((2723, 2743), 'numpy.array', 'np.array', (['[y, 3 - x]'], {}), '([y, 3 - x])\n', (2731, 2743), True, 'import numpy as np\n'), ((2993, 3005), 'numpy.max', 'np.max', (['Q[s]'], {}), '(Q[s])\n', (2999, 3005), True, 'import numpy as np\n')]
|
from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import pandas as pd
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
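    # preds has shape (n_models, n_samples); each row holds one model's
    # probability for the positive class (column 1 of its softmax output).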
final_labels=np.zeros(len(test_data_1),dtype=int)
    # average the predictions across models and round to a hard 0/1 label
for i in range(len(test_data_1)):
final_labels[i]=round(np.mean(preds[:,i]))
if i%100==0:
print(i ,' out of ',len(test_data_1))
print("test data accuracy: ", accuracy_score(final_labels,test_labels))
print("test data f_measure: ", f1_score(final_labels, test_labels))
submission = pd.DataFrame({"Quality": final_labels})
submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')
def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.append(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0)
preds.append(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(len(test_data_1),dtype=float)
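    # Unlike the MSRP predictor above, the averaged probability is kept as a
    # soft score instead of being rounded to a hard label.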
    # average the predicted probabilities from each model
for i in range(len(test_data_1)):
final_labels[i]=np.mean(preds[:,i])
if i%10000==0:
print(i ,' out of ',len(test_data_1))
    print('making the submission file')
submission = pd.DataFrame({"is_duplicate": final_labels})
submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id')
|
[
"numpy.mean",
"sklearn.metrics.f1_score",
"numpy.asarray",
"utilities.tools.load_model",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score"
] |
[((624, 641), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (634, 641), True, 'import numpy as np\n'), ((1052, 1091), 'pandas.DataFrame', 'pd.DataFrame', (["{'Quality': final_labels}"], {}), "({'Quality': final_labels})\n", (1064, 1091), True, 'import pandas as pd\n'), ((1657, 1674), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (1667, 1674), True, 'import numpy as np\n'), ((1974, 2018), 'pandas.DataFrame', 'pd.DataFrame', (["{'is_duplicate': final_labels}"], {}), "({'is_duplicate': final_labels})\n", (1986, 2018), True, 'import pandas as pd\n'), ((920, 961), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['final_labels', 'test_labels'], {}), '(final_labels, test_labels)\n', (934, 961), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((997, 1032), 'sklearn.metrics.f1_score', 'f1_score', (['final_labels', 'test_labels'], {}), '(final_labels, test_labels)\n', (1005, 1032), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1822, 1842), 'numpy.mean', 'np.mean', (['preds[:, i]'], {}), '(preds[:, i])\n', (1829, 1842), True, 'import numpy as np\n'), ((354, 395), 'utilities.tools.load_model', 'load_model', (['(i + 1)', 'nb_words', 'n_h_features'], {}), '(i + 1, nb_words, n_h_features)\n', (364, 395), False, 'from utilities.tools import load_model\n'), ((793, 813), 'numpy.mean', 'np.mean', (['preds[:, i]'], {}), '(preds[:, i])\n', (800, 813), True, 'import numpy as np\n'), ((1386, 1427), 'utilities.tools.load_model', 'load_model', (['(i + 1)', 'nb_words', 'n_h_features'], {}), '(i + 1, nb_words, n_h_features)\n', (1396, 1427), False, 'from utilities.tools import load_model\n')]
|
# coding=utf-8
import logging
import traceback
from os import makedirs
from os.path import exists, join
from textwrap import fill
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from koino.plot import big_square, default_alpha
from matplotlib import cm
from ..utils.base import jaccard
def plot_silhouette(
X, figure_fp, n_clusters, silhouette_values, cluster_labels, silhouette_avg
):
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(26, 10))
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but here all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
y_lower = 10
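    # Draw one horizontal band per cluster, stacked with a 10-sample gap; each
    # band's profile is the sorted silhouette values of that cluster's members.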
for k in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = np.sort(silhouette_values[cluster_labels == k])
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
        # `cm.spectral` was removed in matplotlib >= 2.2; use the equivalent map.
        color = cm.nipy_spectral(float(k) / n_clusters)
ax1.fill_betweenx(
np.arange(y_lower, y_upper),
0,
ith_cluster_silhouette_values,
facecolor=color,
edgecolor=color,
alpha=default_alpha,
)
# Label the silhouette plots with their cluster numbers at the
# middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(k))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# Construct cluster
# 2nd Plot showing the actual clusters formed
    colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
# colors = y
ax2.scatter(X[:, 0], X[:, 1], marker=".", s=20, lw=0, alpha=default_alpha, c=colors)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(
("Silhouette analysis for KMeans " "with n_clusters = %d" % n_clusters),
fontsize=14,
fontweight="bold",
)
plt.savefig(figure_fp)
plt.close()
plt.clf()
def plot_cluster_assignments(
X, y, n_clusters, figures_dir, transparent=False, cluster_names=None, title=""
):
"""Clustering assignments scatter plot
Notes
-----
    Either the mean or the median can be used to set the cluster label coordinates."""
if cluster_names is None:
cluster_names = ["Cluster {}".format(i + 1) for i in range(n_clusters)]
# We first reorder the data points according to the centroids labels
X = np.vstack([X[y == i] for i in range(n_clusters)])
y = np.hstack([y[y == i] for i in range(n_clusters)])
# Choose a color palette with seaborn.
palette = np.array(sns.color_palette("hls", n_clusters))
fig, ax = plt.subplots(figsize=big_square)
# for i in range(n_clusters):
# mask = y == i
# ax.scatter(X[mask, 0], X[mask, 1], lw=0, s=20, c=palette[i],
# label=cluster_names[i])
ax.set_title(title)
ax.scatter(X[:, 0], X[:, 1], lw=0, s=20, c=palette[y.astype(np.int)])
ax.axis("off")
# Add the labels for each cluster.
for i in range(n_clusters):
# Position of each label.
samples = np.atleast_2d(X[y == i, :2])
if not len(samples):
logging.warning(
"Probably singular cluster {} (shape:{})".format(i + 1, X[y == i].shape)
)
continue
xtext, ytext = np.median(samples, axis=0)
name = fill(cluster_names[i], width=20)
assert np.isfinite(xtext)
assert np.isfinite(ytext)
txt = ax.text(xtext, ytext, name, fontsize=20, wrap=True, ha="left")
txt.set_path_effects(
[PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]
)
# plt.legend()
figure_fp = join(figures_dir, "Clustered {}.png".format(title))
fig.tight_layout()
try:
fig.savefig(figure_fp, transparent=transparent)
except ValueError:
logging.warning(traceback.format_exc())
finally:
plt.close()
plt.clf()
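# --- Usage sketch (added for illustration; not part of the original module) ---
# X is expected to be a 2-D embedding (e.g. t-SNE or PCA output); only the first
# two columns are plotted. `embedding_2d` and `labels` are hypothetical placeholders.
#
#   plot_cluster_assignments(embedding_2d, labels, n_clusters=4,
#                            figures_dir="figures", title="t-SNE embedding")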
def overlap_jaccard(
indx,
y_a,
y_b,
names_a,
names_b,
n_a=None,
n_b=None,
figsize=None,
output_dir=None,
alabel="socio-demographic",
blabel="purchases",
transparent=False,
):
"""Compute and plot contingency tables based on set intersection and
jaccard score.
    # TODO: Normalize by len(sd_set) or len(diet_set)?
"""
if not (n_a or n_b) or not output_dir:
return
elif output_dir and not exists(output_dir):
makedirs(output_dir)
else:
assert n_a and n_b
assert len(indx) == len(y_a) == len(y_b)
assert len(names_a) == n_a
assert len(names_b) == n_b
a_sets = [set(indx[y_a == i]) for i in range(n_a)]
b_sets = [set(indx[y_b == i]) for i in range(n_b)]
inter_sets = np.asarray(
[[len(set_a & set_t) for set_a in a_sets] for set_t in b_sets], dtype=np.int_
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Overlap between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
inter_sets,
annot=True,
fmt="6.0f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
inter_path = join(output_dir, "Clusters Intersection.png")
plt.savefig(inter_path, transparent=transparent)
plt.close()
plt.clf()
jac_arr = np.asarray(
[[jaccard(set_a, set_b) for set_a in a_sets] for set_b in b_sets],
dtype=np.float_,
)
fig, ax = plt.subplots(figsize=figsize)
plt.title("Jaccard scores between {} and {} clusters".format(alabel, blabel))
sns.heatmap(
jac_arr,
annot=True,
fmt=".3f",
ax=ax,
square=True,
xticklabels=names_a,
yticklabels=names_b,
)
plt.tight_layout()
jaccard_path = join(output_dir, "Clusters Jaccard.png")
plt.savefig(jaccard_path, transparent=transparent)
plt.close()
plt.clf()
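# --- Usage sketch (added for illustration; not part of the original module) ---
# overlap_jaccard cross-tabulates two clusterings of the same samples: `indx`
# holds the sample identifiers, y_a/y_b the two label vectors. All names below
# are hypothetical placeholders.
#
#   overlap_jaccard(sample_ids, labels_sd, labels_diet,
#                   names_a=sd_cluster_names, names_b=diet_cluster_names,
#                   n_a=4, n_b=5, output_dir="figures/overlap")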
|
[
"matplotlib.patheffects.Normal",
"textwrap.fill",
"numpy.isfinite",
"numpy.arange",
"numpy.atleast_2d",
"os.path.exists",
"seaborn.color_palette",
"numpy.sort",
"matplotlib.pyplot.close",
"matplotlib.pyplot.savefig",
"seaborn.heatmap",
"matplotlib.pyplot.suptitle",
"traceback.format_exc",
"numpy.median",
"os.makedirs",
"matplotlib.pyplot.clf",
"os.path.join",
"matplotlib.patheffects.Stroke",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] |
[((534, 570), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(26, 10)'}), '(1, 2, figsize=(26, 10))\n', (546, 570), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2768), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Silhouette analysis for KMeans with n_clusters = %d' % n_clusters)"], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Silhouette analysis for KMeans with n_clusters = %d' %\n n_clusters, fontsize=14, fontweight='bold')\n", (2664, 2768), True, 'import matplotlib.pyplot as plt\n'), ((2805, 2827), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_fp'], {}), '(figure_fp)\n', (2816, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2843), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2841, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2848, 2857), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2855, 2857), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3558), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'big_square'}), '(figsize=big_square)\n', (3538, 3558), True, 'import matplotlib.pyplot as plt\n'), ((5747, 5776), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5759, 5776), True, 'import matplotlib.pyplot as plt\n'), ((5856, 5969), 'seaborn.heatmap', 'sns.heatmap', (['inter_sets'], {'annot': '(True)', 'fmt': '"""6.0f"""', 'ax': 'ax', 'square': '(True)', 'xticklabels': 'names_a', 'yticklabels': 'names_b'}), "(inter_sets, annot=True, fmt='6.0f', ax=ax, square=True,\n xticklabels=names_a, yticklabels=names_b)\n", (5867, 5969), True, 'import seaborn as sns\n'), ((6033, 6051), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6049, 6051), True, 'import matplotlib.pyplot as plt\n'), ((6069, 6114), 'os.path.join', 'join', (['output_dir', '"""Clusters Intersection.png"""'], {}), "(output_dir, 'Clusters Intersection.png')\n", (6073, 6114), False, 'from os.path import exists, join\n'), ((6119, 6167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['inter_path'], {'transparent': 'transparent'}), '(inter_path, transparent=transparent)\n', (6130, 6167), True, 'import matplotlib.pyplot as plt\n'), ((6172, 6183), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6181, 6183), True, 'import matplotlib.pyplot as plt\n'), ((6188, 6197), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6195, 6197), True, 'import matplotlib.pyplot as plt\n'), ((6346, 6375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6358, 6375), True, 'import matplotlib.pyplot as plt\n'), ((6462, 6572), 'seaborn.heatmap', 'sns.heatmap', (['jac_arr'], {'annot': '(True)', 'fmt': '""".3f"""', 'ax': 'ax', 'square': '(True)', 'xticklabels': 'names_a', 'yticklabels': 'names_b'}), "(jac_arr, annot=True, fmt='.3f', ax=ax, square=True, xticklabels\n =names_a, yticklabels=names_b)\n", (6473, 6572), True, 'import seaborn as sns\n'), ((6635, 6653), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6651, 6653), True, 'import matplotlib.pyplot as plt\n'), ((6673, 6713), 'os.path.join', 'join', (['output_dir', '"""Clusters Jaccard.png"""'], {}), "(output_dir, 'Clusters Jaccard.png')\n", (6677, 6713), False, 'from os.path import exists, join\n'), ((6718, 6768), 'matplotlib.pyplot.savefig', 'plt.savefig', (['jaccard_path'], {'transparent': 'transparent'}), '(jaccard_path, transparent=transparent)\n', (6729, 6768), True, 'import matplotlib.pyplot as plt\n'), ((6773, 6784), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6782, 6784), True, 'import matplotlib.pyplot as plt\n'), ((6789, 6798), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6796, 6798), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1172), 'numpy.sort', 'np.sort', (['silhouette_values[cluster_labels == k]'], {}), '(silhouette_values[cluster_labels == k])\n', (1132, 1172), True, 'import numpy as np\n'), ((3473, 3509), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'n_clusters'], {}), "('hls', n_clusters)\n", (3490, 3509), True, 'import seaborn as sns\n'), ((3959, 3987), 'numpy.atleast_2d', 'np.atleast_2d', (['X[y == i, :2]'], {}), '(X[y == i, :2])\n', (3972, 3987), True, 'import numpy as np\n'), ((4193, 4219), 'numpy.median', 'np.median', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (4202, 4219), True, 'import numpy as np\n'), ((4235, 4267), 'textwrap.fill', 'fill', (['cluster_names[i]'], {'width': '(20)'}), '(cluster_names[i], width=20)\n', (4239, 4267), False, 'from textwrap import fill\n'), ((4283, 4301), 'numpy.isfinite', 'np.isfinite', (['xtext'], {}), '(xtext)\n', (4294, 4301), True, 'import numpy as np\n'), ((4317, 4335), 'numpy.isfinite', 'np.isfinite', (['ytext'], {}), '(ytext)\n', (4328, 4335), True, 'import numpy as np\n'), ((4804, 4815), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4813, 4815), True, 'import matplotlib.pyplot as plt\n'), ((4824, 4833), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4831, 4833), True, 'import matplotlib.pyplot as plt\n'), ((1372, 1399), 'numpy.arange', 'np.arange', (['y_lower', 'y_upper'], {}), '(y_lower, y_upper)\n', (1381, 1399), True, 'import numpy as np\n'), ((5333, 5353), 'os.makedirs', 'makedirs', (['output_dir'], {}), '(output_dir)\n', (5341, 5353), False, 'from os import makedirs\n'), ((4456, 4503), 'matplotlib.patheffects.Stroke', 'PathEffects.Stroke', ([], {'linewidth': '(5)', 'foreground': '"""w"""'}), "(linewidth=5, foreground='w')\n", (4474, 4503), True, 'import matplotlib.patheffects as PathEffects\n'), ((4505, 4525), 'matplotlib.patheffects.Normal', 'PathEffects.Normal', ([], {}), '()\n', (4523, 4525), True, 'import matplotlib.patheffects as PathEffects\n'), ((4759, 4781), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4779, 4781), False, 'import traceback\n'), ((5305, 5323), 'os.path.exists', 'exists', (['output_dir'], {}), '(output_dir)\n', (5311, 5323), False, 'from os.path import exists, join\n')]
|
"""Bindings for the Barnes Hut TSNE algorithm with fast nearest neighbors
Refs:
References
[1] <NAME>, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] <NAME>, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
import numpy as N
import ctypes
import os
import pkg_resources
def ord_string(s):
b = bytearray()
    b.extend(map(ord, s))  # extend() mutates the bytearray in place and returns None
return N.array([x for x in b] + [0]).astype(N.uint8)
class TSNE(object):
def __init__(self,
n_components=2,
perplexity=50.0,
early_exaggeration=2.0,
learning_rate=200.0,
num_neighbors=1023,
force_magnify_iters=250,
pre_momentum=0.5,
post_momentum=0.8,
theta=0.5,
epssq=0.0025,
n_iter=1000,
n_iter_without_progress=1000,
min_grad_norm=1e-7,
perplexity_epsilon=1e-3,
metric='euclidean',
init='random',
return_style='once',
num_snapshots=5,
verbose=0,
random_seed=None,
use_interactive=False,
viz_timeout=10000,
viz_server="tcp://localhost:5556",
dump_points=False,
dump_file="dump.txt",
dump_interval=1,
print_interval=10,
device=0,
):
"""Initialization method for barnes hut T-SNE class.
"""
# Initialize the variables
self.n_components = int(n_components)
if self.n_components != 2:
raise ValueError('The current barnes-hut implementation does not support projection into dimensions other than 2 for now.')
self.perplexity = float(perplexity)
self.early_exaggeration = float(early_exaggeration)
self.learning_rate = float(learning_rate)
self.n_iter = int(n_iter)
self.n_iter_without_progress = int(n_iter_without_progress)
self.min_grad_norm = float(min_grad_norm)
if metric not in ['euclidean']:
raise ValueError('Non-Euclidean metrics are not currently supported. Please use metric=\'euclidean\' for now.')
else:
self.metric = metric
if init not in ['random']:
raise ValueError('Non-Random initialization is not currently supported. Please use init=\'random\' for now.')
else:
self.init = init
self.verbose = int(verbose)
# Initialize non-sklearn variables
self.num_neighbors = int(num_neighbors)
self.force_magnify_iters = int(force_magnify_iters)
self.perplexity_epsilon = float(perplexity_epsilon)
self.pre_momentum = float(pre_momentum)
self.post_momentum = float(post_momentum)
self.theta = float(theta)
        self.epssq = float(epssq)
self.device = int(device)
self.print_interval = int(print_interval)
        # Point dumping
self.dump_file = str(dump_file)
self.dump_points = bool(dump_points)
self.dump_interval = int(dump_interval)
# Viz
self.use_interactive = bool(use_interactive)
self.viz_server = str(viz_server)
self.viz_timeout = int(viz_timeout)
# Return style
        if return_style not in ['once', 'snapshots']:
raise ValueError('Invalid return style...')
elif return_style == 'once':
self.return_style = 0
elif return_style == 'snapshots':
self.return_style = 1
self.num_snapshots = int(num_snapshots)
# Build the hooks for the BH T-SNE library
self._path = pkg_resources.resource_filename('tsnecuda','') # Load from current location
# self._faiss_lib = N.ctypeslib.load_library('libfaiss', self._path) # Load the ctypes library
# self._gpufaiss_lib = N.ctypeslib.load_library('libgpufaiss', self._path) # Load the ctypes library
self._lib = N.ctypeslib.load_library('libtsnecuda', self._path) # Load the ctypes library
# Hook the BH T-SNE function
self._lib.pymodule_bh_tsne.restype = None
self._lib.pymodule_bh_tsne.argtypes = [
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS, WRITEABLE'), # result
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS'), # points
ctypes.POINTER(N.ctypeslib.c_intp), # dims
ctypes.c_float, # Perplexity
ctypes.c_float, # Learning Rate
ctypes.c_float, # Magnitude Factor
ctypes.c_int, # Num Neighbors
ctypes.c_int, # Iterations
ctypes.c_int, # Iterations no progress
ctypes.c_int, # Force Magnify iterations
ctypes.c_float, # Perplexity search epsilon
ctypes.c_float, # pre-exaggeration momentum
ctypes.c_float, # post-exaggeration momentum
ctypes.c_float, # Theta
ctypes.c_float, # epssq
ctypes.c_float, # Minimum gradient norm
ctypes.c_int, # Initialization types
N.ctypeslib.ndpointer(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS'), # Initialization Data
ctypes.c_bool, # Dump points
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Dump File
ctypes.c_int, # Dump interval
ctypes.c_bool, # Use interactive
N.ctypeslib.ndpointer(N.uint8, flags='ALIGNED, CONTIGUOUS'), # Viz Server
ctypes.c_int, # Viz timeout
ctypes.c_int, # Verbosity
ctypes.c_int, # Print interval
ctypes.c_int, # GPU Device
ctypes.c_int, # Return style
ctypes.c_int ] # Number of snapshots
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Arguments:
X {array} -- Input array, shape: (n_points, n_dimensions)
Keyword Arguments:
y {None} -- Ignored (default: {None})
"""
# Setup points/embedding requirements
self.points = N.require(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])
self.embedding = N.zeros(shape=(X.shape[0],self.n_components))
self.embedding = N.require(self.embedding , N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])
# Handle Initialization
if y is None:
self.initialization_type = 1
self.init_data = N.require(N.zeros((1,1)),N.float32,['CONTIGUOUS','ALIGNED'])
else:
self.initialization_type = 3
self.init_data = N.require(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])
# Handle dumping and viz strings
self.dump_file_ = N.require(ord_string(self.dump_file), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self.viz_server_ = N.require(ord_string(self.viz_server), N.uint8, ['CONTIGUOUS', 'ALIGNED'])
self._lib.pymodule_bh_tsne(
self.embedding, # result
self.points, # points
self.points.ctypes.shape, # dims
ctypes.c_float(self.perplexity), # Perplexity
ctypes.c_float(self.learning_rate), # Learning Rate
ctypes.c_float(self.early_exaggeration), # Magnitude Factor
ctypes.c_int(self.num_neighbors), # Num Neighbors
ctypes.c_int(self.n_iter), # Iterations
ctypes.c_int(self.n_iter_without_progress), # Iterations no progress
ctypes.c_int(self.force_magnify_iters), # Force Magnify iterations
ctypes.c_float(self.perplexity_epsilon), # Perplexity search epsilon
ctypes.c_float(self.pre_momentum), # pre-exaggeration momentum
ctypes.c_float(self.post_momentum), # post-exaggeration momentum
ctypes.c_float(self.theta), # Theta
ctypes.c_float(self.epssq), # epssq
ctypes.c_float(self.min_grad_norm), # Minimum gradient norm
ctypes.c_int(self.initialization_type), # Initialization types
self.init_data, # Initialization Data
ctypes.c_bool(self.dump_points), # Dump points
self.dump_file_, # Dump File
ctypes.c_int(self.dump_interval), # Dump interval
ctypes.c_bool(self.use_interactive), # Use interactive
self.viz_server_, # Viz Server
ctypes.c_int(self.viz_timeout), # Viz timeout
ctypes.c_int(self.verbose), # Verbosity
ctypes.c_int(self.print_interval), # Print interval
ctypes.c_int(self.device), # GPU Device
ctypes.c_int(self.return_style), # Return style
ctypes.c_int(self.num_snapshots) ) # Number of snapshots
return self.embedding
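if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original bindings).
    # Running it requires the compiled libtsnecuda library and a CUDA device;
    # the random data below is a hypothetical placeholder.
    demo = N.random.randn(5000, 128).astype(N.float32)
    tsne = TSNE(n_components=2, perplexity=30.0, n_iter=1000, verbose=1)
    embedding = tsne.fit_transform(demo)
    print(embedding.shape)  # expected: (5000, 2)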
|
[
"ctypes.POINTER",
"numpy.require",
"pkg_resources.resource_filename",
"numpy.array",
"numpy.zeros",
"numpy.ctypeslib.ndpointer",
"ctypes.c_bool",
"ctypes.c_int",
"numpy.ctypeslib.load_library",
"ctypes.c_float"
] |
[((3861, 3908), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""tsnecuda"""', '""""""'], {}), "('tsnecuda', '')\n", (3892, 3908), False, 'import pkg_resources\n'), ((4169, 4220), 'numpy.ctypeslib.load_library', 'N.ctypeslib.load_library', (['"""libtsnecuda"""', 'self._path'], {}), "('libtsnecuda', self._path)\n", (4193, 4220), True, 'import numpy as N\n'), ((6450, 6500), 'numpy.require', 'N.require', (['X', 'N.float32', "['CONTIGUOUS', 'ALIGNED']"], {}), "(X, N.float32, ['CONTIGUOUS', 'ALIGNED'])\n", (6459, 6500), True, 'import numpy as N\n'), ((6526, 6572), 'numpy.zeros', 'N.zeros', ([], {'shape': '(X.shape[0], self.n_components)'}), '(shape=(X.shape[0], self.n_components))\n', (6533, 6572), True, 'import numpy as N\n'), ((6597, 6675), 'numpy.require', 'N.require', (['self.embedding', 'N.float32', "['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE']"], {}), "(self.embedding, N.float32, ['F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE'])\n", (6606, 6675), True, 'import numpy as N\n'), ((487, 516), 'numpy.array', 'N.array', (['([x for x in b] + [0])'], {}), '([x for x in b] + [0])\n', (494, 516), True, 'import numpy as N\n'), ((4400, 4487), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.float32'], {'ndim': '(2)', 'flags': '"""ALIGNED, F_CONTIGUOUS, WRITEABLE"""'}), "(N.float32, ndim=2, flags=\n 'ALIGNED, F_CONTIGUOUS, WRITEABLE')\n", (4421, 4487), True, 'import numpy as N\n'), ((4509, 4578), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.float32'], {'ndim': '(2)', 'flags': '"""ALIGNED, CONTIGUOUS"""'}), "(N.float32, ndim=2, flags='ALIGNED, CONTIGUOUS')\n", (4530, 4578), True, 'import numpy as N\n'), ((4605, 4639), 'ctypes.POINTER', 'ctypes.POINTER', (['N.ctypeslib.c_intp'], {}), '(N.ctypeslib.c_intp)\n', (4619, 4639), False, 'import ctypes\n'), ((5379, 5450), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.float32'], {'ndim': '(2)', 'flags': '"""ALIGNED, F_CONTIGUOUS"""'}), "(N.float32, ndim=2, flags='ALIGNED, F_CONTIGUOUS')\n", (5400, 5450), True, 'import numpy as N\n'), ((5535, 5594), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.uint8'], {'flags': '"""ALIGNED, CONTIGUOUS"""'}), "(N.uint8, flags='ALIGNED, CONTIGUOUS')\n", (5556, 5594), True, 'import numpy as N\n'), ((5719, 5778), 'numpy.ctypeslib.ndpointer', 'N.ctypeslib.ndpointer', (['N.uint8'], {'flags': '"""ALIGNED, CONTIGUOUS"""'}), "(N.uint8, flags='ALIGNED, CONTIGUOUS')\n", (5740, 5778), True, 'import numpy as N\n'), ((6947, 6999), 'numpy.require', 'N.require', (['y', 'N.float32', "['F_CONTIGUOUS', 'ALIGNED']"], {}), "(y, N.float32, ['F_CONTIGUOUS', 'ALIGNED'])\n", (6956, 6999), True, 'import numpy as N\n'), ((7426, 7457), 'ctypes.c_float', 'ctypes.c_float', (['self.perplexity'], {}), '(self.perplexity)\n', (7440, 7457), False, 'import ctypes\n'), ((7488, 7522), 'ctypes.c_float', 'ctypes.c_float', (['self.learning_rate'], {}), '(self.learning_rate)\n', (7502, 7522), False, 'import ctypes\n'), ((7556, 7595), 'ctypes.c_float', 'ctypes.c_float', (['self.early_exaggeration'], {}), '(self.early_exaggeration)\n', (7570, 7595), False, 'import ctypes\n'), ((7632, 7664), 'ctypes.c_int', 'ctypes.c_int', (['self.num_neighbors'], {}), '(self.num_neighbors)\n', (7644, 7664), False, 'import ctypes\n'), ((7698, 7723), 'ctypes.c_int', 'ctypes.c_int', (['self.n_iter'], {}), '(self.n_iter)\n', (7710, 7723), False, 'import ctypes\n'), ((7754, 7796), 'ctypes.c_int', 'ctypes.c_int', (['self.n_iter_without_progress'], {}), '(self.n_iter_without_progress)\n', (7766, 7796), False, 'import ctypes\n'), 
((7839, 7877), 'ctypes.c_int', 'ctypes.c_int', (['self.force_magnify_iters'], {}), '(self.force_magnify_iters)\n', (7851, 7877), False, 'import ctypes\n'), ((7922, 7961), 'ctypes.c_float', 'ctypes.c_float', (['self.perplexity_epsilon'], {}), '(self.perplexity_epsilon)\n', (7936, 7961), False, 'import ctypes\n'), ((8007, 8040), 'ctypes.c_float', 'ctypes.c_float', (['self.pre_momentum'], {}), '(self.pre_momentum)\n', (8021, 8040), False, 'import ctypes\n'), ((8086, 8120), 'ctypes.c_float', 'ctypes.c_float', (['self.post_momentum'], {}), '(self.post_momentum)\n', (8100, 8120), False, 'import ctypes\n'), ((8167, 8193), 'ctypes.c_float', 'ctypes.c_float', (['self.theta'], {}), '(self.theta)\n', (8181, 8193), False, 'import ctypes\n'), ((8219, 8245), 'ctypes.c_float', 'ctypes.c_float', (['self.epssq'], {}), '(self.epssq)\n', (8233, 8245), False, 'import ctypes\n'), ((8271, 8305), 'ctypes.c_float', 'ctypes.c_float', (['self.min_grad_norm'], {}), '(self.min_grad_norm)\n', (8285, 8305), False, 'import ctypes\n'), ((8347, 8385), 'ctypes.c_int', 'ctypes.c_int', (['self.initialization_type'], {}), '(self.initialization_type)\n', (8359, 8385), False, 'import ctypes\n'), ((8480, 8511), 'ctypes.c_bool', 'ctypes.c_bool', (['self.dump_points'], {}), '(self.dump_points)\n', (8493, 8511), False, 'import ctypes\n'), ((8588, 8620), 'ctypes.c_int', 'ctypes.c_int', (['self.dump_interval'], {}), '(self.dump_interval)\n', (8600, 8620), False, 'import ctypes\n'), ((8654, 8689), 'ctypes.c_bool', 'ctypes.c_bool', (['self.use_interactive'], {}), '(self.use_interactive)\n', (8667, 8689), False, 'import ctypes\n'), ((8772, 8802), 'ctypes.c_int', 'ctypes.c_int', (['self.viz_timeout'], {}), '(self.viz_timeout)\n', (8784, 8802), False, 'import ctypes\n'), ((8834, 8860), 'ctypes.c_int', 'ctypes.c_int', (['self.verbose'], {}), '(self.verbose)\n', (8846, 8860), False, 'import ctypes\n'), ((8890, 8923), 'ctypes.c_int', 'ctypes.c_int', (['self.print_interval'], {}), '(self.print_interval)\n', (8902, 8923), False, 'import ctypes\n'), ((8958, 8983), 'ctypes.c_int', 'ctypes.c_int', (['self.device'], {}), '(self.device)\n', (8970, 8983), False, 'import ctypes\n'), ((9014, 9045), 'ctypes.c_int', 'ctypes.c_int', (['self.return_style'], {}), '(self.return_style)\n', (9026, 9045), False, 'import ctypes\n'), ((9078, 9110), 'ctypes.c_int', 'ctypes.c_int', (['self.num_snapshots'], {}), '(self.num_snapshots)\n', (9090, 9110), False, 'import ctypes\n'), ((6812, 6827), 'numpy.zeros', 'N.zeros', (['(1, 1)'], {}), '((1, 1))\n', (6819, 6827), True, 'import numpy as N\n')]
|
import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam
from data_utils import *
from image_utils import *
from captum_utils import *
import numpy as np
from visualizers import GradCam
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
X, y, class_names = load_imagenet_val(num=5)
# FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
plt.subplot(1, 5, i + 1)
img = gbp_result[i]
img = rescale(img)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')
# GradCam
# GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
param.requires_grad = True
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gradcam_val = gradcam_result[i]
img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255)
img = img / np.max(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')
# As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gbp_val = gbp_result[i]
gradcam_val = np.expand_dims(gradcam_result[i], axis=2)
# Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines)
img = gradcam_val * gbp_val
img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img = np.float32(img)
img = torch.from_numpy(img)
img = deprocess(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')
# **************************************************************************************** #
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)
conv_module = model.features[12]
##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #
# visualize_attr_maps function from captum_utils.py is useful for #
# visualizing captum outputs #
# Use conv_module as the convolution layer for gradcam #
##############################################################################
# Computing Guided GradCam
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])
# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Try out different layers and see observe how the attributions change
layer = model.features[3]
# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)
##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar #
# to what we did for the other captum sections, using our helper methods), #
# but with some preprocessing calculations. #
# #
# You can refer to the LayerActivation example above and you should be #
# using 'layer' given above for this section #
# #
# Also note that, you would need to customize your 'attr_preprocess' #
# parameter that you send along to 'visualize_attr_maps' as the default #
# 'attr_preprocess' is written to only to handle multi channel attributions. #
# #
# For layer gradcam look at the usage of the parameter relu_attributions #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution
N, C, H, W = X_tensor.shape
LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target = y_tensor)
LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H, W))
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])
LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)
LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H, W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
|
[
"torch.LongTensor",
"torch.from_numpy",
"matplotlib.cm.jet",
"visualizers.GradCam",
"matplotlib.pyplot.imshow",
"numpy.max",
"matplotlib.pyplot.axis",
"captum.attr.GuidedGradCam",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.title",
"captum.attr.LayerAttribution.interpolate",
"PIL.Image.fromarray",
"captum.attr.GuidedBackprop",
"captum.attr.LayerGradCam",
"matplotlib.pyplot.figure",
"captum.attr.LayerConductance",
"numpy.expand_dims",
"torchvision.models.squeezenet1_1",
"matplotlib.pyplot.subplot",
"numpy.float32"
] |
[((699, 748), 'torchvision.models.squeezenet1_1', 'torchvision.models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (731, 748), False, 'import torchvision\n'), ((754, 763), 'visualizers.GradCam', 'GradCam', ([], {}), '()\n', (761, 763), False, 'from visualizers import GradCam\n'), ((870, 889), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (886, 889), False, 'import torch\n'), ((979, 1007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 24)'}), '(figsize=(24, 24))\n', (989, 1007), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1267), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization/guided_backprop.png"""'], {}), "('visualization/guided_backprop.png')\n", (1230, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1479), 'torchvision.models.squeezenet1_1', 'torchvision.models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1462, 1479), False, 'import torchvision\n'), ((1653, 1672), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (1669, 1672), False, 'import torch\n'), ((1733, 1761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 24)'}), '(figsize=(24, 24))\n', (1743, 1761), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2096), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization/gradcam.png"""'], {}), "('visualization/gradcam.png')\n", (2067, 2096), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2308), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (2305, 2308), False, 'import torch\n'), ((2431, 2459), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 24)'}), '(figsize=(24, 24))\n', (2441, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2992, 3039), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""visualization/guided_gradcam.png"""'], {}), "('visualization/guided_gradcam.png')\n", (3003, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3201), 'torchvision.models.squeezenet1_1', 'torchvision.models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3184, 3201), False, 'import torchvision\n'), ((3519, 3538), 'torch.LongTensor', 'torch.LongTensor', (['y'], {}), '(y)\n', (3535, 3538), False, 'import torch\n'), ((4082, 4115), 'captum.attr.GuidedGradCam', 'GuidedGradCam', (['model', 'conv_module'], {}), '(model, conv_module)\n', (4095, 4115), False, 'from captum.attr import GuidedGradCam, GuidedBackprop\n'), ((4401, 4422), 'captum.attr.GuidedBackprop', 'GuidedBackprop', (['model'], {}), '(model)\n', (4415, 4422), False, 'from captum.attr import GuidedGradCam, GuidedBackprop\n'), ((6412, 6442), 'captum.attr.LayerConductance', 'LayerConductance', (['model', 'layer'], {}), '(model, layer)\n', (6428, 6442), False, 'from captum.attr import LayerActivation, LayerConductance, LayerGradCam\n'), ((6574, 6623), 'captum.attr.LayerAttribution.interpolate', 'LayerAttribution.interpolate', (['LC_attr_sum', '(H, W)'], {}), '(LC_attr_sum, (H, W))\n', (6602, 6623), False, 'from captum.attr import LayerAttribution\n'), ((6797, 6823), 'captum.attr.LayerGradCam', 'LayerGradCam', (['model', 'layer'], {}), '(model, layer)\n', (6809, 6823), False, 'from captum.attr import LayerActivation, LayerConductance, LayerGradCam\n'), ((6964, 7014), 'captum.attr.LayerAttribution.interpolate', 'LayerAttribution.interpolate', (['LGC_attr_sum', '(H, W)'], {}), '(LGC_attr_sum, (H, W))\n', (6992, 7014), False, 'from captum.attr import LayerAttribution\n'), ((1049, 1073), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (1060, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1140), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1135, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1145, 1173), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (1154, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1193), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1186, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1957), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (1944, 1957), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1977), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1972, 1977), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2010), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (1991, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2030), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2023, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2547, 2588), 'numpy.expand_dims', 'np.expand_dims', (['gradcam_result[i]'], {'axis': '(2)'}), '(gradcam_result[i], axis=2)\n', (2561, 2588), True, 'import numpy as np\n'), ((2792, 2807), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2802, 2807), True, 'import numpy as np\n'), ((2818, 2839), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2834, 2839), False, 'import torch\n'), ((2869, 2893), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(5)', '(i + 1)'], {}), '(1, 5, i + 1)\n', (2880, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2908, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2918, 2946), 'matplotlib.pyplot.title', 'plt.title', (['class_names[y[i]]'], {}), '(class_names[y[i]])\n', (2927, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2966), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2959, 2966), True, 'import matplotlib.pyplot as plt\n'), ((1194, 1203), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1201, 1203), True, 'import matplotlib.pyplot as plt\n'), ((1917, 1928), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1923, 1928), True, 'import numpy as np\n'), ((2031, 2040), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2038, 2040), True, 'import matplotlib.pyplot as plt\n'), ((2967, 2976), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2974, 2976), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3486), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (3483, 3486), False, 'from PIL import Image\n'), ((1857, 1887), 'matplotlib.cm.jet', 'matplotlib.cm.jet', (['gradcam_val'], {}), '(gradcam_val)\n', (1874, 1887), False, 'import matplotlib\n'), ((798, 816), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (813, 816), False, 'from PIL import Image\n'), ((1581, 1599), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (1596, 1599), False, 'from PIL import Image\n'), ((2217, 2235), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (2232, 2235), False, 'from PIL import Image\n')]
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.utils.discretizer import Discretizer
x = np.random.rand(10, 4)
n_features = x.shape[1]
feature_names = [str(_) for _ in range(n_features)]
categorical_features = [[], [1, 3]]
percentiles = [list(np.arange(25, 100, 25)), list(np.arange(10, 100, 10))]
tests = list(product(categorical_features, percentiles))
n_tests = len(tests)
@pytest.fixture
def cats_and_percentiles(request):
cat, perc = tests[request.param]
return cat, perc
@pytest.mark.parametrize('cats_and_percentiles', list(range(n_tests)), indirect=True)
def test_discretizer(cats_and_percentiles):
cat, perc = cats_and_percentiles
disc = Discretizer(x, cat, feature_names, perc)
to_disc = list(disc.names.keys())
assert len(to_disc) == (x.shape[1] - len(cat))
x_disc = disc.discretize(x)
for k, v in disc.names.items():
assert len(v) <= len(perc) + 1
assert callable(disc.lambdas[k])
assert (x_disc[:, k].min() == 0).all()
assert (x_disc[:, k].max() == len(perc)).all()
for i in range(x.shape[1]):
if i not in to_disc:
assert (x_disc[:, i] == x[:, i]).all()
|
[
"alibi_detect.utils.discretizer.Discretizer",
"itertools.product",
"numpy.random.rand",
"numpy.arange"
] |
[((123, 144), 'numpy.random.rand', 'np.random.rand', (['(10)', '(4)'], {}), '(10, 4)\n', (137, 144), True, 'import numpy as np\n'), ((346, 388), 'itertools.product', 'product', (['categorical_features', 'percentiles'], {}), '(categorical_features, percentiles)\n', (353, 388), False, 'from itertools import product\n'), ((702, 742), 'alibi_detect.utils.discretizer.Discretizer', 'Discretizer', (['x', 'cat', 'feature_names', 'perc'], {}), '(x, cat, feature_names, perc)\n', (713, 742), False, 'from alibi_detect.utils.discretizer import Discretizer\n'), ((278, 300), 'numpy.arange', 'np.arange', (['(25)', '(100)', '(25)'], {}), '(25, 100, 25)\n', (287, 300), True, 'import numpy as np\n'), ((308, 330), 'numpy.arange', 'np.arange', (['(10)', '(100)', '(10)'], {}), '(10, 100, 10)\n', (317, 330), True, 'import numpy as np\n')]
|
# Created by <NAME> on 8/28/19
import gym
import numpy as np
import torch
from interpretable_ddts.agents.ddt_agent import DDTAgent
from interpretable_ddts.agents.mlp_agent import MLPAgent
from interpretable_ddts.opt_helpers.replay_buffer import discount_reward
import torch.multiprocessing as mp
import argparse
import copy
import random
def run_episode(q, agent_in, ENV_NAME, seed=0):
agent = agent_in.duplicate()
if ENV_NAME == 'lunar':
env = gym.make('LunarLander-v2')
elif ENV_NAME == 'cart':
env = gym.make('CartPole-v1')
else:
raise Exception('No valid environment selected')
done = False
torch.manual_seed(seed)
env.seed(seed)
np.random.seed(seed)
env.action_space.seed(seed)
random.seed(seed)
state = env.reset() # Reset environment and record the starting state
while not done:
action = agent.get_action(state)
# Step through environment using chosen action
state, reward, done, _ = env.step(action)
# env.render()
# Save reward
agent.save_reward(reward)
if done:
break
reward_sum = np.sum(agent.replay_buffer.rewards_list)
rewards_list, advantage_list, deeper_advantage_list = discount_reward(agent.replay_buffer.rewards_list,
agent.replay_buffer.value_list,
agent.replay_buffer.deeper_value_list)
agent.replay_buffer.rewards_list = rewards_list
agent.replay_buffer.advantage_list = advantage_list
agent.replay_buffer.deeper_advantage_list = deeper_advantage_list
to_return = [reward_sum, copy.deepcopy(agent.replay_buffer.__getstate__())]
if q is not None:
try:
q.put(to_return)
except RuntimeError as e:
print(e)
return to_return
return to_return
def main(episodes, agent, ENV_NAME):
running_reward_array = []
for episode in range(episodes):
reward = 0
returned_object = run_episode(None, agent_in=agent, ENV_NAME=ENV_NAME)
reward += returned_object[0]
running_reward_array.append(returned_object[0])
agent.replay_buffer.extend(returned_object[1])
if reward >= 499:
agent.save('../models/'+str(episode)+'th')
agent.end_episode(reward)
running_reward = sum(running_reward_array[-100:]) / float(min(100.0, len(running_reward_array)))
if episode % 50 == 0:
print(f'Episode {episode} Last Reward: {reward} Average Reward: {running_reward}')
if episode % 500 == 0:
agent.save('../models/'+str(episode)+'th')
return running_reward_array
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--agent_type", help="architecture of agent to run", type=str, default='ddt')
parser.add_argument("-e", "--episodes", help="how many episodes", type=int, default=2000)
parser.add_argument("-l", "--num_leaves", help="number of leaves for DDT/DRL ", type=int, default=8)
parser.add_argument("-n", "--num_hidden", help="number of hidden layers for MLP ", type=int, default=0)
parser.add_argument("-env", "--env_type", help="environment to run on", type=str, default='cart')
parser.add_argument("-gpu", help="run on GPU?", action='store_true')
args = parser.parse_args()
AGENT_TYPE = args.agent_type # 'ddt', 'mlp'
NUM_EPS = args.episodes # num episodes Default 1000
ENV_TYPE = args.env_type # 'cart' or 'lunar' Default 'cart'
USE_GPU = args.gpu # Applies for 'prolo' only. use gpu? Default false
if ENV_TYPE == 'lunar':
init_env = gym.make('LunarLander-v2')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
elif ENV_TYPE == 'cart':
init_env = gym.make('CartPole-v1')
dim_in = init_env.observation_space.shape[0]
dim_out = init_env.action_space.n
else:
raise Exception('No valid environment selected')
print(f"Agent {AGENT_TYPE} on {ENV_TYPE} ")
# mp.set_start_method('spawn')
mp.set_sharing_strategy('file_system')
for i in range(5):
bot_name = AGENT_TYPE + ENV_TYPE
if USE_GPU:
bot_name += 'GPU'
if AGENT_TYPE == 'ddt':
policy_agent = DDTAgent(bot_name=bot_name,
input_dim=dim_in,
output_dim=dim_out,
rule_list=False,
num_rules=args.num_leaves)
elif AGENT_TYPE == 'mlp':
policy_agent = MLPAgent(input_dim=dim_in,
bot_name=bot_name,
output_dim=dim_out,
num_hidden=args.num_hidden)
else:
raise Exception('No valid network selected')
reward_array = main(NUM_EPS, policy_agent, ENV_TYPE)
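    # Example invocations (script name assumed; the flags are defined by the argparse setup above):
    #   python runner.py -a ddt -env cart -e 2000 -l 8
    #   python runner.py -a mlp -env lunar -e 2000 -n 1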
|
[
"torch.manual_seed",
"argparse.ArgumentParser",
"interpretable_ddts.agents.mlp_agent.MLPAgent",
"interpretable_ddts.opt_helpers.replay_buffer.discount_reward",
"random.seed",
"numpy.sum",
"numpy.random.seed",
"interpretable_ddts.agents.ddt_agent.DDTAgent",
"torch.multiprocessing.set_sharing_strategy",
"gym.make"
] |
[((645, 668), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (662, 668), False, 'import torch\n'), ((692, 712), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (706, 712), True, 'import numpy as np\n'), ((749, 766), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (760, 766), False, 'import random\n'), ((1140, 1180), 'numpy.sum', 'np.sum', (['agent.replay_buffer.rewards_list'], {}), '(agent.replay_buffer.rewards_list)\n', (1146, 1180), True, 'import numpy as np\n'), ((1239, 1364), 'interpretable_ddts.opt_helpers.replay_buffer.discount_reward', 'discount_reward', (['agent.replay_buffer.rewards_list', 'agent.replay_buffer.value_list', 'agent.replay_buffer.deeper_value_list'], {}), '(agent.replay_buffer.rewards_list, agent.replay_buffer.\n value_list, agent.replay_buffer.deeper_value_list)\n', (1254, 1364), False, 'from interpretable_ddts.opt_helpers.replay_buffer import discount_reward\n'), ((2796, 2821), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2819, 2821), False, 'import argparse\n'), ((4181, 4219), 'torch.multiprocessing.set_sharing_strategy', 'mp.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (4204, 4219), True, 'import torch.multiprocessing as mp\n'), ((463, 489), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (471, 489), False, 'import gym\n'), ((3737, 3763), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (3745, 3763), False, 'import gym\n'), ((533, 556), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (541, 556), False, 'import gym\n'), ((3907, 3930), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (3915, 3930), False, 'import gym\n'), ((4393, 4507), 'interpretable_ddts.agents.ddt_agent.DDTAgent', 'DDTAgent', ([], {'bot_name': 'bot_name', 'input_dim': 'dim_in', 'output_dim': 'dim_out', 'rule_list': '(False)', 'num_rules': 'args.num_leaves'}), '(bot_name=bot_name, input_dim=dim_in, output_dim=dim_out, rule_list\n =False, num_rules=args.num_leaves)\n', (4401, 4507), False, 'from interpretable_ddts.agents.ddt_agent import DDTAgent\n'), ((4708, 4805), 'interpretable_ddts.agents.mlp_agent.MLPAgent', 'MLPAgent', ([], {'input_dim': 'dim_in', 'bot_name': 'bot_name', 'output_dim': 'dim_out', 'num_hidden': 'args.num_hidden'}), '(input_dim=dim_in, bot_name=bot_name, output_dim=dim_out,\n num_hidden=args.num_hidden)\n', (4716, 4805), False, 'from interpretable_ddts.agents.mlp_agent import MLPAgent\n')]
|
"""
YTArray class.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import copy
import numpy as np
from distutils.version import LooseVersion
from functools import wraps
from numpy import \
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \
floor_divide, negative, power, remainder, mod, absolute, rint, \
sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \
reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \
hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \
bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \
greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \
isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \
modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing
try:
# numpy 1.13 or newer
from numpy import positive, divmod as divmod_, isnat, heaviside
except ImportError:
positive, divmod_, isnat, heaviside = (None,)*4
from yt.units.unit_object import Unit, UnitParseError
from yt.units.unit_registry import UnitRegistry
from yt.units.dimensions import \
angle, \
current_mks, \
dimensionless, \
em_dimensions
from yt.utilities.exceptions import \
YTUnitOperationError, YTUnitConversionError, \
YTUfuncUnitError, YTIterableUnitCoercionError, \
YTInvalidUnitEquivalence, YTEquivalentDimsError
from yt.utilities.lru_cache import lru_cache
from numbers import Number as numeric_type
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
from .pint_conversions import convert_pint_units
NULL_UNIT = Unit()
POWER_SIGN_MAPPING = {multiply: 1, divide: -1}
# redefine this here to avoid a circular import from yt.funcs
def iterable(obj):
    try:
        len(obj)
    except:
        return False
return True
def return_arr(func):
@wraps(func)
def wrapped(*args, **kwargs):
ret, units = func(*args, **kwargs)
if ret.shape == ():
return YTQuantity(ret, units)
else:
# This could be a subclass, so don't call YTArray directly.
return type(args[0])(ret, units)
return wrapped
@lru_cache(maxsize=128, typed=False)
def sqrt_unit(unit):
return unit**0.5
@lru_cache(maxsize=128, typed=False)
def multiply_units(unit1, unit2):
return unit1 * unit2
def preserve_units(unit1, unit2=None):
return unit1
@lru_cache(maxsize=128, typed=False)
def power_unit(unit, power):
return unit**power
@lru_cache(maxsize=128, typed=False)
def square_unit(unit):
return unit*unit
@lru_cache(maxsize=128, typed=False)
def divide_units(unit1, unit2):
return unit1/unit2
@lru_cache(maxsize=128, typed=False)
def reciprocal_unit(unit):
return unit**-1
def passthrough_unit(unit, unit2=None):
return unit
def return_without_unit(unit, unit2=None):
return None
def arctan2_unit(unit1, unit2):
return NULL_UNIT
def comparison_unit(unit1, unit2=None):
return None
def invert_units(unit):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def bitop_units(unit1, unit2):
raise TypeError(
"Bit-twiddling operators are not defined for YTArray instances")
def get_inp_u_unary(ufunc, inputs, out_arr=None):
inp = inputs[0]
u = getattr(inp, 'units', None)
if u is None:
u = NULL_UNIT
if u.dimensions is angle and ufunc in trigonometric_operators:
inp = inp.in_units('radian').v
if out_arr is not None:
out_arr = ufunc(inp).view(np.ndarray)
return out_arr, inp, u
def get_inp_u_binary(ufunc, inputs):
inp1 = coerce_iterable_units(inputs[0])
inp2 = coerce_iterable_units(inputs[1])
unit1 = getattr(inp1, 'units', None)
unit2 = getattr(inp2, 'units', None)
ret_class = get_binary_op_return_class(type(inp1), type(inp2))
if unit1 is None:
unit1 = Unit(registry=getattr(unit2, 'registry', None))
if unit2 is None and ufunc is not power:
unit2 = Unit(registry=getattr(unit1, 'registry', None))
elif ufunc is power:
unit2 = inp2
if isinstance(unit2, np.ndarray):
if isinstance(unit2, YTArray):
if unit2.units.is_dimensionless:
pass
else:
raise YTUnitOperationError(ufunc, unit1, unit2)
unit2 = 1.0
return (inp1, inp2), (unit1, unit2), ret_class
def handle_preserve_units(inps, units, ufunc, ret_class):
if units[0] != units[1]:
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
else:
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def handle_multiply_divide_units(unit, units, out, out_arr):
if unit.is_dimensionless and unit.base_value != 1.0:
if not units[0].is_dimensionless:
if units[0].dimensions == units[1].dimensions:
out_arr = np.multiply(out_arr.view(np.ndarray),
unit.base_value, out=out)
unit = Unit(registry=unit.registry)
return out, out_arr, unit
def coerce_iterable_units(input_object):
if isinstance(input_object, np.ndarray):
return input_object
if iterable(input_object):
if any([isinstance(o, YTArray) for o in input_object]):
            ff = getattr(input_object[0], 'units', NULL_UNIT)
if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
raise YTIterableUnitCoercionError(input_object)
# This will create a copy of the data in the iterable.
return YTArray(input_object)
return input_object
else:
return input_object
def sanitize_units_mul(this_object, other_object):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# If the other object is a YTArray and has the same dimensions as the object
# under consideration, convert so we don't mix units with the same
# dimensions.
if isinstance(ret, YTArray):
if inp.units.same_dimensions_as(ret.units):
ret.in_units(inp.units)
return ret
def sanitize_units_add(this_object, other_object, op_string):
inp = coerce_iterable_units(this_object)
ret = coerce_iterable_units(other_object)
# Make sure the other object is a YTArray before we use the `units`
# attribute.
if isinstance(ret, YTArray):
if not inp.units.same_dimensions_as(ret.units):
# handle special case of adding or subtracting with zero or
# array filled with zero
if not np.any(other_object):
return ret.view(np.ndarray)
elif not np.any(this_object):
return ret
raise YTUnitOperationError(op_string, inp.units, ret.units)
ret = ret.in_units(inp.units)
else:
# If the other object is not a YTArray, then one of the arrays must be
# dimensionless or filled with zeros
if not inp.units.is_dimensionless and np.any(ret):
raise YTUnitOperationError(op_string, inp.units, dimensionless)
return ret
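# Behaviour sketch (added for illustration; not part of the original source):
#   sanitize_units_add(YTArray([1.0, 2.0], 'cm'), YTArray([1.0, 1.0], 'm'), 'add')
#       -> YTArray([ 100.,  100.]) cm   (compatible dimensions are converted)
#   sanitize_units_add(YTArray([1.0, 2.0], 'cm'), 3.0, 'add')
#       -> raises YTUnitOperationError  (nonzero dimensionless operand)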
def validate_comparison_units(this, other, op_string):
# Check that other is a YTArray.
if hasattr(other, 'units'):
if this.units.expr is other.units.expr:
if this.units.base_value == other.units.base_value:
return other
if not this.units.same_dimensions_as(other.units):
raise YTUnitOperationError(op_string, this.units, other.units)
return other.in_units(this.units)
return other
@lru_cache(maxsize=128, typed=False)
def _unit_repr_check_same(my_units, other_units):
"""
Takes a Unit object, or string of known unit symbol, and check that it
is compatible with this quantity. Returns Unit object.
"""
# let Unit() handle units arg if it's not already a Unit obj.
if not isinstance(other_units, Unit):
other_units = Unit(other_units, registry=my_units.registry)
equiv_dims = em_dimensions.get(my_units.dimensions, None)
if equiv_dims == other_units.dimensions:
if current_mks in equiv_dims.free_symbols:
base = "SI"
else:
base = "CGS"
raise YTEquivalentDimsError(my_units, other_units, base)
if not my_units.same_dimensions_as(other_units):
raise YTUnitConversionError(
my_units, my_units.dimensions, other_units, other_units.dimensions)
return other_units
unary_operators = (
negative, absolute, rint, sign, conj, exp, exp2, log, log2,
log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat,
)
binary_operators = (
add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power,
remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor,
left_shift, right_shift, greater, greater_equal, less, less_equal,
not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum,
fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside
)
trigonometric_operators = (
sin, cos, tan,
)
class YTArray(np.ndarray):
"""
An ndarray subclass that attaches a symbolic unit object to the array data.
Parameters
----------
input_array : :obj:`!iterable`
A tuple, list, or array to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the array. Powers must be specified using python
syntax (cm**3, not cm^3).
registry : ~yt.units.unit_registry.UnitRegistry
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data. Defaults to the dtype of the input data,
or, if none is found, uses np.float64
bypass_validation : boolean
        If True, all input validation is skipped. Using this option may produce
        corrupted, invalid units or array data, but can lead to significant
        speedups in cases where the input validation logic adds significant
        overhead. If set, input_units *must* be a valid unit object.
        Defaults to False.
Examples
--------
>>> from yt import YTArray
>>> a = YTArray([1, 2, 3], 'cm')
>>> b = YTArray([4, 5, 6], 'm')
>>> a + b
YTArray([ 401., 502., 603.]) cm
>>> b + a
YTArray([ 4.01, 5.02, 6.03]) m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTArray(np.arange(8) - 4, 'g/cm**3')
>>> np.abs(a)
YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3
and strip them when it would be annoying to deal with them.
>>> np.log10(a)
array([ -inf, 0. , 0.30103 , 0.47712125, 0.60205999,
0.69897 , 0.77815125, 0.84509804])
YTArray is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.arr(np.ones(5), 'code_length')
>>> a.in_cgs()
YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24,
3.08600000e+24, 3.08600000e+24]) cm
This is equivalent to:
>>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
_ufunc_registry = {
add: preserve_units,
subtract: preserve_units,
multiply: multiply_units,
divide: divide_units,
logaddexp: return_without_unit,
logaddexp2: return_without_unit,
true_divide: divide_units,
floor_divide: divide_units,
negative: passthrough_unit,
power: power_unit,
remainder: preserve_units,
mod: preserve_units,
fmod: preserve_units,
absolute: passthrough_unit,
fabs: passthrough_unit,
rint: return_without_unit,
sign: return_without_unit,
conj: passthrough_unit,
exp: return_without_unit,
exp2: return_without_unit,
log: return_without_unit,
log2: return_without_unit,
log10: return_without_unit,
expm1: return_without_unit,
log1p: return_without_unit,
sqrt: sqrt_unit,
square: square_unit,
reciprocal: reciprocal_unit,
sin: return_without_unit,
cos: return_without_unit,
tan: return_without_unit,
sinh: return_without_unit,
cosh: return_without_unit,
tanh: return_without_unit,
arcsin: return_without_unit,
arccos: return_without_unit,
arctan: return_without_unit,
arctan2: arctan2_unit,
arcsinh: return_without_unit,
arccosh: return_without_unit,
arctanh: return_without_unit,
hypot: preserve_units,
deg2rad: return_without_unit,
rad2deg: return_without_unit,
bitwise_and: bitop_units,
bitwise_or: bitop_units,
bitwise_xor: bitop_units,
invert: invert_units,
left_shift: bitop_units,
right_shift: bitop_units,
greater: comparison_unit,
greater_equal: comparison_unit,
less: comparison_unit,
less_equal: comparison_unit,
not_equal: comparison_unit,
equal: comparison_unit,
logical_and: comparison_unit,
logical_or: comparison_unit,
logical_xor: comparison_unit,
logical_not: return_without_unit,
maximum: preserve_units,
minimum: preserve_units,
fmax: preserve_units,
fmin: preserve_units,
isreal: return_without_unit,
iscomplex: return_without_unit,
isfinite: return_without_unit,
isinf: return_without_unit,
isnan: return_without_unit,
signbit: return_without_unit,
copysign: passthrough_unit,
nextafter: preserve_units,
modf: passthrough_unit,
ldexp: bitop_units,
frexp: return_without_unit,
floor: passthrough_unit,
ceil: passthrough_unit,
trunc: passthrough_unit,
spacing: passthrough_unit,
positive: passthrough_unit,
divmod_: passthrough_unit,
isnat: return_without_unit,
heaviside: preserve_units,
}
__array_priority__ = 2.0
def __new__(cls, input_array, input_units=None, registry=None, dtype=None,
bypass_validation=False):
if dtype is None:
dtype = getattr(input_array, 'dtype', np.float64)
if bypass_validation is True:
obj = np.asarray(input_array, dtype=dtype).view(cls)
obj.units = input_units
if registry is not None:
obj.units.registry = registry
return obj
if input_array is NotImplemented:
return input_array.view(cls)
if registry is None and isinstance(input_units, (str, bytes)):
if input_units.startswith('code_'):
raise UnitParseError(
"Code units used without referring to a dataset. \n"
"Perhaps you meant to do something like this instead: \n"
"ds.arr(%s, \"%s\")" % (input_array, input_units)
)
if isinstance(input_array, YTArray):
ret = input_array.view(cls)
if input_units is None:
if registry is None:
ret.units = input_array.units
else:
units = Unit(str(input_array.units), registry=registry)
ret.units = units
elif isinstance(input_units, Unit):
ret.units = input_units
else:
ret.units = Unit(input_units, registry=registry)
return ret
elif isinstance(input_array, np.ndarray):
pass
elif iterable(input_array) and input_array:
if isinstance(input_array[0], YTArray):
return YTArray(np.array(input_array, dtype=dtype),
input_array[0].units, registry=registry)
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array, dtype=dtype).view(cls)
# Check units type
if input_units is None:
# Nothing provided. Make dimensionless...
units = Unit()
elif isinstance(input_units, Unit):
if registry and registry is not input_units.registry:
units = Unit(str(input_units), registry=registry)
else:
units = input_units
else:
# units kwarg set, but it's not a Unit object.
# don't handle all the cases here, let the Unit class handle if
# it's a str.
units = Unit(input_units, registry=registry)
# Attach the units
obj.units = units
return obj
def __repr__(self):
"""
"""
return super(YTArray, self).__repr__()+' '+self.units.__repr__()
def __str__(self):
"""
"""
return str(self.view(np.ndarray)) + ' ' + str(self.units)
#
# Start unit conversion methods
#
def convert_to_units(self, units):
"""
Convert the array and units to the given units.
Parameters
----------
units : Unit object or str
The units you want to convert to.
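        Examples
        --------
        An illustrative in-place conversion (hypothetical values; output
        formatting is indicative):
        >>> a = YTArray([1.0, 2.0, 3.0], 'km')
        >>> a.convert_to_units('m')
        YTArray([ 1000.,  2000.,  3000.]) m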
"""
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
self.units = new_units
values = self.d
values *= conversion_factor
if offset:
np.subtract(self, offset*self.uq, self)
return self
def convert_to_base(self, unit_system="cgs"):
"""
Convert the array and units to the equivalent base units in
the specified unit system.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E.convert_to_base(unit_system="galactic")
"""
return self.convert_to_units(self.units.get_base_equivalent(unit_system))
def convert_to_cgs(self):
"""
Convert the array and units to the equivalent cgs units.
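        Examples
        --------
        An illustrative sketch (hypothetical values; output formatting is
        indicative):
        >>> a = YTArray([1.0, 2.0], 'km')
        >>> a.convert_to_cgs()
        YTArray([ 100000.,  200000.]) cm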
"""
return self.convert_to_units(self.units.get_cgs_equivalent())
def convert_to_mks(self):
"""
Convert the array and units to the equivalent mks units.
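        Examples
        --------
        An illustrative sketch (hypothetical values; output formatting is
        indicative):
        >>> a = YTArray([1.0, 2.0], 'km')
        >>> a.convert_to_mks()
        YTArray([ 1000.,  2000.]) m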
"""
return self.convert_to_units(self.units.get_mks_equivalent())
def in_units(self, units, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string
The units you want to get a new quantity in.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
YTArray
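        Examples
        --------
        An illustrative sketch (hypothetical values; output formatting is
        indicative):
        >>> a = YTArray([3, 4, 5], 'm')
        >>> a.in_units('cm')
        YTArray([ 300.,  400.,  500.]) cm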
"""
if equivalence is None:
new_units = _unit_repr_check_same(self.units, units)
(conversion_factor, offset) = self.units.get_conversion_factor(new_units)
new_array = type(self)(self.ndview * conversion_factor, new_units)
if offset:
np.subtract(new_array, offset*new_array.uq, new_array)
return new_array
else:
return self.to_equivalent(units, equivalence, **kwargs)
def to(self, units, equivalence=None, **kwargs):
"""
An alias for YTArray.in_units().
See the docstrings of that function for details.
"""
return self.in_units(units, equivalence=equivalence, **kwargs)
def to_value(self, units=None, equivalence=None, **kwargs):
"""
Creates a copy of this array with the data in the supplied
units, and returns it without units. Output is therefore a
bare NumPy array.
Optionally, an equivalence can be specified to convert to an
equivalent quantity which is not in the same dimensions.
.. note::
All additional keyword arguments are passed to the
equivalency, which should be used if that particular
equivalency requires them.
Parameters
----------
units : Unit object or string, optional
The units you want to get the bare quantity in. If not
specified, the value will be returned in the current units.
equivalence : string, optional
The equivalence you wish to use. To see which
equivalencies are supported for this unitful
quantity, try the :meth:`list_equivalencies`
method. Default: None
Returns
-------
NumPy array
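        Examples
        --------
        An illustrative sketch (hypothetical values; output formatting is
        indicative):
        >>> a = YTArray([1.0, 2.0, 3.0], 'km')
        >>> a.to_value('m')
        array([ 1000.,  2000.,  3000.])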
"""
if units is None:
v = self.value
else:
v = self.in_units(units, equivalence=equivalence, **kwargs).value
if isinstance(self, YTQuantity):
return float(v)
else:
return v
def in_base(self, unit_system="cgs"):
"""
Creates a copy of this array with the data in the specified unit system,
and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the default base units of cgs are used.
Examples
--------
>>> E = YTQuantity(2.5, "erg/s")
>>> E_new = E.in_base(unit_system="galactic")
"""
return self.in_units(self.units.get_base_equivalent(unit_system))
def in_cgs(self):
"""
Creates a copy of this array with the data in the equivalent cgs units,
and returns it.
Returns
-------
Quantity object with data converted to cgs units.
"""
return self.in_units(self.units.get_cgs_equivalent())
def in_mks(self):
"""
Creates a copy of this array with the data in the equivalent mks units,
and returns it.
Returns
-------
Quantity object with data converted to mks units.
"""
return self.in_units(self.units.get_mks_equivalent())
def to_equivalent(self, unit, equiv, **kwargs):
"""
Convert a YTArray or YTQuantity to an equivalent, e.g., something that is
related by only a constant factor but not in the same units.
Parameters
----------
unit : string
The unit that you wish to convert to.
equiv : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> a = yt.YTArray(1.0e7,"K")
>>> a.to_equivalent("keV", "thermal")
"""
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
return self.in_units(conv_unit)
this_equiv = equivalence_registry[equiv]()
oneway_or_equivalent = (
conv_unit.has_equivalent(equiv) or this_equiv._one_way)
if self.has_equivalent(equiv) and oneway_or_equivalent:
new_arr = this_equiv.convert(
self, conv_unit.dimensions, **kwargs)
if isinstance(new_arr, tuple):
try:
return type(self)(new_arr[0], new_arr[1]).in_units(unit)
except YTUnitConversionError:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
else:
return new_arr.in_units(unit)
else:
raise YTInvalidUnitEquivalence(equiv, self.units, unit)
def list_equivalencies(self):
"""
Lists the possible equivalencies associated with this YTArray or
YTQuantity.
"""
self.units.list_equivalencies()
def has_equivalent(self, equiv):
"""
Check to see if this YTArray or YTQuantity has an equivalent unit in
*equiv*.
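        Examples
        --------
        An illustrative check, mirroring the ``to_equivalent`` example above:
        >>> a = yt.YTArray(1.0e7, "K")
        >>> a.has_equivalent("thermal")
        True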
"""
return self.units.has_equivalent(equiv)
def ndarray_view(self):
"""
Returns a view into the array, but as an ndarray rather than ytarray.
Returns
-------
View of this array's data.
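        Examples
        --------
        An illustrative sketch (the view shares memory with this array):
        >>> a = YTArray([1.0, 2.0, 3.0], 'cm')
        >>> a.ndarray_view()
        array([ 1.,  2.,  3.])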
"""
return self.view(np.ndarray)
def to_ndarray(self):
"""
Creates a copy of this array with the unit information stripped
"""
return np.array(self)
@classmethod
def from_astropy(cls, arr, unit_registry=None):
"""
Convert an AstroPy "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : AstroPy Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
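        Examples
        --------
        An illustrative sketch (assumes AstroPy is installed; output formatting
        is indicative):
        >>> import astropy.units as u
        >>> x = [1.0, 2.0, 3.0] * u.km / u.s
        >>> YTArray.from_astropy(x)
        YTArray([ 1.,  2.,  3.]) km/s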
"""
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
else:
return YTQuantity(arr.value, ap_units, registry=unit_registry)
def to_astropy(self, **kwargs):
"""
Creates a new AstroPy quantity with the same unit information.
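        Examples
        --------
        An illustrative sketch (assumes AstroPy is installed; output formatting
        is indicative):
        >>> a = YTArray([1.0, 2.0, 3.0], 'cm')
        >>> a.to_astropy()
        <Quantity [ 1., 2., 3.] cm>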
"""
if _astropy.units is None:
raise ImportError("You don't have AstroPy installed, so you can't convert to " +
"an AstroPy quantity.")
return self.value*_astropy.units.Unit(str(self.units), **kwargs)
@classmethod
def from_pint(cls, arr, unit_registry=None):
"""
Convert a Pint "Quantity" to a YTArray or YTQuantity.
Parameters
----------
arr : Pint Quantity
The Quantity to convert from.
unit_registry : yt UnitRegistry, optional
A yt unit registry to use in the conversion. If one is not
supplied, the default one will be used.
Examples
--------
>>> from pint import UnitRegistry
>>> import numpy as np
>>> ureg = UnitRegistry()
>>> a = np.random.random(10)
>>> b = ureg.Quantity(a, "erg/cm**3")
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
for base, exponent in arr._units.items():
bs = convert_pint_units(base)
p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
else:
return YTQuantity(arr.magnitude, p_units, registry=unit_registry)
def to_pint(self, unit_registry=None):
"""
Convert a YTArray or YTQuantity to a Pint Quantity.
Parameters
----------
arr : YTArray or YTQuantity
The unitful quantity to convert from.
unit_registry : Pint UnitRegistry, optional
The Pint UnitRegistry to use in the conversion. If one is not
supplied, the default one will be used. NOTE: This is not
the same as a yt UnitRegistry object.
Examples
--------
>>> a = YTQuantity(4.0, "cm**2/s")
>>> b = a.to_pint()
"""
from pint import UnitRegistry
if unit_registry is None:
unit_registry = UnitRegistry()
powers_dict = self.units.expr.as_powers_dict()
units = []
for unit, pow in powers_dict.items():
# we have to do this because Pint doesn't recognize
# "yr" as "year"
if str(unit).endswith("yr") and len(str(unit)) in [2,3]:
unit = str(unit).replace("yr","year")
units.append("%s**(%s)" % (unit, Rational(pow)))
units = "*".join(units)
return unit_registry.Quantity(self.value, units)
#
# End unit conversion methods
#
def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None):
r"""Writes a YTArray to hdf5 file.
Parameters
----------
filename: string
The filename to create and write a dataset to
dataset_name: string
The name of the dataset to create in the file.
info: dictionary
A dictionary of supplementary info to write to append as attributes
to the dataset.
group_name: string
An optional group to write the arrays to. If not specified, the arrays
are datasets at the top level by default.
Examples
--------
>>> a = YTArray([1,2,3], 'cm')
>>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
>>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
... info=myinfo)
"""
from yt.utilities.on_demand_imports import _h5py as h5py
from yt.extern.six.moves import cPickle as pickle
if info is None:
info = {}
info['units'] = str(self.units)
info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))
if dataset_name is None:
dataset_name = 'array_data'
        # open in append mode so an existing file is updated rather than truncated
        f = h5py.File(filename, 'a')
if group_name is not None:
if group_name in f:
g = f[group_name]
else:
g = f.create_group(group_name)
else:
g = f
if dataset_name in g.keys():
d = g[dataset_name]
# Overwrite without deleting if we can get away with it.
if d.shape == self.shape and d.dtype == self.dtype:
d[...] = self
for k in d.attrs.keys():
del d.attrs[k]
else:
del f[dataset_name]
d = g.create_dataset(dataset_name, data=self)
else:
d = g.create_dataset(dataset_name, data=self)
for k, v in info.items():
d.attrs[k] = v
f.close()
@classmethod
def from_hdf5(cls, filename, dataset_name=None, group_name=None):
r"""Attempts read in and convert a dataset in an hdf5 file into a
YTArray.
Parameters
----------
filename: string
            The filename of the hdf5 file.
dataset_name: string
The name of the dataset to read from. If the dataset has a units
attribute, attempt to infer units as well.
group_name: string
An optional group to read the arrays from. If not specified, the
arrays are datasets at the top level by default.
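        Examples
        --------
        An illustrative round trip, reusing the names from ``write_hdf5`` above:
        >>> a = YTArray([1, 2, 3], 'cm')
        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs')
        >>> b = YTArray.from_hdf5('test_array_data.h5', dataset_name='dinosaurs')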
"""
import h5py
from yt.extern.six.moves import cPickle as pickle
if dataset_name is None:
dataset_name = 'array_data'
        f = h5py.File(filename, 'r')
if group_name is not None:
g = f[group_name]
else:
g = f
dataset = g[dataset_name]
data = dataset[:]
units = dataset.attrs.get('units', '')
if 'unit_registry' in dataset.attrs.keys():
unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring())
else:
unit_lut = None
f.close()
registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
return cls(data, units, registry=registry)
#
# Start convenience methods
#
@property
def value(self):
"""Get a copy of the array data as a numpy ndarray"""
return np.array(self)
v = value
@property
def ndview(self):
"""Get a view of the array data."""
return self.ndarray_view()
d = ndview
@property
def unit_quantity(self):
"""Get a YTQuantity with the same unit as this array and a value of
1.0"""
return YTQuantity(1.0, self.units)
uq = unit_quantity
@property
def unit_array(self):
"""Get a YTArray filled with ones with the same unit and shape as this
array"""
return np.ones_like(self)
ua = unit_array
def __getitem__(self, item):
ret = super(YTArray, self).__getitem__(item)
if ret.shape == ():
return YTQuantity(ret, self.units, bypass_validation=True)
else:
if hasattr(self, 'units'):
ret.units = self.units
return ret
#
# Start operation methods
#
if LooseVersion(np.__version__) < LooseVersion('1.13.0'):
def __add__(self, right_object):
"""
Add this ytarray to the object on the right of the `+` operator.
Must check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "addition")
return super(YTArray, self).__add__(ro)
def __radd__(self, left_object):
""" See __add__. """
lo = sanitize_units_add(self, left_object, "addition")
return super(YTArray, self).__radd__(lo)
def __iadd__(self, other):
""" See __add__. """
oth = sanitize_units_add(self, other, "addition")
np.add(self, oth, out=self)
return self
def __sub__(self, right_object):
"""
Subtract the object on the right of the `-` from this ytarray. Must
check for the correct (same dimension) units.
"""
ro = sanitize_units_add(self, right_object, "subtraction")
return super(YTArray, self).__sub__(ro)
def __rsub__(self, left_object):
""" See __sub__. """
lo = sanitize_units_add(self, left_object, "subtraction")
return super(YTArray, self).__rsub__(lo)
def __isub__(self, other):
""" See __sub__. """
oth = sanitize_units_add(self, other, "subtraction")
np.subtract(self, oth, out=self)
return self
def __neg__(self):
""" Negate the data. """
return super(YTArray, self).__neg__()
def __mul__(self, right_object):
"""
Multiply this YTArray by the object on the right of the `*`
operator. The unit objects handle being multiplied.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__mul__(ro)
def __rmul__(self, left_object):
""" See __mul__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rmul__(lo)
def __imul__(self, other):
""" See __mul__. """
oth = sanitize_units_mul(self, other)
np.multiply(self, oth, out=self)
return self
def __div__(self, right_object):
"""
Divide this YTArray by the object on the right of the `/` operator.
"""
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__div__(ro)
def __rdiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rdiv__(lo)
def __idiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.divide(self, oth, out=self)
return self
def __truediv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__truediv__(ro)
def __rtruediv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rtruediv__(lo)
def __itruediv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.true_divide(self, oth, out=self)
return self
def __floordiv__(self, right_object):
ro = sanitize_units_mul(self, right_object)
return super(YTArray, self).__floordiv__(ro)
def __rfloordiv__(self, left_object):
""" See __div__. """
lo = sanitize_units_mul(self, left_object)
return super(YTArray, self).__rfloordiv__(lo)
def __ifloordiv__(self, other):
""" See __div__. """
oth = sanitize_units_mul(self, other)
np.floor_divide(self, oth, out=self)
return self
def __or__(self, right_object):
return super(YTArray, self).__or__(right_object)
def __ror__(self, left_object):
return super(YTArray, self).__ror__(left_object)
def __ior__(self, other):
np.bitwise_or(self, other, out=self)
return self
def __xor__(self, right_object):
return super(YTArray, self).__xor__(right_object)
def __rxor__(self, left_object):
return super(YTArray, self).__rxor__(left_object)
def __ixor__(self, other):
np.bitwise_xor(self, other, out=self)
return self
def __and__(self, right_object):
return super(YTArray, self).__and__(right_object)
def __rand__(self, left_object):
return super(YTArray, self).__rand__(left_object)
def __iand__(self, other):
np.bitwise_and(self, other, out=self)
return self
def __pow__(self, power):
"""
Raise this YTArray to some power.
Parameters
----------
        power : float or dimensionless YTArray
            The power to raise this array to.
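        Examples
        --------
        An illustrative sketch (the units are raised to the same power as the
        data; output formatting is indicative):
        >>> a = YTArray([1.0, 2.0, 3.0], 'cm')
        >>> a**2
        YTArray([ 1.,  4.,  9.]) cm**2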
"""
if isinstance(power, YTArray):
if not power.units.is_dimensionless:
                raise YTUnitOperationError('power', power.units)
# Work around a sympy issue (I think?)
#
# If I don't do this, super(YTArray, self).__pow__ returns a YTArray
# with a unit attribute set to the sympy expression 1/1 rather than
# a dimensionless Unit object.
if self.units.is_dimensionless and power == -1:
ret = super(YTArray, self).__pow__(power)
return type(self)(ret, input_units='')
return super(YTArray, self).__pow__(power)
def __abs__(self):
""" Return a YTArray with the abs of the data. """
return super(YTArray, self).__abs__()
#
# Start comparison operators.
#
def __lt__(self, other):
""" Test if this is less than the object on the right. """
# converts if possible
oth = validate_comparison_units(self, other, 'less_than')
return super(YTArray, self).__lt__(oth)
def __le__(self, other):
"""Test if this is less than or equal to the object on the right.
"""
oth = validate_comparison_units(self, other, 'less_than or equal')
return super(YTArray, self).__le__(oth)
def __eq__(self, other):
""" Test if this is equal to the object on the right. """
# Check that other is a YTArray.
if other is None:
# self is a YTArray, so it can't be None.
return False
oth = validate_comparison_units(self, other, 'equal')
return super(YTArray, self).__eq__(oth)
def __ne__(self, other):
""" Test if this is not equal to the object on the right. """
# Check that the other is a YTArray.
if other is None:
return True
oth = validate_comparison_units(self, other, 'not equal')
return super(YTArray, self).__ne__(oth)
def __ge__(self, other):
""" Test if this is greater than or equal to other. """
# Check that the other is a YTArray.
oth = validate_comparison_units(
self, other, 'greater than or equal')
return super(YTArray, self).__ge__(oth)
def __gt__(self, other):
""" Test if this is greater than the object on the right. """
# Check that the other is a YTArray.
oth = validate_comparison_units(self, other, 'greater than')
return super(YTArray, self).__gt__(oth)
#
# End comparison operators
#
#
# Begin reduction operators
#
@return_arr
def prod(self, axis=None, dtype=None, out=None):
if axis is not None:
units = self.units**self.shape[axis]
else:
units = self.units**self.size
return super(YTArray, self).prod(axis, dtype, out), units
@return_arr
def mean(self, axis=None, dtype=None, out=None):
return super(YTArray, self).mean(axis, dtype, out), self.units
@return_arr
def sum(self, axis=None, dtype=None, out=None):
return super(YTArray, self).sum(axis, dtype, out), self.units
@return_arr
def std(self, axis=None, dtype=None, out=None, ddof=0):
return super(YTArray, self).std(axis, dtype, out, ddof), self.units
def __array_wrap__(self, out_arr, context=None):
ret = super(YTArray, self).__array_wrap__(out_arr, context)
if isinstance(ret, YTQuantity) and ret.shape != ():
ret = ret.view(YTArray)
if context is None:
if ret.shape == ():
return ret[()]
else:
return ret
ufunc = context[0]
inputs = context[1]
if ufunc in unary_operators:
out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr)
unit = self._ufunc_registry[context[0]](u)
ret_class = type(self)
elif ufunc in binary_operators:
unit_operator = self._ufunc_registry[context[0]]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (preserve_units, comparison_unit,
arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class, raise_error=True)
unit = unit_operator(*units)
if unit_operator in (multiply_units, divide_units):
out_arr, out_arr, unit = handle_multiply_divide_units(
unit, units, out_arr, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc has not been added "
"to YTArray." % str(context[0]))
if unit is None:
out_arr = np.array(out_arr, copy=False)
return out_arr
out_arr.units = unit
if out_arr.size == 1:
return YTQuantity(np.array(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
return YTArray(np.array(out_arr), unit)
return ret_class(np.array(out_arr, copy=False), unit)
else: # numpy version equal to or newer than 1.13
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
func = getattr(ufunc, method)
if 'out' in kwargs:
out_orig = kwargs.pop('out')
out = np.asarray(out_orig[0])
else:
out = None
if len(inputs) == 1:
_, inp, u = get_inp_u_unary(ufunc, inputs)
out_arr = func(np.asarray(inp), out=out, **kwargs)
if ufunc in (multiply, divide) and method == 'reduce':
power_sign = POWER_SIGN_MAPPING[ufunc]
if 'axis' in kwargs and kwargs['axis'] is not None:
unit = u**(power_sign*inp.shape[kwargs['axis']])
else:
unit = u**(power_sign*inp.size)
else:
unit = self._ufunc_registry[ufunc](u)
ret_class = type(self)
elif len(inputs) == 2:
unit_operator = self._ufunc_registry[ufunc]
inps, units, ret_class = get_inp_u_binary(ufunc, inputs)
if unit_operator in (comparison_unit, arctan2_unit):
inps, units = handle_comparison_units(
inps, units, ufunc, ret_class)
elif unit_operator is preserve_units:
inps, units = handle_preserve_units(
inps, units, ufunc, ret_class)
unit = unit_operator(*units)
out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]),
out=out, **kwargs)
if unit_operator in (multiply_units, divide_units):
out, out_arr, unit = handle_multiply_divide_units(
unit, units, out, out_arr)
else:
raise RuntimeError(
"Support for the %s ufunc with %i inputs has not been"
"added to YTArray." % (str(ufunc), len(inputs)))
if unit is None:
out_arr = np.array(out_arr, copy=False)
elif ufunc in (modf, divmod_):
out_arr = tuple((ret_class(o, unit) for o in out_arr))
elif out_arr.size == 1:
out_arr = YTQuantity(np.asarray(out_arr), unit)
else:
if ret_class is YTQuantity:
# This happens if you do ndarray * YTQuantity. Explicitly
# casting to YTArray avoids creating a YTQuantity with
# size > 1
out_arr = YTArray(np.asarray(out_arr), unit)
else:
out_arr = ret_class(np.asarray(out_arr), unit)
if out is not None:
out_orig[0].flat[:] = out.flat[:]
if isinstance(out_orig[0], YTArray):
out_orig[0].units = unit
return out_arr
def copy(self, order='C'):
return type(self)(np.copy(np.asarray(self)), self.units)
def __array_finalize__(self, obj):
if obj is None and hasattr(self, 'units'):
return
self.units = getattr(obj, 'units', NULL_UNIT)
def __pos__(self):
""" Posify the data. """
# this needs to be defined for all numpy versions, see
# numpy issue #9081
return type(self)(super(YTArray, self).__pos__(), self.units)
@return_arr
def dot(self, b, out=None):
return super(YTArray, self).dot(b), self.units*b.units
def __reduce__(self):
"""Pickle reduction method
See the documentation for the standard library pickle module:
http://docs.python.org/2/library/pickle.html
        Unit metadata is encoded in the zeroth element of the third element of
        the returned tuple, which is itself a tuple used to restore the state
        of the ndarray.
This is always defined for numpy arrays.
"""
np_ret = super(YTArray, self).__reduce__()
obj_state = np_ret[2]
unit_state = (((str(self.units), self.units.registry.lut),) + obj_state[:],)
new_ret = np_ret[:2] + unit_state + np_ret[3:]
return new_ret
def __setstate__(self, state):
"""Pickle setstate method
        This is called inside pickle.load() and restores the unit data from the
metadata extracted in __reduce__ and then serialized by pickle.
"""
super(YTArray, self).__setstate__(state[1:])
try:
unit, lut = state[0]
except TypeError:
# this case happens when we try to load an old pickle file
# created before we serialized the unit symbol lookup table
# into the pickle file
unit, lut = str(state[0]), default_unit_symbol_lut.copy()
# need to fix up the lut if the pickle was saved prior to PR #1728
# when the pickle format changed
if len(lut['m']) == 2:
lut.update(default_unit_symbol_lut)
for k, v in [(k, v) for k, v in lut.items() if len(v) == 2]:
lut[k] = v + (0.0, r'\rm{' + k.replace('_', '\ ') + '}')
registry = UnitRegistry(lut=lut, add_default_symbols=False)
self.units = Unit(unit, registry=registry)
def __deepcopy__(self, memodict=None):
"""copy.deepcopy implementation
This is necessary for stdlib deepcopy of arrays and quantities.
"""
if memodict is None:
memodict = {}
ret = super(YTArray, self).__deepcopy__(memodict)
return type(self)(ret, copy.deepcopy(self.units))
class YTQuantity(YTArray):
"""
A scalar associated with a unit.
Parameters
----------
input_scalar : an integer or floating point scalar
The scalar to attach units to
input_units : String unit specification, unit symbol object, or astropy units
The units of the quantity. Powers must be specified using python syntax
(cm**3, not cm^3).
registry : A UnitRegistry object
The registry to create units from. If input_units is already associated
with a unit registry and this is specified, this will be used instead of
the registry associated with the unit object.
dtype : data-type
The dtype of the array data.
Examples
--------
>>> from yt import YTQuantity
>>> a = YTQuantity(1, 'cm')
>>> b = YTQuantity(2, 'm')
>>> a + b
201.0 cm
>>> b + a
2.01 m
NumPy ufuncs will pass through units where appropriate.
>>> import numpy as np
>>> a = YTQuantity(12, 'g/cm**3')
>>> np.abs(a)
12 g/cm**3
and strip them when it would be annoying to deal with them.
>>> print(np.log10(a))
1.07918124605
YTQuantity is tightly integrated with yt datasets:
>>> import yt
>>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
>>> a = ds.quan(5, 'code_length')
>>> a.in_cgs()
1.543e+25 cm
This is equivalent to:
>>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
>>> np.all(a == b)
True
"""
def __new__(cls, input_scalar, input_units=None, registry=None,
dtype=np.float64, bypass_validation=False):
if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)):
raise RuntimeError("YTQuantity values must be numeric")
ret = YTArray.__new__(cls, input_scalar, input_units, registry,
dtype=dtype, bypass_validation=bypass_validation)
if ret.size > 1:
raise RuntimeError("YTQuantity instances must be scalars")
return ret
def __repr__(self):
return str(self)
def validate_numpy_wrapper_units(v, arrs):
if not any(isinstance(a, YTArray) for a in arrs):
return v
if not all(isinstance(a, YTArray) for a in arrs):
raise RuntimeError("Not all of your arrays are YTArrays.")
a1 = arrs[0]
if not all(a.units == a1.units for a in arrs[1:]):
raise RuntimeError("Your arrays must have identical units.")
v.units = a1.units
return v
def uconcatenate(arrs, axis=0):
"""Concatenate a sequence of arrays.
This wrapper around numpy.concatenate preserves units. All input arrays must
have the same units. See the documentation of numpy.concatenate for full
details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uconcatenate((A, B))
YTArray([ 1., 2., 3., 2., 3., 4.]) cm
"""
v = np.concatenate(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ucross(arr1, arr2, registry=None, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Applies the cross product to two YT arrays.
This wrapper around numpy.cross preserves units.
See the documentation of numpy.cross for full
details.
"""
v = np.cross(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
units = arr1.units * arr2.units
arr = YTArray(v, units, registry=registry)
return arr
def uintersect1d(arr1, arr2, assume_unique=False):
"""Find the sorted unique elements of the two input arrays.
A wrapper around numpy.intersect1d that preserves units. All input arrays
must have the same units. See the documentation of numpy.intersect1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uintersect1d(A, B)
YTArray([ 2., 3.]) cm
"""
v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def uunion1d(arr1, arr2):
"""Find the union of two arrays.
    A wrapper around numpy.union1d that preserves units. All input arrays
    must have the same units. See the documentation of numpy.union1d for
full details.
Examples
--------
>>> A = yt.YTArray([1, 2, 3], 'cm')
>>> B = yt.YTArray([2, 3, 4], 'cm')
>>> uunion1d(A, B)
YTArray([ 1., 2., 3., 4.]) cm
"""
v = np.union1d(arr1, arr2)
v = validate_numpy_wrapper_units(v, [arr1, arr2])
return v
def unorm(data, ord=None, axis=None, keepdims=False):
"""Matrix or vector norm that preserves units
This is a wrapper around np.linalg.norm that preserves units. See
the documentation for that function for descriptions of the keyword
arguments.
The keepdims argument is ignored if the version of numpy installed is
older than numpy 1.10.0.
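    Examples
    --------
    An illustrative sketch (hypothetical values; output formatting is
    indicative):
    >>> a = yt.YTArray([3.0, 4.0], 'cm')
    >>> unorm(a)
    5.0 cm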
"""
if LooseVersion(np.__version__) < LooseVersion('1.10.0'):
norm = np.linalg.norm(data, ord=ord, axis=axis)
else:
norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
if norm.shape == ():
return YTQuantity(norm, data.units)
return YTArray(norm, data.units)
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
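    Examples
    --------
    An illustrative sketch (hypothetical values; output formatting is
    indicative):
    >>> a = yt.YTArray([1.0, 2.0], 'cm')
    >>> b = yt.YTArray([3.0, 4.0], 'g')
    >>> udot(a, b)
    11.0 cm*g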
"""
dot = np.dot(op1.d, op2.d)
units = op1.units*op2.units
if dot.shape == ():
return YTQuantity(dot, units)
return YTArray(dot, units)
def uvstack(arrs):
"""Stack arrays in sequence vertically (row wise) while preserving units
This is a wrapper around np.vstack that preserves units.
"""
v = np.vstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def uhstack(arrs):
"""Stack arrays in sequence horizontally (column wise) while preserving units
This is a wrapper around np.hstack that preserves units.
"""
v = np.hstack(arrs)
v = validate_numpy_wrapper_units(v, arrs)
return v
def ustack(arrs, axis=0):
"""Join a sequence of arrays along a new axis while preserving units
The axis parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the
first dimension and if ``axis=-1`` it will be the last dimension.
This is a wrapper around np.stack that preserves units.
"""
    v = np.stack(arrs, axis=axis)
v = validate_numpy_wrapper_units(v, arrs)
return v
def array_like_field(data, x, field):
field = data._determine_fields(field)[0]
if isinstance(field, tuple):
finfo = data.ds._get_field_info(field[0],field[1])
else:
finfo = data.ds._get_field_info(field)
if finfo.sampling_type == 'particle':
units = finfo.output_units
else:
units = finfo.units
if isinstance(x, YTArray):
arr = copy.deepcopy(x)
arr.convert_to_units(units)
return arr
if isinstance(x, np.ndarray):
return data.ds.arr(x, units)
else:
return data.ds.quan(x, units)
def get_binary_op_return_class(cls1, cls2):
if cls1 is cls2:
return cls1
if cls1 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls1, (numeric_type, np.number, list, tuple)):
return cls2
if cls2 in (np.ndarray, np.matrix, np.ma.masked_array) or issubclass(cls2, (numeric_type, np.number, list, tuple)):
return cls1
if issubclass(cls1, YTQuantity):
return cls2
if issubclass(cls2, YTQuantity):
return cls1
if issubclass(cls1, cls2):
return cls1
if issubclass(cls2, cls1):
return cls2
else:
raise RuntimeError("Undefined operation for a YTArray subclass. "
"Received operand types (%s) and (%s)" % (cls1, cls2))
def loadtxt(fname, dtype='float', delimiter='\t', usecols=None, comments='#'):
r"""
Load YTArrays with unit information from a text file. Each row in the
text file must have the same number of values.
Parameters
----------
fname : str
Filename to read.
dtype : data-type, optional
Data-type of the resulting array; default: float.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
Examples
--------
>>> temp, velx = yt.loadtxt("sphere.dat", usecols=(1,2), delimiter="\t")
"""
f = open(fname, 'r')
next_one = False
units = []
num_cols = -1
for line in f.readlines():
words = line.strip().split()
if len(words) == 0:
continue
if line[0] == comments:
if next_one:
units = words[1:]
if len(words) == 2 and words[1] == "Units":
next_one = True
else:
# Here we catch the first line of numbers
try:
col_words = line.strip().split(delimiter)
for word in col_words:
float(word)
num_cols = len(col_words)
break
except ValueError:
mylog.warning("Unrecognized character at beginning of line: \"%s\"." % line[0])
f.close()
if len(units) != num_cols:
mylog.warning("Malformed or incomplete units header. Arrays will be "
"dimensionless!")
units = ["dimensionless"]*num_cols
arrays = np.loadtxt(fname, dtype=dtype, comments=comments,
delimiter=delimiter, converters=None,
unpack=True, usecols=usecols, ndmin=0)
if usecols is not None:
units = [units[col] for col in usecols]
mylog.info("Array units: %s" % ", ".join(units))
return tuple([YTArray(arr, unit) for arr, unit in zip(arrays, units)])
def savetxt(fname, arrays, fmt='%.18e', delimiter='\t', header='',
footer='', comments='#'):
r"""
Write YTArrays with unit information to a text file.
Parameters
----------
fname : str
The file to write the YTArrays to.
arrays : list of YTArrays or single YTArray
The array(s) to write to the file.
fmt : str or sequence of strs, optional
A single format (%10.5f), or a sequence of formats.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file, before the
unit header.
footer : str, optional
String that will be written at the end of the file.
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``yt.loadtxt``.
Examples
--------
>>> sp = ds.sphere("c", (100,"kpc"))
>>> a = sp["density"]
>>> b = sp["temperature"]
>>> c = sp["velocity_x"]
>>> yt.savetxt("sphere.dat", [a,b,c], header='My sphere stuff', delimiter="\t")
"""
if not isinstance(arrays, list):
arrays = [arrays]
units = []
for array in arrays:
if hasattr(array, "units"):
units.append(str(array.units))
else:
units.append("dimensionless")
if header != '':
header += '\n'
header += " Units\n " + '\t'.join(units)
np.savetxt(fname, np.transpose(arrays), header=header,
fmt=fmt, delimiter=delimiter, footer=footer,
newline='\n', comments=comments)
|
[
"numpy.bitwise_or",
"numpy.union1d",
"yt.units.dimensions.em_dimensions.get",
"numpy.hstack",
"yt.units.unit_object.UnitParseError",
"yt.units.unit_lookup_table.default_unit_symbol_lut.copy",
"yt.utilities.exceptions.YTInvalidUnitEquivalence",
"numpy.array",
"numpy.linalg.norm",
"copy.deepcopy",
"pint.UnitRegistry",
"numpy.divide",
"numpy.multiply",
"yt.utilities.exceptions.YTUfuncUnitError",
"numpy.cross",
"yt.utilities.exceptions.YTIterableUnitCoercionError",
"numpy.bitwise_xor",
"numpy.asarray",
"yt.extern.six.moves.cPickle.dumps",
"functools.wraps",
"numpy.subtract",
"numpy.stack",
"numpy.dot",
"numpy.vstack",
"numpy.concatenate",
"yt.utilities.exceptions.YTUnitOperationError",
"numpy.add",
"yt.utilities.lru_cache.lru_cache",
"yt.utilities.logger.ytLogger.warning",
"numpy.any",
"h5py.File",
"yt.units.unit_object.Unit",
"numpy.transpose",
"numpy.intersect1d",
"numpy.ones_like",
"numpy.floor_divide",
"yt.utilities.exceptions.YTEquivalentDimsError",
"numpy.bitwise_and",
"yt.utilities.exceptions.YTUnitConversionError",
"numpy.bool_",
"numpy.true_divide",
"distutils.version.LooseVersion",
"numpy.loadtxt",
"sympy.Rational"
] |
[((2243, 2249), 'yt.units.unit_object.Unit', 'Unit', ([], {}), '()\n', (2247, 2249), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((2777, 2812), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (2786, 2812), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((2857, 2892), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (2866, 2892), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3011, 3046), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3020, 3046), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3101, 3136), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3110, 3136), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3183, 3218), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3192, 3218), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((3276, 3311), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (3285, 3311), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((9333, 9368), 'yt.utilities.lru_cache.lru_cache', 'lru_cache', ([], {'maxsize': '(128)', 'typed': '(False)'}), '(maxsize=128, typed=False)\n', (9342, 9368), False, 'from yt.utilities.lru_cache import lru_cache\n'), ((2466, 2477), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2471, 2477), False, 'from functools import wraps\n'), ((9764, 9808), 'yt.units.dimensions.em_dimensions.get', 'em_dimensions.get', (['my_units.dimensions', 'None'], {}), '(my_units.dimensions, None)\n', (9781, 9808), False, 'from yt.units.dimensions import angle, current_mks, dimensionless, em_dimensions\n'), ((55134, 55165), 'numpy.concatenate', 'np.concatenate', (['arrs'], {'axis': 'axis'}), '(arrs, axis=axis)\n', (55148, 55165), True, 'import numpy as np\n'), ((55490, 55560), 'numpy.cross', 'np.cross', (['arr1', 'arr2'], {'axisa': 'axisa', 'axisb': 'axisb', 'axisc': 'axisc', 'axis': 'axis'}), '(arr1, arr2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)\n', (55498, 55560), True, 'import numpy as np\n'), ((56128, 56183), 'numpy.intersect1d', 'np.intersect1d', (['arr1', 'arr2'], {'assume_unique': 'assume_unique'}), '(arr1, arr2, assume_unique=assume_unique)\n', (56142, 56183), True, 'import numpy as np\n'), ((56672, 56694), 'numpy.union1d', 'np.union1d', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (56682, 56694), True, 'import numpy as np\n'), ((57601, 57621), 'numpy.dot', 'np.dot', (['op1.d', 'op2.d'], {}), '(op1.d, op2.d)\n', (57607, 57621), True, 'import numpy as np\n'), ((57922, 57937), 'numpy.vstack', 'np.vstack', (['arrs'], {}), '(arrs)\n', (57931, 57937), True, 'import numpy as np\n'), ((58177, 58192), 'numpy.hstack', 'np.hstack', (['arrs'], {}), '(arrs)\n', (58186, 58192), True, 'import numpy as np\n'), ((58639, 58653), 'numpy.stack', 'np.stack', (['arrs'], {}), '(arrs)\n', (58647, 58653), True, 'import numpy as np\n'), ((62003, 62133), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {'dtype': 'dtype', 'comments': 'comments', 'delimiter': 'delimiter', 'converters': 'None', 'unpack': '(True)', 'usecols': 'usecols', 'ndmin': '(0)'}), '(fname, dtype=dtype, comments=comments, 
delimiter=delimiter,\n converters=None, unpack=True, usecols=usecols, ndmin=0)\n', (62013, 62133), True, 'import numpy as np\n'), ((9700, 9745), 'yt.units.unit_object.Unit', 'Unit', (['other_units'], {'registry': 'my_units.registry'}), '(other_units, registry=my_units.registry)\n', (9704, 9745), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((9982, 10032), 'yt.utilities.exceptions.YTEquivalentDimsError', 'YTEquivalentDimsError', (['my_units', 'other_units', 'base'], {}), '(my_units, other_units, base)\n', (10003, 10032), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((10101, 10194), 'yt.utilities.exceptions.YTUnitConversionError', 'YTUnitConversionError', (['my_units', 'my_units.dimensions', 'other_units', 'other_units.dimensions'], {}), '(my_units, my_units.dimensions, other_units,\n other_units.dimensions)\n', (10122, 10194), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((25654, 25694), 'yt.units.unit_object.Unit', 'Unit', (['unit'], {'registry': 'self.units.registry'}), '(unit, registry=self.units.registry)\n', (25658, 25694), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((27284, 27298), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (27292, 27298), True, 'import numpy as np\n'), ((32434, 32453), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (32443, 32453), False, 'import h5py\n'), ((34022, 34041), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (34031, 34041), False, 'import h5py\n'), ((34456, 34509), 'pint.UnitRegistry', 'UnitRegistry', ([], {'lut': 'unit_lut', 'add_default_symbols': '(False)'}), '(lut=unit_lut, add_default_symbols=False)\n', (34468, 34509), False, 'from pint import UnitRegistry\n'), ((34719, 34733), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (34727, 34733), True, 'import numpy as np\n'), ((35237, 35255), 'numpy.ones_like', 'np.ones_like', (['self'], {}), '(self)\n', (35249, 35255), True, 'import numpy as np\n'), ((35629, 35657), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (35641, 35657), False, 'from distutils.version import LooseVersion\n'), ((35660, 35682), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.13.0"""'], {}), "('1.13.0')\n", (35672, 35682), False, 'from distutils.version import LooseVersion\n'), ((51752, 51800), 'pint.UnitRegistry', 'UnitRegistry', ([], {'lut': 'lut', 'add_default_symbols': '(False)'}), '(lut=lut, add_default_symbols=False)\n', (51764, 51800), False, 'from pint import UnitRegistry\n'), ((51822, 51851), 'yt.units.unit_object.Unit', 'Unit', (['unit'], {'registry': 'registry'}), '(unit, registry=registry)\n', (51826, 51851), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((57144, 57172), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (57156, 57172), False, 'from distutils.version import LooseVersion\n'), ((57175, 57197), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.10.0"""'], {}), "('1.10.0')\n", (57187, 57197), False, 'from distutils.version import LooseVersion\n'), ((57214, 57254), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'ord': 'ord', 'axis': 'axis'}), '(data, ord=ord, axis=axis)\n', (57228, 57254), True, 
'import numpy as np\n'), ((57280, 57339), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'ord': 'ord', 'axis': 'axis', 'keepdims': 'keepdims'}), '(data, ord=ord, axis=axis, keepdims=keepdims)\n', (57294, 57339), True, 'import numpy as np\n'), ((59106, 59122), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (59119, 59122), False, 'import copy\n'), ((61837, 61926), 'yt.utilities.logger.ytLogger.warning', 'mylog.warning', (['"""Malformed or incomplete units header. Arrays will be dimensionless!"""'], {}), "(\n 'Malformed or incomplete units header. Arrays will be dimensionless!')\n", (61850, 61926), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((63925, 63945), 'numpy.transpose', 'np.transpose', (['arrays'], {}), '(arrays)\n', (63937, 63945), True, 'import numpy as np\n'), ((5145, 5160), 'numpy.any', 'np.any', (['inps[0]'], {}), '(inps[0])\n', (5151, 5160), True, 'import numpy as np\n'), ((5162, 5177), 'numpy.any', 'np.any', (['inps[1]'], {}), '(inps[1])\n', (5168, 5177), True, 'import numpy as np\n'), ((5208, 5223), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5216, 5223), True, 'import numpy as np\n'), ((5815, 5830), 'numpy.any', 'np.any', (['inps[0]'], {}), '(inps[0])\n', (5821, 5830), True, 'import numpy as np\n'), ((5832, 5847), 'numpy.any', 'np.any', (['inps[1]'], {}), '(inps[1])\n', (5838, 5847), True, 'import numpy as np\n'), ((5878, 5893), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5886, 5893), True, 'import numpy as np\n'), ((8495, 8548), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['op_string', 'inp.units', 'ret.units'], {}), '(op_string, inp.units, ret.units)\n', (8515, 8548), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((8767, 8778), 'numpy.any', 'np.any', (['ret'], {}), '(ret)\n', (8773, 8778), True, 'import numpy as np\n'), ((8798, 8855), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['op_string', 'inp.units', 'dimensionless'], {}), '(op_string, inp.units, dimensionless)\n', (8818, 8855), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((9214, 9270), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['op_string', 'this.units', 'other.units'], {}), '(op_string, this.units, other.units)\n', (9234, 9270), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((18365, 18371), 'yt.units.unit_object.Unit', 'Unit', ([], {}), '()\n', (18369, 18371), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((19703, 19744), 'numpy.subtract', 'np.subtract', (['self', '(offset * self.uq)', 'self'], {}), '(self, offset * self.uq, self)\n', (19714, 19744), True, 'import numpy as np\n'), ((26463, 26512), 'yt.utilities.exceptions.YTInvalidUnitEquivalence', 'YTInvalidUnitEquivalence', (['equiv', 'self.units', 'unit'], {}), '(equiv, self.units, unit)\n', (26487, 26512), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((30611, 30625), 'pint.UnitRegistry', 'UnitRegistry', ([], {}), '()\n', (30623, 
30625), False, 'from pint import UnitRegistry\n'), ((32308, 32345), 'yt.extern.six.moves.cPickle.dumps', 'pickle.dumps', (['self.units.registry.lut'], {}), '(self.units.registry.lut)\n', (32320, 32345), True, 'from yt.extern.six.moves import cPickle as pickle\n'), ((36358, 36385), 'numpy.add', 'np.add', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (36364, 36385), True, 'import numpy as np\n'), ((37090, 37122), 'numpy.subtract', 'np.subtract', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (37101, 37122), True, 'import numpy as np\n'), ((37896, 37928), 'numpy.multiply', 'np.multiply', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (37907, 37928), True, 'import numpy as np\n'), ((38530, 38560), 'numpy.divide', 'np.divide', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (38539, 38560), True, 'import numpy as np\n'), ((39069, 39104), 'numpy.true_divide', 'np.true_divide', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (39083, 39104), True, 'import numpy as np\n'), ((39618, 39654), 'numpy.floor_divide', 'np.floor_divide', (['self', 'oth'], {'out': 'self'}), '(self, oth, out=self)\n', (39633, 39654), True, 'import numpy as np\n'), ((39930, 39966), 'numpy.bitwise_or', 'np.bitwise_or', (['self', 'other'], {'out': 'self'}), '(self, other, out=self)\n', (39943, 39966), True, 'import numpy as np\n'), ((40247, 40284), 'numpy.bitwise_xor', 'np.bitwise_xor', (['self', 'other'], {'out': 'self'}), '(self, other, out=self)\n', (40261, 40284), True, 'import numpy as np\n'), ((40565, 40602), 'numpy.bitwise_and', 'np.bitwise_and', (['self', 'other'], {'out': 'self'}), '(self, other, out=self)\n', (40579, 40602), True, 'import numpy as np\n'), ((52165, 52190), 'copy.deepcopy', 'copy.deepcopy', (['self.units'], {}), '(self.units)\n', (52178, 52190), False, 'import copy\n'), ((5297, 5312), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5305, 5312), True, 'import numpy as np\n'), ((5967, 5982), 'numpy.bool_', 'np.bool_', (['(False)'], {}), '(False)\n', (5975, 5982), True, 'import numpy as np\n'), ((6780, 6808), 'yt.units.unit_object.Unit', 'Unit', ([], {'registry': 'unit.registry'}), '(registry=unit.registry)\n', (6784, 6808), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((7217, 7258), 'yt.utilities.exceptions.YTIterableUnitCoercionError', 'YTIterableUnitCoercionError', (['input_object'], {}), '(input_object)\n', (7244, 7258), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((8342, 8362), 'numpy.any', 'np.any', (['other_object'], {}), '(other_object)\n', (8348, 8362), True, 'import numpy as np\n'), ((16956, 17136), 'yt.units.unit_object.UnitParseError', 'UnitParseError', (['("""Code units used without referring to a dataset. \nPerhaps you meant to do something like this instead: \nds.arr(%s, "%s")"""\n % (input_array, input_units))'], {}), '(\n """Code units used without referring to a dataset. 
\nPerhaps you meant to do something like this instead: \nds.arr(%s, "%s")"""\n % (input_array, input_units))\n', (16970, 17136), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((18184, 18220), 'numpy.asarray', 'np.asarray', (['input_array'], {'dtype': 'dtype'}), '(input_array, dtype=dtype)\n', (18194, 18220), True, 'import numpy as np\n'), ((18797, 18833), 'yt.units.unit_object.Unit', 'Unit', (['input_units'], {'registry': 'registry'}), '(input_units, registry=registry)\n', (18801, 18833), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((21985, 22041), 'numpy.subtract', 'np.subtract', (['new_array', '(offset * new_array.uq)', 'new_array'], {}), '(new_array, offset * new_array.uq, new_array)\n', (21996, 22041), True, 'import numpy as np\n'), ((46015, 46044), 'numpy.array', 'np.array', (['out_arr'], {'copy': '(False)'}), '(out_arr, copy=False)\n', (46023, 46044), True, 'import numpy as np\n'), ((46845, 46868), 'numpy.asarray', 'np.asarray', (['out_orig[0]'], {}), '(out_orig[0])\n', (46855, 46868), True, 'import numpy as np\n'), ((48678, 48707), 'numpy.array', 'np.array', (['out_arr'], {'copy': '(False)'}), '(out_arr, copy=False)\n', (48686, 48707), True, 'import numpy as np\n'), ((5449, 5484), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['ufunc', '*units'], {}), '(ufunc, *units)\n', (5469, 5484), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((8429, 8448), 'numpy.any', 'np.any', (['this_object'], {}), '(this_object)\n', (8435, 8448), True, 'import numpy as np\n'), ((16543, 16579), 'numpy.asarray', 'np.asarray', (['input_array'], {'dtype': 'dtype'}), '(input_array, dtype=dtype)\n', (16553, 16579), True, 'import numpy as np\n'), ((17693, 17729), 'yt.units.unit_object.Unit', 'Unit', (['input_units'], {'registry': 'registry'}), '(input_units, registry=registry)\n', (17697, 17729), False, 'from yt.units.unit_object import Unit, UnitParseError\n'), ((40993, 41034), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['"""power"""', 'power.unit'], {}), "('power', power.unit)\n", (41013, 41034), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((46177, 46194), 'numpy.array', 'np.array', (['out_arr'], {}), '(out_arr)\n', (46185, 46194), True, 'import numpy as np\n'), ((46541, 46570), 'numpy.array', 'np.array', (['out_arr'], {'copy': '(False)'}), '(out_arr, copy=False)\n', (46549, 46570), True, 'import numpy as np\n'), ((47037, 47052), 'numpy.asarray', 'np.asarray', (['inp'], {}), '(inp)\n', (47047, 47052), True, 'import numpy as np\n'), ((49603, 49619), 'numpy.asarray', 'np.asarray', (['self'], {}), '(self)\n', (49613, 49619), True, 'import numpy as np\n'), ((51361, 51391), 'yt.units.unit_lookup_table.default_unit_symbol_lut.copy', 'default_unit_symbol_lut.copy', ([], {}), '()\n', (51389, 51391), False, 'from yt.units.unit_lookup_table import default_unit_symbol_lut\n'), ((61704, 61781), 'yt.utilities.logger.ytLogger.warning', 'mylog.warning', (['(\'Unrecognized character at beginning of line: "%s".\' % line[0])'], {}), '(\'Unrecognized character at beginning of line: "%s".\' % line[0])\n', (61717, 61781), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((4917, 4958), 'yt.utilities.exceptions.YTUnitOperationError', 
'YTUnitOperationError', (['ufunc', 'unit1', 'unit2'], {}), '(ufunc, unit1, unit2)\n', (4937, 4958), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((6139, 6174), 'yt.utilities.exceptions.YTUnitOperationError', 'YTUnitOperationError', (['ufunc', '*units'], {}), '(ufunc, *units)\n', (6159, 6174), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((26317, 26366), 'yt.utilities.exceptions.YTInvalidUnitEquivalence', 'YTInvalidUnitEquivalence', (['equiv', 'self.units', 'unit'], {}), '(equiv, self.units, unit)\n', (26341, 26366), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((28120, 28138), 'sympy.Rational', 'Rational', (['exponent'], {}), '(exponent)\n', (28128, 28138), False, 'from sympy import Rational\n'), ((29632, 29650), 'sympy.Rational', 'Rational', (['exponent'], {}), '(exponent)\n', (29640, 29650), False, 'from sympy import Rational\n'), ((31007, 31020), 'sympy.Rational', 'Rational', (['pow'], {}), '(pow)\n', (31015, 31020), False, 'from sympy import Rational\n'), ((46483, 46500), 'numpy.array', 'np.array', (['out_arr'], {}), '(out_arr)\n', (46491, 46500), True, 'import numpy as np\n'), ((48143, 48162), 'numpy.asarray', 'np.asarray', (['inps[0]'], {}), '(inps[0])\n', (48153, 48162), True, 'import numpy as np\n'), ((48164, 48183), 'numpy.asarray', 'np.asarray', (['inps[1]'], {}), '(inps[1])\n', (48174, 48183), True, 'import numpy as np\n'), ((6251, 6282), 'yt.utilities.exceptions.YTUfuncUnitError', 'YTUfuncUnitError', (['ufunc', '*units'], {}), '(ufunc, *units)\n', (6267, 6282), False, 'from yt.utilities.exceptions import YTUnitOperationError, YTUnitConversionError, YTUfuncUnitError, YTIterableUnitCoercionError, YTInvalidUnitEquivalence, YTEquivalentDimsError\n'), ((17955, 17989), 'numpy.array', 'np.array', (['input_array'], {'dtype': 'dtype'}), '(input_array, dtype=dtype)\n', (17963, 17989), True, 'import numpy as np\n'), ((48895, 48914), 'numpy.asarray', 'np.asarray', (['out_arr'], {}), '(out_arr)\n', (48905, 48914), True, 'import numpy as np\n'), ((49206, 49225), 'numpy.asarray', 'np.asarray', (['out_arr'], {}), '(out_arr)\n', (49216, 49225), True, 'import numpy as np\n'), ((49295, 49314), 'numpy.asarray', 'np.asarray', (['out_arr'], {}), '(out_arr)\n', (49305, 49314), True, 'import numpy as np\n')]
|
import numpy as np
import argparse
import composition
import os
import json
import torch
from spinup.algos.pytorch.ppo.core import MLPActorCritic
from spinup.algos.pytorch.ppo.ppo import ppo
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.mpi_tools import proc_id, num_procs
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='spinningup_training/logs')
parser.add_argument('--load-dir', default=None)
parser.add_argument('--gridsearch-id', type=int, default=-1)
parser.add_argument('--task-id', type=int, default=-1)
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=4)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=16000)
parser.add_argument('--epochs', type=int, default=625)
parser.add_argument('--exp-name', type=str, default='ppo')
parser.add_argument('--clip', type=float, default=0.2)
parser.add_argument('--pi-lr', type=float, default=1e-4)
parser.add_argument('--vf-lr', type=float, default=1e-4)
parser.add_argument('--pi-iters', type=int, default=128)
parser.add_argument('--vf-iters', type=int, default=128)
parser.add_argument('--target-kl', type=float, default=0.02)
parser.add_argument('--ent-coef', type=float, default=0.02)
parser.add_argument('--log-std-init', type=float, default=0.)
parser.add_argument('--controller', type=str, default="joint")
parser.add_argument('--robot', type=str, default="IIWA")
parser.add_argument('--object', type=str, default="Hollowbox")
parser.add_argument('--obstacle', type=str, default=None)
parser.add_argument('--task', type=str, default="PickPlace")
parser.add_argument('--horizon', type=int, default=500)
args = parser.parse_args()
np.random.seed(args.seed)
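    # Every MPI process uses the same seed, so they all draw the same task_list;
    # each process then picks its own entry via proc_id() below.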
task_list = np.random.choice(256, num_procs(), replace=False)
args.task_id = int(task_list[proc_id()])
_robots = ["IIWA", "Jaco", "Kinova3", "Panda"]
_objects = ["Box", "Dumbbell", "Plate", "Hollowbox"]
_objectives = ["PickPlace", "Push", "Shelf", "Trashcan"]
_obstacles = ["None", "GoalWall", "ObjectDoor", "ObjectWall"]
idx = np.unravel_index(args.task_id, (len(_robots), len(_objects), len(_objectives), len(_obstacles)))
args.robot = _robots[idx[0]]
args.object = _objects[idx[1]]
args.task = _objectives[idx[2]]
args.obstacle = _obstacles[idx[3]]
# args.exp_name = "t:" + str(args.task_id) + "_name:" + args.exp_name + "_robot:" + str(args.robot) + "_task:" + str(args.task) + "_object:" + str(args.object) + "_obstacle:" + str(args.obstacle)
args.exp_name = 'MTL_{}'.format(len(task_list))
return args
def main():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_num_threads(1)
args = parse_args()
os.makedirs(os.path.join(args.data_dir, args.exp_name), exist_ok=True)
with open(os.path.join(args.data_dir, args.exp_name, 'args_{}.json'.format(proc_id())), 'w') as f:
json.dump(args.__dict__, f, indent=2)
logger_kwargs = setup_logger_kwargs(
args.exp_name, data_dir=args.data_dir)
checkpoint = None
if args.load_dir is not None:
checkpoint = torch.load(os.path.join(args.load_dir, 'pyt_save', 'state_dicts.pt'))
ppo(lambda: composition.make(
args.robot, args.object, args.obstacle, args.task, args.controller, args.horizon, use_task_id_obs=True), actor_critic=MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l, log_std_init=args.log_std_init), seed=args.seed, gamma=args.gamma, steps_per_epoch=args.steps, epochs=args.epochs, clip_ratio=args.clip,
pi_lr=args.pi_lr, vf_lr=args.vf_lr, train_pi_iters=args.pi_iters, train_v_iters=args.vf_iters, target_kl=args.target_kl,
logger_kwargs=logger_kwargs, max_ep_len=args.horizon, ent_coef=args.ent_coef, log_per_proc=True, checkpoint=checkpoint)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"spinup.utils.run_utils.setup_logger_kwargs",
"spinup.utils.mpi_tools.num_procs",
"os.path.join",
"torch.set_num_threads",
"numpy.random.seed",
"composition.make",
"spinup.utils.mpi_tools.proc_id",
"json.dump"
] |
[((335, 360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (358, 360), False, 'import argparse\n'), ((1997, 2022), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2011, 2022), True, 'import numpy as np\n'), ((2998, 3022), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (3019, 3022), False, 'import torch\n'), ((3293, 3351), 'spinup.utils.run_utils.setup_logger_kwargs', 'setup_logger_kwargs', (['args.exp_name'], {'data_dir': 'args.data_dir'}), '(args.exp_name, data_dir=args.data_dir)\n', (3312, 3351), False, 'from spinup.utils.run_utils import setup_logger_kwargs\n'), ((2061, 2072), 'spinup.utils.mpi_tools.num_procs', 'num_procs', ([], {}), '()\n', (2070, 2072), False, 'from spinup.utils.mpi_tools import proc_id, num_procs\n'), ((3064, 3106), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.exp_name'], {}), '(args.data_dir, args.exp_name)\n', (3076, 3106), False, 'import os\n'), ((3234, 3271), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(2)'}), '(args.__dict__, f, indent=2)\n', (3243, 3271), False, 'import json\n'), ((2123, 2132), 'spinup.utils.mpi_tools.proc_id', 'proc_id', ([], {}), '()\n', (2130, 2132), False, 'from spinup.utils.mpi_tools import proc_id, num_procs\n'), ((3450, 3507), 'os.path.join', 'os.path.join', (['args.load_dir', '"""pyt_save"""', '"""state_dicts.pt"""'], {}), "(args.load_dir, 'pyt_save', 'state_dicts.pt')\n", (3462, 3507), False, 'import os\n'), ((3526, 3651), 'composition.make', 'composition.make', (['args.robot', 'args.object', 'args.obstacle', 'args.task', 'args.controller', 'args.horizon'], {'use_task_id_obs': '(True)'}), '(args.robot, args.object, args.obstacle, args.task, args.\n controller, args.horizon, use_task_id_obs=True)\n', (3542, 3651), False, 'import composition\n'), ((3202, 3211), 'spinup.utils.mpi_tools.proc_id', 'proc_id', ([], {}), '()\n', (3209, 3211), False, 'from spinup.utils.mpi_tools import proc_id, num_procs\n')]
|
from __future__ import division
from math import sqrt as sqrt
from itertools import product as product
import torch
import numpy as np
import cv2
from lib.utils.visualize_utils import TBWriter
def vis(func):
"""tensorboard visualization if has writer as input"""
def wrapper(*args, **kw):
return func(*args, **kw) if kw['tb_writer'] is not None else None
return wrapper
class PriorBoxBase(object):
"""Compute priorbox coordinates in center-offset form for each source
feature map.
"""
def __init__(self, cfg):
super(PriorBoxBase, self).__init__()
self.image_size = cfg.MODEL.IMAGE_SIZE
self._steps = cfg.MODEL.STEPS
self._cfg_list = []
self._prior_cfg = {}
self._clip = cfg.MODEL.CLIP
self._variance = cfg.MODEL.VARIANCE
for v in self._variance:
if v <= 0:
raise ValueError('Variances must be greater than 0')
def _setup(self, cfg):
num_feat = len(self._steps)
for item in self._cfg_list:
if item not in cfg.MODEL:
raise Exception("wrong anchor config!")
if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0:
raise Exception("config {} length does not match step length!".format(item))
self._prior_cfg[item] = cfg.MODEL[item]
@property
def num_priors(self):
"""allow prior num calculation before knowing feature map size"""
        assert self._prior_cfg != {}
return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))]
def _create_prior(self, cx, cy, k):
raise NotImplementedError
@vis
def _image_proc(self, image=None, tb_writer=None):
# TODO test with image
        if image is None:
image = np.ones((self.image_size[1], self.image_size[0], 3))
elif isinstance(image, str):
image = cv2.imread(image, -1)
image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
return image
@vis
def _prior_vis(self, anchor, image_ori, feat_idx, tb_writer=None):
# TODO add output path to the signature
writer = tb_writer.writer
prior_num = self.num_priors[feat_idx]
# transform coordinates
scale = [self.image_size[1], self.image_size[0], self.image_size[1], self.image_size[0]]
bboxs = np.array(anchor).reshape((-1, 4))
box_centers = bboxs[:, :2] * scale[:2] # [x, y]
# bboxs: [xmin, ymin, xmax, ymax]
bboxs = np.hstack((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)) * scale
box_centers = box_centers.astype(np.int32)
bboxs = bboxs.astype(np.int32)
# visualize each anchor box on a feature map
for prior_idx in range(prior_num):
image = image_ori.copy()
bboxs_ = bboxs[prior_idx::prior_num, :]
box_centers_ = box_centers[4 * prior_idx::prior_num, :]
for archor, bbox in zip(box_centers_, bboxs_):
cv2.circle(image, (archor[0], archor[1]), 1, (0, 0, 255), -1)
                if archor[0] == archor[1]:  # only show diagonal anchors
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1)
image = image[..., ::-1]
image = image.transpose((2,0,1))
writer.add_image('base/feature_map_{}_{}'.format(feat_idx, prior_idx), image, 2)
def forward(self, layer_dims, tb_writer=None, image=None):
priors = []
image = self._image_proc(image=image, tb_writer=tb_writer)
for k in range(len(layer_dims)):
prior = []
for i, j in product(range(layer_dims[k][0]), range(layer_dims[k][1])):
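                # steps_x/steps_y equal the feature-map extent implied by the stride,
                # so cx/cy below are anchor centers normalized to [0, 1]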
steps_x = self.image_size[1] / self._steps[k]
steps_y = self.image_size[0] / self._steps[k]
cx = (j + 0.5) / steps_x # unit center x,y
cy = (i + 0.5) / steps_y
prior += self._create_prior(cx, cy, k)
priors += prior
self._prior_vis(prior, image, k, tb_writer=tb_writer)
output = torch.Tensor(priors).view(-1, 4)
        # TODO this clip is meaningless, should clip on [xmin, ymin, xmax, ymax]
if self._clip:
output.clamp_(max=1, min=0)
return output
class PriorBoxSSD(PriorBoxBase):
def __init__(self, cfg):
super(PriorBoxSSD, self).__init__(cfg)
# self.image_size = cfg['image_size']
self._cfg_list = ['MIN_SIZES', 'MAX_SIZES', 'ASPECT_RATIOS']
self._flip = cfg.MODEL.FLIP
self._setup(cfg)
def _create_prior(self, cx, cy, k):
# as the original paper do
prior = []
min_sizes = self._prior_cfg['MIN_SIZES'][k]
min_sizes = [min_sizes] if not isinstance(min_sizes, list) else min_sizes
for ms in min_sizes:
# min square
s_i = ms / self.image_size[0]
s_j = ms / self.image_size[1]
prior += [cx, cy, s_j, s_i]
# min max square
if len(self._prior_cfg['MAX_SIZES']) != 0:
assert type(self._prior_cfg['MAX_SIZES'][k]) is not list # one max size per layer
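                # extra square anchor whose scale is the geometric mean of the min-size and max-size scales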
s_i_prime = sqrt(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))
s_j_prime = sqrt(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))
prior += [cx, cy, s_j_prime, s_i_prime]
# rectangles by min and aspect ratio
for ar in self._prior_cfg['ASPECT_RATIOS'][k]:
prior += [cx, cy, s_j * sqrt(ar), s_i / sqrt(ar)] # a vertical box
if self._flip:
prior += [cx, cy, s_j / sqrt(ar), s_i * sqrt(ar)]
return prior
# PriorBox = PriorBoxSSD
def test_no_vis(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [[30], [60], 111, 162, 213, 264]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
print(p.num_priors)
p1 = p.forward(feat_dim)
print(p1)
def test_filp(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['flip'] = True
feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
cfg['flip'] = False
cfg['aspect_ratios'] = [[2, 1 / 2], [2, 1 / 2, 3, 1 / 3], [2, 1 / 2, 3, 1 / 3],
[2, 1 / 2, 3, 1 / 3], [2, 1 / 2], [2, 1 / 2]]
    p = PriorBoxSSD(cfg)  # the PriorBox alias above is commented out, so use the class directly
p2 = p.forward(feat_dim, tb_writer=tb_writer)
# print(p2)
assert (p2 - p1).sum() < 1e-8
def test_rectangle(cfg, tb_writer):
cfg = copy.deepcopy(cfg)
cfg['feature_maps'] = [38, 19, 10, 5, 3, 1]
cfg['min_sizes'] = [30, 60, 111, 162, 213, 264]
cfg['flip'] = True
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], cfg['feature_maps'])]
# cfg['image_size'] = [300, 300]
# feat_dim = [list(a) for a in zip(cfg['feature_maps'], [item * 2 for item in cfg['feature_maps']])]
# cfg['image_size'] = [300, 600]
feat_dim = [list(a) for a in zip([item * 2 for item in cfg['feature_maps']], cfg['feature_maps'])]
cfg['image_size'] = [600, 300]
p = PriorBoxSSD(cfg)
p1 = p.forward(feat_dim, tb_writer=tb_writer)
print(p1.shape)
if __name__ == '__main__':
import copy
# from lib.datasets.config import ssd_voc_vgg as cfg
# from lib.utils.visualize_utils import TBWriter
# tb_writer = TBWriter(log_dir, {'epoch': 50})
#
# test_no_vis(cfg, tb_writer)
# test_filp(cfg, tb_writer)
# test_rectangle(cfg, tb_writer)
print('haha')
from lib.utils.config import cfg
print(cfg)
|
[
"cv2.rectangle",
"numpy.ones",
"numpy.hstack",
"torch.Tensor",
"math.sqrt",
"numpy.array",
"cv2.circle",
"copy.deepcopy",
"cv2.resize",
"cv2.imread"
] |
[((5886, 5904), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (5899, 5904), False, 'import copy\n'), ((6247, 6265), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (6260, 6265), False, 'import copy\n'), ((6845, 6863), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (6858, 6863), False, 'import copy\n'), ((1993, 2052), 'cv2.resize', 'cv2.resize', (['image', '(self.image_size[1], self.image_size[0])'], {}), '(image, (self.image_size[1], self.image_size[0]))\n', (2003, 2052), False, 'import cv2\n'), ((1845, 1897), 'numpy.ones', 'np.ones', (['(self.image_size[1], self.image_size[0], 3)'], {}), '((self.image_size[1], self.image_size[0], 3))\n', (1852, 1897), True, 'import numpy as np\n'), ((2578, 2657), 'numpy.hstack', 'np.hstack', (['(bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2)'], {}), '((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2))\n', (2587, 2657), True, 'import numpy as np\n'), ((1955, 1976), 'cv2.imread', 'cv2.imread', (['image', '(-1)'], {}), '(image, -1)\n', (1965, 1976), False, 'import cv2\n'), ((2429, 2445), 'numpy.array', 'np.array', (['anchor'], {}), '(anchor)\n', (2437, 2445), True, 'import numpy as np\n'), ((3084, 3145), 'cv2.circle', 'cv2.circle', (['image', '(archor[0], archor[1])', '(1)', '(0, 0, 255)', '(-1)'], {}), '(image, (archor[0], archor[1]), 1, (0, 0, 255), -1)\n', (3094, 3145), False, 'import cv2\n'), ((4181, 4201), 'torch.Tensor', 'torch.Tensor', (['priors'], {}), '(priors)\n', (4193, 4201), False, 'import torch\n'), ((5282, 5348), 'math.sqrt', 'sqrt', (["(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))"], {}), "(s_i * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[0]))\n", (5286, 5348), True, 'from math import sqrt as sqrt\n'), ((5377, 5443), 'math.sqrt', 'sqrt', (["(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))"], {}), "(s_j * (self._prior_cfg['MAX_SIZES'][k] / self.image_size[1]))\n", (5381, 5443), True, 'from math import sqrt as sqrt\n'), ((3238, 3314), 'cv2.rectangle', 'cv2.rectangle', (['image', '(bbox[0], bbox[1])', '(bbox[2], bbox[3])', '(0, 255, 0)', '(1)'], {}), '(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 1)\n', (3251, 3314), False, 'import cv2\n'), ((5648, 5656), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5652, 5656), True, 'from math import sqrt as sqrt\n'), ((5664, 5672), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5668, 5672), True, 'from math import sqrt as sqrt\n'), ((5767, 5775), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5771, 5775), True, 'from math import sqrt as sqrt\n'), ((5783, 5791), 'math.sqrt', 'sqrt', (['ar'], {}), '(ar)\n', (5787, 5791), True, 'from math import sqrt as sqrt\n')]
|
"""
Scatter plot with panning and zooming
Shows a scatter plot of a set of random points,
with basic Chaco panning and zooming.
Interacting with the plot:
- Left-mouse-drag pans the plot.
- Mouse wheel up and down zooms the plot in and out.
- Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular
region to zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow
and alt-right-arrow moves you forwards and backwards through the "zoom
history".
"""
# Major library imports
from numpy import sort
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot
from chaco.tools.api import PanTool, ZoomTool
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
numpts = 5000
x = sort(random(numpts))
y = random(numpts)
    # Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
marker="circle",
index_sort="ascending",
color="orange",
marker_size=3,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot"
plot.line_width = 0.5
plot.padding = 50
# Attach some tools to the plot
plot.tools.append(PanTool(plot, constrain_key="shift"))
zoom = ZoomTool(component=plot, tool_mode="box", always_on=False)
plot.overlays.append(zoom)
return plot
#===============================================================================
# Attributes to use for the plot view.
size = (650, 650)
title = "Basic scatter plot"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size,
bgcolor=bg_color),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
[
"traits.api.Instance",
"chaco.api.ArrayPlotData",
"chaco.tools.api.PanTool",
"chaco.tools.api.ZoomTool",
"numpy.random.random",
"chaco.api.Plot",
"enable.api.ComponentEditor"
] |
[((1150, 1164), 'numpy.random.random', 'random', (['numpts'], {}), '(numpts)\n', (1156, 1164), False, 'from numpy.random import random\n'), ((1228, 1243), 'chaco.api.ArrayPlotData', 'ArrayPlotData', ([], {}), '()\n', (1241, 1243), False, 'from chaco.api import ArrayPlotData, Plot\n'), ((1334, 1342), 'chaco.api.Plot', 'Plot', (['pd'], {}), '(pd)\n', (1338, 1342), False, 'from chaco.api import ArrayPlotData, Plot\n'), ((1795, 1853), 'chaco.tools.api.ZoomTool', 'ZoomTool', ([], {'component': 'plot', 'tool_mode': '"""box"""', 'always_on': '(False)'}), "(component=plot, tool_mode='box', always_on=False)\n", (1803, 1853), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((2344, 2363), 'traits.api.Instance', 'Instance', (['Component'], {}), '(Component)\n', (2352, 2363), False, 'from traits.api import HasTraits, Instance\n'), ((1126, 1140), 'numpy.random.random', 'random', (['numpts'], {}), '(numpts)\n', (1132, 1140), False, 'from numpy.random import random\n'), ((1746, 1782), 'chaco.tools.api.PanTool', 'PanTool', (['plot'], {'constrain_key': '"""shift"""'}), "(plot, constrain_key='shift')\n", (1753, 1782), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((2460, 2504), 'enable.api.ComponentEditor', 'ComponentEditor', ([], {'size': 'size', 'bgcolor': 'bg_color'}), '(size=size, bgcolor=bg_color)\n', (2475, 2504), False, 'from enable.api import Component, ComponentEditor\n')]
|
from abc import ABCMeta, abstractmethod
import os
from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
import re
import numpy as np
import ast
from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader
class FeatureExtractor(Executor):
"""
    FeatureExtractor takes in a list of assets, runs feature extraction on
    them, and returns a list of corresponding results. A FeatureExtractor must
specify a unique type and version combination (by the TYPE and VERSION
attribute), so that the Result generated by it can be identified.
A derived class of FeatureExtractor must:
1) Override TYPE and VERSION
    2) Override _generate_result(self, asset), which calls a
    command-line executable and generates feature scores in a log file.
    3) Override _get_feature_scores(self, asset), which reads the feature
    scores from the log file, and returns the scores in a dictionary format.
For an example, follow VmafFeatureExtractor.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def ATOM_FEATURES(self):
raise NotImplementedError
def _read_result(self, asset):
result = {}
result.update(self._get_feature_scores(asset))
executor_id = self.executor_id
return Result(asset, executor_id, result)
@classmethod
def get_scores_key(cls, atom_feature):
return "{type}_{atom_feature}_scores".format(
type=cls.TYPE, atom_feature=atom_feature)
@classmethod
def get_score_key(cls, atom_feature):
return "{type}_{atom_feature}_score".format(
type=cls.TYPE, atom_feature=atom_feature)
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
atom_feature_scores_dict = {}
atom_feature_idx_dict = {}
for atom_feature in self.ATOM_FEATURES:
atom_feature_scores_dict[atom_feature] = []
atom_feature_idx_dict[atom_feature] = 0
with open(log_file_path, 'rt') as log_file:
for line in log_file.readlines():
for atom_feature in self.ATOM_FEATURES:
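                    # each feature line in the log looks like "<feature>: <frame_index> <value>"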
re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
mo = re.match(re_template, line)
if mo:
cur_idx = int(mo.group(1))
assert cur_idx == atom_feature_idx_dict[atom_feature]
# parse value, allowing NaN and inf
val = float(mo.group(2))
if np.isnan(val) or np.isinf(val):
val = None
atom_feature_scores_dict[atom_feature].append(val)
atom_feature_idx_dict[atom_feature] += 1
continue
len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
assert len_score != 0
for atom_feature in self.ATOM_FEATURES[1:]:
assert len_score == len(atom_feature_scores_dict[atom_feature]), \
"Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {}
for atom_feature in self.ATOM_FEATURES:
scores_key = self.get_scores_key(atom_feature)
feature_result[scores_key] = atom_feature_scores_dict[atom_feature]
return feature_result
class VmafFeatureExtractor(FeatureExtractor):
TYPE = "VMAF_feature"
# VERSION = '0.1' # vmaf_study; Anush's VIF fix
# VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr
# VERSION = '0.2.1' # expose vif num/den of each scale
# VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case
# VERSION = '0.2.2b' # expose adm_den/num_scalex
# VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef
# VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step
# VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2
VERSION = '0.2.4c' # Modify by moving motion2 to c code
ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2',
'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr',
'vif_num_scale0', 'vif_den_scale0',
'vif_num_scale1', 'vif_den_scale1',
'vif_num_scale2', 'vif_den_scale2',
'vif_num_scale3', 'vif_den_scale3',
'adm_num_scale0', 'adm_den_scale0',
'adm_num_scale1', 'adm_den_scale1',
'adm_num_scale2', 'adm_den_scale2',
'adm_num_scale3', 'adm_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3',
'vif2', 'adm2', 'adm3',
'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VmafFeatureExtractor, cls)._post_process_result(result)
# adm2 =
# (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT)
adm2_scores_key = cls.get_scores_key('adm2')
adm_num_scores_key = cls.get_scores_key('adm_num')
adm_den_scores_key = cls.get_scores_key('adm_den')
result.result_dict[adm2_scores_key] = list(
(np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) /
(np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT)
)
# vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3
vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0')
vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0')
vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1')
vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1')
vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2')
vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2')
vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3')
vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3')
vif_scale0_scores_key = cls.get_scores_key('vif_scale0')
vif_scale1_scores_key = cls.get_scores_key('vif_scale1')
vif_scale2_scores_key = cls.get_scores_key('vif_scale2')
vif_scale3_scores_key = cls.get_scores_key('vif_scale3')
result.result_dict[vif_scale0_scores_key] = list(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key]))
)
result.result_dict[vif_scale1_scores_key] = list(
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key]))
)
result.result_dict[vif_scale2_scores_key] = list(
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key]))
)
result.result_dict[vif_scale3_scores_key] = list(
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
)
# vif2 =
# ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) +
# (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0
vif_scores_key = cls.get_scores_key('vif2')
result.result_dict[vif_scores_key] = list(
(
(np.array(result.result_dict[vif_num_scale0_scores_key])
/ np.array(result.result_dict[vif_den_scale0_scores_key])) +
(np.array(result.result_dict[vif_num_scale1_scores_key])
/ np.array(result.result_dict[vif_den_scale1_scores_key])) +
(np.array(result.result_dict[vif_num_scale2_scores_key])
/ np.array(result.result_dict[vif_den_scale2_scores_key])) +
(np.array(result.result_dict[vif_num_scale3_scores_key])
/ np.array(result.result_dict[vif_den_scale3_scores_key]))
) / 4.0
)
# adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3
adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0')
adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0')
adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1')
adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1')
adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2')
adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2')
adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3')
adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3')
adm_scale0_scores_key = cls.get_scores_key('adm_scale0')
adm_scale1_scores_key = cls.get_scores_key('adm_scale1')
adm_scale2_scores_key = cls.get_scores_key('adm_scale2')
adm_scale3_scores_key = cls.get_scores_key('adm_scale3')
result.result_dict[adm_scale0_scores_key] = list(
(np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale1_scores_key] = list(
(np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale2_scores_key] = list(
(np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
result.result_dict[adm_scale3_scores_key] = list(
(np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
)
# adm3 = \
# (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT))
# + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0
adm3_scores_key = cls.get_scores_key('adm3')
result.result_dict[adm3_scores_key] = list(
(
((np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT)) +
((np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT)
/ (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT))
) / 4.0
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class VifFrameDifferenceFeatureExtractor(FeatureExtractor):
TYPE = "VifDiff_feature"
VERSION = '0.1'
ATOM_FEATURES = ['vifdiff',
'vifdiff_num', 'vifdiff_den',
'vifdiff_num_scale0', 'vifdiff_den_scale0',
'vifdiff_num_scale1', 'vifdiff_den_scale1',
'vifdiff_num_scale2', 'vifdiff_den_scale2',
'vifdiff_num_scale3', 'vifdiff_den_scale3',
]
DERIVED_ATOM_FEATURES = ['vifdiff_scale0', 'vifdiff_scale1', 'vifdiff_scale2', 'vifdiff_scale3',
]
ADM2_CONSTANT = 0
ADM_SCALE_CONSTANT = 0
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_vifdiff_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(VifFrameDifferenceFeatureExtractor, cls)._post_process_result(result)
# vifdiff_scalei = vifdiff_num_scalei / vifdiff_den_scalei, i = 0, 1, 2, 3
vifdiff_num_scale0_scores_key = cls.get_scores_key('vifdiff_num_scale0')
vifdiff_den_scale0_scores_key = cls.get_scores_key('vifdiff_den_scale0')
vifdiff_num_scale1_scores_key = cls.get_scores_key('vifdiff_num_scale1')
vifdiff_den_scale1_scores_key = cls.get_scores_key('vifdiff_den_scale1')
vifdiff_num_scale2_scores_key = cls.get_scores_key('vifdiff_num_scale2')
vifdiff_den_scale2_scores_key = cls.get_scores_key('vifdiff_den_scale2')
vifdiff_num_scale3_scores_key = cls.get_scores_key('vifdiff_num_scale3')
vifdiff_den_scale3_scores_key = cls.get_scores_key('vifdiff_den_scale3')
vifdiff_scale0_scores_key = cls.get_scores_key('vifdiff_scale0')
vifdiff_scale1_scores_key = cls.get_scores_key('vifdiff_scale1')
vifdiff_scale2_scores_key = cls.get_scores_key('vifdiff_scale2')
vifdiff_scale3_scores_key = cls.get_scores_key('vifdiff_scale3')
result.result_dict[vifdiff_scale0_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale0_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale0_scores_key]))
)
result.result_dict[vifdiff_scale1_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale1_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale1_scores_key]))
)
result.result_dict[vifdiff_scale2_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale2_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale2_scores_key]))
)
result.result_dict[vifdiff_scale3_scores_key] = list(
(np.array(result.result_dict[vifdiff_num_scale3_scores_key])
/ np.array(result.result_dict[vifdiff_den_scale3_scores_key]))
)
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class PsnrFeatureExtractor(FeatureExtractor):
TYPE = "PSNR_feature"
VERSION = "1.0"
ATOM_FEATURES = ['psnr']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_psnr(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MomentFeatureExtractor(FeatureExtractor):
TYPE = "Moment_feature"
# VERSION = "1.0" # call executable
VERSION = "1.1" # python only
ATOM_FEATURES = ['ref1st', 'ref2nd', 'dis1st', 'dis2nd', ]
DERIVED_ATOM_FEATURES = ['refvar', 'disvar', ]
def _generate_result(self, asset):
# routine to call the command-line executable and generate feature
# scores in the log file.
quality_w, quality_h = asset.quality_width_height
ref_scores_mtx = None
with YuvReader(filepath=asset.ref_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as ref_yuv_reader:
scores_mtx_list = []
i = 0
for ref_yuv in ref_yuv_reader:
ref_y = ref_yuv[0]
firstm = ref_y.mean()
secondm = ref_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
ref_scores_mtx = np.vstack(scores_mtx_list)
dis_scores_mtx = None
with YuvReader(filepath=asset.dis_workfile_path, width=quality_w, height=quality_h,
yuv_type=self._get_workfile_yuv_type(asset)) as dis_yuv_reader:
scores_mtx_list = []
i = 0
for dis_yuv in dis_yuv_reader:
dis_y = dis_yuv[0]
firstm = dis_y.mean()
secondm = dis_y.var() + firstm**2
scores_mtx_list.append(np.hstack(([firstm], [secondm])))
i += 1
dis_scores_mtx = np.vstack(scores_mtx_list)
assert ref_scores_mtx is not None and dis_scores_mtx is not None
log_dict = {'ref_scores_mtx': ref_scores_mtx.tolist(),
'dis_scores_mtx': dis_scores_mtx.tolist()}
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'wt') as log_file:
log_file.write(str(log_dict))
def _get_feature_scores(self, asset):
# routine to read the feature scores from the log file, and return
# the scores in a dictionary format.
log_file_path = self._get_log_file_path(asset)
with open(log_file_path, 'rt') as log_file:
log_str = log_file.read()
log_dict = ast.literal_eval(log_str)
ref_scores_mtx = np.array(log_dict['ref_scores_mtx'])
dis_scores_mtx = np.array(log_dict['dis_scores_mtx'])
_, num_ref_features = ref_scores_mtx.shape
assert num_ref_features == 2 # ref1st, ref2nd
_, num_dis_features = dis_scores_mtx.shape
assert num_dis_features == 2 # dis1st, dis2nd
feature_result = {}
feature_result[self.get_scores_key('ref1st')] = list(ref_scores_mtx[:, 0])
feature_result[self.get_scores_key('ref2nd')] = list(ref_scores_mtx[:, 1])
feature_result[self.get_scores_key('dis1st')] = list(dis_scores_mtx[:, 0])
feature_result[self.get_scores_key('dis2nd')] = list(dis_scores_mtx[:, 1])
return feature_result
@classmethod
def _post_process_result(cls, result):
# override Executor._post_process_result
result = super(MomentFeatureExtractor, cls)._post_process_result(result)
# calculate refvar and disvar from ref1st, ref2nd, dis1st, dis2nd
refvar_scores_key = cls.get_scores_key('refvar')
ref1st_scores_key = cls.get_scores_key('ref1st')
ref2nd_scores_key = cls.get_scores_key('ref2nd')
disvar_scores_key = cls.get_scores_key('disvar')
dis1st_scores_key = cls.get_scores_key('dis1st')
dis2nd_scores_key = cls.get_scores_key('dis2nd')
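        # var(X) = E[X^2] - E[X]^2, recovered from the stored first and second moments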
get_var = lambda m: m[1] - m[0] * m[0]
result.result_dict[refvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[ref1st_scores_key],
result.result_dict[ref2nd_scores_key])))
result.result_dict[disvar_scores_key] = \
to_list(map(get_var, zip(result.result_dict[dis1st_scores_key],
result.result_dict[dis2nd_scores_key])))
# validate
for feature in cls.DERIVED_ATOM_FEATURES:
assert cls.get_scores_key(feature) in result.result_dict
return result
class SsimFeatureExtractor(FeatureExtractor):
TYPE = "SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ssim', 'ssim_l', 'ssim_c', 'ssim_s']
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
class MsSsimFeatureExtractor(FeatureExtractor):
TYPE = "MS_SSIM_feature"
# VERSION = "1.0"
VERSION = "1.1" # fix OPT_RANGE_PIXEL_OFFSET = 0
ATOM_FEATURES = ['ms_ssim',
'ms_ssim_l_scale0', 'ms_ssim_c_scale0', 'ms_ssim_s_scale0',
'ms_ssim_l_scale1', 'ms_ssim_c_scale1', 'ms_ssim_s_scale1',
'ms_ssim_l_scale2', 'ms_ssim_c_scale2', 'ms_ssim_s_scale2',
'ms_ssim_l_scale3', 'ms_ssim_c_scale3', 'ms_ssim_s_scale3',
'ms_ssim_l_scale4', 'ms_ssim_c_scale4', 'ms_ssim_s_scale4',
]
def _generate_result(self, asset):
# routine to call the command-line executable and generate quality
# scores in the log file.
quality_width, quality_height = asset.quality_width_height
log_file_path = self._get_log_file_path(asset)
yuv_type=self._get_workfile_yuv_type(asset)
ref_path=asset.ref_workfile_path
dis_path=asset.dis_workfile_path
w=quality_width
h=quality_height
logger = self.logger
ExternalProgramCaller.call_ms_ssim(yuv_type, ref_path, dis_path, w, h, log_file_path, logger)
|
[
"vmaf.ExternalProgramCaller.call_vmaf_feature",
"vmaf.core.result.Result",
"vmaf.ExternalProgramCaller.call_ssim",
"numpy.hstack",
"re.match",
"ast.literal_eval",
"numpy.array",
"vmaf.ExternalProgramCaller.call_psnr",
"numpy.isnan",
"numpy.vstack",
"vmaf.ExternalProgramCaller.call_ms_ssim",
"numpy.isinf",
"vmaf.ExternalProgramCaller.call_vifdiff_feature"
] |
[((1573, 1607), 'vmaf.core.result.Result', 'Result', (['asset', 'executor_id', 'result'], {}), '(asset, executor_id, result)\n', (1579, 1607), False, 'from vmaf.core.result import Result\n'), ((6044, 6146), 'vmaf.ExternalProgramCaller.call_vmaf_feature', 'ExternalProgramCaller.call_vmaf_feature', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (6083, 6146), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((14143, 14248), 'vmaf.ExternalProgramCaller.call_vifdiff_feature', 'ExternalProgramCaller.call_vifdiff_feature', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w,\n h, log_file_path, logger)\n', (14185, 14248), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((17137, 17231), 'vmaf.ExternalProgramCaller.call_psnr', 'ExternalProgramCaller.call_psnr', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (17168, 17231), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((19599, 19635), 'numpy.array', 'np.array', (["log_dict['ref_scores_mtx']"], {}), "(log_dict['ref_scores_mtx'])\n", (19607, 19635), True, 'import numpy as np\n'), ((19661, 19697), 'numpy.array', 'np.array', (["log_dict['dis_scores_mtx']"], {}), "(log_dict['dis_scores_mtx'])\n", (19669, 19697), True, 'import numpy as np\n'), ((22214, 22308), 'vmaf.ExternalProgramCaller.call_ssim', 'ExternalProgramCaller.call_ssim', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (22245, 22308), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((23415, 23512), 'vmaf.ExternalProgramCaller.call_ms_ssim', 'ExternalProgramCaller.call_ms_ssim', (['yuv_type', 'ref_path', 'dis_path', 'w', 'h', 'log_file_path', 'logger'], {}), '(yuv_type, ref_path, dis_path, w, h,\n log_file_path, logger)\n', (23449, 23512), False, 'from vmaf import ExternalProgramCaller, to_list\n'), ((18258, 18284), 'numpy.vstack', 'np.vstack', (['scores_mtx_list'], {}), '(scores_mtx_list)\n', (18267, 18284), True, 'import numpy as np\n'), ((18837, 18863), 'numpy.vstack', 'np.vstack', (['scores_mtx_list'], {}), '(scores_mtx_list)\n', (18846, 18863), True, 'import numpy as np\n'), ((19548, 19573), 'ast.literal_eval', 'ast.literal_eval', (['log_str'], {}), '(log_str)\n', (19564, 19573), False, 'import ast\n'), ((7803, 7858), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale0_scores_key]'], {}), '(result.result_dict[vif_num_scale0_scores_key])\n', (7811, 7858), True, 'import numpy as np\n'), ((7874, 7929), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale0_scores_key]'], {}), '(result.result_dict[vif_den_scale0_scores_key])\n', (7882, 7929), True, 'import numpy as np\n'), ((8012, 8067), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale1_scores_key]'], {}), '(result.result_dict[vif_num_scale1_scores_key])\n', (8020, 8067), True, 'import numpy as np\n'), ((8083, 8138), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale1_scores_key]'], {}), '(result.result_dict[vif_den_scale1_scores_key])\n', (8091, 8138), True, 'import numpy as np\n'), ((8221, 8276), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale2_scores_key]'], {}), '(result.result_dict[vif_num_scale2_scores_key])\n', (8229, 8276), True, 'import numpy as np\n'), ((8292, 
8347), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale2_scores_key]'], {}), '(result.result_dict[vif_den_scale2_scores_key])\n', (8300, 8347), True, 'import numpy as np\n'), ((8430, 8485), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale3_scores_key]'], {}), '(result.result_dict[vif_num_scale3_scores_key])\n', (8438, 8485), True, 'import numpy as np\n'), ((8501, 8556), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale3_scores_key]'], {}), '(result.result_dict[vif_den_scale3_scores_key])\n', (8509, 8556), True, 'import numpy as np\n'), ((15548, 15607), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale0_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale0_scores_key])\n', (15556, 15607), True, 'import numpy as np\n'), ((15623, 15682), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale0_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale0_scores_key])\n', (15631, 15682), True, 'import numpy as np\n'), ((15769, 15828), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale1_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale1_scores_key])\n', (15777, 15828), True, 'import numpy as np\n'), ((15844, 15903), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale1_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale1_scores_key])\n', (15852, 15903), True, 'import numpy as np\n'), ((15990, 16049), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale2_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale2_scores_key])\n', (15998, 16049), True, 'import numpy as np\n'), ((16065, 16124), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale2_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale2_scores_key])\n', (16073, 16124), True, 'import numpy as np\n'), ((16211, 16270), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_num_scale3_scores_key]'], {}), '(result.result_dict[vifdiff_num_scale3_scores_key])\n', (16219, 16270), True, 'import numpy as np\n'), ((16286, 16345), 'numpy.array', 'np.array', (['result.result_dict[vifdiff_den_scale3_scores_key]'], {}), '(result.result_dict[vifdiff_den_scale3_scores_key])\n', (16294, 16345), True, 'import numpy as np\n'), ((2665, 2692), 're.match', 're.match', (['re_template', 'line'], {}), '(re_template, line)\n', (2673, 2692), False, 'import re\n'), ((6651, 6699), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scores_key]'], {}), '(result.result_dict[adm_num_scores_key])\n', (6659, 6699), True, 'import numpy as np\n'), ((6736, 6784), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scores_key]'], {}), '(result.result_dict[adm_den_scores_key])\n', (6744, 6784), True, 'import numpy as np\n'), ((10492, 10547), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale0_scores_key]'], {}), '(result.result_dict[adm_num_scale0_scores_key])\n', (10500, 10547), True, 'import numpy as np\n'), ((10589, 10644), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale0_scores_key]'], {}), '(result.result_dict[adm_den_scale0_scores_key])\n', (10597, 10644), True, 'import numpy as np\n'), ((10752, 10807), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale1_scores_key]'], {}), '(result.result_dict[adm_num_scale1_scores_key])\n', (10760, 10807), True, 'import numpy as np\n'), ((10849, 10904), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale1_scores_key]'], {}), '(result.result_dict[adm_den_scale1_scores_key])\n', (10857, 10904), True, 'import numpy as 
np\n'), ((11012, 11067), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale2_scores_key]'], {}), '(result.result_dict[adm_num_scale2_scores_key])\n', (11020, 11067), True, 'import numpy as np\n'), ((11109, 11164), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale2_scores_key]'], {}), '(result.result_dict[adm_den_scale2_scores_key])\n', (11117, 11164), True, 'import numpy as np\n'), ((11272, 11327), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale3_scores_key]'], {}), '(result.result_dict[adm_num_scale3_scores_key])\n', (11280, 11327), True, 'import numpy as np\n'), ((11369, 11424), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale3_scores_key]'], {}), '(result.result_dict[adm_den_scale3_scores_key])\n', (11377, 11424), True, 'import numpy as np\n'), ((18172, 18204), 'numpy.hstack', 'np.hstack', (['([firstm], [secondm])'], {}), '(([firstm], [secondm]))\n', (18181, 18204), True, 'import numpy as np\n'), ((18751, 18783), 'numpy.hstack', 'np.hstack', (['([firstm], [secondm])'], {}), '(([firstm], [secondm]))\n', (18760, 18783), True, 'import numpy as np\n'), ((9343, 9398), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale3_scores_key]'], {}), '(result.result_dict[vif_num_scale3_scores_key])\n', (9351, 9398), True, 'import numpy as np\n'), ((9418, 9473), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale3_scores_key]'], {}), '(result.result_dict[vif_den_scale3_scores_key])\n', (9426, 9473), True, 'import numpy as np\n'), ((2987, 3000), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (2995, 3000), True, 'import numpy as np\n'), ((3004, 3017), 'numpy.isinf', 'np.isinf', (['val'], {}), '(val)\n', (3012, 3017), True, 'import numpy as np\n'), ((9192, 9247), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale2_scores_key]'], {}), '(result.result_dict[vif_num_scale2_scores_key])\n', (9200, 9247), True, 'import numpy as np\n'), ((9267, 9322), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale2_scores_key]'], {}), '(result.result_dict[vif_den_scale2_scores_key])\n', (9275, 9322), True, 'import numpy as np\n'), ((12610, 12665), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale3_scores_key]'], {}), '(result.result_dict[adm_num_scale3_scores_key])\n', (12618, 12665), True, 'import numpy as np\n'), ((12712, 12767), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale3_scores_key]'], {}), '(result.result_dict[adm_den_scale3_scores_key])\n', (12720, 12767), True, 'import numpy as np\n'), ((8890, 8945), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale0_scores_key]'], {}), '(result.result_dict[vif_num_scale0_scores_key])\n', (8898, 8945), True, 'import numpy as np\n'), ((8965, 9020), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale0_scores_key]'], {}), '(result.result_dict[vif_den_scale0_scores_key])\n', (8973, 9020), True, 'import numpy as np\n'), ((9041, 9096), 'numpy.array', 'np.array', (['result.result_dict[vif_num_scale1_scores_key]'], {}), '(result.result_dict[vif_num_scale1_scores_key])\n', (9049, 9096), True, 'import numpy as np\n'), ((9116, 9171), 'numpy.array', 'np.array', (['result.result_dict[vif_den_scale1_scores_key]'], {}), '(result.result_dict[vif_den_scale1_scores_key])\n', (9124, 9171), True, 'import numpy as np\n'), ((12405, 12460), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale2_scores_key]'], {}), '(result.result_dict[adm_num_scale2_scores_key])\n', (12413, 12460), True, 'import numpy as np\n'), ((12507, 12562), 
'numpy.array', 'np.array', (['result.result_dict[adm_den_scale2_scores_key]'], {}), '(result.result_dict[adm_den_scale2_scores_key])\n', (12515, 12562), True, 'import numpy as np\n'), ((11995, 12050), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale0_scores_key]'], {}), '(result.result_dict[adm_num_scale0_scores_key])\n', (12003, 12050), True, 'import numpy as np\n'), ((12097, 12152), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale0_scores_key]'], {}), '(result.result_dict[adm_den_scale0_scores_key])\n', (12105, 12152), True, 'import numpy as np\n'), ((12200, 12255), 'numpy.array', 'np.array', (['result.result_dict[adm_num_scale1_scores_key]'], {}), '(result.result_dict[adm_num_scale1_scores_key])\n', (12208, 12255), True, 'import numpy as np\n'), ((12302, 12357), 'numpy.array', 'np.array', (['result.result_dict[adm_den_scale1_scores_key]'], {}), '(result.result_dict[adm_den_scale1_scores_key])\n', (12310, 12357), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
from pathlib import Path
import numpy as np
import torch
from PIL import Image
from panopticapi.utils import rgb2id
# from util.box_ops import masks_to_boxes
from .construction import make_construction_transforms
import logging
def box_xywh_to_xyxy(x):
xs, ys, w, h = x.unbind(-1)
b = [xs, ys, (xs + w), (ys + h)]
return torch.stack(b, dim=-1)
def masks_to_boxes(segments):
boxes = []
labels = []
iscrowd = []
area = []
for ann in segments:
if len(ann["bbox"]) == 4:
boxes.append(ann["bbox"])
area.append(ann['area'])
else:
boxes.append([0, 0, 2, 2])
area.append(4)
labels.append(ann["category_id"])
iscrowd.append(ann['iscrowd'])
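    # guard against annotations with no segments: fall back to a single dummy box and label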
if len(boxes) == 0 and len(labels) == 0:
boxes.append([0, 0, 2, 2])
labels.append(1)
area.append(4)
iscrowd.append(0)
boxes = torch.tensor(boxes, dtype=torch.int64)
labels = torch.tensor(labels, dtype=torch.int64)
iscrowd = torch.tensor(iscrowd)
area = torch.tensor(area)
boxes = box_xywh_to_xyxy(boxes)
return boxes, labels, iscrowd, area
class ConstructionPanoptic:
def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True):
with open(ann_file, "r") as f:
self.coco = json.load(f)
# sort 'images' field so that they are aligned with 'annotations'
# i.e., in alphabetical order
self.coco["images"] = sorted(self.coco["images"], key=lambda x: x["id"])
# sanity check
if "annotations" in self.coco:
for img, ann in zip(self.coco["images"], self.coco["annotations"]):
assert img["file_name"][:-4] == ann["file_name"][:-4]
self.img_folder = img_folder
self.ann_folder = ann_folder
self.ann_file = ann_file
self.transforms = transforms
self.return_masks = return_masks
def __getitem__(self, idx):
try:
ann_info = (
self.coco["annotations"][idx]
if "annotations" in self.coco
else self.coco["images"][idx]
)
img_path = Path(self.img_folder) / ann_info["file_name"].replace(".png", ".jpg")
ann_path = Path(self.ann_folder) / ann_info["file_name"]
img = Image.open(img_path).convert("RGB")
w, h = img.size
if "segments_info" in ann_info:
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
masks = rgb2id(masks)
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
masks = masks == ids[:, None, None]
masks = torch.as_tensor(masks, dtype=torch.uint8)
# labels = torch.tensor(
# [ann["category_id"] for ann in ann_info["segments_info"]],
# dtype=torch.int64,
# )
target = {}
target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]])
if self.return_masks:
target['masks'] = masks
boxes, labels, iscrowd, area = masks_to_boxes(ann_info["segments_info"])
target['labels'] = labels
# Instead of finding boxes, just take the one from json info available
# target["boxes"] = masks_to_boxes(ann_info["segments_info"])
target["boxes"] = boxes
target['size'] = torch.as_tensor([int(h), int(w)])
target['orig_size'] = torch.as_tensor([int(h), int(w)])
target['iscrowd'] = iscrowd
target['area'] = area
# if "segments_info" in ann_info:
# for name in ['iscrowd', 'area']:
# target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
except Exception as e:
logging.error(ann_info)
raise e
def __len__(self):
return len(self.coco['images'])
def get_height_and_width(self, idx):
img_info = self.coco['images'][idx]
height = img_info['height']
width = img_info['width']
return height, width
def build(image_set, args):
root = Path(args.data_path)
assert (
root.exists()
), f"provided Panoptic path {root} does not exist"
mode = "panoptic"
PATHS = {
"train": ("images", f"{mode}", f"{mode}.json"),
"val": ("images", f"val_{mode}", f"val_{mode}.json"),
}
img_folder, ann_folder, ann_file = PATHS[image_set]
img_folder_path = root / img_folder
ann_folder_path = root / ann_folder
ann_file = root / ann_file
dataset = ConstructionPanoptic(
img_folder_path,
ann_folder_path,
ann_file,
transforms=make_construction_transforms(image_set),
return_masks=args.masks,
)
return dataset
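# A minimal usage sketch (the argparse-style namespace and path below are assumed for
# illustration; the data root must contain the "images" and "{mode}" folders plus the
# "{mode}.json" annotation files laid out in PATHS above):
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(data_path="/path/to/dataset", masks=True)
#   train_ds = build("train", args)
#   img, target = train_ds[0]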
|
[
"torch.as_tensor",
"PIL.Image.open",
"pathlib.Path",
"torch.stack",
"panopticapi.utils.rgb2id",
"torch.tensor",
"numpy.array",
"json.load",
"logging.error"
] |
[((422, 444), 'torch.stack', 'torch.stack', (['b'], {'dim': '(-1)'}), '(b, dim=-1)\n', (433, 444), False, 'import torch\n'), ((1007, 1045), 'torch.tensor', 'torch.tensor', (['boxes'], {'dtype': 'torch.int64'}), '(boxes, dtype=torch.int64)\n', (1019, 1045), False, 'import torch\n'), ((1059, 1098), 'torch.tensor', 'torch.tensor', (['labels'], {'dtype': 'torch.int64'}), '(labels, dtype=torch.int64)\n', (1071, 1098), False, 'import torch\n'), ((1113, 1134), 'torch.tensor', 'torch.tensor', (['iscrowd'], {}), '(iscrowd)\n', (1125, 1134), False, 'import torch\n'), ((1146, 1164), 'torch.tensor', 'torch.tensor', (['area'], {}), '(area)\n', (1158, 1164), False, 'import torch\n'), ((4513, 4533), 'pathlib.Path', 'Path', (['args.data_path'], {}), '(args.data_path)\n', (4517, 4533), False, 'from pathlib import Path\n'), ((1427, 1439), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1436, 1439), False, 'import json\n'), ((3116, 3203), 'torch.tensor', 'torch.tensor', (["[ann_info['image_id'] if 'image_id' in ann_info else ann_info['id']]"], {}), "([ann_info['image_id'] if 'image_id' in ann_info else ann_info[\n 'id']])\n", (3128, 3203), False, 'import torch\n'), ((2278, 2299), 'pathlib.Path', 'Path', (['self.img_folder'], {}), '(self.img_folder)\n', (2282, 2299), False, 'from pathlib import Path\n'), ((2371, 2392), 'pathlib.Path', 'Path', (['self.ann_folder'], {}), '(self.ann_folder)\n', (2375, 2392), False, 'from pathlib import Path\n'), ((2642, 2655), 'panopticapi.utils.rgb2id', 'rgb2id', (['masks'], {}), '(masks)\n', (2648, 2655), False, 'from panopticapi.utils import rgb2id\n'), ((2679, 2737), 'numpy.array', 'np.array', (["[ann['id'] for ann in ann_info['segments_info']]"], {}), "([ann['id'] for ann in ann_info['segments_info']])\n", (2687, 2737), True, 'import numpy as np\n'), ((2815, 2856), 'torch.as_tensor', 'torch.as_tensor', (['masks'], {'dtype': 'torch.uint8'}), '(masks, dtype=torch.uint8)\n', (2830, 2856), False, 'import torch\n'), ((4178, 4201), 'logging.error', 'logging.error', (['ann_info'], {}), '(ann_info)\n', (4191, 4201), False, 'import logging\n'), ((2436, 2456), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2446, 2456), False, 'from PIL import Image\n'), ((2579, 2599), 'PIL.Image.open', 'Image.open', (['ann_path'], {}), '(ann_path)\n', (2589, 2599), False, 'from PIL import Image\n')]
|
import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import (
Any,
Dict,
Hashable,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
BasicIndexer,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import (
cupy_array_type,
dask_array_type,
integer_types,
is_duck_dask_array,
)
from .utils import (
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
is_duck_array,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
(
indexing.ExplicitlyIndexed,
pd.Index,
)
+ dask_array_type
+ cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
class MissingDimensionsError(ValueError):
"""Error class used when we can't safely guess a dimension name."""
# inherits from ValueError for backward compatibility
# TODO: move this to an xarray.exceptions module?
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
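    Examples
    --------
    A minimal illustrative sketch (values chosen for demonstration):
    >>> v = as_variable(("x", [1, 2, 3]))  # a (dims, data) tuple becomes a Variable
    >>> idx = as_variable(pd.Index([10, 20]), name="x")  # named 1D data becomes an IndexVariable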
"""
from .dataarray import DataArray
# TODO: consider extending this method to automatically handle Iris and
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__(
"Could not convert tuple of form "
"(dims, data[, attrs, encoding]): "
"{} to Variable.".format(obj)
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
"cannot set variable %r with %r-dimensional data "
"without explicit dimension names. Pass a tuple of "
"(dims, data) instead." % (name, data.ndim)
)
obj = Variable(name, data, fastpath=True)
else:
raise TypeError(
"unable to convert object into a variable without an "
"explicit list of dimensions: %r" % obj
)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
"%r has more than 1-dimension and the same name as one of its "
"dimensions %r. xarray disallows such variables because they "
"conflict with the coordinates used to label "
"dimensions." % (name, obj.dims)
)
obj = obj.to_index_variable()
return obj
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention. Also used for
validating that datetime64 and timedelta64 objects are within the valid date
range for ns precision, as pandas will raise an error if they are not.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, "ndim", 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, "ns")
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
data = getattr(data, "values", data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if not isinstance(data, np.ndarray):
if hasattr(data, "__array_function__"):
if IS_NEP18_ACTIVE:
return data
else:
raise TypeError(
"Got an NumPy-like array type providing the "
"__array_function__ protocol but NEP18 is not enabled. "
"Check that numpy >= v1.16 and that the environment "
'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to '
'"1"'
)
# validate whether the data is valid data types.
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == "O":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "M":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "m":
data = _possibly_convert_objects(data)
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
if isinstance(data, cupy_array_type):
data = data.get()
else:
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == "M":
data = np.datetime64(data, "ns")
elif data.dtype.kind == "m":
data = np.timedelta64(data, "ns")
return data
class Variable(
common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin
):
"""A netcdf-like variable consisting of dimensions, data and attributes
which describe a single Array. A single Variable object is not fully
described outside the context of its parent Dataset (if you want such a
fully described object, use a DataArray instead).
The main functional difference between Variables and numpy arrays is that
numerical operations on Variables implement array broadcasting by dimension
    name. For example, adding a Variable with dimensions `('time',)` to
another Variable with dimensions `('space',)` results in a new Variable
with dimensions `('time', 'space')`. Furthermore, numpy reduce operations
like ``mean`` or ``sum`` are overwritten to take a "dimension" argument
instead of an "axis".
Variables are light-weight objects used as the building block for datasets.
They are more primitive objects, so operations with them provide marginally
higher performance than using DataArrays. However, manipulating data in the
form of a Dataset or DataArray should almost always be preferred, because
they can use more complete metadata in context of coordinate labels.
"""
__slots__ = ("_dims", "_data", "_attrs", "_encoding")
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
"""
Parameters
----------
dims : str or sequence of str
            Name(s) of the data dimension(s). Must be either a string (only
for 1D data) or a sequence of strings with length equal to the
number of dimensions.
data : array_like
Data array which supports numpy-like data access.
attrs : dict_like or None, optional
Attributes to assign to the new variable. If None (default), an
empty attribute dictionary is initialized.
encoding : dict_like or None, optional
Dictionary specifying how to encode this array's data into a
serialized format like netCDF4. Currently used keys (for netCDF)
include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.
Well-behaved code to serialize a Variable should ignore
unrecognized encoding items.
"""
self._data = as_compatible_data(data, fastpath=fastpath)
self._dims = self._parse_dimensions(dims)
self._attrs = None
self._encoding = None
if attrs is not None:
self.attrs = attrs
if encoding is not None:
self.encoding = encoding
@property
def dtype(self):
return self._data.dtype
@property
def shape(self):
return self._data.shape
@property
def nbytes(self):
return self.size * self.dtype.itemsize
@property
def _in_memory(self):
return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (
isinstance(self._data, indexing.MemoryCachedArray)
and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
)
@property
def data(self):
if is_duck_array(self._data):
return self._data
else:
return self.values
@data.setter
def data(self, data):
data = as_compatible_data(data)
if data.shape != self.shape:
raise ValueError(
f"replacement data must match the Variable's shape. "
f"replacement data has shape {data.shape}; Variable has shape {self.shape}"
)
self._data = data
def astype(
self: VariableType,
dtype,
*,
order=None,
casting=None,
subok=None,
copy=None,
keep_attrs=True,
) -> VariableType:
"""
Copy of the Variable object, with data cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result. ‘C’ means C order,
‘F’ means Fortran order, ‘A’ means ‘F’ order if all the arrays are
Fortran contiguous, ‘C’ order otherwise, and ‘K’ means as close to
the order the array elements appear in memory as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise the
returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to False and the `dtype` requirement is satisfied, the input
array is returned instead of a copy.
keep_attrs : bool, optional
By default, astype keeps attributes. Set to False to remove
attributes in the returned object.
Returns
-------
out : same as object
New object with data cast to the specified type.
Notes
-----
The ``order``, ``casting``, ``subok`` and ``copy`` arguments are only passed
through to the ``astype`` method of the underlying array when a value
different than ``None`` is supplied.
Make sure to only supply these arguments if the underlying array class
supports them.
See also
--------
numpy.ndarray.astype
dask.array.Array.astype
sparse.COO.astype
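        Examples
        --------
        A minimal illustrative sketch (dtype chosen for demonstration):
        >>> v = Variable(("x",), [1, 2, 3])
        >>> v_float = v.astype(np.float32)  # data cast to float32; attrs kept since keep_attrs defaults to True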
"""
from .computation import apply_ufunc
kwargs = dict(order=order, casting=casting, subok=subok, copy=copy)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
return apply_ufunc(
duck_array_ops.astype,
self,
dtype,
kwargs=kwargs,
keep_attrs=keep_attrs,
dask="allowed",
)
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if is_duck_dask_array(self._data):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not is_duck_array(self._data):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_tokenize__(self):
# Use v.data, instead of v._data, in order to cope with the wrappers
# around NetCDF and the like
from dask.base import normalize_token
return normalize_token((type(self), self._dims, self.data, self._attrs))
def __dask_graph__(self):
if is_duck_dask_array(self._data):
return self._data.__dask_graph__()
else:
return None
def __dask_keys__(self):
return self._data.__dask_keys__()
def __dask_layers__(self):
return self._data.__dask_layers__()
@property
def __dask_optimize__(self):
return self._data.__dask_optimize__
@property
def __dask_scheduler__(self):
return self._data.__dask_scheduler__
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
@staticmethod
def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):
data = array_func(results, *array_args)
return Variable(dims, data, attrs=attrs, encoding=encoding)
@property
def values(self):
"""The variable's data as a numpy.ndarray"""
return _as_array_or_item(self._data)
@values.setter
def values(self, values):
self.data = values
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
@property
def dims(self):
"""Tuple of dimension names with which this variable is associated."""
return self._dims
@dims.setter
def dims(self, value):
self._dims = self._parse_dimensions(value)
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
"dimensions %s must have the same length as the "
"number of data dimensions, ndim=%s" % (dims, self.ndim)
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
-----------
key: int, slice, array-like, dict or tuple of integer, slice and array-like
Any valid input for indexing.
Returns
-------
dims : tuple
Dimension of the resultant variable.
indexers : IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) indexing should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect it can be mapped as an outer indexer
# If all key is unlabeled, or
# key can be mapped as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If all key is 1-dimensional and there are no duplicate labels,
# key can be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
if k.dtype.kind == "b":
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {:d} is used to index array "
"with shape {:s}.".format(len(k), str(self.shape))
)
if k.ndim > 1:
raise IndexError(
"{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim)
)
if getattr(k, "dims", (dim,)) != (dim,):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {:s} but the target dimension is {:s}.".format(
str(k.dims), dim
)
)
def _broadcast_indexes_outer(self, key):
dims = tuple(
k.dims[0] if isinstance(k, Variable) else dim
for k, dim in zip(key, self.dims)
if not isinstance(k, integer_types)
)
new_key = []
for k in key:
if isinstance(k, Variable):
k = k.data
if not isinstance(k, BASIC_INDEXING_TYPES):
k = np.asarray(k)
if k.size == 0:
# Slice by empty list; numpy could not infer the dtype
k = k.astype(int)
elif k.dtype.kind == "b":
(k,) = np.nonzero(k)
new_key.append(k)
return dims, OuterIndexer(tuple(new_key)), None
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def _broadcast_indexes_vectorized(self, key):
variables = []
out_dims_set = OrderedSet()
for dim, value in zip(self.dims, key):
if isinstance(value, slice):
out_dims_set.add(dim)
else:
variable = (
value
if isinstance(value, Variable)
else as_variable(value, name=dim)
)
if variable.dtype.kind == "b": # boolean indexing case
(variable,) = variable._nonzero()
variables.append(variable)
out_dims_set.update(variable.dims)
variable_dims = set()
for variable in variables:
variable_dims.update(variable.dims)
slices = []
for i, (dim, value) in enumerate(zip(self.dims, key)):
if isinstance(value, slice):
if dim in variable_dims:
# We only convert slice objects to variables if they share
# a dimension with at least one other variable. Otherwise,
                    # we can equivalently leave them as slices and transpose
# the result. This is significantly faster/more efficient
# for most array backends.
values = np.arange(*value.indices(self.sizes[dim]))
variables.insert(i - len(slices), Variable((dim,), values))
else:
slices.append((i, value))
try:
variables = _broadcast_compat_variables(*variables)
except ValueError:
raise IndexError(f"Dimensions of indexers mismatch: {key}")
out_key = [variable.data for variable in variables]
out_dims = tuple(out_dims_set)
slice_positions = set()
for i, value in slices:
out_key.insert(i, value)
new_position = out_dims.index(self.dims[i])
slice_positions.add(new_position)
if slice_positions:
new_order = [i for i in range(len(out_dims)) if i not in slice_positions]
else:
new_order = None
return out_dims, VectorizedIndexer(tuple(out_key)), new_order
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible."""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of local attributes on this variable."""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self):
"""Dictionary of encodings on this variable."""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value):
try:
self._encoding = dict(value)
except ValueError:
raise ValueError("encoding must be castable to a dictionary")
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
data = copy.deepcopy(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
@property
def chunks(self):
"""Block dimensions for this array's data or None if it's not a dask
array.
"""
return getattr(self._data, "chunks", None)
_array_counter = itertools.count()
def chunk(self, chunks={}, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
        If chunks is not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
            graph. Need not be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
            already a dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask
import dask.array as da
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
)
chunks = {}
if utils.is_dict_like(chunks):
chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}
data = self._data
if is_duck_dask_array(data):
data = data.rechunk(chunks)
else:
if isinstance(data, indexing.ExplicitlyIndexed):
# Unambiguously handle array storage backends (like NetCDF4 and h5py)
# that can't handle general array indexing. For example, in netCDF4 you
                # can do "outer" indexing along two dimensions independently, which works
# differently from how NumPy handles it.
# da.from_array works by using lazy indexing with a tuple of slices.
# Using OuterIndexer is a pragmatic choice: dask does not yet handle
# different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer
)
if LooseVersion(dask.__version__) < "2.0.0":
kwargs = {}
else:
# All of our lazily loaded backend array classes should use NumPy
# array operations.
kwargs = {"meta": np.ndarray}
else:
kwargs = {}
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
"""
use sparse-array as backend.
"""
import sparse
# TODO: what to do if dask-backended?
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = dtypes.result_type(self.dtype, fill_value)
if sparse_format is _default:
sparse_format = "coo"
try:
as_sparse = getattr(sparse, f"as_{sparse_format.lower()}")
except AttributeError:
raise ValueError(f"{sparse_format} is not a valid sparse format")
data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
return self._replace(data=data)
def _to_dense(self):
"""
Change backend from sparse to np.array
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
def isel(
self: VariableType,
indexers: Mapping[Hashable, Any] = None,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> VariableType:
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
            - "warn": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
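        Examples
        --------
        A minimal illustrative sketch (dimension names chosen for demonstration):
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> row = v.isel(x=0)  # drops "x"; result has dims ("y",)
        >>> cols = v.isel(y=slice(1, None))  # keeps both dims; shape becomes (2, 2)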
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
return self[key]
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
            This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel({d: 0 for d in dims})
def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
axis = self.get_axis_num(dim)
if count > 0:
keep = slice(None, -count)
elif count < 0:
keep = slice(-count, None)
else:
keep = slice(None)
trimmed_data = self[(slice(None),) * axis + (keep,)].data
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
width = min(abs(count), self.shape[axis])
dim_pad = (width, 0) if count >= 0 else (0, width)
pads = [(0, 0) if d != dim else dim_pad for d in self.dims]
data = duck_array_ops.pad(
trimmed_data.astype(dtype),
pads,
mode="constant",
constant_values=fill_value,
)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value: scalar, optional
Value to use for newly missing values
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
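        Examples
        --------
        A minimal illustrative sketch (offsets and values chosen for demonstration):
        >>> v = Variable(("x",), [1, 2, 3])
        >>> shifted = v.shift(x=1)  # data becomes [nan, 1., 2.]; dtype promoted to float for the fill value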
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
def _pad_options_dim_to_index(
self,
pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]],
fill_with_shape=False,
):
if fill_with_shape:
return [
(n, n) if d not in pad_option else pad_option[d]
for d, n in zip(self.dims, self.data.shape)
]
return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims]
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
See numpy / Dask docs
stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
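        Examples
        --------
        A minimal illustrative sketch (pad widths chosen for demonstration):
        >>> v = Variable(("x",), [1, 2, 3])
        >>> padded = v.pad(x=(1, 2))  # one NaN before and two after along "x"; dtype promoted to float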
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, shifts=None, **shifts_kwargs):
"""
        Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
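        Examples
        --------
        A minimal illustrative sketch (offsets chosen for demonstration):
        >>> v = Variable(("x",), [1, 2, 3])
        >>> rolled = v.roll(x=1)  # values wrap around: data becomes [3, 1, 2]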
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
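        Examples
        --------
        A minimal illustrative sketch (dimension names chosen for demonstration):
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> vt = v.transpose()  # dims become ("y", "x"), shape (3, 2)
        >>> same = v.transpose("x", "y")  # order unchanged, so a shallow copy is returned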
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
@property
def T(self) -> "Variable":
return self.transpose()
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
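        Examples
        --------
        A minimal illustrative sketch (dimension names and sizes chosen for demonstration):
        >>> v = Variable(("x",), [1, 2, 3])
        >>> v2 = v.set_dims(["y", "x"])  # inserts "y" with length 1; shape becomes (1, 3)
        >>> v3 = v.set_dims({"y": 2, "x": 3})  # broadcasts "y" to length 2; shape becomes (2, 3)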
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError("invalid existing dimensions: %s" % dims)
if new_dim in self.dims:
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
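        Examples
        --------
        A minimal illustrative sketch (dimension names chosen for demonstration):
        >>> v = Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
        >>> stacked = v.stack(z=("x", "y"))  # dims become ("z",) with length 6, in C order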
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError("invalid existing dimension: %s" % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError(
"the product of the new dimension sizes must "
"equal the size of the old dimension"
)
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
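        Examples
        --------
        A minimal illustrative sketch (dimension names and sizes chosen for demonstration):
        >>> v = Variable(("z",), [1, 2, 3, 4, 5, 6])
        >>> unstacked = v.unstack(z={"x": 2, "y": 3})  # dims become ("x", "y"), shape (2, 3)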
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if axis is not None:
data = func(self.data, axis=axis, **kwargs)
else:
data = func(self.data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = [
adim for n, adim in enumerate(self.dims) if n not in removed_axes
]
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to
which to assign each dataset along the concatenated dimension.
If not supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
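        Examples
        --------
        A minimal illustrative sketch (dimension names chosen for demonstration):
        >>> v1 = Variable(("x",), [1, 2])
        >>> v2 = Variable(("x",), [3, 4])
        >>> along_x = Variable.concat([v1, v2], dim="x")  # existing dimension: shape becomes (4,)
        >>> with_t = Variable.concat([v1, v2], dim="t")  # new dimension added first: dims ("t", "x")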
"""
if not isinstance(dim, str):
(dim,) = dim.dims
# can't do this lazily: we need to loop through variables at least
# twice
variables = list(variables)
first_var = variables[0]
arrays = [v.data for v in variables]
if dim in first_var.dims:
axis = first_var.get_axis_num(dim)
dims = first_var.dims
data = duck_array_ops.concatenate(arrays, axis=axis)
if positions is not None:
# TODO: deprecate this option -- we don't need it for groupby
# any more.
indices = nputils.inverse_permutation(np.concatenate(positions))
data = duck_array_ops.take(data, indices, axis=axis)
else:
axis = 0
dims = (dim,) + first_var.dims
data = duck_array_ops.stack(arrays, axis=axis)
attrs = dict(first_var.attrs)
encoding = dict(first_var.encoding)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError(
f"Variable has dimensions {list(var.dims)} but first Variable has dimensions {list(first_var.dims)}"
)
return cls(dims, data, attrs, encoding)
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes."""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
from .computation import apply_ufunc
_quantile_func = np.nanquantile if skipna else np.quantile
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
scalar = utils.is_scalar(q)
q = np.atleast_1d(np.asarray(q, dtype=np.float64))
if dim is None:
dim = self.dims
if utils.is_scalar(dim):
dim = [dim]
def _wrapper(npa, **kwargs):
# move quantile axis to end. required for apply_ufunc
return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)
axis = np.arange(-1, -1 * len(dim) - 1, -1)
result = apply_ufunc(
_wrapper,
self,
input_core_dims=[dim],
exclude_dims=set(dim),
output_core_dims=[["quantile"]],
output_dtypes=[np.float64],
dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
dask="parallelized",
kwargs={"q": q, "axis": axis, "interpolation": interpolation},
)
# for backward compatibility
result = result.transpose("quantile", ...)
if scalar:
result = result.squeeze("quantile")
if keep_attrs:
result.attrs = self._attrs
return result
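    # Hedged usage sketch for quantile() above (illustrative values only, not part
    # of the library source):
    #
    #     >>> v = Variable(("x",), np.arange(4.0))
    #     >>> v.quantile(0.5, dim="x")
    #     <xarray.Variable ()>
    #     array(1.5)
    #     >>> v.quantile([0.25, 0.75], dim="x").dims   # new leading "quantile" dim
    #     ('quantile',)
    #
    # With skipna=True (the default) NaNs are ignored via np.nanquantile.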
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if is_duck_dask_array(data):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
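    # Hedged sketch of the ranking semantics above (requires bottleneck; values
    # are illustrative):
    #
    #     >>> Variable(("x",), [30.0, 10.0, 20.0]).rank("x").values
    #     array([3., 1., 2.])
    #     >>> Variable(("x",), [1.0, 1.0, 2.0]).rank("x").values   # ties averaged
    #     array([1.5, 1.5, 3. ])
    #
    # With pct=True each rank is divided by the number of non-NaN values along "x".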
def rolling_window(
self, dim, window, window_dim, center=False, fill_value=dtypes.NA
):
"""
Make a rolling_window along dim and add a new_dim to the last place.
Parameters
----------
dim : str
Dimension over which to compute rolling_window.
For nd-rolling, should be list of dimensions.
        window : int
            Window size of the rolling window.
            For nd-rolling, should be a list of integers.
        window_dim : str
            New name of the window dimension.
            For nd-rolling, should be a list of strings.
center : bool, default: False
If True, pad fill_value for both ends. Otherwise, pad in the head
of the axis.
fill_value
value to be filled.
Returns
-------
        Variable that is a view of the original array with an added dimension of
        size ``window``.
The return dim: self.dims + (window_dim, )
The return shape: self.shape + (window, )
Examples
--------
>>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
>>> v.rolling_window("b", 3, "window_dim")
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
>>> v.rolling_window("b", 3, "window_dim", center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
if isinstance(dim, list):
assert len(dim) == len(window)
assert len(dim) == len(window_dim)
assert len(dim) == len(center)
else:
dim = [dim]
window = [window]
window_dim = [window_dim]
center = [center]
axis = [self.get_axis_num(d) for d in dim]
new_dims = self.dims + tuple(window_dim)
return Variable(
new_dims,
duck_array_ops.rolling_window(
array, axis=axis, window=window, center=center, fill_value=fill_value
),
)
def coarsen(
self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs
):
"""
Apply reduction function.
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if not windows:
return self.copy()
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
_attrs = self.attrs
else:
_attrs = None
reshaped, axes = self._coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs)
def _coarsen_reshape(self, windows, boundary, side):
"""
Construct a reshaped-array for coarsen
"""
if not utils.is_dict_like(boundary):
boundary = {d: boundary for d in windows.keys()}
if not utils.is_dict_like(side):
side = {d: side for d in windows.keys()}
# remove unrelated dimensions
boundary = {k: v for k, v in boundary.items() if k in windows}
side = {k: v for k, v in side.items() if k in windows}
for d, window in windows.items():
if window <= 0:
raise ValueError(f"window must be > 0. Given {window}")
variable = self
for d, window in windows.items():
# trim or pad the object
size = variable.shape[self._get_axis_num(d)]
n = int(size / window)
if boundary[d] == "exact":
if n * window != size:
raise ValueError(
"Could not coarsen a dimension of size {} with "
"window {}".format(size, window)
)
elif boundary[d] == "trim":
if side[d] == "left":
variable = variable.isel({d: slice(0, window * n)})
else:
excess = size - window * n
variable = variable.isel({d: slice(excess, None)})
elif boundary[d] == "pad": # pad
pad = window * n - size
if pad < 0:
pad += window
if side[d] == "left":
pad_width = {d: (0, pad)}
else:
pad_width = {d: (pad, 0)}
variable = variable.pad(pad_width, mode="constant")
else:
raise TypeError(
"{} is invalid for boundary. Valid option is 'exact', "
"'trim' and 'pad'".format(boundary[d])
)
shape = []
axes = []
axis_count = 0
for i, d in enumerate(variable.dims):
if d in windows:
size = variable.shape[i]
shape.append(int(size / windows[d]))
shape.append(windows[d])
axis_count += 1
axes.append(i + axis_count)
else:
shape.append(variable.shape[i])
return variable.data.reshape(shape), tuple(axes)
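    # Hedged sketch of the reshape above (illustrative shapes): for a variable with
    # dims ("a", "b") and shape (4, 6), windows={"a": 2, "b": 3} yields data reshaped
    # to (2, 2, 2, 3) with reduction axes (1, 3); coarsen() then applies the chosen
    # function over those axes, producing a (2, 2) array of block aggregates.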
def isnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is a missing value.
Returns
-------
isnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.isnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.isnull()
<xarray.Variable (x: 3)>
array([False, True, False])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.isnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def notnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is not a missing value.
Returns
-------
notnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.notnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.notnull()
<xarray.Variable (x: 3)>
array([ True, False, True])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.notnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
@property
def real(self):
return type(self)(self.dims, self.data.real, self._attrs)
@property
def imag(self):
return type(self)(self.dims, self.data.imag, self._attrs)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
with np.errstate(all="ignore"):
result = self.__array_wrap__(f(self.data, *args, **kwargs))
if keep_attrs:
result.attrs = self.attrs
return result
return func
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
if isinstance(other, (xr.DataArray, xr.Dataset)):
return NotImplemented
self_data, other_data, dims = _broadcast_compat_data(self, other)
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
with np.errstate(all="ignore"):
new_data = (
f(self_data, other_data)
if not reflexive
else f(other_data, self_data)
)
result = Variable(dims, new_data, attrs=attrs)
return result
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, xr.Dataset):
raise TypeError("cannot add a Dataset to a Variable in-place")
self_data, other_data, dims = _broadcast_compat_data(self, other)
if dims != self.dims:
raise ValueError("dimensions cannot change for in-place operations")
with np.errstate(all="ignore"):
self.values = f(self_data, other_data)
return self
return func
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
"""A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
def _unravel_argminmax(
self,
argminmax: str,
dim: Union[Hashable, Sequence[Hashable], None],
axis: Union[int, None],
keep_attrs: Optional[bool],
skipna: Optional[bool],
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Apply argmin or argmax over one or more dimensions, returning the result as a
dict of DataArray that can be passed directly to isel.
"""
if dim is None and axis is None:
warnings.warn(
"Behaviour of argmin/argmax with neither dim nor axis argument will "
"change to return a dict of indices of each dimension. To get a "
"single, flat index, please use np.argmin(da.data) or "
"np.argmax(da.data) instead of da.argmin() or da.argmax().",
DeprecationWarning,
stacklevel=3,
)
argminmax_func = getattr(duck_array_ops, argminmax)
if dim is ...:
# In future, should do this also when (dim is None and axis is None)
dim = self.dims
if (
dim is None
or axis is not None
or not isinstance(dim, Sequence)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
return self.reduce(
argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna
)
# Get a name for the new dimension that does not conflict with any existing
# dimension
newdimname = "_unravel_argminmax_dim_0"
count = 1
while newdimname in self.dims:
newdimname = f"_unravel_argminmax_dim_{count}"
count += 1
stacked = self.stack({newdimname: dim})
result_dims = stacked.dims[:-1]
reduce_shape = tuple(self.sizes[d] for d in dim)
result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna)
result_unravelled_indices = duck_array_ops.unravel_index(
result_flat_indices.data, reduce_shape
)
result = {
d: Variable(dims=result_dims, data=i)
for d, i in zip(dim, result_unravelled_indices)
}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
for v in result.values():
v.attrs = self.attrs
return result
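    # Hedged sketch of the dict result above (illustrative values): for
    # v = Variable(("y", "x"), [[3, 1], [0, 2]]), v.argmin(dim=["y", "x"]) returns
    # {"y": Variable((), 1), "x": Variable((), 0)}, i.e. per-dimension indices of
    # the minimum (value 0) that can be passed directly to isel().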
def argmin(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the minimum of the Variable over one or more dimensions.
        If a sequence is passed to 'dim', the result is returned as a dict of Variables,
        which can be passed directly to isel(). If a single str is passed to 'dim', a
        Variable with dtype int is returned.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
        See Also
--------
DataArray.argmin, DataArray.idxmin
"""
return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna)
def argmax(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
        If a sequence is passed to 'dim', the result is returned as a dict of Variables,
        which can be passed directly to isel(). If a single str is passed to 'dim', a
        Variable with dtype int is returned.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
            Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
        See Also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
ops.inject_all_ops_and_reduce_methods(Variable)
class IndexVariable(Variable):
"""Wrapper for accommodating a pandas.Index in an xarray.Variable.
    IndexVariable preserves loaded values in the form of a pandas.Index instead
of a NumPy array. Hence, their values are immutable and must always be one-
dimensional.
They also have a name property, which is the name of their sole dimension
unless another name is given.
"""
__slots__ = ()
def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
super().__init__(dims, data, attrs, encoding, fastpath)
if self.ndim != 1:
raise ValueError("%s objects must be 1-dimensional" % type(self).__name__)
# Unlike in Variable, always eagerly load values into memory
if not isinstance(self._data, PandasIndexAdapter):
self._data = PandasIndexAdapter(self._data)
def __dask_tokenize__(self):
from dask.base import normalize_token
# Don't waste time converting pd.Index to np.ndarray
return normalize_token((type(self), self._dims, self._data.array, self._attrs))
def load(self):
# data is already loaded into memory for IndexVariable
return self
# https://github.com/python/mypy/issues/1465
@Variable.data.setter # type: ignore
def data(self, data):
raise ValueError(
f"Cannot assign to the .data attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
@Variable.values.setter # type: ignore
def values(self, values):
raise ValueError(
f"Cannot assign to the .values attribute of dimension coordinate a.k.a IndexVariable {self.name!r}. "
f"Please use DataArray.assign_coords, Dataset.assign_coords or Dataset.assign as appropriate."
)
def chunk(self, chunks={}, name=None, lock=False):
# Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()
return self.copy(deep=False)
def _as_sparse(self, sparse_format=_default, fill_value=_default):
# Dummy
return self.copy(deep=False)
def _to_dense(self):
# Dummy
return self.copy(deep=False)
def _finalize_indexing_result(self, dims, data):
if getattr(data, "ndim", 0) != 1:
# returns Variable rather than IndexVariable if multi-dimensional
return Variable(dims, data, self._attrs, self._encoding)
else:
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def __setitem__(self, key, value):
raise TypeError("%s values cannot be modified" % type(self).__name__)
@classmethod
def concat(cls, variables, dim="concat_dim", positions=None, shortcut=False):
"""Specialized version of Variable.concat for IndexVariable objects.
This exists because we want to avoid converting Index objects to NumPy
arrays, if possible.
"""
if not isinstance(dim, str):
(dim,) = dim.dims
variables = list(variables)
first_var = variables[0]
if any(not isinstance(v, cls) for v in variables):
raise TypeError(
"IndexVariable.concat requires that all input "
"variables be IndexVariable objects"
)
indexes = [v._data.array for v in variables]
if not indexes:
data = []
else:
data = indexes[0].append(indexes[1:])
if positions is not None:
indices = nputils.inverse_permutation(np.concatenate(positions))
data = data.take(indices)
attrs = dict(first_var.attrs)
if not shortcut:
for var in variables:
if var.dims != first_var.dims:
raise ValueError("inconsistent dimensions")
utils.remove_incompatible_items(attrs, var.attrs)
return cls(first_var.dims, data, attrs)
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is ignored when data is given. Whether the data array is
loaded into memory and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data.copy(deep=deep)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def equals(self, other, equiv=None):
        # if equiv is specified, defer to the superclass implementation
if equiv is not None:
return super().equals(other, equiv)
# otherwise use the native index equals, rather than looking at _data
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and self._data_equals(other)
except (TypeError, AttributeError):
return False
def _data_equals(self, other):
return self.to_index().equals(other.to_index())
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [
name or "{}_level_{}".format(self.dims[0], i)
for i, name in enumerate(index.names)
]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
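    # Hedged sketch: IndexVariable("x", pd.Index([10, 20, 30])).to_index() returns
    # a pandas.Index named "x"; for a pd.MultiIndex with unnamed levels the
    # placeholder names "x_level_0", "x_level_1", ... are substituted so the levels
    # can be renamed safely later on.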
@property
def level_names(self):
"""Return MultiIndex level names or None if this IndexVariable has no
MultiIndex.
"""
index = self.to_index()
if isinstance(index, pd.MultiIndex):
return index.names
else:
return None
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
@property
def name(self):
return self.dims[0]
@name.setter
def name(self, value):
raise AttributeError("cannot modify name of IndexVariable in-place")
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _unified_dims(variables):
# validate dimensions
all_dims = {}
for var in variables:
var_dims = var.dims
if len(set(var_dims)) < len(var_dims):
raise ValueError(
"broadcasting cannot handle duplicate "
"dimensions: %r" % list(var_dims)
)
for d, s in zip(var_dims, var.shape):
if d not in all_dims:
all_dims[d] = s
elif all_dims[d] != s:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension %r: %s"
% (d, (all_dims[d], s))
)
return all_dims
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
Unlike the result of broadcast_variables(), some variables may have
    dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(
var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables
)
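# Hedged usage sketch for broadcast_variables (illustrative shapes only):
#
#     >>> a = Variable(("x",), [1, 2])
#     >>> b = Variable(("y",), [10, 20, 30])
#     >>> a2, b2 = broadcast_variables(a, b)
#     >>> a2.dims, a2.shape, b2.shape
#     (('x', 'y'), (2, 3), (2, 3))
#
# The returned data are broadcast views of the originals, with the dimension
# order taken from the first variable followed by the second.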
def _broadcast_compat_data(self, other):
if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
# `other` satisfies the necessary Variable API for broadcast_variables
new_self, new_other = _broadcast_compat_variables(self, other)
self_data = new_self.data
other_data = new_other.data
dims = new_self.dims
else:
# rely on numpy broadcasting rules
self_data = self.data
other_data = other
dims = self.dims
return self_data, other_data, dims
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
        This option is used internally to speed up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
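# Hedged usage sketch for concat (illustrative values only):
#
#     >>> v1 = Variable(("x",), [1, 2, 3])
#     >>> v2 = Variable(("x",), [4, 5, 6])
#     >>> concat([v1, v2], dim="t").shape    # new dimension -> stacked along axis 0
#     (2, 3)
#     >>> concat([v1, v2], dim="x").shape    # existing dimension -> concatenated
#     (6,)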
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append(f"{n!r} ({var_name})")
if idx_level_names:
all_level_names.update(idx_level_names)
for k, v in level_names.items():
if k in variables:
v.append("(%s)" % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str)
    # Check for conflicts between level names and dimensions GH:2299
for k, v in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError(
"conflicting level / dimension names. {} "
"already exists as a level name.".format(d)
)
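# Hedged sketch of the checks above: if two MultiIndex coordinates both expose a
# level named "year", or if a level name collides with an existing dimension
# name, a ValueError listing the conflicting names is raised; otherwise the
# function returns silently.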
|
[
"numpy.prod",
"numpy.ma.getmaskarray",
"numpy.logical_not",
"numpy.asanyarray",
"copy.deepcopy",
"copy.copy",
"numpy.asarray",
"functools.wraps",
"numpy.concatenate",
"numpy.datetime64",
"warnings.warn",
"dask.array.from_array",
"numpy.isnan",
"numpy.nonzero",
"numpy.timedelta64",
"warnings.filterwarnings",
"numpy.atleast_1d",
"warnings.catch_warnings",
"numpy.errstate",
"itertools.count",
"collections.defaultdict",
"distutils.version.LooseVersion",
"typing.TypeVar"
] |
|
'''
<NAME>
set up: 2020-1-9
integrate img and label into one file
-- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
def getDatasets(dir):
return os.listdir(dir)
class perturbed(utils.BasePerturbed):
def __init__(self, path, bg_path, save_path, save_suffix):
self.path = path
self.bg_path = bg_path
self.save_path = save_path
self.save_suffix = save_suffix
def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)
save_img_shape = [512*2, 480*2] # 320
# reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
# reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
# reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
base_img_shrink = save_img_shape[0] - reduce_value
# enlarge_img_shrink = [1024, 768]
# enlarge_img_shrink = [896, 672] # 420
enlarge_img_shrink = [512*4, 480*4] # 420
# enlarge_img_shrink = [896*2, 768*2] # 420
# enlarge_img_shrink = [896, 768] # 420
# enlarge_img_shrink = [768, 576] # 420
# enlarge_img_shrink = [640, 480] # 420
''''''
im_lr = origin_img.shape[0]
im_ud = origin_img.shape[1]
reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
# reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
if im_lr > im_ud:
im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
im_lr = save_img_shape[0] - reduce_value
else:
base_img_shrink = save_img_shape[1] - reduce_value
im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
im_ud = base_img_shrink
if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
repeat_time = min(repeat_time, 8)
edge_padding = 3
im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1
im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1
im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
# im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1
# im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1
# im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
# im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
im_x, im_y = np.meshgrid(im_hight, im_wide)
segment_x = (im_lr) // (fiducial_points-1)
segment_y = (im_ud) // (fiducial_points-1)
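		# Hedged note on the lattice above (assumed shapes): im_hight and im_wide each
		# hold `fiducial_points` evenly spaced pixel coordinates inside the resized page,
		# so np.meshgrid gives two (fiducial_points, fiducial_points) index grids that
		# address a regular lattice of control points; segment_x / segment_y are the
		# pixel spacings between neighbouring lattice lines.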
# plt.plot(im_x, im_y,
# color='limegreen',
# marker='.',
# linestyle='')
# plt.grid(True)
# plt.show()
self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
perturbed_bg_ = getDatasets(self.bg_path)
perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)
mesh_shape = self.origin_img.shape[:2]
self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img)
# self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img)
self.new_shape = self.synthesis_perturbed_img.shape[:2]
perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA)
origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
# self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
# fiducial_points_grid = origin_pixel_position[im_x, im_y]
self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
origin_pixel_position += [x_min, y_min]
x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
x_min += x_shift
x_max += x_shift
y_min += y_shift
y_max += y_shift
'''im_x,y'''
im_x += x_min
im_y += y_min
self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position
synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()
foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label
# synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
# synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
'''*****************************************************************'''
is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
# if not is_normalizationFun_mixture:
normalizationFun_0_1 = False
# normalizationFun_0_1 = self.is_perform(0.5, 0.5)
if fold_curve == 'fold':
fold_curve_random = True
# is_normalizationFun_mixture = False
normalizationFun_0_1 = self.is_perform(0.2, 0.8)
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99)
alpha_perturbed = random.randint(80, 160) / 100
# is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99)
synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
# synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9)
for repeat_i in range(repeat_time):
if alpha_perturbed_change:
if fold_curve == 'fold':
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
alpha_perturbed = random.randint(80, 160) / 100
''''''
linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1,
self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1]
linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1,
self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1]
linspace_x_seq = [1, 2, 3]
linspace_y_seq = [1, 2, 3]
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_p = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice:
linspace_x_seq.remove(r_x)
linspace_y_seq.remove(r_y)
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_pp = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
# perturbed_p, perturbed_pp = np.array(
# [random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10]) \
# , np.array([random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10])
# perturbed_p, perturbed_pp = np.array(
# [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \
# , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10])
''''''
perturbed_vp = perturbed_pp - perturbed_p
perturbed_vp_norm = np.linalg.norm(perturbed_vp)
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
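			# Hedged note on the geometry above: perturbed_vp is the direction of the
			# fold/curve line through perturbed_p and perturbed_pp, and the dot product of
			# (perturbed_p - pixel_position) with it, divided by the line length, is the
			# signed scalar projection of every pixel onto that direction; its magnitude
			# is normalised below and used to attenuate the random displacement perturbed_v.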
''''''
# perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100])
# perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100])
if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7):
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100])
# perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100])
else:
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
# perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100])
perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100])
# perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100])
# perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10])
''''''
if fold_curve == 'fold':
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
''''''
if fold_curve_random:
# omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed)
# omega_perturbed = alpha_perturbed**perturbed_d
omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed)
else:
omega_perturbed = 1 - perturbed_d ** alpha_perturbed
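			# Hedged note on the attenuation above: when fold_curve_random is set,
			# omega = alpha / (d + alpha) decays smoothly away from the fold/curve line;
			# otherwise omega = 1 - d**alpha, which is largest on the line (d = 0) and
			# vanishes at the normalised distance d = 1. omega then scales perturbed_v
			# pixel-wise when it is added to self.perturbed_xy_ below.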
'''shadow'''
if self.is_perform(0.6, 0.4):
synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255)
''''''
if relativeShift_position in ['position', 'relativeShift_v2']:
self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0)
else:
print('relativeShift_position error')
exit()
'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
# synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8)
synthesis_perturbed_label[:, :, 0] *= foreORbackground_label
synthesis_perturbed_label[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 0] *= foreORbackground_label
synthesis_perturbed_img[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 2] *= foreORbackground_label
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
'''
'''perspective'''
perspective_shreshold = random.randint(26, 36)*10 # 280
x_min_per, y_min_per, x_max_per, y_max_per = self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold)
pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]])
e_1_ = x_max_per - x_min_per
e_2_ = y_max_per - y_min_per
e_3_ = e_2_
e_4_ = e_1_
perspective_shreshold_h = e_1_*0.02
perspective_shreshold_w = e_2_*0.02
a_min_, a_max_ = 70, 110
# if self.is_perform(1, 0):
if fold_curve == 'curve' and self.is_perform(0.5, 0.5):
if self.is_perform(0.5, 0.5):
while True:
pts2 = np.around(
np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(
np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
M = cv2.getPerspectiveTransform(pts1, pts2)
one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)
matr = np.dstack((pixel_position, one))
new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3)
x = new[:, :, 0]/new[:, :, 2]
y = new[:, :, 1]/new[:, :, 2]
perturbed_xy_ = np.dstack((x, y))
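		# Hedged note on the perspective step above: M is the 3x3 homography mapping
		# pts1 to pts2; every pixel position is lifted to homogeneous coordinates,
		# multiplied by M and divided by the third component, and the resulting (x, y)
		# grid (shifted to start at zero below) is added to the accumulated
		# displacement field self.perturbed_xy_.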
# perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75))
# perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17)))
# perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17))
# perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0)
perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1)
# perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16)
self.perturbed_xy_ += perturbed_xy_
'''perspective end'''
'''to img'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
# self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7))
self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0)
'''get fiducial points'''
fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y]
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
self.foreORbackground_label = foreORbackground_label
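		# Hedged note on the resampling above (assuming utils.BasePerturbed.interp_weights
		# follows the usual scipy.spatial.Delaunay barycentric recipe): the warped
		# coordinates are triangulated once, and for every output pixel the enclosing
		# triangle's vertices and barycentric weights are reused to interpolate the
		# image, the label map and the foreground mask; pixels whose absolute weights
		# sum to more than 1 lie outside the hull and keep the background value.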
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
'''
'''clip'''
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
		if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == 0 or perturbed_y_max == self.new_shape[1]:
raise Exception('clip error')
if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
raise Exception('clip error')
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
is_shrink = False
if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
is_shrink = True
synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
'''shrink fiducial points'''
center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
fiducial_points_coordinate *= [shrink_x, shrink_y]
center_x_l *= shrink_x
center_y_l *= shrink_y
# fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
# fiducial_points_coordinate[1:, :1, 0] *= shrink_x
# fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
# perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
self.foreORbackground_label = np.zeros_like(self.foreORbackground_label)
self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label
self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label
center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
if is_shrink:
fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l]
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img,
(l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img)
'''
self.new_shape = save_img_shape
self.synthesis_perturbed_img = self.synthesis_perturbed_img[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.foreORbackground_label = self.foreORbackground_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy()
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
'''clip
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
self.new_shape = save_img_shape
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy()
'''
'''save'''
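# With relativeShift_position == 'relativeShift_v2' the stored label becomes a
# relative displacement field: the absolute source position minus each pixel's
# own coordinate, kept only on document (foreground) pixels.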
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
if relativeShift_position == 'relativeShift_v2':
self.synthesis_perturbed_label -= pixel_position
fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2]
self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label
'''
synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
# if self.is_perform(0.9, 0.1) or repeat_time > 5:
# # if self.is_perform(0.1, 0.9) and repeat_time > 9:
# # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0)
# # else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
# else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1]
'''
'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img
HSV
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.2)*20
perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
# synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771]
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
'''
'''HSV_v2'''
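# Composite the warped document with the background. Assuming is_perform(a, b)
# returns True with probability a / (a + b): roughly 10% of the time the HSV
# jitter (self.HSV_v1) is applied to either the foreground or the background
# before blending, otherwise it is applied to the blended result. The
# (1 - foreORbackground_label) factor zeroes the background wherever the
# document covers it.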
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
# if self.is_perform(1, 0):
# if self.is_perform(1, 0):
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
''''''
# cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip)
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0
self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255
self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8)
label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32)
label[:, :, :2] = self.synthesis_perturbed_label
label[:, :, 2] = self.foreORbackground_label
# grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16)
# synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2)
synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2)
self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32)
# self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32)
reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2))))
reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2))))
perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0)
perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0])
perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0)
perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1])
if im_lr >= im_ud:
self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :]
# self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :]
else:
self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :]
# self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :]
'''blur'''
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy()
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
else:
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
if self.is_perform(0.5, 0.5):
self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1]
else:
self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter
fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1]
'''draw fiducial points'''
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1)
cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img)
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
'''forward-begin'''
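# Build the forward mapping (flat scan -> distorted image). The document's
# foreground pixels provide scattered samples (location = source position in
# the flat scan, value = position in the distorted image), which are
# interpolated onto the full grid with the same interp_weights/interpolate
# helpers used for the backward warp. forward_mapping[x, y] is then the
# distorted-image coordinate of flat-scan pixel (x, y); remapping the
# distorted image through it (see the commented cv2.remap call below) would
# reconstruct the flat page.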
self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_position = (self.synthesis_perturbed_color[:, :, 3:5] + pixel_position)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
flat_position = np.argwhere(np.zeros(save_img_shape, dtype=np.uint32) == 0)
vtx, wts = self.interp_weights(forward_position, flat_position)
wts_sum = np.abs(wts).sum(-1)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
flat_position_forward = flat_position.reshape(save_img_shape[0], save_img_shape[1], 2)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
forward_mapping.reshape(save_img_shape[0] * save_img_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(flat_position_forward, vtx, wts)
forward_mapping = forward_mapping.reshape(save_img_shape[0], save_img_shape[1], 2)
mapping_x_min_, mapping_y_min_, mapping_x_max_, mapping_y_max_ = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
threshold_zoom_out = 2
mapping_x_min = mapping_x_min_ + threshold_zoom_out
mapping_y_min = mapping_y_min_ + threshold_zoom_out
mapping_x_max = mapping_x_max_ - threshold_zoom_out
mapping_y_max = mapping_y_max_ - threshold_zoom_out
self.forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max] = forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max]
self.scan_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
self.scan_img[mapping_x_min_:mapping_x_max_, mapping_y_min_:mapping_y_max_] = self.origin_img
self.origin_img = self.scan_img
# flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
# cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img)
# cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img)
'''forward-end'''
synthesis_perturbed_data = {
'fiducial_points': fiducial_points_coordinate,
'segment': np.array((segment_x, segment_y))
}
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
with open(self.save_path+'color/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
pickle_perturbed_data = pickle.dumps(synthesis_perturbed_data)
f.write(pickle_perturbed_data)
# with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
# pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey)
# f.write(pickle_perturbed_data)
# cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1])
# cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img)
train_t = time.time() - begin_train
mm, ss = divmod(train_t, 60)
hh, mm = divmod(mm, 60)
print(str(m)+'_'+str(n)+'_'+fold_curve+' '+str(repeat_time)+" Time : %02d:%02d:%02d\n" % (hh, mm, ss))
def multiThread(m, n, img_path_, bg_path_, save_path, save_suffix):
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(10, 3)), 5), 16)
fold = threading.Thread(target=saveFold.save_img, args=(m, n, 'fold', repeat_time, 'relativeShift_v2'), name='fold')
curve = threading.Thread(target=saveCurve.save_img, args=(m, n, 'curve', repeat_time, 'relativeShift_v2'), name='curve')
fold.start()
curve.start()
curve.join()
fold.join()
def xgw(args):
path = args.path
bg_path = args.bg_path
if args.output_path is None:
save_path = '/lustre/home/gwxie/data/unwarp_new/train/general1024/general1024_v1/'
else:
save_path = args.output_path
# if not os.path.exists(save_path + 'grey/'):
# os.makedirs(save_path + 'grey/')
if not os.path.exists(save_path + 'color/'):
os.makedirs(save_path + 'color/')
if not os.path.exists(save_path + 'fiducial_points/'):
os.makedirs(save_path + 'fiducial_points/')
if not os.path.exists(save_path + 'png/'):
os.makedirs(save_path + 'png/')
if not os.path.exists(save_path + 'scan/'):
os.makedirs(save_path + 'scan/')
if not os.path.exists(save_path + 'outputs/'):
os.makedirs(save_path + 'outputs/')
save_suffix = str.split(args.path, '/')[-2]
all_img_path = getDatasets(path)
all_bgImg_path = getDatasets(bg_path)
global begin_train
begin_train = time.time()
fiducial_points = 61 # 31
process_pool = Pool(2)
for m, img_path in enumerate(all_img_path):
for n in range(args.sys_num):
img_path_ = path+img_path
bg_path_ = bg_path+random.choice(all_bgImg_path)+'/'
for m_n in range(10):
try:
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(12, 4)), 1), 18)
# repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12) # random.randint(1, 2) # min(max(round(np.random.normal(8, 4)), 1), 12)
process_pool.apply_async(func=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'))
repeat_time = min(max(round(np.random.normal(8, 4)), 1), 13)
# repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10)
process_pool.apply_async(func=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'))
except BaseException as err:
print(err)
continue
break
# print('end')
process_pool.close()
process_pool.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--path',
default='./scan/new/', type=str,
help='the path of origin img.')
parser.add_argument('--bg_path',
default='./background/', type=str,
help='the path of bg img.')
parser.add_argument('--output_path',
default='./output/', type=str,
help='the path of output.')
# parser.set_defaults(output_path='test')
parser.add_argument('--count_from', '-p', default=0, type=int,
metavar='N', help='index to start counting from (default: 0)')
parser.add_argument('--repeat_T', default=0, type=int)
parser.add_argument('--sys_num', default=6, type=int)
args = parser.parse_args()
xgw(args)
|
[
"pickle.dumps",
"numpy.array",
"numpy.linalg.norm",
"os.path.exists",
"os.listdir",
"numpy.full_like",
"argparse.ArgumentParser",
"numpy.linspace",
"numpy.dot",
"numpy.concatenate",
"numpy.meshgrid",
"random.randint",
"numpy.random.normal",
"numpy.abs",
"random.choice",
"numpy.ones",
"cv2.getPerspectiveTransform",
"numpy.random.choice",
"numpy.around",
"threading.Thread",
"cv2.resize",
"cv2.GaussianBlur",
"time.time",
"cv2.imread",
"numpy.dstack",
"cv2.imwrite",
"math.ceil",
"os.makedirs",
"numpy.sum",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.full",
"random.random",
"numpy.zeros_like",
"numpy.float32"
] |
[((595, 610), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (605, 610), False, 'import sys, os\n'), ((44019, 44132), 'threading.Thread', 'threading.Thread', ([], {'target': 'saveFold.save_img', 'args': "(m, n, 'fold', repeat_time, 'relativeShift_v2')", 'name': '"""fold"""'}), "(target=saveFold.save_img, args=(m, n, 'fold', repeat_time,\n 'relativeShift_v2'), name='fold')\n", (44035, 44132), False, 'import threading\n'), ((44138, 44254), 'threading.Thread', 'threading.Thread', ([], {'target': 'saveCurve.save_img', 'args': "(m, n, 'curve', repeat_time, 'relativeShift_v2')", 'name': '"""curve"""'}), "(target=saveCurve.save_img, args=(m, n, 'curve',\n repeat_time, 'relativeShift_v2'), name='curve')\n", (44154, 44254), False, 'import threading\n'), ((45191, 45202), 'time.time', 'time.time', ([], {}), '()\n', (45200, 45202), False, 'import time\n'), ((45246, 45253), 'multiprocessing.Pool', 'Pool', (['(2)'], {}), '(2)\n', (45250, 45253), False, 'from multiprocessing import Pool\n'), ((46365, 46415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hyperparams"""'}), "(description='Hyperparams')\n", (46388, 46415), False, 'import argparse\n'), ((960, 1005), 'cv2.imread', 'cv2.imread', (['self.path'], {'flags': 'cv2.IMREAD_COLOR'}), '(self.path, flags=cv2.IMREAD_COLOR)\n', (970, 1005), False, 'import cv2\n'), ((1164, 1295), 'numpy.random.choice', 'np.random.choice', (['[2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 32 * 2, 40 * 2, 48 * 2]'], {'p': '[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]'}), '([2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 32 * 2, 40 * 2, 48 *\n 2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])\n', (1180, 1295), True, 'import numpy as np\n'), ((1930, 2059), 'numpy.random.choice', 'np.random.choice', (['[2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 28 * 2, 32 * 2, 48 * 2]'], {'p': '[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]'}), '([2 * 2, 4 * 2, 8 * 2, 16 * 2, 24 * 2, 28 * 2, 32 * 2, 48 *\n 2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])\n', (1946, 2059), True, 'import numpy as np\n'), ((2802, 2887), 'numpy.linspace', 'np.linspace', (['edge_padding', '(im_lr - edge_padding)', 'fiducial_points'], {'dtype': 'np.int64'}), '(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64\n )\n', (2813, 2887), True, 'import numpy as np\n'), ((2895, 2980), 'numpy.linspace', 'np.linspace', (['edge_padding', '(im_ud - edge_padding)', 'fiducial_points'], {'dtype': 'np.int64'}), '(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64\n )\n', (2906, 2980), True, 'import numpy as np\n'), ((3384, 3414), 'numpy.meshgrid', 'np.meshgrid', (['im_hight', 'im_wide'], {}), '(im_hight, im_wide)\n', (3395, 3414), True, 'import numpy as np\n'), ((3651, 3720), 'cv2.resize', 'cv2.resize', (['origin_img', '(im_ud, im_lr)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)\n', (3661, 3720), False, 'import cv2\n'), ((3851, 3904), 'cv2.imread', 'cv2.imread', (['perturbed_bg_img_'], {'flags': 'cv2.IMREAD_COLOR'}), '(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)\n', (3861, 3904), False, 'import cv2\n'), ((3981, 4067), 'numpy.full', 'np.full', (['(enlarge_img_shrink[0], enlarge_img_shrink[1], 3)', '(256)'], {'dtype': 'np.float32'}), '((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.\n float32)\n', (3988, 4067), True, 'import numpy as np\n'), ((4319, 4411), 'cv2.resize', 'cv2.resize', (['perturbed_bg_img', '(save_img_shape[1], save_img_shape[0])', 'cv2.INPAINT_TELEA'], {}), '(perturbed_bg_img, (save_img_shape[1], 
save_img_shape[0]), cv2.\n INPAINT_TELEA)\n', (4329, 4411), False, 'import cv2\n'), ((4682, 4733), 'numpy.zeros', 'np.zeros', (['(self.new_shape[0], self.new_shape[1], 2)'], {}), '((self.new_shape[0], self.new_shape[1], 2))\n', (4690, 4733), True, 'import numpy as np\n'), ((4897, 4948), 'numpy.zeros', 'np.zeros', (['(self.new_shape[0], self.new_shape[1], 2)'], {}), '((self.new_shape[0], self.new_shape[1], 2))\n', (4905, 4948), True, 'import numpy as np\n'), ((5199, 5272), 'random.randint', 'random.randint', (['(-enlarge_img_shrink[0] // 16)', '(enlarge_img_shrink[0] // 16)'], {}), '(-enlarge_img_shrink[0] // 16, enlarge_img_shrink[0] // 16)\n', (5213, 5272), False, 'import random\n'), ((5281, 5354), 'random.randint', 'random.randint', (['(-enlarge_img_shrink[1] // 16)', '(enlarge_img_shrink[1] // 16)'], {}), '(-enlarge_img_shrink[1] // 16, enlarge_img_shrink[1] // 16)\n', (5295, 5354), False, 'import random\n'), ((5803, 5841), 'numpy.full', 'np.full', (['mesh_shape', '(1)'], {'dtype': 'np.int16'}), '(mesh_shape, 1, dtype=np.int16)\n', (5810, 5841), True, 'import numpy as np\n'), ((5875, 5917), 'numpy.full', 'np.full', (['self.new_shape', '(0)'], {'dtype': 'np.int16'}), '(self.new_shape, 0, dtype=np.int16)\n', (5882, 5917), True, 'import numpy as np\n'), ((7126, 7173), 'numpy.full_like', 'np.full_like', (['self.synthesis_perturbed_img', '(256)'], {}), '(self.synthesis_perturbed_img, 256)\n', (7138, 7173), True, 'import numpy as np\n'), ((7296, 7341), 'numpy.zeros_like', 'np.zeros_like', (['self.synthesis_perturbed_label'], {}), '(self.synthesis_perturbed_label)\n', (7309, 7341), True, 'import numpy as np\n'), ((15095, 15207), 'numpy.float32', 'np.float32', (['[[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [\n x_max_per, y_max_per]]'], {}), '([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per,\n y_max_per], [x_max_per, y_max_per]])\n', (15105, 15207), True, 'import numpy as np\n'), ((19561, 19600), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (19588, 19600), False, 'import cv2\n'), ((19609, 19675), 'numpy.ones', 'np.ones', (['(self.new_shape[0], self.new_shape[1], 1)'], {'dtype': 'np.int16'}), '((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)\n', (19616, 19675), True, 'import numpy as np\n'), ((19685, 19717), 'numpy.dstack', 'np.dstack', (['(pixel_position, one)'], {}), '((pixel_position, one))\n', (19694, 19717), True, 'import numpy as np\n'), ((19892, 19909), 'numpy.dstack', 'np.dstack', (['(x, y)'], {}), '((x, y))\n', (19901, 19909), True, 'import numpy as np\n'), ((20734, 20781), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.perturbed_xy_', '(7, 7)', '(0)'], {}), '(self.perturbed_xy_, (7, 7), 0)\n', (20750, 20781), False, 'import cv2\n'), ((21612, 21636), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {}), '(self.new_shape)\n', (21620, 21636), True, 'import numpy as np\n'), ((37466, 37527), 'numpy.zeros_like', 'np.zeros_like', (['self.synthesis_perturbed_img'], {'dtype': 'np.float32'}), '(self.synthesis_perturbed_img, dtype=np.float32)\n', (37479, 37527), True, 'import numpy as np\n'), ((37959, 38020), 'numpy.concatenate', 'np.concatenate', (['(self.synthesis_perturbed_img, label)'], {'axis': '(2)'}), '((self.synthesis_perturbed_img, label), axis=2)\n', (37973, 38020), True, 'import numpy as np\n'), ((38057, 38115), 'numpy.zeros_like', 'np.zeros_like', (['synthesis_perturbed_color'], {'dtype': 'np.float32'}), '(synthesis_perturbed_color, dtype=np.float32)\n', (38070, 38115), True, 
'import numpy as np\n'), ((40461, 40592), 'cv2.imwrite', 'cv2.imwrite', (["(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png')", 'fiducial_points_synthesis_perturbed_img'], {}), "(self.save_path + 'fiducial_points/' + perfix_ + '_' +\n fold_curve + '.png', fiducial_points_synthesis_perturbed_img)\n", (40472, 40592), False, 'import cv2\n'), ((40592, 40712), 'cv2.imwrite', 'cv2.imwrite', (["(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png')", 'self.synthesis_perturbed_color[:, :, :3]'], {}), "(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png',\n self.synthesis_perturbed_color[:, :, :3])\n", (40603, 40712), False, 'import cv2\n'), ((40757, 40828), 'numpy.full', 'np.full', (['(save_img_shape[0], save_img_shape[1], 2)', '(0)'], {'dtype': 'np.float32'}), '((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)\n', (40764, 40828), True, 'import numpy as np\n'), ((40849, 40920), 'numpy.full', 'np.full', (['(save_img_shape[0], save_img_shape[1], 2)', '(0)'], {'dtype': 'np.float32'}), '((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)\n', (40856, 40920), True, 'import numpy as np\n'), ((42190, 42261), 'numpy.full', 'np.full', (['(save_img_shape[0], save_img_shape[1], 3)', '(0)'], {'dtype': 'np.float32'}), '((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)\n', (42197, 42261), True, 'import numpy as np\n'), ((42840, 42960), 'cv2.imwrite', 'cv2.imwrite', (["(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png')", 'self.synthesis_perturbed_color[:, :, :3]'], {}), "(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png',\n self.synthesis_perturbed_color[:, :, :3])\n", (42851, 42960), False, 'import cv2\n'), ((44612, 44648), 'os.path.exists', 'os.path.exists', (["(save_path + 'color/')"], {}), "(save_path + 'color/')\n", (44626, 44648), False, 'import sys, os\n'), ((44652, 44685), 'os.makedirs', 'os.makedirs', (["(save_path + 'color/')"], {}), "(save_path + 'color/')\n", (44663, 44685), False, 'import sys, os\n'), ((44695, 44741), 'os.path.exists', 'os.path.exists', (["(save_path + 'fiducial_points/')"], {}), "(save_path + 'fiducial_points/')\n", (44709, 44741), False, 'import sys, os\n'), ((44745, 44788), 'os.makedirs', 'os.makedirs', (["(save_path + 'fiducial_points/')"], {}), "(save_path + 'fiducial_points/')\n", (44756, 44788), False, 'import sys, os\n'), ((44798, 44832), 'os.path.exists', 'os.path.exists', (["(save_path + 'png/')"], {}), "(save_path + 'png/')\n", (44812, 44832), False, 'import sys, os\n'), ((44836, 44867), 'os.makedirs', 'os.makedirs', (["(save_path + 'png/')"], {}), "(save_path + 'png/')\n", (44847, 44867), False, 'import sys, os\n'), ((44877, 44912), 'os.path.exists', 'os.path.exists', (["(save_path + 'scan/')"], {}), "(save_path + 'scan/')\n", (44891, 44912), False, 'import sys, os\n'), ((44916, 44948), 'os.makedirs', 'os.makedirs', (["(save_path + 'scan/')"], {}), "(save_path + 'scan/')\n", (44927, 44948), False, 'import sys, os\n'), ((44958, 44996), 'os.path.exists', 'os.path.exists', (["(save_path + 'outputs/')"], {}), "(save_path + 'outputs/')\n", (44972, 44996), False, 'import sys, os\n'), ((45000, 45035), 'os.makedirs', 'os.makedirs', (["(save_path + 'outputs/')"], {}), "(save_path + 'outputs/')\n", (45011, 45035), False, 'import sys, os\n'), ((3801, 3829), 'random.choice', 'random.choice', (['perturbed_bg_'], {}), '(perturbed_bg_)\n', (3814, 3829), False, 'import random\n'), ((8301, 8330), 'random.choice', 'random.choice', (['linspace_x_seq'], {}), '(linspace_x_seq)\n', 
(8314, 8330), False, 'import random\n'), ((8340, 8369), 'random.choice', 'random.choice', (['linspace_y_seq'], {}), '(linspace_y_seq)\n', (8353, 8369), False, 'import random\n'), ((8681, 8710), 'random.choice', 'random.choice', (['linspace_x_seq'], {}), '(linspace_x_seq)\n', (8694, 8710), False, 'import random\n'), ((8720, 8749), 'random.choice', 'random.choice', (['linspace_y_seq'], {}), '(linspace_y_seq)\n', (8733, 8749), False, 'import random\n'), ((9801, 9829), 'numpy.linalg.norm', 'np.linalg.norm', (['perturbed_vp'], {}), '(perturbed_vp)\n', (9815, 9829), True, 'import numpy as np\n'), ((14858, 14880), 'random.randint', 'random.randint', (['(26)', '(36)'], {}), '(26, 36)\n', (14872, 14880), False, 'import random\n'), ((25705, 25752), 'numpy.full_like', 'np.full_like', (['self.synthesis_perturbed_img', '(256)'], {}), '(self.synthesis_perturbed_img, 256)\n', (25717, 25752), True, 'import numpy as np\n'), ((25789, 25834), 'numpy.zeros_like', 'np.zeros_like', (['self.synthesis_perturbed_label'], {}), '(self.synthesis_perturbed_label)\n', (25802, 25834), True, 'import numpy as np\n'), ((25868, 25910), 'numpy.zeros_like', 'np.zeros_like', (['self.foreORbackground_label'], {}), '(self.foreORbackground_label)\n', (25881, 25910), True, 'import numpy as np\n'), ((42800, 42832), 'numpy.array', 'np.array', (['(segment_x, segment_y)'], {}), '((segment_x, segment_y))\n', (42808, 42832), True, 'import numpy as np\n'), ((43062, 43100), 'pickle.dumps', 'pickle.dumps', (['synthesis_perturbed_data'], {}), '(synthesis_perturbed_data)\n', (43074, 43100), False, 'import pickle\n'), ((43554, 43565), 'time.time', 'time.time', ([], {}), '()\n', (43563, 43565), False, 'import time\n'), ((6996, 7019), 'random.randint', 'random.randint', (['(80)', '(160)'], {}), '(80, 160)\n', (7010, 7019), False, 'import random\n'), ((9871, 9921), 'numpy.dot', 'np.dot', (['(perturbed_p - pixel_position)', 'perturbed_vp'], {}), '(perturbed_p - pixel_position, perturbed_vp)\n', (9877, 9921), True, 'import numpy as np\n'), ((18765, 18798), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[1])'], {}), '(pts2[0] - pts2[1])\n', (18779, 18798), True, 'import numpy as np\n'), ((18807, 18840), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[2])'], {}), '(pts2[0] - pts2[2])\n', (18821, 18840), True, 'import numpy as np\n'), ((18849, 18882), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[1] - pts2[3])'], {}), '(pts2[1] - pts2[3])\n', (18863, 18882), True, 'import numpy as np\n'), ((18891, 18924), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[2] - pts2[3])'], {}), '(pts2[2] - pts2[3])\n', (18905, 18924), True, 'import numpy as np\n'), ((21005, 21016), 'numpy.abs', 'np.abs', (['wts'], {}), '(wts)\n', (21011, 21016), True, 'import numpy as np\n'), ((37399, 37438), 'numpy.around', 'np.around', (['self.synthesis_perturbed_img'], {}), '(self.synthesis_perturbed_img)\n', (37408, 37438), True, 'import numpy as np\n'), ((39584, 39643), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['synthesis_perturbed_img_filter', '(5, 5)', '(0)'], {}), '(synthesis_perturbed_img_filter, (5, 5), 0)\n', (39600, 39643), False, 'import cv2\n'), ((39690, 39749), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['synthesis_perturbed_img_filter', '(3, 3)', '(0)'], {}), '(synthesis_perturbed_img_filter, (3, 3), 0)\n', (39706, 39749), False, 'import cv2\n'), ((41082, 41123), 'numpy.zeros', 'np.zeros', (['save_img_shape'], {'dtype': 'np.uint32'}), '(save_img_shape, dtype=np.uint32)\n', (41090, 41123), True, 'import numpy as np\n'), ((41208, 41219), 'numpy.abs', 
'np.abs', (['wts'], {}), '(wts)\n', (41214, 41219), True, 'import numpy as np\n'), ((43977, 44000), 'numpy.random.normal', 'np.random.normal', (['(10)', '(3)'], {}), '(10, 3)\n', (43993, 44000), True, 'import numpy as np\n'), ((6675, 6698), 'random.randint', 'random.randint', (['(80)', '(120)'], {}), '(80, 120)\n', (6689, 6698), False, 'import random\n'), ((16071, 16104), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[1])'], {}), '(pts2[0] - pts2[1])\n', (16085, 16104), True, 'import numpy as np\n'), ((16114, 16147), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[2])'], {}), '(pts2[0] - pts2[2])\n', (16128, 16147), True, 'import numpy as np\n'), ((16157, 16190), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[1] - pts2[3])'], {}), '(pts2[1] - pts2[3])\n', (16171, 16190), True, 'import numpy as np\n'), ((16200, 16233), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[2] - pts2[3])'], {}), '(pts2[2] - pts2[3])\n', (16214, 16233), True, 'import numpy as np\n'), ((17423, 17456), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[1])'], {}), '(pts2[0] - pts2[1])\n', (17437, 17456), True, 'import numpy as np\n'), ((17466, 17499), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[0] - pts2[2])'], {}), '(pts2[0] - pts2[2])\n', (17480, 17499), True, 'import numpy as np\n'), ((17509, 17542), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[1] - pts2[3])'], {}), '(pts2[1] - pts2[3])\n', (17523, 17542), True, 'import numpy as np\n'), ((17552, 17585), 'numpy.linalg.norm', 'np.linalg.norm', (['(pts2[2] - pts2[3])'], {}), '(pts2[2] - pts2[3])\n', (17566, 17585), True, 'import numpy as np\n'), ((22807, 22849), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[x, :]'], {}), '(self.synthesis_perturbed_img[x, :])\n', (22813, 22849), True, 'import numpy as np\n'), ((23009, 23051), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[x, :]'], {}), '(self.synthesis_perturbed_img[x, :])\n', (23015, 23051), True, 'import numpy as np\n'), ((23189, 23231), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[:, y]'], {}), '(self.synthesis_perturbed_img[:, y])\n', (23195, 23231), True, 'import numpy as np\n'), ((23391, 23433), 'numpy.sum', 'np.sum', (['self.synthesis_perturbed_img[:, y]'], {}), '(self.synthesis_perturbed_img[:, y])\n', (23397, 23433), True, 'import numpy as np\n'), ((45382, 45411), 'random.choice', 'random.choice', (['all_bgImg_path'], {}), '(all_bgImg_path)\n', (45395, 45411), False, 'import random\n'), ((4446, 4483), 'numpy.zeros', 'np.zeros', (['mesh_shape'], {'dtype': 'np.uint32'}), '(mesh_shape, dtype=np.uint32)\n', (4454, 4483), True, 'import numpy as np\n'), ((4562, 4603), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {'dtype': 'np.uint32'}), '(self.new_shape, dtype=np.uint32)\n', (4570, 4603), True, 'import numpy as np\n'), ((6786, 6808), 'random.randint', 'random.randint', (['(50)', '(70)'], {}), '(50, 70)\n', (6800, 6808), False, 'import random\n'), ((6848, 6871), 'random.randint', 'random.randint', (['(70)', '(130)'], {}), '(70, 130)\n', (6862, 6871), False, 'import random\n'), ((7898, 7921), 'random.randint', 'random.randint', (['(80)', '(160)'], {}), '(80, 160)\n', (7912, 7921), False, 'import random\n'), ((8402, 8464), 'random.randint', 'random.randint', (['(linspace_x[r_x - 1] * 10)', '(linspace_x[r_x] * 10)'], {}), '(linspace_x[r_x - 1] * 10, linspace_x[r_x] * 10)\n', (8416, 8464), False, 'import random\n'), ((8469, 8531), 'random.randint', 'random.randint', (['(linspace_y[r_y - 1] * 10)', '(linspace_y[r_y] * 10)'], {}), '(linspace_y[r_y - 1] * 10, 
linspace_y[r_y] * 10)\n', (8483, 8531), False, 'import random\n'), ((8783, 8845), 'random.randint', 'random.randint', (['(linspace_x[r_x - 1] * 10)', '(linspace_x[r_x] * 10)'], {}), '(linspace_x[r_x - 1] * 10, linspace_x[r_x] * 10)\n', (8797, 8845), False, 'import random\n'), ((8850, 8912), 'random.randint', 'random.randint', (['(linspace_y[r_y - 1] * 10)', '(linspace_y[r_y] * 10)'], {}), '(linspace_y[r_y - 1] * 10, linspace_y[r_y] * 10)\n', (8864, 8912), False, 'import random\n'), ((12912, 12990), 'numpy.array', 'np.array', (['[omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]'], {}), '([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]])\n', (12920, 12990), True, 'import numpy as np\n'), ((20547, 20588), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {'dtype': 'np.uint32'}), '(self.new_shape, dtype=np.uint32)\n', (20555, 20588), True, 'import numpy as np\n'), ((30532, 30573), 'numpy.zeros', 'np.zeros', (['self.new_shape'], {'dtype': 'np.uint32'}), '(self.new_shape, dtype=np.uint32)\n', (30540, 30573), True, 'import numpy as np\n'), ((40381, 40404), 'math.ceil', 'math.ceil', (['(stepSize / 2)'], {}), '(stepSize / 2)\n', (40390, 40404), False, 'import math\n'), ((40413, 40436), 'math.ceil', 'math.ceil', (['(stepSize / 2)'], {}), '(stepSize / 2)\n', (40422, 40436), False, 'import math\n'), ((7652, 7675), 'random.randint', 'random.randint', (['(80)', '(120)'], {}), '(80, 120)\n', (7666, 7675), False, 'import random\n'), ((10370, 10399), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (10384, 10399), False, 'import random\n'), ((10407, 10436), 'random.randint', 'random.randint', (['(-10000)', '(10000)'], {}), '(-10000, 10000)\n', (10421, 10436), False, 'import random\n'), ((10793, 10820), 'random.randint', 'random.randint', (['(-8000)', '(8000)'], {}), '(-8000, 8000)\n', (10807, 10820), False, 'import random\n'), ((10828, 10855), 'random.randint', 'random.randint', (['(-8000)', '(8000)'], {}), '(-8000, 8000)\n', (10842, 10855), False, 'import random\n'), ((11298, 11340), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11304, 11340), True, 'import numpy as np\n'), ((11342, 11362), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (11356, 11362), False, 'import random\n'), ((11439, 11481), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11445, 11481), True, 'import numpy as np\n'), ((11790, 11832), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11796, 11832), True, 'import numpy as np\n'), ((11834, 11854), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (11848, 11854), False, 'import random\n'), ((11931, 11973), 'numpy.abs', 'np.abs', (['perturbed_distance_vertex_and_line'], {}), '(perturbed_distance_vertex_and_line)\n', (11937, 11973), True, 'import numpy as np\n'), ((38244, 38259), 'random.random', 'random.random', ([], {}), '()\n', (38257, 38259), False, 'import random\n'), ((38398, 38413), 'random.random', 'random.random', ([], {}), '()\n', (38411, 38413), False, 'import random\n'), ((7769, 7791), 'random.randint', 'random.randint', (['(50)', '(70)'], {}), '(50, 70)\n', (7783, 7791), False, 'import random\n'), ((7835, 7858), 'random.randint', 'random.randint', (['(70)', '(130)'], {}), '(70, 130)\n', (7849, 7858), False, 'import random\n'), ((45628, 45651), 'numpy.random.normal', 
'np.random.normal', (['(12)', '(4)'], {}), '(12, 4)\n', (45644, 45651), True, 'import numpy as np\n'), ((45962, 45984), 'numpy.random.normal', 'np.random.normal', (['(8)', '(4)'], {}), '(8, 4)\n', (45978, 45984), True, 'import numpy as np\n'), ((15589, 15604), 'random.random', 'random.random', ([], {}), '()\n', (15602, 15604), False, 'import random\n'), ((15644, 15659), 'random.random', 'random.random', ([], {}), '()\n', (15657, 15659), False, 'import random\n'), ((15710, 15725), 'random.random', 'random.random', ([], {}), '()\n', (15723, 15725), False, 'import random\n'), ((15765, 15780), 'random.random', 'random.random', ([], {}), '()\n', (15778, 15780), False, 'import random\n'), ((15831, 15846), 'random.random', 'random.random', ([], {}), '()\n', (15844, 15846), False, 'import random\n'), ((15886, 15901), 'random.random', 'random.random', ([], {}), '()\n', (15899, 15901), False, 'import random\n'), ((15952, 15967), 'random.random', 'random.random', ([], {}), '()\n', (15965, 15967), False, 'import random\n'), ((16007, 16022), 'random.random', 'random.random', ([], {}), '()\n', (16020, 16022), False, 'import random\n'), ((16949, 16964), 'random.random', 'random.random', ([], {}), '()\n', (16962, 16964), False, 'import random\n'), ((17004, 17019), 'random.random', 'random.random', ([], {}), '()\n', (17017, 17019), False, 'import random\n'), ((17070, 17085), 'random.random', 'random.random', ([], {}), '()\n', (17083, 17085), False, 'import random\n'), ((17125, 17140), 'random.random', 'random.random', ([], {}), '()\n', (17138, 17140), False, 'import random\n'), ((17191, 17206), 'random.random', 'random.random', ([], {}), '()\n', (17204, 17206), False, 'import random\n'), ((17246, 17261), 'random.random', 'random.random', ([], {}), '()\n', (17259, 17261), False, 'import random\n'), ((17312, 17327), 'random.random', 'random.random', ([], {}), '()\n', (17325, 17327), False, 'import random\n'), ((17367, 17382), 'random.random', 'random.random', ([], {}), '()\n', (17380, 17382), False, 'import random\n'), ((18290, 18305), 'random.random', 'random.random', ([], {}), '()\n', (18303, 18305), False, 'import random\n'), ((18345, 18360), 'random.random', 'random.random', ([], {}), '()\n', (18358, 18360), False, 'import random\n'), ((18411, 18426), 'random.random', 'random.random', ([], {}), '()\n', (18424, 18426), False, 'import random\n'), ((18466, 18481), 'random.random', 'random.random', ([], {}), '()\n', (18479, 18481), False, 'import random\n'), ((18532, 18547), 'random.random', 'random.random', ([], {}), '()\n', (18545, 18547), False, 'import random\n'), ((18587, 18602), 'random.random', 'random.random', ([], {}), '()\n', (18600, 18602), False, 'import random\n'), ((18653, 18668), 'random.random', 'random.random', ([], {}), '()\n', (18666, 18668), False, 'import random\n'), ((18708, 18723), 'random.random', 'random.random', ([], {}), '()\n', (18721, 18723), False, 'import random\n'), ((12679, 12711), 'numpy.linalg.norm', 'np.linalg.norm', (['(perturbed_v // 2)'], {}), '(perturbed_v // 2)\n', (12693, 12711), True, 'import numpy as np\n'), ((12725, 12740), 'random.random', 'random.random', ([], {}), '()\n', (12738, 12740), False, 'import random\n'), ((12750, 12765), 'random.random', 'random.random', ([], {}), '()\n', (12763, 12765), False, 'import random\n'), ((12775, 12790), 'random.random', 'random.random', ([], {}), '()\n', (12788, 12790), False, 'import random\n')]
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of D, a hidden layer dimension of H, and performs classification over C
classes.
We train the network with a softmax loss function and L2 regularization on
the weight matrices. The network uses a ReLU nonlinearity after the first
fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values
and biases are initialized to zero. Weights and biases are stored in
the variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it
is passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#######################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
#######################################################################
scores1 = X.dot(W1) + b1 # FC1
X2 = np.maximum(0, scores1) # ReLU FC1
scores = X2.dot(W2) + b2 # FC2
#######################################################################
# END OF YOUR CODE #
#######################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
scores -= np.max(scores) # shift by the max score for numerical stability
scores_exp = np.exp(scores)
probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)
# Compute the loss
loss = None
#######################################################################
# TODO: Finish the forward pass, and compute the loss. This should #
# include both the data loss and L2 regularization for W1 and W2. #
# Store the result in the variable loss, which should be a scalar. Use#
# the Softmax classifier loss. #
#######################################################################
correct_probs = -np.log(probs[np.arange(N), y])
# L_i = -log(e^{s_{y_i}} / sum_j e^{s_j}) = -log(correct_probs[i])
loss = np.sum(correct_probs)
loss /= N
# L2 regularization WRT W1 and W2
loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
#######################################################################
# END OF YOUR CODE #
#######################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# gradient of loss_i with respect to scores_k:
# dL_i/ds_k = probs_k - 1(y_i == k)
# i.e. the gradient is the predicted probability for "other" classes and
# probability - 1 for the target class
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1
d_scores /= N
# W2 multiplies X2, so by the chain rule the gradient w.r.t. W2 is
# X2^T times the upstream gradient d_scores
d_W2 = X2.T.dot(d_scores)
# b2 was added, so its local derivative is 1; by the chain rule we simply
# sum the upstream gradient d_scores over the batch dimension
d_b2 = np.sum(d_scores, axis=0)
# W1 is upstream of X2, so we continue this way
d_X2 = d_scores.dot(W2.T)
# ReLU derivative is 1 for > 0, else 0
d_scores1 = d_X2 * (scores1 > 0)
d_W1 = X.T.dot(d_scores1)
# b1 gradient
d_b1 = d_scores1.sum(axis=0)
# regularization gradient: d(reg * W**2)/dW = 2 * reg * W
d_W2 += reg * 2 * W2
d_W1 += reg * 2 * W1
grads['W1'] = d_W1
grads['b1'] = d_b1
grads['W2'] = d_W2
grads['b2'] = d_b2
#######################################################################
# END OF YOUR CODE #
#######################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning
rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
###################################################################
# TODO: Create a random minibatch of training data and labels, #
# storing them in X_batch and y_batch respectively. #
###################################################################
# random indexes to sample training data/labels
sample_idx = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[sample_idx]
y_batch = y[sample_idx]
###################################################################
# END OF YOUR CODE #
###################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
###################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params)#
# using stochastic gradient descent. You'll need to use the #
# gradients stored in the grads dictionary defined above. #
###################################################################
# For each weight in network parameters, update it with the
# corresponding calculated gradient
for key in self.params:
self.params[key] -= learning_rate * grads[key]
###################################################################
# END OF YOUR CODE #
###################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points
to classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each
of the elements of X. For all i, y_pred[i] = c means that X[i] is
predicted to have class c, where 0 <= c < C.
"""
y_pred = None
#######################################################################
# TODO: Implement this function; it should be VERY simple! #
#######################################################################
y_pred = np.argmax(self.loss(X), axis=1) # loss() returns raw class scores when y is None
#######################################################################
# END OF YOUR CODE #
#######################################################################
return y_pred
|
[
"numpy.random.choice",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.maximum",
"numpy.random.randn",
"numpy.arange"
] |
[((1558, 1579), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1566, 1579), True, 'import numpy as np\n'), ((1684, 1705), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (1692, 1705), True, 'import numpy as np\n'), ((3464, 3486), 'numpy.maximum', 'np.maximum', (['(0)', 'scores1'], {}), '(0, scores1)\n', (3474, 3486), True, 'import numpy as np\n'), ((3914, 3928), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (3920, 3928), True, 'import numpy as np\n'), ((3976, 3990), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (3982, 3990), True, 'import numpy as np\n'), ((4746, 4767), 'numpy.sum', 'np.sum', (['correct_probs'], {}), '(correct_probs)\n', (4752, 4767), True, 'import numpy as np\n'), ((6231, 6255), 'numpy.sum', 'np.sum', (['d_scores'], {'axis': '(0)'}), '(d_scores, axis=0)\n', (6237, 6255), True, 'import numpy as np\n'), ((1489, 1529), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1504, 1529), True, 'import numpy as np\n'), ((1614, 1655), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1629, 1655), True, 'import numpy as np\n'), ((4020, 4061), 'numpy.sum', 'np.sum', (['scores_exp'], {'axis': '(1)', 'keepdims': '(True)'}), '(scores_exp, axis=1, keepdims=True)\n', (4026, 4061), True, 'import numpy as np\n'), ((8825, 8878), 'numpy.random.choice', 'np.random.choice', (['num_train', 'batch_size'], {'replace': '(True)'}), '(num_train, batch_size, replace=True)\n', (8841, 8878), True, 'import numpy as np\n'), ((4851, 4866), 'numpy.sum', 'np.sum', (['(W1 * W1)'], {}), '(W1 * W1)\n', (4857, 4866), True, 'import numpy as np\n'), ((4869, 4884), 'numpy.sum', 'np.sum', (['(W2 * W2)'], {}), '(W2 * W2)\n', (4875, 4884), True, 'import numpy as np\n'), ((5864, 5876), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (5873, 5876), True, 'import numpy as np\n'), ((4637, 4649), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4646, 4649), True, 'import numpy as np\n')]
|
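A brief standalone sketch of the argmax-over-scores rule that the predict method above implements; the scores array and true labels below are made up purely for illustration and are not part of the original snippet.
import numpy as np
# scores has shape (N, C); the predicted label for each row is the column with
# the largest score, exactly as in predict above.
scores = np.array([[1.2, -0.3, 0.5],
                   [0.1,  2.0, 1.9]])
y_pred = np.argmax(scores, axis=1)                # -> array([0, 1])
accuracy = (y_pred == np.array([0, 2])).mean()     # -> 0.5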
import numpy as np
from scipy import ndimage
def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
unique_values = list(np.unique(array))
all_entries_to_keep = np.zeros(shape=array.shape, dtype=np.bool)
for unique_value in unique_values:
entries_of_this_value = array == unique_value
if unique_value in values_to_ignore:
all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
else:
eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
result = array * all_entries_to_keep
if new_value != 0:
eroded_entries = np.logical_not(all_entries_to_keep)
new_values = new_value * eroded_entries
result += new_values
return result
|
[
"numpy.unique",
"scipy.ndimage.binary_erosion",
"numpy.logical_not",
"numpy.logical_or",
"numpy.zeros"
] |
[((194, 236), 'numpy.zeros', 'np.zeros', ([], {'shape': 'array.shape', 'dtype': 'np.bool'}), '(shape=array.shape, dtype=np.bool)\n', (202, 236), True, 'import numpy as np\n'), ((150, 166), 'numpy.unique', 'np.unique', (['array'], {}), '(array)\n', (159, 166), True, 'import numpy as np\n'), ((766, 801), 'numpy.logical_not', 'np.logical_not', (['all_entries_to_keep'], {}), '(all_entries_to_keep)\n', (780, 801), True, 'import numpy as np\n'), ((409, 466), 'numpy.logical_or', 'np.logical_or', (['entries_of_this_value', 'all_entries_to_keep'], {}), '(entries_of_this_value, all_entries_to_keep)\n', (422, 466), True, 'import numpy as np\n'), ((519, 582), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['entries_of_this_value'], {'iterations': 'steps'}), '(entries_of_this_value, iterations=steps)\n', (541, 582), False, 'from scipy import ndimage\n'), ((617, 676), 'numpy.logical_or', 'np.logical_or', (['eroded_unique_indicator', 'all_entries_to_keep'], {}), '(eroded_unique_indicator, all_entries_to_keep)\n', (630, 676), True, 'import numpy as np\n')]
|
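A short usage sketch for erode_value_blobs above, on a made-up 5x5 label array; note that dtype=np.bool in the function needs to be plain bool on NumPy >= 1.24, where the np.bool alias was removed.
import numpy as np
labels = np.zeros((5, 5), dtype=int)
labels[1:4, 1:4] = 7                      # a single 3x3 blob of label 7
out = erode_value_blobs(labels, steps=1, values_to_ignore=(0,), new_value=-1)
# With the default cross-shaped structuring element, one erosion step keeps 7
# only at the blob centre (2, 2); the eight eroded blob pixels become -1, and
# the background stays 0 because value 0 was ignored.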
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ...extern import six
from ...extern.six.moves import zip
import pickle
import itertools
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
class TestLogUnitCreation(object):
def test_logarithmic_units(self):
"""Check logarithmic units are set up correctly."""
assert u.dB.to(u.dex) == 0.1
assert u.dex.to(u.mag) == -2.5
assert u.mag.to(u.dB) == -4
@pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
def test_callable_units(self, lu_unit, lu_cls):
assert isinstance(lu_unit, u.UnitBase)
assert callable(lu_unit)
assert lu_unit._function_unit_class is lu_cls
@pytest.mark.parametrize('lu_unit', lu_units)
def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
lu = lu_unit()
assert lu == lu._default_function_unit # eg, MagUnit() == u.mag
assert lu._default_function_unit == lu # and u.mag == MagUnit()
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_call_units(self, lu_unit, physical_unit):
"""Create a LogUnit subclass using the callable unit and physical unit,
and do basic check that output is right."""
lu1 = lu_unit(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
def test_call_invalid_unit(self):
with pytest.raises(TypeError):
u.mag([])
with pytest.raises(ValueError):
u.mag(u.mag())
@pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
lu_subclasses + [u.LogUnit], pu_sample))
def test_subclass_creation(self, lu_cls, physical_unit):
"""Create a LogUnit subclass object for given physical unit,
and do basic check that output is right."""
lu1 = lu_cls(physical_unit)
assert lu1.physical_unit == physical_unit
assert lu1.function_unit == lu1._default_function_unit
lu2 = lu_cls(physical_unit,
function_unit=2*lu1._default_function_unit)
assert lu2.physical_unit == physical_unit
assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)
with pytest.raises(ValueError):
lu_cls(physical_unit, u.m)
def test_predefined_magnitudes():
assert_quantity_allclose((-21.1*u.STmag).physical,
1.*u.erg/u.cm**2/u.s/u.AA)
assert_quantity_allclose((-48.6*u.ABmag).physical,
1.*u.erg/u.cm**2/u.s/u.Hz)
assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
assert_quantity_allclose((0*u.m_bol).physical,
c.L_bol0/(4.*np.pi*(10.*c.pc)**2))
def test_predefined_reinitialisation():
assert u.mag('ST') == u.STmag
assert u.mag('AB') == u.ABmag
assert u.mag('Bol') == u.M_bol
assert u.mag('bol') == u.m_bol
def test_predefined_string_roundtrip():
"""Ensure roundtripping; see #5015"""
with u.magnitude_zero_points.enable():
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regresssion for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
class TestLogUnitStrings(object):
def test_str(self):
"""Do some spot checks that str, repr, etc. work as expected."""
lu1 = u.mag(u.Jy)
assert str(lu1) == 'mag(Jy)'
assert repr(lu1) == 'Unit("mag(Jy)")'
assert lu1.to_string('generic') == 'mag(Jy)'
with pytest.raises(ValueError):
lu1.to_string('fits')
lu2 = u.dex()
assert str(lu2) == 'dex'
assert repr(lu2) == 'Unit("dex(1)")'
assert lu2.to_string() == 'dex(1)'
lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
assert str(lu3) == '2 mag(Jy)'
assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
assert lu3.to_string() == '2 mag(Jy)'
lu4 = u.mag(u.ct)
assert lu4.to_string('generic') == 'mag(ct)'
assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( '
'\\mathrm{ct} \\right)}$')
assert lu4._repr_latex_() == lu4.to_string('latex')
class TestLogUnitConversion(object):
@pytest.mark.parametrize('lu_unit, physical_unit',
itertools.product(lu_units, pu_sample))
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.) == 1.
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.) == 0.
pu = u.Unit(8.*physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)
# Check we round-trip.
value = np.linspace(0., 10., 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize('lu_unit', lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0., 10., 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize(
'flu_unit, tlu_unit, physical_unit',
itertools.product(lu_units, lu_units, pu_sample))
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0., 10., 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(flu.to(tlu, values),
values * flu.function_unit.to(tlu.function_unit))
tlu2 = tlu_unit(u.Unit(100.*physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
class TestLogUnitArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
        # For completeness, also ensure nonsensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogUnits to some power is only possible when the
physical unit is dimensionless, and that conversion is turned off when
the resulting logarithmic unit (such as mag**2) is incompatible."""
lu1 = u.mag(u.Jy)
if power == 0:
assert lu1 ** power == u.dimensionless_unscaled
elif power == 1:
assert lu1 ** power == lu1
else:
with pytest.raises(u.UnitsError):
lu1 ** power
# With dimensionless, though, it works, but returns a normal unit.
lu2 = u.mag(u.dimensionless_unscaled)
t = lu2**power
if power == 0:
assert t == u.dimensionless_unscaled
elif power == 1:
assert t == lu2
else:
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit**power
# also check we roundtrip
t2 = t**(1./power)
assert t2 == lu2.function_unit
with u.set_enabled_equivalencies(u.logarithmic()):
assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)),
lu2.to(lu2.physical_unit, np.arange(3.)))
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 + other
with pytest.raises(u.UnitsError):
lu1 - other
with pytest.raises(u.UnitsError):
other - lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
@pytest.mark.parametrize(
'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check physical units are changed appropriately"""
lu1 = u.mag(u.Jy)
other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled)
lu_sf = lu1 + other
assert lu_sf.is_equivalent(lu1.physical_unit * other_pu)
lu_sr = other + lu1
assert lu_sr.is_equivalent(lu1.physical_unit * other_pu)
lu_df = lu1 - other
assert lu_df.is_equivalent(lu1.physical_unit / other_pu)
lu_dr = other - lu1
assert lu_dr.is_equivalent(other_pu / lu1.physical_unit)
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
def test_neg_pos(self):
lu1 = u.mag(u.Jy)
neg_lu = -lu1
assert neg_lu != lu1
assert neg_lu.physical_unit == u.Jy**-1
assert -neg_lu == lu1
pos_lu = +lu1
assert pos_lu is not lu1
assert pos_lu == lu1
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation(object):
@pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity],
lu_subclasses + [u.LogUnit]))
def test_logarithmic_quantities(self, lq, lu):
"""Check logarithmic quantities are all set up correctly"""
assert lq._unit_class == lu
assert type(lu()._quantity_class(1.)) is lq
@pytest.mark.parametrize('lq_cls, physical_unit',
itertools.product(lq_subclasses, pu_sample))
def test_subclass_creation(self, lq_cls, physical_unit):
"""Create LogQuantity subclass objects for some physical units,
and basic check on transformations"""
value = np.arange(1., 10.)
log_q = lq_cls(value * physical_unit)
assert log_q.unit.physical_unit == physical_unit
assert log_q.unit.function_unit == log_q.unit._default_function_unit
assert_allclose(log_q.physical.value, value)
with pytest.raises(ValueError):
lq_cls(value, physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m),
u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_different_units(self, unit):
q = u.Magnitude(1.23, unit)
assert q.unit.function_unit == getattr(unit, 'function_unit', unit)
assert q.unit.physical_unit is getattr(unit, 'physical_unit',
u.dimensionless_unscaled)
@pytest.mark.parametrize('value, unit', (
(1.*u.mag(u.Jy), None),
(1.*u.dex(u.Jy), None),
(1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)),
(1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy))))
def test_function_values(self, value, unit):
lq = u.Magnitude(value, unit)
assert lq == value
assert lq.unit.function_unit == u.mag
assert lq.unit.physical_unit == getattr(unit, 'physical_unit',
value.unit.physical_unit)
@pytest.mark.parametrize(
'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag),
u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag)))
def test_indirect_creation(self, unit):
q1 = 2.5 * unit
assert isinstance(q1, u.Magnitude)
assert q1.value == 2.5
assert q1.unit == unit
pv = 100. * unit.physical_unit
q2 = unit * pv
assert q2.unit == unit
assert q2.unit.physical_unit == pv.unit
assert q2.to_value(unit.physical_unit) == 100.
assert (q2._function_view / u.mag).to_value(1) == -5.
q3 = unit / 0.4
assert q3 == q1
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews(object):
def setup(self):
self.lq = u.Magnitude(np.arange(10.) * u.Jy)
self.lq2 = u.Magnitude(np.arange(5.))
def test_value_view(self):
lq_value = self.lq.value
assert type(lq_value) is np.ndarray
lq_value[2] = -1.
assert np.all(self.lq.value == lq_value)
def test_function_view(self):
lq_fv = self.lq._function_view
assert type(lq_fv) is u.Quantity
assert lq_fv.unit is self.lq.unit.function_unit
lq_fv[3] = -2. * lq_fv.unit
assert np.all(self.lq.value == lq_fv.value)
def test_quantity_view(self):
# Cannot view as Quantity, since the unit cannot be represented.
with pytest.raises(TypeError):
self.lq.view(u.Quantity)
# But a dimensionless one is fine.
q2 = self.lq2.view(u.Quantity)
assert q2.unit is u.mag
assert np.all(q2.value == self.lq2.value)
lq3 = q2.view(u.Magnitude)
assert type(lq3.unit) is u.MagUnit
assert lq3.unit.physical_unit == u.dimensionless_unscaled
assert np.all(lq3 == self.lq2)
class TestLogQuantitySlicing(object):
def test_item_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy)
assert lq1[9] == u.Magnitude(10.*u.Jy)
lq1[2] = 100.*u.Jy
assert lq1[2] == u.Magnitude(100.*u.Jy)
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2] = u.Magnitude(100.*u.m)
assert lq1[2] == u.Magnitude(100.*u.Jy)
def test_slice_get_and_set(self):
lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy)
lq1[2:4] = 100.*u.Jy
assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy))
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.m
with pytest.raises(u.UnitsError):
lq1[2:4] = 100.*u.mag
with pytest.raises(u.UnitsError):
lq1[2:4] = u.Magnitude(100.*u.m)
assert np.all(lq1[2] == u.Magnitude(100.*u.Jy))
class TestLogQuantityArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
@pytest.mark.parametrize('power', (2, 0.5, 1, 0))
def test_raise_to_power(self, power):
"""Check that raising LogQuantities to some power is only possible when
the physical unit is dimensionless, and that conversion is turned off
when the resulting logarithmic unit (say, mag**2) is incompatible."""
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
if power == 0:
assert np.all(lq ** power == 1.)
elif power == 1:
assert np.all(lq ** power == lq)
else:
with pytest.raises(u.UnitsError):
lq ** power
# with dimensionless, it works, but falls back to normal quantity
# (except for power=1)
lq2 = u.Magnitude(np.arange(10.))
t = lq2**power
if power == 0:
assert t.unit is u.dimensionless_unscaled
assert np.all(t.value == 1.)
elif power == 1:
assert np.all(t == lq2)
else:
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit ** power
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(u.dimensionless_unscaled)
def test_error_on_lq_as_power(self):
lq = u.Magnitude(np.arange(1., 4.)*u.Jy)
with pytest.raises(TypeError):
lq ** lq
@pytest.mark.parametrize('other', pu_sample)
def test_addition_subtraction_to_normal_units_fails(self, other):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
q = 1.23 * other
with pytest.raises(u.UnitsError):
lq + q
with pytest.raises(u.UnitsError):
lq - q
with pytest.raises(u.UnitsError):
q - lq
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_addition_subtraction(self, other):
"""Check that addition/subtraction with quantities with magnitude or
MagUnit units works, and that it changes the physical units
appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq + other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_sr = other + lq
assert_allclose(lq_sr.physical, lq.physical * other_physical)
lq_df = lq - other
assert_allclose(lq_df.physical, lq.physical / other_physical)
lq_dr = other - lq
assert_allclose(lq_dr.physical, other_physical / lq.physical)
@pytest.mark.parametrize('other', pu_sample)
def test_inplace_addition_subtraction_unit_checks(self, other):
lu1 = u.mag(u.Jy)
lq1 = u.Magnitude(np.arange(1., 10.), lu1)
with pytest.raises(u.UnitsError):
lq1 += other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
with pytest.raises(u.UnitsError):
lq1 -= other
assert np.all(lq1.value == np.arange(1., 10.))
assert lq1.unit == lu1
@pytest.mark.parametrize(
'other', (1.23 * u.mag, 2.34 * u.mag(),
u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m),
5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag)))
def test_inplace_addition_subtraction(self, other):
"""Check that inplace addition/subtraction with quantities with
magnitude or MagUnit units works, and that it changes the physical
units appropriately."""
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
other_physical = other.to(getattr(other.unit, 'physical_unit',
u.dimensionless_unscaled),
equivalencies=u.logarithmic())
lq_sf = lq.copy()
lq_sf += other
assert_allclose(lq_sf.physical, lq.physical * other_physical)
lq_df = lq.copy()
lq_df -= other
assert_allclose(lq_df.physical, lq.physical / other_physical)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
class TestLogQuantityComparisons(object):
def test_comparison_to_non_quantities_fails(self):
lq = u.Magnitude(np.arange(1., 10.)*u.Jy)
# On python2, ordering operations always succeed, given essentially
# meaningless results.
if not six.PY2:
with pytest.raises(TypeError):
lq > 'a'
assert not (lq == 'a')
assert lq != 'a'
def test_comparison(self):
lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy)
lq2 = u.Magnitude(2.*u.Jy)
assert np.all((lq1 > lq2) == np.array([True, False, False]))
assert np.all((lq1 == lq2) == np.array([False, True, False]))
lq3 = u.Dex(2.*u.Jy)
assert np.all((lq1 > lq3) == np.array([True, False, False]))
assert np.all((lq1 == lq3) == np.array([False, True, False]))
lq4 = u.Magnitude(2.*u.m)
assert not (lq1 == lq4)
assert lq1 != lq4
with pytest.raises(u.UnitsError):
lq1 < lq4
q5 = 1.5 * u.Jy
assert np.all((lq1 > q5) == np.array([True, False, False]))
assert np.all((q5 < lq1) == np.array([True, False, False]))
with pytest.raises(u.UnitsError):
lq1 >= 2.*u.m
with pytest.raises(u.UnitsError):
lq1 <= lq1.value * u.mag
# For physically dimensionless, we can compare with the function unit.
lq6 = u.Magnitude(np.arange(1., 4.))
fv6 = lq6.value * u.mag
assert np.all(lq6 == fv6)
# but not some arbitrary unit, of course.
with pytest.raises(u.UnitsError):
lq6 < 2.*u.m
class TestLogQuantityMethods(object):
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
@pytest.mark.parametrize('method', ('mean', 'min', 'max', 'round', 'trace',
'std', 'var', 'ptp', 'diff', 'ediff1d'))
def test_always_ok(self, method):
for mag in self.mags:
res = getattr(mag, method)()
assert np.all(res.value ==
getattr(mag._function_view, method)().value)
if method in ('std', 'ptp', 'diff', 'ediff1d'):
assert res.unit == u.mag()
elif method == 'var':
assert res.unit == u.mag**2
else:
assert res.unit == mag.unit
def test_clip(self):
for mag in self.mags:
assert np.all(mag.clip(2. * mag.unit, 4. * mag.unit).value ==
mag.value.clip(2., 4.))
@pytest.mark.parametrize('method', ('sum', 'cumsum', 'nansum'))
def test_only_ok_if_dimensionless(self, method):
res = getattr(self.m1, method)()
assert np.all(res.value ==
getattr(self.m1._function_view, method)().value)
assert res.unit == self.m1.unit
with pytest.raises(TypeError):
getattr(self.mJy, method)()
def test_dot(self):
assert np.all(self.m1.dot(self.m1).value ==
self.m1.value.dot(self.m1.value))
@pytest.mark.parametrize('method', ('prod', 'cumprod'))
def test_never_ok(self, method):
with pytest.raises(ValueError):
getattr(self.mJy, method)()
with pytest.raises(ValueError):
getattr(self.m1, method)()
class TestLogQuantityUfuncs(object):
"""Spot checks on ufuncs."""
def setup(self):
self.mJy = np.arange(1., 5.).reshape(2, 2) * u.mag(u.Jy)
self.m1 = np.arange(1., 5.5, 0.5).reshape(3, 3) * u.mag()
self.mags = (self.mJy, self.m1)
def test_power(self):
assert np.all(np.power(self.mJy, 0.) == 1.)
assert np.all(np.power(self.m1, 1.) == self.m1)
assert np.all(np.power(self.mJy, 1.) == self.mJy)
assert np.all(np.power(self.m1, 2.) == self.m1 ** 2)
with pytest.raises(u.UnitsError):
np.power(self.mJy, 2.)
def test_not_implemented_with_physical_unit(self):
with pytest.raises(u.UnitsError):
np.square(self.mJy)
assert np.all(np.square(self.m1) == self.m1 ** 2)
|
[
"numpy.abs",
"numpy.power",
"pickle.dumps",
"itertools.product",
"numpy.square",
"pytest.mark.parametrize",
"numpy.linspace",
"numpy.array",
"pytest.raises",
"numpy.testing.utils.assert_allclose",
"pickle.loads",
"numpy.all",
"numpy.arange"
] |
[((1241, 1285), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lu_unit"""', 'lu_units'], {}), "('lu_unit', lu_units)\n", (1264, 1285), False, 'import pytest\n'), ((6355, 6399), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lu_unit"""', 'lu_units'], {}), "('lu_unit', lu_units)\n", (6378, 6399), False, 'import pytest\n'), ((11055, 11103), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""power"""', '(2, 0.5, 1, 0)'], {}), "('power', (2, 0.5, 1, 0))\n", (11078, 11103), False, 'import pytest\n'), ((12360, 12403), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other"""', 'pu_sample'], {}), "('other', pu_sample)\n", (12383, 12403), False, 'import pytest\n'), ((14287, 14304), 'pickle.dumps', 'pickle.dumps', (['lu1'], {}), '(lu1)\n', (14299, 14304), False, 'import pickle\n'), ((14315, 14330), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (14327, 14330), False, 'import pickle\n'), ((18491, 18538), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['mst_roundtrip.value', 'mst.value'], {}), '(mst_roundtrip.value, mst.value)\n', (18506, 18538), False, 'from numpy.testing.utils import assert_allclose\n'), ((18815, 18863), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['mst_roundtrip2.value', 'mst.value'], {}), '(mst_roundtrip2.value, mst.value)\n', (18830, 18863), False, 'from numpy.testing.utils import assert_allclose\n'), ((23059, 23107), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""power"""', '(2, 0.5, 1, 0)'], {}), "('power', (2, 0.5, 1, 0))\n", (23082, 23107), False, 'import pytest\n'), ((24457, 24500), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other"""', 'pu_sample'], {}), "('other', pu_sample)\n", (24480, 24500), False, 'import pytest\n'), ((25923, 25966), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""other"""', 'pu_sample'], {}), "('other', pu_sample)\n", (25946, 25966), False, 'import pytest\n'), ((29671, 29790), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('mean', 'min', 'max', 'round', 'trace', 'std', 'var', 'ptp', 'diff', 'ediff1d'\n )"], {}), "('method', ('mean', 'min', 'max', 'round', 'trace',\n 'std', 'var', 'ptp', 'diff', 'ediff1d'))\n", (29694, 29790), False, 'import pytest\n'), ((30475, 30537), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('sum', 'cumsum', 'nansum')"], {}), "('method', ('sum', 'cumsum', 'nansum'))\n", (30498, 30537), False, 'import pytest\n'), ((30996, 31050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('prod', 'cumprod')"], {}), "('method', ('prod', 'cumprod'))\n", (31019, 31050), False, 'import pytest\n'), ((1611, 1649), 'itertools.product', 'itertools.product', (['lu_units', 'pu_sample'], {}), '(lu_units, pu_sample)\n', (1628, 1649), False, 'import itertools\n'), ((2210, 2267), 'itertools.product', 'itertools.product', (['(lu_subclasses + [u.LogUnit])', 'pu_sample'], {}), '(lu_subclasses + [u.LogUnit], pu_sample)\n', (2227, 2267), False, 'import itertools\n'), ((5952, 5977), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (5963, 5977), True, 'import numpy as np\n'), ((5226, 5264), 'itertools.product', 'itertools.product', (['lu_units', 'pu_sample'], {}), '(lu_units, pu_sample)\n', (5243, 5264), False, 'import itertools\n'), ((6616, 6641), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (6627, 6641), True, 'import numpy as np\n'), ((7355, 7380), 'numpy.linspace', 'np.linspace', 
(['(0.0)', '(10.0)', '(6)'], {}), '(0.0, 10.0, 6)\n', (7366, 7380), True, 'import numpy as np\n'), ((7064, 7112), 'itertools.product', 'itertools.product', (['lu_units', 'lu_units', 'pu_sample'], {}), '(lu_units, lu_units, pu_sample)\n', (7081, 7112), False, 'import itertools\n'), ((15283, 15303), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (15292, 15303), True, 'import numpy as np\n'), ((15490, 15534), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['log_q.physical.value', 'value'], {}), '(log_q.physical.value, value)\n', (15505, 15534), False, 'from numpy.testing.utils import assert_allclose\n'), ((15043, 15086), 'itertools.product', 'itertools.product', (['lq_subclasses', 'pu_sample'], {}), '(lq_subclasses, pu_sample)\n', (15060, 15086), False, 'import itertools\n'), ((17775, 17790), 'numpy.all', 'np.all', (['(q == lq)'], {}), '(q == lq)\n', (17781, 17790), True, 'import numpy as np\n'), ((19493, 19526), 'numpy.all', 'np.all', (['(self.lq.value == lq_value)'], {}), '(self.lq.value == lq_value)\n', (19499, 19526), True, 'import numpy as np\n'), ((19749, 19785), 'numpy.all', 'np.all', (['(self.lq.value == lq_fv.value)'], {}), '(self.lq.value == lq_fv.value)\n', (19755, 19785), True, 'import numpy as np\n'), ((20099, 20133), 'numpy.all', 'np.all', (['(q2.value == self.lq2.value)'], {}), '(q2.value == self.lq2.value)\n', (20105, 20133), True, 'import numpy as np\n'), ((20293, 20316), 'numpy.all', 'np.all', (['(lq3 == self.lq2)'], {}), '(lq3 == self.lq2)\n', (20299, 20316), True, 'import numpy as np\n'), ((22360, 22394), 'numpy.all', 'np.all', (['(r.value == lq2.value / 2.0)'], {}), '(r.value == lq2.value / 2.0)\n', (22366, 22394), True, 'import numpy as np\n'), ((25561, 25622), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_sf.physical', '(lq.physical * other_physical)'], {}), '(lq_sf.physical, lq.physical * other_physical)\n', (25576, 25622), False, 'from numpy.testing.utils import assert_allclose\n'), ((25659, 25720), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_sr.physical', '(lq.physical * other_physical)'], {}), '(lq_sr.physical, lq.physical * other_physical)\n', (25674, 25720), False, 'from numpy.testing.utils import assert_allclose\n'), ((25757, 25818), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_df.physical', '(lq.physical / other_physical)'], {}), '(lq_df.physical, lq.physical / other_physical)\n', (25772, 25818), False, 'from numpy.testing.utils import assert_allclose\n'), ((25855, 25916), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_dr.physical', '(other_physical / lq.physical)'], {}), '(lq_dr.physical, other_physical / lq.physical)\n', (25870, 25916), False, 'from numpy.testing.utils import assert_allclose\n'), ((27188, 27249), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_sf.physical', '(lq.physical * other_physical)'], {}), '(lq_sf.physical, lq.physical * other_physical)\n', (27203, 27249), False, 'from numpy.testing.utils import assert_allclose\n'), ((27308, 27369), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['lq_df.physical', '(lq.physical / other_physical)'], {}), '(lq_df.physical, lq.physical / other_physical)\n', (27323, 27369), False, 'from numpy.testing.utils import assert_allclose\n'), ((29297, 29315), 'numpy.all', 'np.all', (['(lq6 == fv6)'], {}), '(lq6 == fv6)\n', (29303, 29315), True, 'import numpy as np\n'), ((2040, 2064), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (2053, 2064), False, 
'import pytest\n'), ((2101, 2126), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2114, 2126), False, 'import pytest\n'), ((2848, 2873), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2861, 2873), False, 'import pytest\n'), ((4414, 4439), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4427, 4439), False, 'import pytest\n'), ((6187, 6214), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (6200, 6214), False, 'import pytest\n'), ((6296, 6323), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (6309, 6323), False, 'import pytest\n'), ((6905, 6932), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (6918, 6932), False, 'import pytest\n'), ((8165, 8192), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (8178, 8192), False, 'import pytest\n'), ((9043, 9070), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9056, 9070), False, 'import pytest\n'), ((9108, 9135), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9121, 9135), False, 'import pytest\n'), ((9173, 9200), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9186, 9200), False, 'import pytest\n'), ((9426, 9453), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9439, 9453), False, 'import pytest\n'), ((9491, 9518), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9504, 9518), False, 'import pytest\n'), ((10808, 10832), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10821, 10832), False, 'import pytest\n'), ((10874, 10898), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10887, 10898), False, 'import pytest\n'), ((10943, 10967), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10956, 10967), False, 'import pytest\n'), ((11003, 11027), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (11016, 11027), False, 'import pytest\n'), ((12513, 12540), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (12526, 12540), False, 'import pytest\n'), ((12580, 12607), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (12593, 12607), False, 'import pytest\n'), ((12647, 12674), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (12660, 12674), False, 'import pytest\n'), ((12800, 12824), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12813, 12824), False, 'import pytest\n'), ((12861, 12885), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (12874, 12885), False, 'import pytest\n'), ((15548, 15573), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15561, 15573), False, 'import pytest\n'), ((17499, 17523), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (17512, 17523), False, 'import pytest\n'), ((17970, 18000), 'pytest.raises', 'pytest.raises', (['u.UnitTypeError'], {}), '(u.UnitTypeError)\n', (17983, 18000), False, 'import pytest\n'), ((19328, 19342), 'numpy.arange', 'np.arange', (['(5.0)'], {}), '(5.0)\n', (19337, 19342), True, 'import numpy as np\n'), ((19907, 19931), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (19920, 19931), False, 'import pytest\n'), ((20580, 20607), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], 
{}), '(u.UnitsError)\n', (20593, 20607), False, 'import pytest\n'), ((20652, 20679), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (20665, 20679), False, 'import pytest\n'), ((20726, 20753), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (20739, 20753), False, 'import pytest\n'), ((21036, 21063), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21049, 21063), False, 'import pytest\n'), ((21110, 21137), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21123, 21137), False, 'import pytest\n'), ((21186, 21213), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21199, 21213), False, 'import pytest\n'), ((21667, 21694), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21680, 21694), False, 'import pytest\n'), ((21736, 21763), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21749, 21763), False, 'import pytest\n'), ((21805, 21832), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21818, 21832), False, 'import pytest\n'), ((21995, 22013), 'numpy.arange', 'np.arange', (['(1)', '(11.0)'], {}), '(1, 11.0)\n', (22004, 22013), True, 'import numpy as np\n'), ((22028, 22055), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22041, 22055), False, 'import pytest\n'), ((22092, 22119), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22105, 22119), False, 'import pytest\n'), ((22156, 22183), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22169, 22183), False, 'import pytest\n'), ((23478, 23504), 'numpy.all', 'np.all', (['(lq ** power == 1.0)'], {}), '(lq ** power == 1.0)\n', (23484, 23504), True, 'import numpy as np\n'), ((23794, 23809), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (23803, 23809), True, 'import numpy as np\n'), ((23930, 23952), 'numpy.all', 'np.all', (['(t.value == 1.0)'], {}), '(t.value == 1.0)\n', (23936, 23952), True, 'import numpy as np\n'), ((24404, 24428), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (24417, 24428), False, 'import pytest\n'), ((24659, 24686), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24672, 24686), False, 'import pytest\n'), ((24721, 24748), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24734, 24748), False, 'import pytest\n'), ((24783, 24810), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24796, 24810), False, 'import pytest\n'), ((26087, 26107), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26096, 26107), True, 'import numpy as np\n'), ((26125, 26152), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (26138, 26152), False, 'import pytest\n'), ((26280, 26307), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (26293, 26307), False, 'import pytest\n'), ((27734, 27819), 'numpy.abs', 'np.abs', (['(M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2) - 1.0)'], {}), '(M_st.physical / (m_st.physical * 4.0 * np.pi * (100.0 * u.pc) ** 2) -\n 1.0)\n', (27740, 27819), True, 'import numpy as np\n'), ((28768, 28795), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (28781, 28795), False, 'import pytest\n'), ((28992, 29019), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), 
'(u.UnitsError)\n', (29005, 29019), False, 'import pytest\n'), ((29060, 29087), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (29073, 29087), False, 'import pytest\n'), ((29231, 29250), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (29240, 29250), True, 'import numpy as np\n'), ((29379, 29406), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (29392, 29406), False, 'import pytest\n'), ((30791, 30815), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (30804, 30815), False, 'import pytest\n'), ((31101, 31126), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (31114, 31126), False, 'import pytest\n'), ((31182, 31207), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (31195, 31207), False, 'import pytest\n'), ((31780, 31807), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (31793, 31807), False, 'import pytest\n'), ((31821, 31844), 'numpy.power', 'np.power', (['self.mJy', '(2.0)'], {}), '(self.mJy, 2.0)\n', (31829, 31844), True, 'import numpy as np\n'), ((31913, 31940), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (31926, 31940), False, 'import pytest\n'), ((31954, 31973), 'numpy.square', 'np.square', (['self.mJy'], {}), '(self.mJy)\n', (31963, 31973), True, 'import numpy as np\n'), ((9309, 9336), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9322, 9336), False, 'import pytest\n'), ((19274, 19289), 'numpy.arange', 'np.arange', (['(10.0)'], {}), '(10.0)\n', (19283, 19289), True, 'import numpy as np\n'), ((20420, 20440), 'numpy.arange', 'np.arange', (['(1.0)', '(11.0)'], {}), '(1.0, 11.0)\n', (20429, 20440), True, 'import numpy as np\n'), ((20911, 20931), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (20920, 20931), True, 'import numpy as np\n'), ((21628, 21648), 'numpy.arange', 'np.arange', (['(1.0)', '(11.0)'], {}), '(1.0, 11.0)\n', (21637, 21648), True, 'import numpy as np\n'), ((21913, 21940), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (21926, 21940), False, 'import pytest\n'), ((23411, 23430), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (23420, 23430), True, 'import numpy as np\n'), ((23548, 23573), 'numpy.all', 'np.all', (['(lq ** power == lq)'], {}), '(lq ** power == lq)\n', (23554, 23573), True, 'import numpy as np\n'), ((23996, 24012), 'numpy.all', 'np.all', (['(t == lq2)'], {}), '(t == lq2)\n', (24002, 24012), True, 'import numpy as np\n'), ((24367, 24386), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (24376, 24386), True, 'import numpy as np\n'), ((24596, 24616), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (24605, 24616), True, 'import numpy as np\n'), ((25295, 25315), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (25304, 25315), True, 'import numpy as np\n'), ((26215, 26235), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26224, 26235), True, 'import numpy as np\n'), ((26370, 26390), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26379, 26390), True, 'import numpy as np\n'), ((26901, 26921), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (26910, 26921), True, 'import numpy as np\n'), ((27958, 27978), 'numpy.arange', 'np.arange', (['(1.0)', '(10.0)'], {}), '(1.0, 10.0)\n', (27967, 27978), 
True, 'import numpy as np\n'), ((28131, 28155), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (28144, 28155), False, 'import pytest\n'), ((28297, 28316), 'numpy.arange', 'np.arange', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (28306, 28316), True, 'import numpy as np\n'), ((28393, 28423), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28401, 28423), True, 'import numpy as np\n'), ((28463, 28493), 'numpy.array', 'np.array', (['[False, True, False]'], {}), '([False, True, False])\n', (28471, 28493), True, 'import numpy as np\n'), ((28561, 28591), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28569, 28591), True, 'import numpy as np\n'), ((28631, 28661), 'numpy.array', 'np.array', (['[False, True, False]'], {}), '([False, True, False])\n', (28639, 28661), True, 'import numpy as np\n'), ((28879, 28909), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28887, 28909), True, 'import numpy as np\n'), ((28947, 28977), 'numpy.array', 'np.array', (['[True, False, False]'], {}), '([True, False, False])\n', (28955, 28977), True, 'import numpy as np\n'), ((31562, 31585), 'numpy.power', 'np.power', (['self.mJy', '(0.0)'], {}), '(self.mJy, 0.0)\n', (31570, 31585), True, 'import numpy as np\n'), ((31614, 31636), 'numpy.power', 'np.power', (['self.m1', '(1.0)'], {}), '(self.m1, 1.0)\n', (31622, 31636), True, 'import numpy as np\n'), ((31670, 31693), 'numpy.power', 'np.power', (['self.mJy', '(1.0)'], {}), '(self.mJy, 1.0)\n', (31678, 31693), True, 'import numpy as np\n'), ((31728, 31750), 'numpy.power', 'np.power', (['self.m1', '(2.0)'], {}), '(self.m1, 2.0)\n', (31736, 31750), True, 'import numpy as np\n'), ((31996, 32014), 'numpy.square', 'np.square', (['self.m1'], {}), '(self.m1)\n', (32005, 32014), True, 'import numpy as np\n'), ((9984, 10011), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (9997, 10011), False, 'import pytest\n'), ((10436, 10450), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (10445, 10450), True, 'import numpy as np\n'), ((11585, 11612), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (11598, 11612), False, 'import pytest\n'), ((22732, 22759), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (22745, 22759), False, 'import pytest\n'), ((23605, 23632), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (23618, 23632), False, 'import pytest\n'), ((29513, 29532), 'numpy.arange', 'np.arange', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (29522, 29532), True, 'import numpy as np\n'), ((29577, 29601), 'numpy.arange', 'np.arange', (['(1.0)', '(5.5)', '(0.5)'], {}), '(1.0, 5.5, 0.5)\n', (29586, 29601), True, 'import numpy as np\n'), ((31361, 31380), 'numpy.arange', 'np.arange', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (31370, 31380), True, 'import numpy as np\n'), ((31425, 31449), 'numpy.arange', 'np.arange', (['(1.0)', '(5.5)', '(0.5)'], {}), '(1.0, 5.5, 0.5)\n', (31434, 31449), True, 'import numpy as np\n'), ((10361, 10375), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (10370, 10375), True, 'import numpy as np\n'), ((24220, 24247), 'pytest.raises', 'pytest.raises', (['u.UnitsError'], {}), '(u.UnitsError)\n', (24233, 24247), False, 'import pytest\n'), ((12264, 12278), 'numpy.arange', 'np.arange', (['(3.0)'], {}), '(3.0)\n', (12273, 12278), True, 'import numpy as np\n'), ((12338, 12352), 'numpy.arange', 
'np.arange', (['(3.0)'], {}), '(3.0)\n', (12347, 12352), True, 'import numpy as np\n')]
|
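A standalone sketch (not part of the test module above) of the mag(Jy) round-trip behaviour that TestLogUnitConversion and the LogQuantity tests assert: a logarithmic unit converts to and from its physical unit, and a Magnitude quantity recovers its physical value.
import astropy.units as u
lu = u.mag(u.Jy)                 # logarithmic unit with physical unit Jy
assert u.Jy.to(lu, 1.) == 0.     # 1 Jy corresponds to 0 mag(Jy)
assert lu.to(u.Jy, 0.) == 1.     # ... and 0 mag(Jy) converts back to 1 Jy
m = u.Magnitude(2.5 * u.Jy)      # a LogQuantity; m.value is -2.5*log10(2.5)
flux = m.physical                # recovers 2.5 Jy as an ordinary Quantity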
"""
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
class TestCommon:
def test_droplevel(self, index):
# GH 21115
if isinstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.droplevel([]).equals(index)
for level in index.name, [index.name]:
if isinstance(index.name, tuple) and level is index.name:
# GH 21121 : droplevel with tuple name
continue
with pytest.raises(ValueError):
index.droplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.droplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if isinstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamed = [["1"]]
# With .rename()
with pytest.raises(TypeError, match=message):
index.rename(name=renamed)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamed)
def test_constructor_unwraps_index(self, index):
if isinstance(index, pd.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_getitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.union(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test copy.union(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
union = first.union(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
union = first.union(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.union(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_values()
expected = index.set_names(expected_name).sort_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
# Test intersections with various name combinations
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# Test copy.intersection(copy)
first = index.copy().set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.copy().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test copy.intersection(empty)
first = index.copy().set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(copy)
first = index.drop(index).set_names(fname)
second = index.copy().set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.intersection(empty)
first = index.drop(index).set_names(fname)
second = index.drop(index).set_names(sname)
intersect = first.intersection(second)
expected = index.drop(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if isinstance(index, MultiIndex) or not index.is_unique:
pytest.skip("Not for MultiIndex or repeated indices")
# test copy.intersection(subset) - need sort for unicode and string
first = index.copy().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.intersection(second).sort_values()
expected = index[1:].set_names(expected_name).sort_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if isinstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it's tested separately)
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.rename(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
# FIXME: dont leave commented-out
# with pytest.raises(TypeError, match="list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ("A", "B")
index.rename(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_copy_and_deepcopy(self, index):
from copy import copy, deepcopy
if isinstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (copy, deepcopy):
idx_copy = func(index)
assert idx_copy is not index
assert idx_copy.equals(index)
new_copy = index.copy(deep=True, name="banana")
assert new_copy.name == "banana"
def test_unique(self, index):
        # don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if isinstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.drop_duplicates()
for level in 0, index.name, None:
result = index.unique(level=level)
tm.assert_index_equal(result, expected)
msg = "Too many levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.unique(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.unique(level="wrong")
def test_get_unique_index(self, index):
# MultiIndex tested separately
if not len(index) or isinstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_unique = index[[0]]
# We test against `idx_unique`, so first we make sure it's unique
# and doesn't contain nans.
assert idx_unique.is_unique is True
try:
assert idx_unique.hasnans is False
except NotImplementedError:
pass
for dropna in [False, True]:
result = idx._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, idx_unique)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = pd.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_unique = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype)
idx_nan = index._shallow_copy(vals)
idx_unique_nan = index._shallow_copy(vals_unique)
assert idx_unique_nan.is_unique is True
assert idx_nan.dtype == index.dtype
assert idx_unique_nan.dtype == index.dtype
for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]):
for i in [idx_nan, idx_unique_nan]:
result = i._get_unique_index(dropna=dropna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not len(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if isinstance(index, (MultiIndex, pd.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
# determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).argmin()
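        # value is index[0], so "left" always inserts at 0; argmin over the
        # boolean mask gives the first position where the leading run of equal
        # values ends, i.e. the "right" insertion point when there are duplicates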
if expected_right == 0:
# all values are the same, expected_right should be length
expected_right = len(index)
# test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_drop_duplicates(self, index, keep):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if isinstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if len(index) == 0:
pytest.skip(
"empty index is tested in test_drop_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make unique index
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# make duplicated index
n = len(unique_idx)
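        # sample 1.5 * n positions with replacement so the constructed index
        # (almost always) contains repeated values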
duplicated_selection = np.random.choice(n, int(n * 1.5))
idx = holder(unique_idx.values[duplicated_selection])
# Series.duplicated is tested separately
expected_duplicated = (
pd.Series(duplicated_selection).duplicated(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)
# Series.drop_duplicates is tested separately
expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))
tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)
def test_drop_duplicates_no_duplicates(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make unique index
if isinstance(index, RangeIndex):
# RangeIndex cannot have duplicates
unique_idx = index
else:
holder = type(index)
unique_values = list(set(index))
unique_idx = holder(unique_values)
# check on unique index
expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")
tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)
result_dropped = unique_idx.drop_duplicates()
tm.assert_index_equal(result_dropped, unique_idx)
# validate shallow copy
assert result_dropped is not unique_idx
def test_drop_duplicates_inplace(self, index):
msg = r"drop_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.drop_duplicates(inplace=True)
def test_has_duplicates(self, index):
holder = type(index)
if not len(index) or isinstance(index, (MultiIndex, RangeIndex)):
# MultiIndex tested separately in:
# tests/indexes/multi/test_unique_and_duplicates.
# RangeIndex is unique by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_unique is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_astype_preserves_name(self, index, dtype):
# https://github.com/pandas-dev/pandas/issues/32013
if isinstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
try:
# Some of these conversions cannot succeed so we use a try / except
result = index.astype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if isinstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_ravel_deprecation(self, index):
# GH#19956 ravel returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.ravel()
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_values_invalid_na_position(index_with_missing, na_position):
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
if na_position not in ["first", "last"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_with_missing(index_with_missing, na_position):
# GH 35584. Test that sort_values works with missing values,
# sort non-missing and place missing according to na_position
if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
# datetime-like indices will get na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Series to sort differently (xref 35922)
pytest.xfail("sort_values does not support na_position kwarg")
elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
missing_count = np.sum(index_with_missing.isna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_values(na_position=na_position)
tm.assert_index_equal(result, expected)
|
[
"pandas.Series",
"pytest.mark.filterwarnings",
"numpy.sort",
"pandas._testing.round_trip_pickle",
"pandas._testing.assert_index_equal",
"pytest.mark.parametrize",
"pandas._testing.assert_equal",
"pandas.core.dtypes.common.needs_i8_conversion",
"pytest.raises",
"numpy.concatenate",
"pandas._testing.assert_produces_warning",
"pytest.skip",
"pandas.core.dtypes.common.is_period_dtype",
"pytest.xfail"
] |
[((18137, 18193), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_position"""', "[None, 'middle']"], {}), "('na_position', [None, 'middle'])\n", (18160, 18193), False, 'import pytest\n'), ((19004, 19061), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_position"""', "['first', 'last']"], {}), "('na_position', ['first', 'last'])\n", (19027, 19061), False, 'import pytest\n'), ((2026, 2073), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""itm"""', "[101, 'no_int']"], {}), "('itm', [101, 'no_int'])\n", (2049, 2073), False, 'import pytest\n'), ((2138, 2189), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::FutureWarning"""'], {}), "('ignore::FutureWarning')\n", (2164, 2189), False, 'import pytest\n'), ((2305, 2459), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (2328, 2459), False, 'import pytest\n'), ((3969, 4123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (3992, 4123), False, 'import pytest\n'), ((4741, 4895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (4764, 4895), False, 'import pytest\n'), ((6465, 6619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname, sname, expected_name"""', "[('A', 'A', 'A'), ('A', 'B', None), ('A', None, None), (None, 'B', None), (\n None, None, None)]"], {}), "('fname, sname, expected_name', [('A', 'A', 'A'), (\n 'A', 'B', None), ('A', None, None), (None, 'B', None), (None, None, None)])\n", (6488, 6619), False, 'import pytest\n'), ((17159, 17276), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', "['int64', 'uint64', 'float64', 'category', 'datetime64[ns]', 'timedelta64[ns]']"], {}), "('dtype', ['int64', 'uint64', 'float64', 'category',\n 'datetime64[ns]', 'timedelta64[ns]'])\n", (17182, 17276), False, 'import pytest\n'), ((19942, 19962), 'numpy.sort', 'np.sort', (['not_na_vals'], {}), '(not_na_vals)\n', (19949, 19962), True, 'import numpy as np\n'), ((20293, 20332), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (20314, 20332), True, 'import pandas._testing as tm\n'), ((1986, 2019), 'pandas._testing.assert_equal', 'tm.assert_equal', (['a._data', 'b._data'], {}), '(a._data, b._data)\n', (2001, 2019), True, 'import pandas._testing as tm\n'), ((3097, 3135), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3118, 3135), True, 'import pandas._testing as tm\n'), ((3369, 3407), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3390, 3407), True, 'import pandas._testing as tm\n'), ((3641, 3679), 
'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3662, 3679), True, 'import pandas._testing as tm\n'), ((3924, 3962), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (3945, 3962), True, 'import pandas._testing as tm\n'), ((4696, 4734), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['union', 'expected'], {}), '(union, expected)\n', (4717, 4734), True, 'import pandas._testing as tm\n'), ((5513, 5555), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (5534, 5555), True, 'import pandas._testing as tm\n'), ((5812, 5854), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (5833, 5854), True, 'import pandas._testing as tm\n'), ((6111, 6153), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (6132, 6153), True, 'import pandas._testing as tm\n'), ((6416, 6458), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (6437, 6458), True, 'import pandas._testing as tm\n'), ((7218, 7260), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['intersect', 'expected'], {}), '(intersect, expected)\n', (7239, 7260), True, 'import pandas._testing as tm\n'), ((7472, 7508), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'index'], {}), '(result, index)\n', (7493, 7508), True, 'import pandas._testing as tm\n'), ((10810, 10838), 'pandas.core.dtypes.common.is_period_dtype', 'is_period_dtype', (['index.dtype'], {}), '(index.dtype)\n', (10825, 10838), False, 'from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion\n'), ((14146, 14173), 'pandas._testing.round_trip_pickle', 'tm.round_trip_pickle', (['index'], {}), '(index)\n', (14166, 14173), True, 'import pandas._testing as tm\n'), ((16294, 16343), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result_dropped', 'unique_idx'], {}), '(result_dropped, unique_idx)\n', (16315, 16343), True, 'import pandas._testing as tm\n'), ((18587, 18649), 'pytest.xfail', 'pytest.xfail', (['"""sort_values does not support na_position kwarg"""'], {}), "('sort_values does not support na_position kwarg')\n", (18599, 18649), False, 'import pytest\n'), ((19580, 19642), 'pytest.xfail', 'pytest.xfail', (['"""sort_values does not support na_position kwarg"""'], {}), "('sort_values does not support na_position kwarg')\n", (19592, 19642), False, 'import pytest\n'), ((20018, 20073), 'numpy.concatenate', 'np.concatenate', (['[[None] * missing_count, sorted_values]'], {}), '([[None] * missing_count, sorted_values])\n', (20032, 20073), True, 'import numpy as np\n'), ((20108, 20163), 'numpy.concatenate', 'np.concatenate', (['[sorted_values, [None] * missing_count]'], {}), '([sorted_values, [None] * missing_count])\n', (20122, 20163), True, 'import numpy as np\n'), ((1402, 1452), 'pytest.skip', 'pytest.skip', (['"""multiindex handled in test_multi.py"""'], {}), "('multiindex handled in test_multi.py')\n", (1413, 1452), False, 'import pytest\n'), ((1574, 1613), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), '(TypeError, match=message)\n', (1587, 1613), False, 'import pytest\n'), ((1696, 1735), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'message'}), 
'(TypeError, match=message)\n', (1709, 1735), False, 'import pytest\n'), ((1897, 1936), 'pytest.skip', 'pytest.skip', (['"""MultiIndex has no ._data"""'], {}), "('MultiIndex has no ._data')\n", (1908, 1936), False, 'import pytest\n'), ((2249, 2274), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2262, 2274), False, 'import pytest\n'), ((2816, 2869), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (2827, 2869), False, 'import pytest\n'), ((4360, 4413), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (4371, 4413), False, 'import pytest\n'), ((5214, 5267), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (5225, 5267), False, 'import pytest\n'), ((6860, 6913), 'pytest.skip', 'pytest.skip', (['"""Not for MultiIndex or repeated indices"""'], {}), "('Not for MultiIndex or repeated indices')\n", (6871, 6913), False, 'import pytest\n'), ((7373, 7423), 'pytest.skip', 'pytest.skip', (['"""Separate expectation for MultiIndex"""'], {}), "('Separate expectation for MultiIndex')\n", (7384, 7423), False, 'import pytest\n'), ((7732, 7772), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex"""'], {}), "('Skip check for MultiIndex')\n", (7743, 7772), False, 'import pytest\n'), ((8341, 8394), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Level must be None"""'}), "(ValueError, match='Level must be None')\n", (8354, 8394), False, 'import pytest\n'), ((8790, 8830), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex"""'], {}), "('Skip check for MultiIndex')\n", (8801, 8830), False, 'import pytest\n'), ((9337, 9394), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex/CategoricalIndex"""'], {}), "('Skip check for MultiIndex/CategoricalIndex')\n", (9348, 9394), False, 'import pytest\n'), ((9559, 9598), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (9580, 9598), True, 'import pandas._testing as tm\n'), ((9676, 9712), 'pytest.raises', 'pytest.raises', (['IndexError'], {'match': 'msg'}), '(IndexError, match=msg)\n', (9689, 9712), False, 'import pytest\n'), ((9912, 9946), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': 'msg'}), '(KeyError, match=msg)\n', (9925, 9946), False, 'import pytest\n'), ((10144, 10200), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index and MultiIndex"""'], {}), "('Skip check for empty Index and MultiIndex')\n", (10155, 10200), False, 'import pytest\n'), ((10639, 10680), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'idx_unique'], {}), '(result, idx_unique)\n', (10660, 10680), True, 'import pandas._testing as tm\n'), ((10745, 10797), 'pytest.skip', 'pytest.skip', (['"""Skip na-check if index cannot hold na"""'], {}), "('Skip na-check if index cannot hold na')\n", (10756, 10797), False, 'import pytest\n'), ((10922, 10954), 'pandas.core.dtypes.common.needs_i8_conversion', 'needs_i8_conversion', (['index.dtype'], {}), '(index.dtype)\n', (10941, 10954), False, 'from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion\n'), ((11984, 12025), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index"""'], {}), "('Skip check for empty Index')\n", (11995, 12025), False, 'import pytest\n'), ((12097, 12132), 'pytest.raises', 
'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (12110, 12132), False, 'import pytest\n'), ((12497, 12551), 'pytest.skip', 'pytest.skip', (['"""Skip check for MultiIndex/IntervalIndex"""'], {}), "('Skip check for MultiIndex/IntervalIndex')\n", (12508, 12551), False, 'import pytest\n'), ((12637, 12678), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index"""'], {}), "('Skip check for empty Index')\n", (12648, 12678), False, 'import pytest\n'), ((14352, 14398), 'pytest.skip', 'pytest.skip', (['"""MultiIndex is tested separately"""'], {}), "('MultiIndex is tested separately')\n", (14363, 14398), False, 'import pytest\n'), ((14453, 14565), 'pytest.skip', 'pytest.skip', (['"""RangeIndex is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates"""'], {}), "(\n 'RangeIndex is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates'\n )\n", (14464, 14565), False, 'import pytest\n'), ((14645, 14758), 'pytest.skip', 'pytest.skip', (['"""empty index is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates"""'], {}), "(\n 'empty index is tested in test_drop_duplicates_no_duplicates as it cannot hold duplicates'\n )\n", (14656, 14758), False, 'import pytest\n'), ((15701, 15747), 'pytest.skip', 'pytest.skip', (['"""MultiIndex is tested separately"""'], {}), "('MultiIndex is tested separately')\n", (15712, 15747), False, 'import pytest\n'), ((16561, 16596), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': 'msg'}), '(TypeError, match=msg)\n', (16574, 16596), False, 'import pytest\n'), ((16965, 17034), 'pytest.skip', 'pytest.skip', (['"""Skip check for empty Index, MultiIndex, and RangeIndex"""'], {}), "('Skip check for empty Index, MultiIndex, and RangeIndex')\n", (16976, 17034), False, 'import pytest\n'), ((18065, 18106), 'pandas._testing.assert_produces_warning', 'tm.assert_produces_warning', (['FutureWarning'], {}), '(FutureWarning)\n', (18091, 18106), True, 'import pandas._testing as tm\n'), ((18731, 18801), 'pytest.xfail', 'pytest.xfail', (['"""missing value sorting order not defined for index type"""'], {}), "('missing value sorting order not defined for index type')\n", (18743, 18801), False, 'import pytest\n'), ((18861, 18931), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""invalid na_position: {na_position}"""'}), "(ValueError, match=f'invalid na_position: {na_position}')\n", (18874, 18931), False, 'import pytest\n'), ((19724, 19794), 'pytest.xfail', 'pytest.xfail', (['"""missing value sorting order not defined for index type"""'], {}), "('missing value sorting order not defined for index type')\n", (19736, 19794), False, 'import pytest\n'), ((961, 986), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (974, 986), False, 'import pytest\n'), ((1086, 1190), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""\'Requested level \\\\(wrong\\\\) does not match index name \\\\(None\\\\)\'"""'}), '(KeyError, match=\n "\'Requested level \\\\(wrong\\\\) does not match index name \\\\(None\\\\)\'")\n', (1099, 1190), False, 'import pytest\n'), ((11866, 11905), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result', 'expected'], {}), '(result, expected)\n', (11887, 11905), True, 'import pandas._testing as tm\n'), ((13944, 13969), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13957, 13969), False, 'import pytest\n'), ((15222, 15253), 'pandas.Series', 'pd.Series', (['duplicated_selection'], {}), 
'(duplicated_selection)\n', (15231, 15253), True, 'import pandas as pd\n'), ((15466, 15480), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (15475, 15480), True, 'import pandas as pd\n')]
|
from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import matplotlib
# Input your csv file here with historical data
ad = genfromtxt("../financial_data/SM.csv", delimiter=",", dtype=str)
def convolve_sma(array, period):
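    # Simple moving average via convolution with a flat window of length
    # `period`; mode="valid" keeps only positions fully covered by the window.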
return np.convolve(array, np.ones((period,)) / period, mode="valid")
def graphwerk(start, finish):
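    # Render a candlestick image for the bars in [start, finish) and label it
    # "buy" or "sell" from the 12 bars that follow (the forecast window).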
open = []
high = []
low = []
close = []
volume = []
# decision = []
date = []
c_open = []
c_high = []
c_low = []
c_close = []
c_volume = []
c_date = []
c_start = start + 12
for x in range(finish - start):
c_open.append(float(pd[c_start][1]))
c_high.append(float(pd[c_start][2]))
c_low.append(float(pd[c_start][3]))
c_close.append(float(pd[c_start][4]))
c_volume.append(float(pd[c_start][5]))
c_date.append(pd[c_start][0])
c_start = c_start + 1
for x in range(finish - start):
# Below filtering is valid for eurusd.csv file. Other financial data files have different orders so you need to find out
# what means open, high and close in their respective order.
open.append(float(pd[start][1]))
high.append(float(pd[start][2]))
low.append(float(pd[start][3]))
close.append(float(pd[start][4]))
volume.append(float(pd[start][5]))
# decision.append(str(pd[start][6]))
date.append(pd[start][0])
start = start + 1
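    # Label the chart "buy" when the high of the forecast window exceeds the
    # last close by more than 3%; otherwise keep the default "sell" label.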
decision = "sell"
min_forecast = min(c_low)
max_forecast = max(c_high)
if close[-1] * 1.03 < max_forecast:
decision = "buy"
# for z in all_prices:
# if close[-1] * 1.03 < z:
# decision = "buy"
sma = convolve_sma(close, 5)
smb = list(sma)
diff = sma[-1] - sma[-2]
for x in range(len(close) - len(smb)):
smb.append(smb[-1] + diff)
fig = plt.figure(num=1, figsize=(3, 3), dpi=50, facecolor="w", edgecolor="k")
dx = fig.add_subplot(111)
# mpl_finance.volume_overlay(ax, open, close, volume, width=0.4, colorup='b', colordown='b', alpha=1)
mpl_finance.candlestick2_ochl(
dx, open, close, high, low, width=1.5, colorup="g", colordown="r", alpha=0.5
)
plt.autoscale()
# plt.plot(smb, color="blue", linewidth=10, alpha=0.5)
plt.axis("off")
if decision == "sell":
print("last value: " + str(close[-1]))
print(
"range of values in next 13 bars: "
+ str(min_forecast)
+ "-"
+ str(max_forecast)
)
print("sell")
plt.savefig(sell_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
else:
print("last value: " + str(close[-1]))
print(
"range of values in next 13 bars: "
+ str(min_forecast)
+ "-"
+ str(max_forecast)
)
print("buy")
plt.savefig(buy_dir + str(uuid.uuid4()) + ".jpg", bbox_inches="tight")
# if close[-1] >= close_next:
# print('previous value is bigger')
# print('last value: ' + str(close[-1]))
# print('next value: ' + str(close_next))
# print('sell')
# plt.savefig(sell_dir + str(uuid.uuid4()) +'.jpg', bbox_inches='tight')
# else:
# print('previous value is smaller')
# print('last value: '+ str(close[-1]))
# print('next value: ' + str(close_next))
# print('buy')
# plt.savefig(buy_dir + str(uuid.uuid4())+'.jpg', bbox_inches='tight')
# plt.show()
open.clear()
close.clear()
volume.clear()
high.clear()
low.clear()
plt.cla()
plt.clf()
# output = []
# with open("STOCKbluechip.csv") as f:
# output = [str(s) for line in f.readlines() for s in line[:-1].split(",")]
# for stock in output:
pd = ad
buy_dir = "../data/train/buy/"
sell_dir = "../data/train/sell/"
iter = 0
# step through the price history two bars at a time; graphwerk() reads
# start + 24 rows (12 chart bars + 12 forecast bars), so stop before the
# window runs past the end of the data
while iter + 24 <= len(pd):
    graphwerk(iter, iter + 12)
    iter = iter + 2
|
[
"mpl_finance.candlestick2_ochl",
"numpy.ones",
"matplotlib.pyplot.clf",
"uuid.uuid4",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.cla",
"numpy.genfromtxt"
] |
[((184, 249), 'numpy.genfromtxt', 'genfromtxt', (['f"""../financial_data/SM.csv"""'], {'delimiter': '""","""', 'dtype': 'str'}), "(f'../financial_data/SM.csv', delimiter=',', dtype=str)\n", (194, 249), False, 'from numpy import genfromtxt\n'), ((1909, 1980), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': '(3, 3)', 'dpi': '(50)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=1, figsize=(3, 3), dpi=50, facecolor='w', edgecolor='k')\n", (1919, 1980), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2232), 'mpl_finance.candlestick2_ochl', 'mpl_finance.candlestick2_ochl', (['dx', 'open', 'close', 'high', 'low'], {'width': '(1.5)', 'colorup': '"""g"""', 'colordown': '"""r"""', 'alpha': '(0.5)'}), "(dx, open, close, high, low, width=1.5,\n colorup='g', colordown='r', alpha=0.5)\n", (2150, 2232), False, 'import mpl_finance\n'), ((2247, 2262), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {}), '()\n', (2260, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2326, 2341), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2334, 2341), True, 'import matplotlib.pyplot as plt\n'), ((3674, 3683), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3681, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3688, 3697), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3695, 3697), True, 'import matplotlib.pyplot as plt\n'), ((315, 333), 'numpy.ones', 'np.ones', (['(period,)'], {}), '((period,))\n', (322, 333), True, 'import numpy as np\n'), ((2629, 2641), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2639, 2641), False, 'import uuid\n'), ((2941, 2953), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2951, 2953), False, 'import uuid\n')]
|
import hashlib
from io import BytesIO
import logging
import os
from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union
from pkg_resources import parse_version
import wandb
from wandb import util
from ._private import MEDIA_TMP
from .base_types.media import BatchableMedia, Media
from .helper_types.bounding_boxes_2d import BoundingBoxes2D
from .helper_types.classes import Classes
from .helper_types.image_mask import ImageMask
if TYPE_CHECKING: # pragma: no cover
import matplotlib # type: ignore
import numpy as np # type: ignore
import PIL # type: ignore
import torch # type: ignore
from wandb.apis.public import Artifact as PublicArtifact
from ..wandb_artifacts import Artifact as LocalArtifact
from ..wandb_run import Run as LocalRun
ImageDataType = Union[
"matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
]
ImageDataOrPathType = Union[str, "Image", ImageDataType]
TorchTensorType = Union["torch.Tensor", "torch.Variable"]
def _server_accepts_image_filenames() -> bool:
    # Newer versions of the wandb backend accept large image filename arrays,
    # but older versions would have issues with this.
max_cli_version = util._get_max_cli_version()
if max_cli_version is None:
return False
return parse_version("0.12.10") <= parse_version(max_cli_version)
class Image(BatchableMedia):
"""Format images for logging to W&B.
Arguments:
data_or_path: (numpy array, string, io) Accepts numpy array of
image data, or a PIL image. The class attempts to infer
the data format and converts it.
mode: (string) The PIL mode for an image. Most common are "L", "RGB",
"RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
caption: (string) Label for display of image.
Examples:
### Create a wandb.Image from a numpy array
<!--yeadoc-test:log-image-numpy->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Create a wandb.Image from a PILImage
<!--yeadoc-test:log-image-pil->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
"""
MAX_ITEMS = 108
# PIL limit
MAX_DIMENSION = 65500
_log_type = "image-file"
format: Optional[str]
_grouping: Optional[int]
_caption: Optional[str]
_width: Optional[int]
_height: Optional[int]
_image: Optional["PIL.Image"]
_classes: Optional["Classes"]
_boxes: Optional[Dict[str, "BoundingBoxes2D"]]
_masks: Optional[Dict[str, "ImageMask"]]
def __init__(
self,
data_or_path: "ImageDataOrPathType",
mode: Optional[str] = None,
caption: Optional[str] = None,
grouping: Optional[int] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
super(Image, self).__init__()
# TODO: We should remove grouping, it's a terrible name and I don't
# think anyone uses it.
self._grouping = None
self._caption = None
self._width = None
self._height = None
self._image = None
self._classes = None
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
        # only overriding additional metadata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, str):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping: Optional[int] = None,
caption: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
total_classes = {}
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final: Dict[str, BoundingBoxes2D] = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
boxes_final[key] = BoundingBoxes2D(box_item, key)
total_classes.update(boxes_final[key]._class_labels)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final: Dict[str, ImageMask] = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
masks_final[key] = ImageMask(mask_item, key)
if hasattr(masks_final[key], "_val"):
total_classes.update(masks_final[key]._val["class_labels"])
self._masks = masks_final
if classes is not None:
if isinstance(classes, Classes):
total_classes.update(
{val["id"]: val["name"] for val in classes._class_set}
)
else:
total_classes.update({val["id"]: val["name"] for val in classes})
if len(total_classes.keys()) > 0:
self._classes = Classes(
[
{"id": key, "name": total_classes[key]}
for key in total_classes.keys()
]
)
self._width, self._height = self.image.size # type: ignore
self._free_ram()
def _initialize_from_wbimage(self, wbimage: "Image") -> None:
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path: str) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
def _initialize_from_data(self, data: "ImageDataType", mode: str = None,) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
if util.is_matplotlib_typename(util.get_full_typename(data)):
buf = BytesIO()
util.ensure_matplotlib_figure(data).savefig(buf)
self._image = pil_image.open(buf)
elif isinstance(data, pil_image.Image):
self._image = data
elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
vis_util = util.get_module(
"torchvision.utils", "torchvision is required to render images"
)
if hasattr(data, "requires_grad") and data.requires_grad:
data = data.detach()
data = vis_util.make_grid(data, normalize=True)
self._image = pil_image.fromarray(
data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
)
else:
if hasattr(data, "numpy"): # TF data eager tensors
data = data.numpy()
if data.ndim > 2:
data = data.squeeze() # get rid of trivial dimensions as a convenience
self._image = pil_image.fromarray(
self.to_uint8(data), mode=mode or self.guess_mode(data)
)
tmp_path = os.path.join(MEDIA_TMP.name, str(util.generate_id()) + ".png")
self.format = "png"
self._image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls: Type["Image"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Image":
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks: Optional[Dict[str, ImageMask]] = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes: Optional[Dict[str, BoundingBoxes2D]] = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls: Type["Image"]) -> str:
return os.path.join("media", "images")
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
ignore_copy_err: Optional[bool] = None,
) -> None:
super().bind_to_run(run, key, step, id_, ignore_copy_err=ignore_copy_err)
if self._boxes is not None:
for i, k in enumerate(self._boxes):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._boxes[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
if self._masks is not None:
for i, k in enumerate(self._masks):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._masks[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Image, self).to_json(run_or_artifact)
json_dict["_type"] = Image._log_type
json_dict["format"] = self.format
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._grouping:
json_dict["grouping"] = self._grouping
if self._caption:
json_dict["caption"] = self._caption
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
artifact = run_or_artifact
if (
self._masks is not None or self._boxes is not None
) and self._classes is None:
raise ValueError(
"classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
)
if self._classes is not None:
class_id = hashlib.md5(
str(self._classes._class_set).encode("utf-8")
).hexdigest()
class_name = os.path.join("media", "classes", class_id + "_cls",)
classes_entry = artifact.add(self._classes, class_name)
json_dict["classes"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest,
}
elif not isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
if self._boxes:
json_dict["boxes"] = {
k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
}
if self._masks:
json_dict["masks"] = {
k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
}
return json_dict
def guess_mode(self, data: "np.ndarray") -> str:
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
@classmethod
def to_uint8(cls, data: "np.ndarray") -> "np.ndarray":
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
@classmethod
def seq_to_json(
cls: Type["Image"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0].image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image: "Image") -> bool:
img_width, img_height = image.image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Images sizes do not match. This will causes images to be display incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
if _server_accepts_image_filenames():
meta["filenames"] = [obj["path"] for obj in jsons]
else:
wandb.termwarn(
"Unable to log image array filenames. In some cases, this can prevent images from being"
"viewed in the UI. Please upgrade your wandb server",
repeat=False,
)
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_mask_groups: List[Optional[dict]] = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_box_groups: List[Optional[dict]] = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls: Type["Image"], images: Sequence["Media"]
) -> Union[bool, Sequence[Optional[str]]]:
return cls.captions(images)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Image):
return False
else:
self_image = self.image
other_image = other.image
if self_image is not None:
self_image = list(self_image.getdata())
if other_image is not None:
other_image = list(other_image.getdata())
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self_image == other_image
and self._classes == other._classes
)
def to_data_array(self) -> List[Any]:
res = []
if self.image is not None:
data = list(self.image.getdata())
for i in range(self.image.height):
res.append(data[i * self.image.width : (i + 1) * self.image.width])
self._free_ram()
return res
def _free_ram(self) -> None:
if self._path is not None:
self._image = None
@property
def image(self) -> Optional["PIL.Image"]:
if self._image is None:
if self._path is not None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._image = pil_image.open(self._path)
self._image.load()
return self._image
|
[
"wandb.util.get_full_typename",
"numpy.ptp",
"wandb.util.generate_id",
"os.path.join",
"io.BytesIO",
"logging.warning",
"wandb.util.get_module",
"numpy.max",
"os.path.splitext",
"pkg_resources.parse_version",
"wandb.util._get_max_cli_version",
"wandb.termwarn",
"wandb.util.ensure_matplotlib_figure",
"numpy.min",
"wandb.util.to_forward_slash_path",
"typing.cast"
] |
[((1238, 1265), 'wandb.util._get_max_cli_version', 'util._get_max_cli_version', ([], {}), '()\n', (1263, 1265), False, 'from wandb import util\n'), ((1330, 1354), 'pkg_resources.parse_version', 'parse_version', (['"""0.12.10"""'], {}), "('0.12.10')\n", (1343, 1354), False, 'from pkg_resources import parse_version\n'), ((1358, 1388), 'pkg_resources.parse_version', 'parse_version', (['max_cli_version'], {}), '(max_cli_version)\n', (1371, 1388), False, 'from pkg_resources import parse_version\n'), ((8266, 8383), 'wandb.util.get_module', 'util.get_module', (['"""PIL.Image"""'], {'required': '"""wandb.Image needs the PIL package. To get it, run "pip install pillow"."""'}), '(\'PIL.Image\', required=\n \'wandb.Image needs the PIL package. To get it, run "pip install pillow".\')\n', (8281, 8383), False, 'from wandb import util\n'), ((8705, 8822), 'wandb.util.get_module', 'util.get_module', (['"""PIL.Image"""'], {'required': '"""wandb.Image needs the PIL package. To get it, run "pip install pillow"."""'}), '(\'PIL.Image\', required=\n \'wandb.Image needs the PIL package. To get it, run "pip install pillow".\')\n', (8720, 8822), False, 'from wandb import util\n'), ((11509, 11540), 'os.path.join', 'os.path.join', (['"""media"""', '"""images"""'], {}), "('media', 'images')\n", (11521, 11540), False, 'import os\n'), ((15244, 15364), 'wandb.util.get_module', 'util.get_module', (['"""numpy"""'], {'required': '"""wandb.Image requires numpy if not supplying PIL Images: pip install numpy"""'}), "('numpy', required=\n 'wandb.Image requires numpy if not supplying PIL Images: pip install numpy'\n )\n", (15259, 15364), False, 'from wandb import util\n'), ((15601, 15613), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (15607, 15613), True, 'import numpy as np\n'), ((8892, 8920), 'wandb.util.get_full_typename', 'util.get_full_typename', (['data'], {}), '(data)\n', (8914, 8920), False, 'from wandb import util\n'), ((8941, 8950), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (8948, 8950), False, 'from io import BytesIO\n'), ((15702, 15714), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (15708, 15714), True, 'import numpy as np\n'), ((16263, 16291), 'typing.cast', 'cast', (["Sequence['Image']", 'seq'], {}), "(Sequence['Image'], seq)\n", (16267, 16291), False, 'from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union\n'), ((16437, 16474), 'wandb.util.to_forward_slash_path', 'util.to_forward_slash_path', (['media_dir'], {}), '(media_dir)\n', (16463, 16474), False, 'from wandb import util\n'), ((17199, 17315), 'logging.warning', 'logging.warning', (['"""Images sizes do not match. This will causes images to be display incorrectly in the UI."""'], {}), "(\n 'Images sizes do not match. This will causes images to be display incorrectly in the UI.'\n )\n", (17214, 17315), False, 'import logging\n'), ((17668, 17846), 'wandb.termwarn', 'wandb.termwarn', (['"""Unable to log image array filenames. In some cases, this can prevent images from beingviewed in the UI. Please upgrade your wandb server"""'], {'repeat': '(False)'}), "(\n 'Unable to log image array filenames. In some cases, this can prevent images from beingviewed in the UI. 
Please upgrade your wandb server'\n , repeat=False)\n", (17682, 17846), False, 'import wandb\n'), ((8541, 8563), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (8557, 8563), False, 'import os\n'), ((13622, 13673), 'os.path.join', 'os.path.join', (['"""media"""', '"""classes"""', "(class_id + '_cls')"], {}), "('media', 'classes', class_id + '_cls')\n", (13634, 13673), False, 'import os\n'), ((15678, 15690), 'numpy.ptp', 'np.ptp', (['data'], {}), '(data)\n', (15684, 15690), True, 'import numpy as np\n'), ((21355, 21472), 'wandb.util.get_module', 'util.get_module', (['"""PIL.Image"""'], {'required': '"""wandb.Image needs the PIL package. To get it, run "pip install pillow"."""'}), '(\'PIL.Image\', required=\n \'wandb.Image needs the PIL package. To get it, run "pip install pillow".\')\n', (21370, 21472), False, 'from wandb import util\n'), ((8963, 8998), 'wandb.util.ensure_matplotlib_figure', 'util.ensure_matplotlib_figure', (['data'], {}), '(data)\n', (8992, 8998), False, 'from wandb import util\n'), ((9182, 9210), 'wandb.util.get_full_typename', 'util.get_full_typename', (['data'], {}), '(data)\n', (9204, 9210), False, 'from wandb import util\n'), ((9236, 9321), 'wandb.util.get_module', 'util.get_module', (['"""torchvision.utils"""', '"""torchvision is required to render images"""'], {}), "('torchvision.utils', 'torchvision is required to render images'\n )\n", (9251, 9321), False, 'from wandb import util\n'), ((10075, 10093), 'wandb.util.generate_id', 'util.generate_id', ([], {}), '()\n', (10091, 10093), False, 'from wandb import util\n'), ((15662, 15674), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (15668, 15674), True, 'import numpy as np\n')]
|
import sys
import numpy as np
from matplotlib import pyplot as pl
from rw import WriteGTiff
fn = '../pozo-steep-vegetated-pcl.npy'
pts = np.load(fn)
x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5]
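# 0.2 = 1 / (5 m): convert point coordinates to integer column/row indices
# on a 5 m grid anchored at the minimum x and y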
ix = (0.2 * (x - x.min())).astype('int')
iy = (0.2 * (y - y.min())).astype('int')
shape = (100, 100)
xb = np.arange(shape[1]+1)
yb = np.arange(shape[0]+1)
fg, ax = pl.subplots(ncols = 2, nrows = 2,
figsize = (10.24, 10.24),
sharex = True, sharey = True)
uc = (2, 5)
for j in range(len(uc)):
print('Class %i' % uc[j])
b = c == uc[j]
cx, cy, cz = ix[b], iy[b], z[b]
mean = np.zeros(shape)
stdr = np.zeros(shape)
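    # Grid the points of this class into the 5 m cells and record the per-cell
    # mean and standard deviation of elevation (cells with no points end up NaN
    # and are masked before plotting).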
for i in range(shape[0]):
print('% 3d%%' % i)
for k in range(shape[1]):
b = (cy == i) * (cx == k)
mean[i, k] = cz[b].mean()
stdr[i, k] = cz[b].std()
fname = 'pozo_5m_dem_mean_cl%i.tif' % uc[j]
WriteGTiff(fname, mean, x.min(), y.min()+500, step = 5)
np.save('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean)
np.save('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr)
ax[0, j].set_title('Class %i' % uc[j])
im = ax[0, j].pcolormesh(xb, yb,
np.ma.masked_invalid(mean),
cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[0, j])
cb.set_label('Mean elevation [m]')
im = ax[1, j].pcolormesh(xb, yb,
np.ma.masked_invalid(stdr),
cmap = pl.cm.magma_r)
cb = fg.colorbar(im, ax = ax[1, j])
cb.set_label('Elevation STD')
ax[0, j].set_aspect('equal')
ax[1, j].set_aspect('equal')
pl.savefig('%s.png' % sys.argv[0][:-3])
|
[
"matplotlib.pyplot.savefig",
"numpy.zeros",
"numpy.save",
"numpy.ma.masked_invalid",
"numpy.load",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((138, 149), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (145, 149), True, 'import numpy as np\n'), ((313, 336), 'numpy.arange', 'np.arange', (['(shape[1] + 1)'], {}), '(shape[1] + 1)\n', (322, 336), True, 'import numpy as np\n'), ((340, 363), 'numpy.arange', 'np.arange', (['(shape[0] + 1)'], {}), '(shape[0] + 1)\n', (349, 363), True, 'import numpy as np\n'), ((371, 450), 'matplotlib.pyplot.subplots', 'pl.subplots', ([], {'ncols': '(2)', 'nrows': '(2)', 'figsize': '(10.24, 10.24)', 'sharex': '(True)', 'sharey': '(True)'}), '(ncols=2, nrows=2, figsize=(10.24, 10.24), sharex=True, sharey=True)\n', (382, 450), True, 'from matplotlib import pyplot as pl\n'), ((1599, 1638), 'matplotlib.pyplot.savefig', 'pl.savefig', (["('%s.png' % sys.argv[0][:-3])"], {}), "('%s.png' % sys.argv[0][:-3])\n", (1609, 1638), True, 'from matplotlib import pyplot as pl\n'), ((620, 635), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (628, 635), True, 'import numpy as np\n'), ((647, 662), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (655, 662), True, 'import numpy as np\n'), ((981, 1031), 'numpy.save', 'np.save', (["('pozo_5m_dem_mean_cl%i.npy' % uc[j])", 'mean'], {}), "('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean)\n", (988, 1031), True, 'import numpy as np\n'), ((1036, 1086), 'numpy.save', 'np.save', (["('pozo_5m_dem_stdr_cl%i.npy' % uc[j])", 'stdr'], {}), "('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr)\n", (1043, 1086), True, 'import numpy as np\n'), ((1185, 1211), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['mean'], {}), '(mean)\n', (1205, 1211), True, 'import numpy as np\n'), ((1390, 1416), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['stdr'], {}), '(stdr)\n', (1410, 1416), True, 'import numpy as np\n')]
|
import os
import random
from typing import Any, Dict, List, Union
import numpy as np
import torch
from colorama import Fore, Style
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import precision_score, recall_score
def highlight(input_: Any) -> str:
input_ = str(input_)
return str(Fore.YELLOW + str(input_) + Style.RESET_ALL)
def get_intent_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.intent_label_file), "r", encoding="utf-8"
)
]
def get_slot_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.slot_label_file), "r", encoding="utf-8"
)
]
def get_pos_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.pos_label_file), "r", encoding="utf-8"
)
]
def set_torch_seed(seed: Any, no_cuda: bool) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed) # type: ignore
if not no_cuda and torch.cuda.is_available():
torch.cuda.manual_seed_all(seed) # type: ignore
def compute_metrics(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
assert (
len(intent_preds) == len(intent_labels) == len(slot_preds) == len(slot_labels)
)
results: Dict[Any, Any] = {}
intent_result = get_intent_acc(intent_preds, intent_labels)
slot_result = get_slot_metrics(slot_preds, slot_labels)
sementic_result = get_sentence_frame_acc(
intent_preds, intent_labels, slot_preds, slot_labels
)
# New metrics added following Dan's request.
slot_simple_result = get_slot_simple_metrics(slot_preds, slot_labels)
partial_match_result = get_partial_match_metrics(slot_preds, slot_labels)
results.update(intent_result)
results.update(slot_result)
results.update(sementic_result)
results.update(slot_simple_result)
results.update(partial_match_result)
return results
def simplify_tokens(preds: List[str]) -> List[str]:
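    # Collapse position-prefixed tags ("B-TERM"/"I-TERM", "B-DEF"/"I-DEF") into
    # bare "TERM"/"DEF" labels; all other tags (e.g. "O") pass through unchanged.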
simple_preds = []
for p in preds:
if p.endswith("TERM"):
simple_preds.append("TERM")
elif p.endswith("DEF"):
simple_preds.append("DEF")
else:
simple_preds.append(p)
return simple_preds
def get_partial_match_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
    Suppose there are N <term, defn> pairs in the gold data and the system predicts M such pairs. Say a ‘partial match’ happens when the system predicts a pair <term, defn> and there is some overlap (at least one token) between the predicted and gold term spans AND some overlap between the predicted and gold definition spans. Let P be the number of partial matches. Then:
    Partial match precision = P / M
    Partial match recall = P / N
"""
assert len(preds) == len(labels)
both_in_preds, both_in_labels = [], []
partial_matches, exact_matches = [], []
for pred_sent, label_sent in zip(preds, labels):
simple_pred_sent = simplify_tokens(pred_sent)
simple_label_sent = simplify_tokens(label_sent)
# check whether term/def exist together
both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent
both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent
both_in_preds.append(both_in_pred)
both_in_labels.append(both_in_label)
partial_match = False
exact_match = False
match: List[Union[str, bool]] = []
if both_in_pred and both_in_label:
for p, l in zip(simple_pred_sent, simple_label_sent):
if p == l:
match.append(p)
else:
match.append(False)
if "TERM" in match and "DEF" in match:
partial_match = True
if False not in match:
exact_match = True
partial_matches.append(partial_match)
exact_matches.append(exact_match)
    count_both_in_preds = sum(both_in_preds) # M (predicted pairs)
    count_both_in_labels = sum(both_in_labels) # N (gold pairs)
count_partial_matches = sum(partial_matches) # P
count_exact_matches = sum(exact_matches) # E
partial_precision = count_partial_matches / count_both_in_preds
partial_recall = count_partial_matches / count_both_in_labels
partial_fscore = (
2 * partial_precision * partial_recall / (partial_precision + partial_recall)
)
exact_precision = count_exact_matches / count_both_in_preds
exact_recall = count_exact_matches / count_both_in_labels
exact_fscore = 2 * exact_precision * exact_recall / (exact_precision + exact_recall)
return {
"partial_match_precision": partial_precision,
"partial_match_recall": partial_recall,
"partial_match_f1": partial_fscore,
"exact_match_precision": exact_precision,
"excat_match_recall": exact_recall,
"excat_match_f1": exact_fscore,
}
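# Illustrative sketch (not part of the original module): a tiny hand-made call
# showing how partial vs. exact matches are counted. Sentence 1 overlaps the gold
# annotation on both the term and the definition span (a partial but not exact
# match); sentence 2 matches exactly. With M = N = 2 pairs, P = 2 and E = 1:
#
#   preds  = [["B-TERM", "O", "B-DEF", "I-DEF"], ["B-TERM", "I-TERM", "B-DEF"]]
#   labels = [["B-TERM", "I-TERM", "B-DEF", "O"], ["B-TERM", "I-TERM", "B-DEF"]]
#   get_partial_match_metrics(preds, labels)
#   # -> partial_match_precision == 1.0, partial_match_recall == 1.0,
#   #    exact_match_precision == 0.5, exact_match_recall == 0.5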
def get_slot_simple_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Conceptually, define the following new types of ‘virtual tags’
TERM = B-term OR I-Term (ie the union of those two tags)
DEF = B-Def OR I-Def
    Now, what are the P, R & F1 numbers for TERM and DEF? (These matter because users may just care about the accuracy of term and definition matching, while the macro-averaged scores conflate other things, such as recall on these tags and precision on O. Likewise, the current macro average treats missing the first word in a definition differently from skipping the last word.)
"""
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
# simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF
simple_preds = simplify_tokens(preds_flattened)
simple_labels = simplify_tokens(labels_flattened)
assert len(simple_preds) == len(simple_labels)
label_names = ["O", "TERM", "DEF"]
p, r, f, s = score(simple_labels, simple_preds, average=None, labels=label_names)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# pprint(per_class)
return {
"slot_merged_TERM_precision": per_class["p"][1],
"slot_merged_TERM_recall": per_class["r"][1],
"slot_merged_TERM_f1": per_class["f"][1],
"slot_merged_DEFINITION_precision": per_class["p"][2],
"slot_merged_DEFINITION_recall": per_class["r"][2],
"slot_merged_DEFINITION_f1": per_class["f"][2],
}
def get_slot_metrics(preds: List[List[str]], labels: List[List[str]]) -> Dict[Any, Any]:
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
macro_f1 = f1_score(labels_flattened, preds_flattened, average="macro")
micro_f1 = f1_score(labels_flattened, preds_flattened, average="micro")
macro_p = precision_score(labels_flattened, preds_flattened, average="macro")
micro_p = precision_score(labels_flattened, preds_flattened, average="micro")
macro_r = recall_score(labels_flattened, preds_flattened, average="macro")
micro_r = recall_score(labels_flattened, preds_flattened, average="micro")
label_names = ["O", "B-TERM", "I-TERM", "B-DEF", "I-DEF"]
p, r, f, s = score(
labels_flattened, preds_flattened, average=None, labels=label_names
)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# print(per_class)
return {
"slot_precision_macro": macro_p,
"slot_recall_macro": macro_r,
"slot_f1_macro": macro_f1,
"slot_precision_micro": micro_p,
"slot_recall_micro": micro_r,
"slot_f1_micro": micro_f1,
"slot_precision_per_label": per_class["p"],
"slot_recal_per_label": per_class["r"],
"slot_f1_per_label": per_class["f"],
"slot_num_per_label": per_class["s"],
}
def get_intent_acc(preds: List[str], labels: List[str]) -> Dict[Any, Any]:
acc = (preds == labels).mean()
return {"intent_acc": acc}
def read_prediction_text(args: Any) -> List[str]:
return [
text.strip()
for text in open(
os.path.join(args.pred_dir, args.pred_input_file), "r", encoding="utf-8"
)
]
def get_sentence_frame_acc(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
"""For the cases that intent and all the slots are correct (in one sentence)"""
# Get the intent comparison result
intent_result = intent_preds == intent_labels
    # Get the slot comparison result
slot_result = []
for preds, labels in zip(slot_preds, slot_labels):
assert len(preds) == len(labels)
one_sent_result = True
for p, l in zip(preds, labels):
if p != l:
one_sent_result = False
break
slot_result.append(one_sent_result)
slot_result = np.array(slot_result)
sementic_acc = np.multiply(intent_result, slot_result).mean()
return {"sementic_frame_acc": sementic_acc}
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"numpy.multiply",
"sklearn.metrics.f1_score",
"sklearn.metrics.precision_recall_fscore_support",
"os.path.join",
"random.seed",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"torch.cuda.is_available",
"numpy.random.seed"
] |
[((1110, 1127), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1121, 1127), False, 'import random\n'), ((1132, 1152), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1146, 1152), True, 'import numpy as np\n'), ((1157, 1180), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1174, 1180), False, 'import torch\n'), ((6364, 6432), 'sklearn.metrics.precision_recall_fscore_support', 'score', (['simple_labels', 'simple_preds'], {'average': 'None', 'labels': 'label_names'}), '(simple_labels, simple_preds, average=None, labels=label_names)\n', (6369, 6432), True, 'from sklearn.metrics import precision_recall_fscore_support as score\n'), ((7315, 7375), 'sklearn.metrics.f1_score', 'f1_score', (['labels_flattened', 'preds_flattened'], {'average': '"""macro"""'}), "(labels_flattened, preds_flattened, average='macro')\n", (7323, 7375), False, 'from sklearn.metrics import f1_score\n'), ((7391, 7451), 'sklearn.metrics.f1_score', 'f1_score', (['labels_flattened', 'preds_flattened'], {'average': '"""micro"""'}), "(labels_flattened, preds_flattened, average='micro')\n", (7399, 7451), False, 'from sklearn.metrics import f1_score\n'), ((7466, 7533), 'sklearn.metrics.precision_score', 'precision_score', (['labels_flattened', 'preds_flattened'], {'average': '"""macro"""'}), "(labels_flattened, preds_flattened, average='macro')\n", (7481, 7533), False, 'from sklearn.metrics import precision_score, recall_score\n'), ((7548, 7615), 'sklearn.metrics.precision_score', 'precision_score', (['labels_flattened', 'preds_flattened'], {'average': '"""micro"""'}), "(labels_flattened, preds_flattened, average='micro')\n", (7563, 7615), False, 'from sklearn.metrics import precision_score, recall_score\n'), ((7630, 7694), 'sklearn.metrics.recall_score', 'recall_score', (['labels_flattened', 'preds_flattened'], {'average': '"""macro"""'}), "(labels_flattened, preds_flattened, average='macro')\n", (7642, 7694), False, 'from sklearn.metrics import precision_score, recall_score\n'), ((7709, 7773), 'sklearn.metrics.recall_score', 'recall_score', (['labels_flattened', 'preds_flattened'], {'average': '"""micro"""'}), "(labels_flattened, preds_flattened, average='micro')\n", (7721, 7773), False, 'from sklearn.metrics import precision_score, recall_score\n'), ((7854, 7928), 'sklearn.metrics.precision_recall_fscore_support', 'score', (['labels_flattened', 'preds_flattened'], {'average': 'None', 'labels': 'label_names'}), '(labels_flattened, preds_flattened, average=None, labels=label_names)\n', (7859, 7928), True, 'from sklearn.metrics import precision_recall_fscore_support as score\n'), ((9714, 9735), 'numpy.array', 'np.array', (['slot_result'], {}), '(slot_result)\n', (9722, 9735), True, 'import numpy as np\n'), ((1220, 1245), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1243, 1245), False, 'import torch\n'), ((1255, 1287), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1281, 1287), False, 'import torch\n'), ((9756, 9795), 'numpy.multiply', 'np.multiply', (['intent_result', 'slot_result'], {}), '(intent_result, slot_result)\n', (9767, 9795), True, 'import numpy as np\n'), ((541, 592), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.intent_label_file'], {}), '(args.data_dir, args.intent_label_file)\n', (553, 592), False, 'import os\n'), ((753, 802), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.slot_label_file'], {}), '(args.data_dir, args.slot_label_file)\n', (765, 802), False, 'import os\n'), 
((962, 1010), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.pos_label_file'], {}), '(args.data_dir, args.pos_label_file)\n', (974, 1010), False, 'import os\n'), ((8901, 8950), 'os.path.join', 'os.path.join', (['args.pred_dir', 'args.pred_input_file'], {}), '(args.pred_dir, args.pred_input_file)\n', (8913, 8950), False, 'import os\n')]
|
###############################################################################
# @todo add Pilot2-splash-app disclaimer
###############################################################################
""" Get's KRAS states """
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.lib.mdamath import make_whole
import os
import numpy as np
import math
############## Below section needs to be uncommented ############
import mummi_core
import mummi_ras
from mummi_core.utils import Naming
# # Logger has to be initialized the first thing in the script
from logging import getLogger
LOGGER = getLogger(__name__)
# # Initialize MuMMI if it has not been done before
# MUMMI_ROOT = mummi.init(True)
# This is needed so the Naming works below
# @TODO fix this so we don't have these on import; make them part of an init
mummi_core.init()
dirKRASStates = Naming.dir_res('states')
dirKRASStructures = Naming.dir_res('structures')
# #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt"))
RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-states.txt"),comments='#')
# #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt"))
RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-raf-states.txt"),comments='#') # Note: different number of columns, so index changes below
# TODO: CS, my edits to test
# RAS_ONLY_macrostate = np.loadtxt('ras-states.txt')
# RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt')
############## above section needs to be uncommented ############
# TODO: CS, my edits to test
# TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro'
# TODO: TSC, path to the reference structure is: mummi_resources/structures/
kras_ref_universe = mda.Universe(os.path.join(dirKRASStructures, "RAS-ONLY-reference-structure.gro"))
# kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro")
# kras_ref_universe = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro')
# TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res
######### Below hard codes the number of residues within RAS-only and RAS-RAF ##########
RAS_only_num_res = 184
RAS_RAF_num_res = 320
######### Above hard codes the number of residues within RAS-only and RAS-RAF ##########
####### This can be removed
# def get_kras(syst, kras_start):
# """Gets all atoms for a KRAS protein starting at 'kras_start'."""
# return syst.atoms[kras_start:kras_start+428]
####### This can be removed
def get_segids(u):
"""Identifies the list of segments within the system. Only needs to be called x1 time"""
segs = u.segments
segs = segs.segids
ras_segids = []
rasraf_segids = []
for i in range(len(segs)):
# print(segs[i])
if segs[i][-3:] == 'RAS':
ras_segids.append(segs[i])
if segs[i][-3:] == 'RAF':
rasraf_segids.append(segs[i])
return ras_segids, rasraf_segids
def get_protein_info(u,tag):
"""Uses the segments identified in get_segids to make a list of all proteins in the systems.\
Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\
The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\
Only needs to be called x1 time"""
ras_segids, rasraf_segids = get_segids(u)
if len(ras_segids) > 0:
RAS = u.select_atoms('segid '+ras_segids[0]+' and '+str(tag))
else:
RAS = []
if len(rasraf_segids) > 0:
RAF = u.select_atoms('segid '+rasraf_segids[0]+' and '+str(tag))
else:
RAF = []
protein_info = []#np.empty([len(RAS)+len(RAF),2])
for i in range(len(RAS)):
protein_info.append((RAS[i].resid,'RAS-ONLY'))
for i in range(len(RAF)):
protein_info.append((RAF[i].resid,'RAS-RAF'))
######## sort protein info
protein_info = sorted(protein_info)
######## sort protein info
return protein_info
def get_ref_kras():
"""Gets the reference KRAS struct. Only called x1 time when class is loaded"""
start_of_g_ref = kras_ref_universe.residues[0].resid
ref_selection = 'resid '+str(start_of_g_ref)+':'+str(start_of_g_ref+24)+' ' +\
str(start_of_g_ref+38)+':'+str(start_of_g_ref+54)+' ' +\
str(start_of_g_ref+67)+':'+str(start_of_g_ref+164)+' ' +\
'and (name CA or name BB)'
r2_26r40_56r69_166_ref = kras_ref_universe.select_atoms(str(ref_selection))
return kras_ref_universe.select_atoms(str(ref_selection)).positions - kras_ref_universe.select_atoms(str(ref_selection)).center_of_mass()
# Load initial ref frames (only need to do this once)
ref0 = get_ref_kras()
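# Illustrative sketch (not part of the original script): typical driver code for
# the helpers above and getKRASstates below, assuming 'snapshot.gro' is a
# hypothetical file readable by MDAnalysis and that the first bead of each
# protein is tagged by 'resname ACE1 and name BB' (see get_protein_info).
#
#   u = mda.Universe('snapshot.gro')
#   kras_indices = get_protein_info(u, 'resname ACE1 and name BB')
#   for ts in u.trajectory:
#       states = getKRASstates(u, kras_indices)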
def getKRASstates(u,kras_indices):
"""Gets states for all KRAS proteins in path."""
# res_shift = 8
# all_glycine = u.select_atoms("resname GLY")
# kras_indices = []
# for i in range(0, len(all_glycine), 26):
# kras_indices.append(all_glycine[i].index)
########## Below is taken out of the function so it is only done once #########
# kras_indices = get_protein_info(u,'resname ACE1 and name BB')
########## Above is taken out of the function so it is only done once #########
# CS, for x4 cases:
# [{protein_x4: (protein_type, num_res)}]
protein_systems = [{'ras4a': ('RAS-ONLY', 185),
'ras4araf': ('RAS-RAF', 321),
'ras': ('RAS-ONLY', 184),
'rasraf': ('RAS-RAF', 320)}]
ALLOUT = []
for k in range(len(kras_indices)):
start_of_g = kras_indices[k][0]
protein_x4 = str(kras_indices[k][1])
try:
protein_type = [item[protein_x4] for item in protein_systems][0][0] # 'RAS-ONLY' OR 'RAS-RAF'
num_res = [item[protein_x4] for item in protein_systems][0][1]
except:
LOGGER.error('Check KRas naming between modules')
raise Exception('Error: unknown KRas name')
# TODO: CS, replacing this comment section with the above, to handle x4 protein types
# ---------------------------------------
# ALLOUT = []
# for k in range(len(kras_indices)):
# start_of_g = kras_indices[k][0]
# protein_type = str(kras_indices[k][1])
# ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
# ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) #######
# ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ########
# # if len(kras_indices) == 1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN
# # elif len(kras_indices) > 1:
# # if k == len(kras_indices)-1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB')
# # else:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB')
# ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
#
# ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# if protein_type == 'RAS-ONLY':
# num_res = RAS_only_num_res
# elif protein_type == 'RAS-RAF':
# num_res = RAS_RAF_num_res
# ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# ---------------------------------------
# TODO: TSC, I changed the selection below, which can be used for the make_whole...
# krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)')
krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res))
krases0_BB.guess_bonds()
r2_26r40_56r69_166 = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+24)+' ' +\
str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+\
' and (name CA or name BB)')
u_selection = \
'resid '+str(start_of_g)+':'+str(start_of_g+24)+' '+str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+' and (name CA or name BB)'
mobile0 = u.select_atoms(str(u_selection)).positions - u.select_atoms(str(u_selection)).center_of_mass()
# TODO: CS, something wrong with ref0 from get_kras_ref()
# just making ref0 = mobile0 to test for now
# ref0 = mobile0
# TSC removed this
R, RMSD_junk = align.rotation_matrix(mobile0, ref0)
######## TODO: TSC, Adjusted for AA lipid names ########
# lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL')
lipids = u.select_atoms('resname POPC PAPC POPE DIPE SSM PAPS SAPI CHL1')
coords = ref0
RotMat = []
OS = []
r152_165 = krases0_BB.select_atoms('resid '+str(start_of_g+150)+':'+str(start_of_g+163)+' and (name CA or name BB)')
r65_74 = krases0_BB.select_atoms('resid '+str(start_of_g+63)+':'+str(start_of_g+72)+' and (name CA or name BB)')
timeframes = []
# TODO: CS, for AA need bonds to run make_whole()
# krases0_BB.guess_bonds()
# TODO: CS, turn off for now to test beyond this point
''' *** for AA, need to bring that back on once all else runs ***
'''
# @Tim and <NAME>. this was commented out - please check.
#make_whole(krases0_BB)
j, rmsd_junk = mda.analysis.align.rotation_matrix((r2_26r40_56r69_166.positions-r2_26r40_56r69_166.center_of_mass()), coords)
RotMat.append(j)
OS.append(r65_74.center_of_mass()-r152_165.center_of_mass())
timeframes.append(u.trajectory.time)
if protein_type == 'RAS-RAF':
z_pos = []
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW ####################
############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA
#zshifting=-1
if protein_x4 == 'rasraf':
zshifting = -1
elif protein_x4 == 'ras4araf':
zshifting = 0
else:
zshifting = 0
LOGGER.error('Found unsupported protein_x4 type')
raf_loops_selection = u.select_atoms('resid '+str(start_of_g+zshifting+291)+':'+str(start_of_g+zshifting+294)+' ' +\
str(start_of_g+zshifting+278)+':'+str(start_of_g+zshifting+281)+' ' +\
' and (name CA or name BB)')
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE ####################
diff = (lipids.center_of_mass()[2]-raf_loops_selection.center_of_mass(unwrap=True)[2])/10
if diff < 0:
diff = diff+(u.dimensions[2]/10)
z_pos.append(diff)
z_pos = np.array(z_pos)
RotMatNP = np.array(RotMat)
OS = np.array(OS)
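        # Note (added comment): OA below is the normalised third row of each
        # alignment rotation matrix, and OWAS is the angle between that vector
        # and the z axis (arccos of its z component), converted to degrees.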
OA = RotMatNP[:, 2, :]/(((RotMatNP[:, 2, 0]**2)+(RotMatNP[:, 2, 1]**2)+(RotMatNP[:, 2, 2]**2))**0.5)[:, None]
OWAS = np.arccos(RotMatNP[:, 2, 2])*180/math.pi
OC_temp = np.concatenate((OA, OS), axis=1)
t = ((OC_temp[:, 0]*OC_temp[:, 3])+(OC_temp[:, 1]*OC_temp[:, 4]) +
(OC_temp[:, 2]*OC_temp[:, 5]))/((OC_temp[:, 0]**2)+(OC_temp[:, 1]**2)+(OC_temp[:, 2]**2))
OC = OA*t[:, None]
ORS_tp = np.concatenate((OC, OS), axis=1)
ORS_norm = (((ORS_tp[:, 3]-ORS_tp[:, 0])**2)+((ORS_tp[:, 4]-ORS_tp[:, 1])**2)+((ORS_tp[:, 5]-ORS_tp[:, 2])**2))**0.5
ORS = (OS - OC)/ORS_norm[:, None]
OACRS = np.cross(OA, ORS)
OZCA = OA * OA[:, 2][:, None]
Z_unit = np.full([len(OZCA), 3], 1)
Z_adjust = np.array([0, 0, 1])
Z_unit = Z_unit*Z_adjust
Z_OZCA = Z_unit-OZCA
OZPACB = Z_OZCA/((Z_OZCA[:, 0]**2+Z_OZCA[:, 1]**2+Z_OZCA[:, 2]**2)**0.5)[:, None]
OROTNOTSIGNED = np.zeros([len(ORS)])
for i in range(len(ORS)):
OROTNOTSIGNED[i] = np.arccos(np.dot(OZPACB[i, :], ORS[i, :]) /
(np.sqrt(np.dot(OZPACB[i, :], OZPACB[i, :]))) *
(np.sqrt(np.dot(ORS[i, :], ORS[i, :]))))*180/math.pi
OZPACBCRS_cross = np.cross(OZPACB, ORS)
OZPACBCRS = OZPACBCRS_cross/((OZPACBCRS_cross[:, 0]**2+OZPACBCRS_cross[:, 1]**2+OZPACBCRS_cross[:, 2]**2)**0.5)[:, None]
OFORSIGN_temp = (OA - OZPACBCRS)**2
OFORSIGN = OFORSIGN_temp[:, 0]+OFORSIGN_temp[:, 1]+OFORSIGN_temp[:, 2]
OROT = OROTNOTSIGNED
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = -(OROT[i])
for i in range(len(OROT)):
if OFORSIGN[i] < 0.25:
OROT[i] = -(OROT[i])
###### Below introduces new shift to account for upper vs. lower leaflet #####
for i in range(len(OWAS)):
OWAS[i] = abs(-(OWAS[i])+180) # made this an absolute value so that the tilt remains positive
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = OROT[i]+180
elif OROT[i] > 0:
OROT[i] = OROT[i]-180
###### Above introduces new shift to account for upper vs. lower leaflet #####
###### Below might have to be updated to take into account the periodic nature of the rotation ######
if protein_type == 'RAS-ONLY':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
diff0 = []
for i in range(len(RAS_ONLY_macrostate)):
#diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]])
diff0.append([((RAS_ONLY_macrostate[i,1]-OWAS[j])**2+(RAS_ONLY_macrostate[i,0]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,5]])
diff0.sort()
states[j] = diff0[0][1]
elif protein_type == 'RAS-RAF':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
### below: adding in the requirements for the 'high-z' state ###
if (OROT[j] < -45 or OROT[j] > 140) and z_pos[j] > 4.8:
states[j] = 3
else:
### above: adding in the requirements for the 'high-z' state ###
diff0 = []
for i in range(len(RAS_RAF_macrostate)):
#diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]])
diff0.append([((RAS_RAF_macrostate[i,1]-OWAS[j])**2+(RAS_RAF_macrostate[i,0]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,4]])
diff0.sort()
states[j] = diff0[0][1]
###### Above might have to be updated to take into account the periodic nature of the rotation ######
###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ######
###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 #######
# four_states = np.zeros(len(OROT))
# for j in range(len(OROT)):
# diff0 = []
# for i in range(len(macrostate4)):
# diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]])
# diff0.sort()
# four_states[j] = diff0[0][1]+1
###### below: old output details.... ######################################
###### Updated - RAS-only to NOT HAVE the Z-distance ######################
###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF #####
# OUTPUT = np.zeros([len(OROT), 6])
# for i in range(len(OROT)):
# OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i]
###### above: old output details.... ######################################
###### below: NEW output details.... ######################################
if protein_type == 'RAS-ONLY':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], 'n/a', int(states[i])
elif protein_type == 'RAS-RAF':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], z_pos[i], int(states[i])
ALLOUT.append(OUTPUT)
return np.asarray(ALLOUT)
#np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ')
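    # Note (added comment): each row of the array returned above holds, per KRAS
    # protein and frame: (protein tag, simulation time, tilt angle OWAS in degrees,
    # rotation angle OROT in degrees, RAF-loop z distance or 'n/a' for RAS-ONLY,
    # macrostate index).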
|
[
"logging.getLogger",
"MDAnalysis.analysis.align.rotation_matrix",
"mummi_core.utils.Naming.dir_res",
"numpy.arccos",
"numpy.cross",
"numpy.asarray",
"os.path.join",
"numpy.array",
"numpy.dot",
"mummi_core.init",
"numpy.concatenate"
] |
[((621, 640), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (630, 640), False, 'from logging import getLogger\n'), ((840, 857), 'mummi_core.init', 'mummi_core.init', ([], {}), '()\n', (855, 857), False, 'import mummi_core\n'), ((875, 899), 'mummi_core.utils.Naming.dir_res', 'Naming.dir_res', (['"""states"""'], {}), "('states')\n", (889, 899), False, 'from mummi_core.utils import Naming\n'), ((920, 948), 'mummi_core.utils.Naming.dir_res', 'Naming.dir_res', (['"""structures"""'], {}), "('structures')\n", (934, 948), False, 'from mummi_core.utils import Naming\n'), ((1075, 1120), 'os.path.join', 'os.path.join', (['dirKRASStates', '"""ras-states.txt"""'], {}), "(dirKRASStates, 'ras-states.txt')\n", (1087, 1120), False, 'import os\n'), ((1259, 1308), 'os.path.join', 'os.path.join', (['dirKRASStates', '"""ras-raf-states.txt"""'], {}), "(dirKRASStates, 'ras-raf-states.txt')\n", (1271, 1308), False, 'import os\n'), ((1830, 1897), 'os.path.join', 'os.path.join', (['dirKRASStructures', '"""RAS-ONLY-reference-structure.gro"""'], {}), "(dirKRASStructures, 'RAS-ONLY-reference-structure.gro')\n", (1842, 1897), False, 'import os\n'), ((17304, 17322), 'numpy.asarray', 'np.asarray', (['ALLOUT'], {}), '(ALLOUT)\n', (17314, 17322), True, 'import numpy as np\n'), ((9165, 9201), 'MDAnalysis.analysis.align.rotation_matrix', 'align.rotation_matrix', (['mobile0', 'ref0'], {}), '(mobile0, ref0)\n', (9186, 9201), False, 'from MDAnalysis.analysis import align\n'), ((11647, 11663), 'numpy.array', 'np.array', (['RotMat'], {}), '(RotMat)\n', (11655, 11663), True, 'import numpy as np\n'), ((11678, 11690), 'numpy.array', 'np.array', (['OS'], {}), '(OS)\n', (11686, 11690), True, 'import numpy as np\n'), ((11883, 11915), 'numpy.concatenate', 'np.concatenate', (['(OA, OS)'], {'axis': '(1)'}), '((OA, OS), axis=1)\n', (11897, 11915), True, 'import numpy as np\n'), ((12138, 12170), 'numpy.concatenate', 'np.concatenate', (['(OC, OS)'], {'axis': '(1)'}), '((OC, OS), axis=1)\n', (12152, 12170), True, 'import numpy as np\n'), ((12354, 12371), 'numpy.cross', 'np.cross', (['OA', 'ORS'], {}), '(OA, ORS)\n', (12362, 12371), True, 'import numpy as np\n'), ((12473, 12492), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (12481, 12492), True, 'import numpy as np\n'), ((13009, 13030), 'numpy.cross', 'np.cross', (['OZPACB', 'ORS'], {}), '(OZPACB, ORS)\n', (13017, 13030), True, 'import numpy as np\n'), ((11611, 11626), 'numpy.array', 'np.array', (['z_pos'], {}), '(z_pos)\n', (11619, 11626), True, 'import numpy as np\n'), ((11824, 11852), 'numpy.arccos', 'np.arccos', (['RotMatNP[:, 2, 2]'], {}), '(RotMatNP[:, 2, 2])\n', (11833, 11852), True, 'import numpy as np\n'), ((12765, 12796), 'numpy.dot', 'np.dot', (['OZPACB[i, :]', 'ORS[i, :]'], {}), '(OZPACB[i, :], ORS[i, :])\n', (12771, 12796), True, 'import numpy as np\n'), ((12938, 12966), 'numpy.dot', 'np.dot', (['ORS[i, :]', 'ORS[i, :]'], {}), '(ORS[i, :], ORS[i, :])\n', (12944, 12966), True, 'import numpy as np\n'), ((12849, 12883), 'numpy.dot', 'np.dot', (['OZPACB[i, :]', 'OZPACB[i, :]'], {}), '(OZPACB[i, :], OZPACB[i, :])\n', (12855, 12883), True, 'import numpy as np\n')]
|
"""
Binary serialization
NPY format
==========
A simple format for saving numpy arrays to disk with the full
information about them.
The ``.npy`` format is the standard binary file format in NumPy for
persisting a *single* arbitrary NumPy array on disk. The format stores all
of the shape and dtype information necessary to reconstruct the array
correctly even on another machine with a different architecture.
The format is designed to be as simple as possible while achieving
its limited goals.
The ``.npz`` format is the standard format for persisting *multiple* NumPy
arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
files, one for each array.
Capabilities
------------
- Can represent all NumPy arrays including nested record arrays and
object arrays.
- Represents the data in its native binary form.
- Supports Fortran-contiguous arrays directly.
- Stores all of the necessary information to reconstruct the array
including shape and dtype on a machine of a different
architecture. Both little-endian and big-endian arrays are
supported, and a file with little-endian numbers will yield
a little-endian array on any machine reading the file. The
types are described in terms of their actual sizes. For example,
if a machine with a 64-bit C "long int" writes out an array with
"long ints", a reading machine with 32-bit C "long ints" will yield
an array with 64-bit integers.
- Is straightforward to reverse engineer. Datasets often live longer than
the programs that created them. A competent developer should be
able to create a solution in their preferred programming language to
read most ``.npy`` files that they have been given without much
documentation.
- Allows memory-mapping of the data. See `open_memmap`.
- Can be read from a filelike stream object instead of an actual file.
- Stores object arrays, i.e. arrays containing elements that are arbitrary
  Python objects. Files with object arrays cannot be memory-mapped, but
can be read and written to disk.
Limitations
-----------
- Arbitrary subclasses of numpy.ndarray are not completely preserved.
Subclasses will be accepted for writing, but only the array data will
be written out. A regular numpy.ndarray object will be created
upon reading the file.
.. warning::
Due to limitations in the interpretation of structured dtypes, dtypes
with fields with empty names will have the names replaced by 'f0', 'f1',
etc. Such arrays will not round-trip through the format entirely
accurately. The data is intact; only the field names will differ. We are
working on a fix for this. This fix will not require a change in the
file format. The arrays with such structures can still be saved and
restored, and the correct dtype may be restored by using the
``loadedarray.view(correct_dtype)`` method.
File extensions
---------------
We recommend using the ``.npy`` and ``.npz`` extensions for files saved
in this format. This is by no means a requirement; applications may wish
to use these file formats but use an extension specific to the
application. In the absence of an obvious alternative, however,
we suggest using ``.npy`` and ``.npz``.
Version numbering
-----------------
The version numbering of these formats is independent of NumPy version
numbering. If the format is upgraded, the code in `numpy.io` will still
be able to read and write Version 1.0 files.
Format Version 1.0
------------------
The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
The next 1 byte is an unsigned byte: the major version number of the file
format, e.g. ``\\x01``.
The next 1 byte is an unsigned byte: the minor version number of the file
format, e.g. ``\\x00``. Note: the version of the file format is not tied
to the version of the numpy package.
The next 2 bytes form a little-endian unsigned short int: the length of
the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
spaces (``\\x20``) to make the total of
``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
by 64 for alignment purposes.
The dictionary contains three keys:
"descr" : dtype.descr
An object that can be passed as an argument to the `numpy.dtype`
constructor to create the array's dtype.
"fortran_order" : bool
Whether the array data is Fortran-contiguous or not. Since
Fortran-contiguous arrays are a common form of non-C-contiguity,
we allow them to be written directly to disk for efficiency.
"shape" : tuple of int
The shape of the array.
For repeatability and readability, the dictionary keys are sorted in
alphabetic order. This is for convenience only. A writer SHOULD implement
this if possible. A reader MUST NOT depend on this.
Following the header comes the array data. If the dtype contains Python
objects (i.e. ``dtype.hasobject is True``), then the data is a Python
pickle of the array. Otherwise the data is the contiguous (either C-
or Fortran-, depending on ``fortran_order``) bytes of the array.
Consumers can figure out the number of bytes by multiplying the number
of elements given by the shape (noting that ``shape=()`` means there is
1 element) by ``dtype.itemsize``.
Format Version 2.0
------------------
The version 1.0 format only allowed the array header to have a total size of
65535 bytes. This can be exceeded by structured arrays with a large number of
columns. The version 2.0 format extends the header size to 4 GiB.
`numpy.save` will automatically save in 2.0 format if the data requires it,
else it will always use the more compatible 1.0 format.
The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
Format Version 3.0
------------------
This version replaces the ASCII string (which in practice was latin1) with
a utf8-encoded string, so supports structured types with any unicode field
names.
Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
alternatives, is described in the
:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
evolved with time and this document is more current.
"""
import numpy
import io
import warnings
from numpy.lib.utils import safe_eval
from numpy.compat import (
isfileobj, os_fspath, pickle
)
__all__ = []
EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
_header_size_info = {
(1, 0): ('<H', 'latin1'),
(2, 0): ('<I', 'latin1'),
(3, 0): ('<I', 'utf8'),
}
def _check_version(version):
if version not in [(1, 0), (2, 0), (3, 0), None]:
msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
""" Return the magic string for the given file format version.
Parameters
----------
major : int in [0, 255]
minor : int in [0, 255]
Returns
-------
magic : str
Raises
------
ValueError if the version cannot be formatted.
"""
if major < 0 or major > 255:
raise ValueError("major version must be 0 <= major < 256")
if minor < 0 or minor > 255:
raise ValueError("minor version must be 0 <= minor < 256")
return MAGIC_PREFIX + bytes([major, minor])
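# Illustrative example (not part of the original module):
#   magic(1, 0) == b'\x93NUMPY\x01\x00'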
def read_magic(fp):
""" Read the magic string to get the version of the file format.
Parameters
----------
fp : filelike object
Returns
-------
major : int
minor : int
"""
magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
if magic_str[:-2] != MAGIC_PREFIX:
msg = "the magic string is not correct; expected %r, got %r"
raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
major, minor = magic_str[-2:]
return major, minor
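# Illustrative sketch (not part of the original module): parse the version 1.0
# header fields by hand, following the byte layout described in the module
# docstring above. `path` names any existing .npy file (hypothetical input);
# real readers should use read_magic and _read_array_header instead.
def _example_parse_npy_header_v1(path):
    import ast
    import struct
    with open(path, 'rb') as f:
        assert f.read(6) == b'\x93NUMPY'            # magic string
        major, minor = f.read(1)[0], f.read(1)[0]   # file format version bytes
        (hlen,) = struct.unpack('<H', f.read(2))    # v1.0: 2-byte little-endian header length
        header = ast.literal_eval(f.read(hlen).decode('latin1'))
    return (major, minor), header['shape'], header['fortran_order'], header['descr']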
def _has_metadata(dt):
if dt.metadata is not None:
return True
elif dt.names is not None:
return any(_has_metadata(dt[k]) for k in dt.names)
elif dt.subdtype is not None:
return _has_metadata(dt.base)
else:
return False
def dtype_to_descr(dtype):
"""
Get a serializable descriptor from the dtype.
The .descr attribute of a dtype object cannot be round-tripped through
the dtype() constructor. Simple types, like dtype('float32'), have
a descr which looks like a record array with one field with '' as
a name. The dtype() constructor interprets this as a request to give
a default name. Instead, we construct descriptor that can be passed to
dtype().
Parameters
----------
dtype : dtype
The dtype of the array that will be written to disk.
Returns
-------
descr : object
An object that can be passed to `numpy.dtype()` in order to
replicate the input dtype.
"""
if _has_metadata(dtype):
warnings.warn("metadata on a dtype may be saved or ignored, but will "
"raise if saved when read. Use another form of storage.",
UserWarning, stacklevel=2)
if dtype.names is not None:
# This is a record array. The .descr is fine. XXX: parts of the
# record array with an empty name, like padding bytes, still get
# fiddled with. This needs to be fixed in the C implementation of
# dtype().
return dtype.descr
else:
return dtype.str
def descr_to_dtype(descr):
"""
Returns a dtype based off the given description.
This is essentially the reverse of `dtype_to_descr()`. It will remove
    the valueless padding fields (created, for example, by simple fields like
    dtype('float32')) and then convert the description to its corresponding
dtype.
Parameters
----------
descr : object
        The object retrieved by dtype.descr. Can be passed to
`numpy.dtype()` in order to replicate the input dtype.
Returns
-------
dtype : dtype
The dtype constructed by the description.
"""
if isinstance(descr, str):
# No padding removal needed
return numpy.dtype(descr)
elif isinstance(descr, tuple):
# subtype, will always have a shape descr[1]
dt = descr_to_dtype(descr[0])
return numpy.dtype((dt, descr[1]))
titles = []
names = []
formats = []
offsets = []
offset = 0
for field in descr:
if len(field) == 2:
name, descr_str = field
dt = descr_to_dtype(descr_str)
else:
name, descr_str, shape = field
dt = numpy.dtype((descr_to_dtype(descr_str), shape))
# Ignore padding bytes, which will be void bytes with '' as name
        # Once support for blank names is removed, only "if name == ''" is needed.
is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
if not is_pad:
title, name = name if isinstance(name, tuple) else (None, name)
titles.append(title)
names.append(name)
formats.append(dt)
offsets.append(offset)
offset += dt.itemsize
return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
'offsets': offsets, 'itemsize': offset})
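# Illustrative round-trip sketch (not part of the original module): for ordinary
# structured dtypes the two helpers above are inverses of each other, e.g.
#   dt = numpy.dtype([('x', '<f8'), ('y', '<i4')])
#   descr_to_dtype(dtype_to_descr(dt)) == dt   # True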
def header_data_from_array_1_0(array):
""" Get the dictionary of header metadata from a numpy.ndarray.
Parameters
----------
array : numpy.ndarray
Returns
-------
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
"""
d = {'shape': array.shape}
if array.flags.c_contiguous:
d['fortran_order'] = False
elif array.flags.f_contiguous:
d['fortran_order'] = True
else:
# Totally non-contiguous data. We will have to make it C-contiguous
# before writing. Note that we need to test for C_CONTIGUOUS first
# because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
d['fortran_order'] = False
d['descr'] = dtype_to_descr(array.dtype)
return d
def _wrap_header(header, version):
"""
Takes a stringified header, and attaches the prefix and padding to it
"""
import struct
assert version is not None
fmt, encoding = _header_size_info[version]
if not isinstance(header, bytes): # always true on python 3
header = header.encode(encoding)
hlen = len(header) + 1
padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
try:
header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
except struct.error:
msg = "Header length {} too big for version={}".format(hlen, version)
raise ValueError(msg) from None
# Pad the header with spaces and a final newline such that the magic
# string, the header-length short and the header are aligned on a
# ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
# aligned up to ARRAY_ALIGN on systems like Linux where mmap()
# offset must be page-aligned (i.e. the beginning of the file).
return header_prefix + header + b' '*padlen + b'\n'
def _wrap_header_guess_version(header):
"""
Like `_wrap_header`, but chooses an appropriate version given the contents
"""
try:
return _wrap_header(header, (1, 0))
except ValueError:
pass
try:
ret = _wrap_header(header, (2, 0))
except UnicodeEncodeError:
pass
else:
warnings.warn("Stored array in format 2.0. It can only be"
"read by NumPy >= 1.9", UserWarning, stacklevel=2)
return ret
header = _wrap_header(header, (3, 0))
warnings.warn("Stored array in format 3.0. It can only be "
"read by NumPy >= 1.17", UserWarning, stacklevel=2)
return header
def _write_array_header(fp, d, version=None):
""" Write the header for an array and returns the version used
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string representation
to the header of the file.
version: tuple or None
None means use oldest that works
explicit version will raise a ValueError if the format does not
allow saving this data. Default: None
"""
header = ["{"]
for key, value in sorted(d.items()):
# Need to use repr here, since we eval these when reading
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
if version is None:
header = _wrap_header_guess_version(header)
else:
header = _wrap_header(header, version)
fp.write(header)
def write_array_header_1_0(fp, d):
""" Write the header for an array using the 1.0 format.
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (1, 0))
def write_array_header_2_0(fp, d):
""" Write the header for an array using the 2.0 format.
The 2.0 format allows storing very large structured arrays.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
d : dict
This has the appropriate entries for writing its string
representation to the header of the file.
"""
_write_array_header(fp, d, (2, 0))
def read_array_header_1_0(fp):
"""
Read an array header from a filelike object using the 1.0 file format
version.
This will leave the file object located just after the header.
Parameters
----------
fp : filelike object
A file object or something with a `.read()` method like a file.
Returns
-------
shape : tuple of int
The shape of the array.
fortran_order : bool
The array data will be written out directly if it is either
C-contiguous or Fortran-contiguous. Otherwise, it will be made
contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
Raises
------
ValueError
If the data is invalid.
"""
return _read_array_header(fp, version=(1, 0))
def read_array_header_2_0(fp):
"""
Read an array header from a filelike object using the 2.0 file format
version.
This will leave the file object located just after the header.
.. versionadded:: 1.9.0
Parameters
----------
fp : filelike object
A file object or something with a `.read()` method like a file.
Returns
-------
shape : tuple of int
The shape of the array.
fortran_order : bool
The array data will be written out directly if it is either
C-contiguous or Fortran-contiguous. Otherwise, it will be made
contiguous before writing it out.
dtype : dtype
The dtype of the file's data.
Raises
------
ValueError
If the data is invalid.
"""
return _read_array_header(fp, version=(2, 0))
def _filter_header(s):
"""Clean up 'L' in npz header ints.
Cleans up the 'L' in strings representing integers. Needed to allow npz
headers produced in Python2 to be read in Python3.
Parameters
----------
s : string
Npy file header.
Returns
-------
header : str
Cleaned up header.
"""
import tokenize
from io import StringIO
tokens = []
last_token_was_number = False
for token in tokenize.generate_tokens(StringIO(s).readline):
token_type = token[0]
token_string = token[1]
if (last_token_was_number and
token_type == tokenize.NAME and
token_string == "L"):
continue
else:
tokens.append(token)
last_token_was_number = (token_type == tokenize.NUMBER)
return tokenize.untokenize(tokens)
def _read_array_header(fp, version):
"""
see read_array_header_1_0
"""
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
hinfo = _header_size_info.get(version)
if hinfo is None:
raise ValueError("Invalid version {!r}".format(version))
hlength_type, encoding = hinfo
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
header = header.decode(encoding)
# The header is a pretty-printed string representation of a literal
# Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
# Versions (2, 0) and (1, 0) could have been created by a Python 2
# implementation before header filtering was implemented.
if version <= (2, 0):
header = _filter_header(header)
try:
d = safe_eval(header)
except SyntaxError as e:
msg = "Cannot parse header: {!r}"
raise ValueError(msg.format(header)) from e
if not isinstance(d, dict):
msg = "Header is not a dictionary: {!r}"
raise ValueError(msg.format(d))
if EXPECTED_KEYS != d.keys():
keys = sorted(d.keys())
msg = "Header does not contain the correct keys: {!r}"
raise ValueError(msg.format(keys))
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
not all(isinstance(x, int) for x in d['shape'])):
msg = "shape is not valid: {!r}"
raise ValueError(msg.format(d['shape']))
if not isinstance(d['fortran_order'], bool):
msg = "fortran_order is not a valid bool: {!r}"
raise ValueError(msg.format(d['fortran_order']))
try:
dtype = descr_to_dtype(d['descr'])
except TypeError as e:
msg = "descr is not a valid dtype descriptor: {!r}"
raise ValueError(msg.format(d['descr'])) from e
return d['shape'], d['fortran_order'], dtype
def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
"""
Write an array to an NPY file, including a header.
If the array is neither C-contiguous nor Fortran-contiguous AND the
file_like object is not a real file object, this function will have to
copy data in memory.
Parameters
----------
fp : file_like object
An open, writable file object, or similar object with a
``.write()`` method.
array : ndarray
The array to write to disk.
version : (int, int) or None, optional
The version number of the format. None means use the oldest
supported version that is able to store the data. Default: None
allow_pickle : bool, optional
Whether to allow writing pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass to pickle.dump, excluding
'protocol'. These are only useful when pickling objects in object
arrays on Python 3 to Python 2 compatible format.
Raises
------
ValueError
If the array cannot be persisted. This includes the case of
allow_pickle=False and array being an object array.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
are not picklable.
"""
_check_version(version)
_write_array_header(fp, header_data_from_array_1_0(array), version)
if array.itemsize == 0:
buffersize = 0
else:
# Set buffer size to 16 MiB to hide the Python loop overhead.
buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data
# directly. Instead, we will pickle it out
if not allow_pickle:
raise ValueError("Object arrays cannot be saved when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
pickle.dump(array, fp, protocol=3, **pickle_kwargs)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='F'):
fp.write(chunk.tobytes('C'))
else:
if isfileobj(fp):
array.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='C'):
fp.write(chunk.tobytes('C'))
def read_array(fp, allow_pickle=False, pickle_kwargs=None):
"""
Read an array from an NPY file.
Parameters
----------
fp : file_like object
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
Whether to allow writing pickled data. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2 when using
Python 3.
Returns
-------
array : ndarray
The array from the data on disk.
Raises
------
ValueError
If the data is invalid, or allow_pickle=False and the file contains
an object array.
"""
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(fp, version)
if len(shape) == 0:
count = 1
else:
count = numpy.multiply.reduce(shape, dtype=numpy.int64)
# Now read the actual data.
if dtype.hasobject:
# The array contained Python objects. We need to unpickle the data.
if not allow_pickle:
raise ValueError("Object arrays cannot be loaded when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
try:
array = pickle.load(fp, **pickle_kwargs)
except UnicodeError as err:
# Friendlier error message
raise UnicodeError("Unpickling a python object failed: %r\n"
"You may need to pass the encoding= option "
"to numpy.load" % (err,)) from err
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
array = numpy.fromfile(fp, dtype=dtype, count=count)
else:
# This is not a real file. We have to read it the
# memory-intensive way.
# crc32 module fails on reads greater than 2 ** 32 bytes,
# breaking large reads from gzip streams. Chunk reads to
# BUFFER_SIZE bytes to avoid issue and reduce memory overhead
# of the read. In non-chunked case count < max_read_count, so
# only one read is performed.
# Use np.ndarray instead of np.empty since the latter does
# not correctly instantiate zero-width string dtypes; see
# https://github.com/numpy/numpy/pull/6430
array = numpy.ndarray(count, dtype=dtype)
if dtype.itemsize > 0:
# If dtype.itemsize == 0 then there's nothing more to read
max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
for i in range(0, count, max_read_count):
read_count = min(max_read_count, count - i)
read_size = int(read_count * dtype.itemsize)
data = _read_bytes(fp, read_size, "array data")
array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
count=read_count)
if fortran_order:
array.shape = shape[::-1]
array = array.transpose()
else:
array.shape = shape
return array
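# Illustrative sketch (not part of the original module): round-tripping an array
# through write_array/read_array with an in-memory buffer.
#
#   buf = io.BytesIO()
#   write_array(buf, numpy.arange(6).reshape(2, 3))
#   buf.seek(0)
#   assert (read_array(buf) == numpy.arange(6).reshape(2, 3)).all()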
def open_memmap(filename, mode='r+', dtype=None, shape=None,
fortran_order=False, version=None):
"""
Open a .npy file as a memory-mapped array.
This may be used to read an existing file or create a new one.
Parameters
----------
filename : str or path-like
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
addition to the standard file modes, 'c' is also accepted to mean
"copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
mode, if not, `dtype` is ignored. The default value is None, which
results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
C-contiguous (False, the default) if we are creating a new file in
"write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
format used to create the file. None means use the oldest
supported version that is able to store the data. Default: None
Returns
-------
marray : memmap
The memory-mapped array.
Raises
------
ValueError
If the data or the mode is invalid.
IOError
If the file is not found or cannot be opened correctly.
See Also
--------
numpy.memmap
"""
if isfileobj(filename):
raise ValueError("Filename must be a string or a path-like object."
" Memmap cannot use existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
# Check if we ought to create the file.
_check_version(version)
# Ensure that the given dtype is an authentic dtype object rather
# than just something that can be interpreted as a dtype object.
dtype = numpy.dtype(dtype)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
d = dict(
descr=dtype_to_descr(dtype),
fortran_order=fortran_order,
shape=shape,
)
# If we got here, then it should be safe to create the file.
with open(os_fspath(filename), mode+'b') as fp:
_write_array_header(fp, d, version)
offset = fp.tell()
else:
# Read the header of the file first.
with open(os_fspath(filename), 'rb') as fp:
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(fp, version)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
offset = fp.tell()
if fortran_order:
order = 'F'
else:
order = 'C'
# We need to change a write-only mode to a read-write mode since we've
# already written data to the file.
if mode == 'w+':
mode = 'r+'
marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
mode=mode, offset=offset)
return marray
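# Illustrative sketch (not part of the original module): create a new
# memory-mapped .npy file and read it back ('scratch.npy' is a hypothetical path).
#
#   m = open_memmap('scratch.npy', mode='w+', dtype='float32', shape=(4, 3))
#   m[:] = 1.0
#   m.flush()
#   again = open_memmap('scratch.npy', mode='r')
#   assert again.shape == (4, 3) and again.dtype == numpy.dtype('float32')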
def _read_bytes(fp, size, error_template="ran out of data"):
"""
Read from file-like object until size bytes are read.
    Raises ValueError if EOF is encountered before size bytes are read.
Non-blocking objects only supported if they derive from io objects.
Required as e.g. ZipExtFile in python 2.6 can return less data than
requested.
"""
data = bytes()
while True:
# io files (default in python3) return None or raise on
# would-block, python2 file will truncate, probably nothing can be
# done about that. note that regular files can't be non-blocking
try:
r = fp.read(size - len(data))
data += r
if len(r) == 0 or len(data) == size:
break
except io.BlockingIOError:
pass
if len(data) != size:
msg = "EOF: reading %s, expected %d bytes got %d"
raise ValueError(msg % (error_template, size, len(data)))
else:
return data
|
[
"struct.calcsize",
"numpy.compat.pickle.dump",
"numpy.fromfile",
"tokenize.untokenize",
"numpy.frombuffer",
"numpy.multiply.reduce",
"numpy.compat.isfileobj",
"numpy.nditer",
"numpy.memmap",
"io.StringIO",
"struct.pack",
"struct.unpack",
"numpy.ndarray",
"numpy.compat.os_fspath",
"warnings.warn",
"numpy.compat.pickle.load",
"numpy.dtype",
"numpy.lib.utils.safe_eval"
] |
[((11608, 11719), 'numpy.dtype', 'numpy.dtype', (["{'names': names, 'formats': formats, 'titles': titles, 'offsets': offsets,\n 'itemsize': offset}"], {}), "({'names': names, 'formats': formats, 'titles': titles,\n 'offsets': offsets, 'itemsize': offset})\n", (11619, 11719), False, 'import numpy\n'), ((14171, 14288), 'warnings.warn', 'warnings.warn', (['"""Stored array in format 3.0. It can only be read by NumPy >= 1.17"""', 'UserWarning'], {'stacklevel': '(2)'}), "(\n 'Stored array in format 3.0. It can only be read by NumPy >= 1.17',\n UserWarning, stacklevel=2)\n", (14184, 14288), False, 'import warnings\n'), ((18411, 18438), 'tokenize.untokenize', 'tokenize.untokenize', (['tokens'], {}), '(tokens)\n', (18430, 18438), False, 'import tokenize\n'), ((28671, 28690), 'numpy.compat.isfileobj', 'isfileobj', (['filename'], {}), '(filename)\n', (28680, 28690), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((30329, 30420), 'numpy.memmap', 'numpy.memmap', (['filename'], {'dtype': 'dtype', 'shape': 'shape', 'order': 'order', 'mode': 'mode', 'offset': 'offset'}), '(filename, dtype=dtype, shape=shape, order=order, mode=mode,\n offset=offset)\n', (30341, 30420), False, 'import numpy\n'), ((9372, 9534), 'warnings.warn', 'warnings.warn', (['"""metadata on a dtype may be saved or ignored, but will raise if saved when read. Use another form of storage."""', 'UserWarning'], {'stacklevel': '(2)'}), "(\n 'metadata on a dtype may be saved or ignored, but will raise if saved when read. Use another form of storage.'\n , UserWarning, stacklevel=2)\n", (9385, 9534), False, 'import warnings\n'), ((10584, 10602), 'numpy.dtype', 'numpy.dtype', (['descr'], {}), '(descr)\n', (10595, 10602), False, 'import numpy\n'), ((13973, 14083), 'warnings.warn', 'warnings.warn', (['"""Stored array in format 2.0. It can only beread by NumPy >= 1.9"""', 'UserWarning'], {'stacklevel': '(2)'}), "('Stored array in format 2.0. It can only beread by NumPy >= 1.9',\n UserWarning, stacklevel=2)\n", (13986, 14083), False, 'import warnings\n'), ((18832, 18861), 'struct.calcsize', 'struct.calcsize', (['hlength_type'], {}), '(hlength_type)\n', (18847, 18861), False, 'import struct\n'), ((18906, 18946), 'struct.unpack', 'struct.unpack', (['hlength_type', 'hlength_str'], {}), '(hlength_type, hlength_str)\n', (18919, 18946), False, 'import struct\n'), ((19546, 19563), 'numpy.lib.utils.safe_eval', 'safe_eval', (['header'], {}), '(header)\n', (19555, 19563), False, 'from numpy.lib.utils import safe_eval\n'), ((22694, 22745), 'numpy.compat.pickle.dump', 'pickle.dump', (['array', 'fp'], {'protocol': '(3)'}), '(array, fp, protocol=3, **pickle_kwargs)\n', (22705, 22745), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((24465, 24512), 'numpy.multiply.reduce', 'numpy.multiply.reduce', (['shape'], {'dtype': 'numpy.int64'}), '(shape, dtype=numpy.int64)\n', (24486, 24512), False, 'import numpy\n'), ((25236, 25249), 'numpy.compat.isfileobj', 'isfileobj', (['fp'], {}), '(fp)\n', (25245, 25249), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((29155, 29173), 'numpy.dtype', 'numpy.dtype', (['dtype'], {}), '(dtype)\n', (29166, 29173), False, 'import numpy\n'), ((10744, 10771), 'numpy.dtype', 'numpy.dtype', (['(dt, descr[1])'], {}), '((dt, descr[1]))\n', (10755, 10771), False, 'import numpy\n'), ((13049, 13080), 'struct.pack', 'struct.pack', (['fmt', '(hlen + padlen)'], {}), '(fmt, hlen + padlen)\n', (13060, 13080), False, 'import struct\n'), ((18059, 18070), 'io.StringIO', 'StringIO', (['s'], {}), '(s)\n', (18067, 18070), False, 'from io import StringIO\n'), ((22825, 22838), 'numpy.compat.isfileobj', 'isfileobj', (['fp'], {}), '(fp)\n', (22834, 22838), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((23124, 23137), 'numpy.compat.isfileobj', 'isfileobj', (['fp'], {}), '(fp)\n', (23133, 23137), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((24892, 24924), 'numpy.compat.pickle.load', 'pickle.load', (['fp'], {}), '(fp, **pickle_kwargs)\n', (24903, 24924), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((25326, 25370), 'numpy.fromfile', 'numpy.fromfile', (['fp'], {'dtype': 'dtype', 'count': 'count'}), '(fp, dtype=dtype, count=count)\n', (25340, 25370), False, 'import numpy\n'), ((26029, 26062), 'numpy.ndarray', 'numpy.ndarray', (['count'], {'dtype': 'dtype'}), '(count, dtype=dtype)\n', (26042, 26062), False, 'import numpy\n'), ((22910, 23019), 'numpy.nditer', 'numpy.nditer', (['array'], {'flags': "['external_loop', 'buffered', 'zerosize_ok']", 'buffersize': 'buffersize', 'order': '"""F"""'}), "(array, flags=['external_loop', 'buffered', 'zerosize_ok'],\n buffersize=buffersize, order='F')\n", (22922, 23019), False, 'import numpy\n'), ((23207, 23316), 'numpy.nditer', 'numpy.nditer', (['array'], {'flags': "['external_loop', 'buffered', 'zerosize_ok']", 'buffersize': 'buffersize', 'order': '"""C"""'}), "(array, flags=['external_loop', 'buffered', 'zerosize_ok'],\n buffersize=buffersize, order='C')\n", (23219, 23316), False, 'import numpy\n'), ((29533, 29552), 'numpy.compat.os_fspath', 'os_fspath', (['filename'], {}), '(filename)\n', (29542, 29552), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((29723, 29742), 'numpy.compat.os_fspath', 'os_fspath', (['filename'], {}), '(filename)\n', (29732, 29742), False, 'from numpy.compat import isfileobj, os_fspath, pickle\n'), ((12954, 12974), 'struct.calcsize', 'struct.calcsize', (['fmt'], {}), '(fmt)\n', (12969, 12974), False, 'import struct\n'), ((26555, 26608), 'numpy.frombuffer', 'numpy.frombuffer', (['data'], {'dtype': 'dtype', 'count': 'read_count'}), '(data, dtype=dtype, count=read_count)\n', (26571, 26608), False, 'import numpy\n')]
|
# ________
# /
# \ /
# \ /
# \/
import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD
# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040
sns.set(style='darkgrid')
pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)
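# Plot 0 illustrates a single sifting step: the detected maxima and minima of
# h_(1,0)(t) define upper and lower envelopes (dashed red and cyan), and their
# average (dashed purple) is the local mean that sifting subtracts.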
# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi],
labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()
knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)
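# basis.cubic_b_spline(knots) and basis.chsi_basis(knots) are assumed to return
# matrices of basis functions evaluated over `time` (one row per basis
# function); the plots below index individual rows near the boundary knots.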
# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots_uniform)):
axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()
# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots)):
axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots[i])):
axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
preprocess_time_series[i] += np.random.normal(0, 1)
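# Noise model for the filtering demonstration: Gaussian noise with standard
# deviation 0.1 everywhere, plus unit-standard-deviation shocks at 500 randomly
# chosen points to create outliers.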
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()
# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
# plot 4
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1)
max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1)
min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1)
min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101)
dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101)
s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1
max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1)
max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101)
dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101)
dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101)
s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1])
slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2
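# Slope-based edge effect (construction above): s1 is the slope joining the
# penultimate minimum to the final maximum, and s2 the slope joining the final
# maximum to the final minimum; the extrapolated maximum is projected from the
# final minimum along s1, and the extrapolated minimum from that maximum along
# s2, each placed one extremum-spacing beyond the boundary.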
min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1)
min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101)
dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time)
dash_4 = np.linspace(slope_based_maximum, slope_based_minimum)
maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash)
maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash)
maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash)
maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101)
maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time)
minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash)
minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash)
minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash)
minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101)
minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time)
# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \
time_series[time == minima_x[-1]]
improved_slope_based_maximum_time = time[-1]
improved_slope_based_maximum = time_series[-1]
improved_slope_based_minimum_time = slope_based_minimum_time
improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time -
improved_slope_based_maximum_time)
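# Improved slope-based variant: the last signal value itself is kept as the
# anchoring maximum at time[-1], and the corresponding minimum is projected
# from it with the same slope s2 (the deeppink/dodgerblue points in the figure
# below).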
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
-0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()
# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2
A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2
P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2])
P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1])
Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1]
Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1]
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
Average_max = (maxima_y[-2] + maxima_y[-1]) / 2
Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
Average_min = (minima_y[-2] + minima_y[-1]) / 2
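# Characteristic waves beyond the boundary: the Huang wave rescales the final
# oscillation by the period ratio P1/P2 and amplitude ratio A1/A2, the Coughlin
# wave is a pure cosine of amplitude A1 and period P1 anchored at the final
# maximum, and the "Average" extrema simply average the last two maxima/minima
# one extremum-spacing beyond the boundary.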
utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave)
Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd()
Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave)
Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd()
Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd()
Huang_max_time = Huang_time[Huang_max_bool]
Huang_max = Huang_wave[Huang_max_bool]
Huang_min_time = Huang_time[Huang_min_bool]
Huang_min = Huang_wave[Huang_min_bool]
Coughlin_max_time = Coughlin_time[Coughlin_max_bool]
Coughlin_max = Coughlin_wave[Coughlin_max_bool]
Coughlin_min_time = Coughlin_time[Coughlin_min_bool]
Coughlin_min = Coughlin_wave[Coughlin_min_bool]
max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101)
max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time)
min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101)
min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
min_2_x = minima_y[-2] * np.ones_like(min_2_x_time)
dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101)
dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x)
max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y)
min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
min_2_y_time = minima_x[-2] * np.ones_like(min_2_y)
dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101)
dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time)
max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time)
min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
min_1_x = minima_y[-1] * np.ones_like(min_1_x_time)
dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101)
dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x)
max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y)
min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
min_1_y_time = minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4,
label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4,
label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4,
label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4,
label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()
# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.zeros_like(time_extended) / 0
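# Dividing np.zeros_like(time_extended) by zero intentionally fills the
# extended array with NaNs (it also emits a RuntimeWarning); only the central
# copy of lsq_signal and the extrapolated samples are subsequently overwritten.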
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] = lsq_signal
neural_network_m = 200
neural_network_k = 100
# forward ->
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))]
P[-1, col] = 1 # for additive constant
t = lsq_signal[-neural_network_m:]
# test - top
seed_weights = np.ones(neural_network_k) / neural_network_k
weights = 0 * seed_weights.copy()
train_input = P[:-1, :]
lr = 0.01
for iterations in range(1000):
output = np.matmul(weights, train_input)
error = (t - output)
gradients = error * (- train_input)
# guess average gradients
average_gradients = np.mean(gradients, axis=1)
# steepest descent
max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients)))
adjustment = - lr * average_gradients
# adjustment = - lr * max_gradient_vector
weights += adjustment
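# The loop above trains a single linear neuron by batch gradient descent on the
# squared one-step-ahead prediction error: each column of train_input holds the
# previous neural_network_k samples and the matching entry of t is the next
# sample; a zero bias weight is appended below before extrapolating forwards.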
# test - bottom
weights_right = np.hstack((weights, 0))
max_count_right = 0
min_count_right = 0
i_right = 0
while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1):
time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \
sum(weights_right * np.hstack((time_series_extended[
int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right):
int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1)))
i_right += 1
if i_right > 1:
emd_utils_max = \
emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
max_count_right += 1
emd_utils_min = \
emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
min_count_right += 1
# backward <-
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)]
P[-1, col] = 1 # for additive constant
t = lsq_signal[:neural_network_m]
vx = cvx.Variable(int(neural_network_k + 1))
objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary
prob = cvx.Problem(objective)
result = prob.solve(verbose=True, solver=cvx.ECOS)
weights_left = np.array(vx.value)
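# For the backward extension the weights are instead fitted with cvxpy by
# minimising the l2 norm of the residual of the (arbitrary) linear model noted
# above, solved with ECOS; extrapolation then proceeds sample by sample to the
# left until at least one maximum and one minimum have been generated.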
max_count_left = 0
min_count_left = 0
i_left = 0
while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1):
time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \
2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left):
int(len(lsq_signal) - 1 - i_left + neural_network_k)],
1))) + 1
i_left += 1
if i_left > 1:
emd_utils_max = \
emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
max_count_left += 1
emd_utils_min = \
emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
min_count_left += 1
lsq_utils = emd_utils.Utility(time=time, time_series=lsq_signal)
utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended)
maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()]
maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()]
maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1]
maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1]
minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()]
minima_time = time[lsq_utils.min_bool_func_1st_order_fd()]
minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:]
minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Single Neuron Neural Network Example')
plt.plot(time, lsq_signal, zorder=2, label='Signal')
plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12))
plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima')
plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima')
plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3,
label=textwrap.fill('Extrapolated maxima', 12))
plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4,
label=textwrap.fill('Extrapolated minima', 12))
plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k',
label=textwrap.fill('Neural network inputs', 13))
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100),
-2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100),
2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2),
((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2),
((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k')
plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k')
plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed',
label=textwrap.fill('Neural network targets', 13))
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100),
-2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100),
2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2),
((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2),
((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray')
plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray',
linestyle='dashed')
plt.xlim(3.4 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/neural_network.png')
plt.show()
# plot 6a
np.random.seed(0)
time = np.linspace(0, 5 * np.pi, 1001)
knots_51 = np.linspace(0, 5 * np.pi, 51)
time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time)
noise = np.random.normal(0, 1, len(time_series))
time_series += noise
advemdpy = EMD(time=time, time_series=time_series)
imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3,
edge_effect='symmetric_anchor', verbose=False)[:3]
knots_31 = np.linspace(0, 5 * np.pi, 31)
imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2,
edge_effect='symmetric_anchor', verbose=False)[:3]
knots_11 = np.linspace(0, 5 * np.pi, 11)
imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1,
edge_effect='symmetric_anchor', verbose=False)[:3]
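# The same noisy series is decomposed with progressively coarser knot
# sequences; the figure below compares how the underlying trend is spread
# across IMFs 1-3 with 51 knots, IMFs 1-2 with 31 knots, and a single IMF with
# 11 knots.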
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_series, label='Time series')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}')
for knot in knots_51:
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(time, time_series, label='Time series')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}')
for knot in knots_31:
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
axs[2].plot(time, time_series, label='Time series')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}')
for knot in knots_11:
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
plt.savefig('jss_figures/DFA_different_trends.png')
plt.show()
# plot 6b
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_series, label='Time series')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
for knot in knots_51:
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[0].set_ylim(-5.5, 5.5)
axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[1].plot(time, time_series, label='Time series')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
for knot in knots_31:
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].set_ylim(-5.5, 5.5)
axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[2].plot(time, time_series, label='Time series')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
for knot in knots_11:
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([np.pi, (3 / 2) * np.pi])
axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].set_ylim(-5.5, 5.5)
axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi)
plt.savefig('jss_figures/DFA_different_trends_zoomed.png')
plt.show()
hs_ouputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False)
# plot 6c
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50))
x_hs, y, z = hs_ouputs
z_min, z_max = 0, np.abs(z).max()
ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3)
ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3)
ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3)
ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi])
ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$'])
plt.ylabel(r'Frequency (rad.s$^{-1}$)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/DFA_hilbert_spectrum.png')
plt.show()
# plot 6c
time = np.linspace(0, 5 * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 51)
fluc = Fluctuation(time=time, time_series=time_series)
max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False)
max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True)
min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False)
min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True)
util = Utility(time=time, time_series=time_series)
maxima = util.max_bool_func_1st_order_fd()
minima = util.min_bool_func_1st_order_fd()
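# When a knot interval contains no extrema the Schoenberg-Whitney conditions
# for the envelope spline fit are violated; the figure below contrasts the
# resulting unsmoothed envelopes with their smoothed counterparts.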
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50))
plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2)
plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10)
plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10)
plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange')
plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red')
plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan')
plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue')
for knot in knots[:-1]:
plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1)
plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1)
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi),
(r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png')
plt.show()
# plot 7
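# detrended fluctuation analysis examples: compare the standard (EMD), smoothed (SEMD) and
# fixed-point (EEMD) extrema envelopes with the inflection-point and binomial-average
# estimates of the local mean, against the true mean cos(t)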
a = 0.25
width = 0.2
time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001)
knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
inflection_bool = utils.inflection_point()
inflection_x = time[inflection_bool]
inflection_y = time_series[inflection_bool]
fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series)
maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False,
smoothing_penalty=0.2, edge_effect='none',
spline_method='b_spline')[0]
maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True,
smoothing_penalty=0.2, edge_effect='none',
spline_method='b_spline')[0]
minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False,
smoothing_penalty=0.2, edge_effect='none',
spline_method='b_spline')[0]
minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True,
smoothing_penalty=0.2, edge_effect='none',
spline_method='b_spline')[0]
inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots,
smooth=True,
smoothing_penalty=0.2,
technique='inflection_points')[0]
binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots,
smooth=True,
smoothing_penalty=0.2,
technique='binomial_average', order=21,
increment=20)[0]
derivative_of_lsq = utils.derivative_forward_diff()
derivative_time = time[:-1]
derivative_knots = np.linspace(knots[0], knots[-1], 31)
# change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging)
emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq)
imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots,
knot_time=derivative_time, text=False, verbose=False)[0][1, :]
utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative)
optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \
np.r_[utils.zero_crossing() == 1, False]
optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \
np.r_[utils.zero_crossing() == 1, False]
EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima',
optimal_maxima,
optimal_minima,
smooth=False,
smoothing_penalty=0.2,
edge_effect='none')[0]
EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima',
optimal_maxima,
optimal_minima,
smooth=False,
smoothing_penalty=0.2,
edge_effect='none')[0]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Detrended Fluctuation Analysis Examples')
plt.plot(time, time_series, linewidth=2, label='Time series')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4,
label=textwrap.fill('Optimal maxima', 10))
plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4,
label=textwrap.fill('Optimal minima', 10))
plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10))
plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10))
plt.plot(time, minima_envelope, c='darkblue')
plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue')
plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10))
plt.plot(time, minima_envelope_smooth, c='darkred')
plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred')
plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10))
plt.plot(time, EEMD_minima_envelope, c='darkgreen')
plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen')
plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10))
plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10))
plt.plot(time, np.cos(time), c='black', label='True mean')
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$',
r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/detrended_fluctuation_analysis.png')
plt.show()
# Duffing Equation Example
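# Duffing oscillator: dx/dt = v, dv/dt = x - x^3 + 0.1*cos(2*pi*t/25), solved with scipy's odeint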
def duffing_equation(xy, ts):
gamma = 0.1
epsilon = 1
omega = ((2 * np.pi) / 25)
return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)]
t = np.linspace(0, 150, 1501)
XY0 = [1, 1]
solution = odeint(duffing_equation, XY0, t)
x = solution[:, 0]
dxdt = solution[:, 1]
x_points = [0, 50, 100, 150]
x_names = [0, 50, 100, 150]  # list (not set) so the tick labels keep their order
y_points_1 = [-2, 0, 2]
y_points_2 = [-1, 0, 1]
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.2)
axs[0].plot(t, x)
axs[0].set_title('Duffing Equation Displacement')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, dxdt)
axs[1].set_title('Duffing Equation Velocity')
axs[1].set_ylim([-1.5, 1.5])
axs[1].set_xlim([0, 150])
axis = 0
for ax in axs.flat:
ax.label_outer()
if axis == 0:
ax.set_ylabel('x(t)')
ax.set_yticks(y_points_1)
if axis == 1:
ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $')
ax.set(xlabel='t')
ax.set_yticks(y_points_2)
ax.set_xticks(x_points)
ax.set_xticklabels(x_names)
axis += 1
plt.savefig('jss_figures/Duffing_equation.png')
plt.show()
# compare other packages Duffing - top
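# decompose the Duffing displacement with PyEMD 0.2.10 and build a Gaussian-filtered
# Hilbert-Huang spectrum with the emd 0.3.3 spectra utilities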
pyemd = pyemd0215()
py_emd = pyemd(x)
IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40))
plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png')
plt.show()
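# repeat the comparison using the sift implementation from emd 0.3.3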
emd_sift = emd040.sift.sift(x)
IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40))
plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_emd.png')
plt.show()
# compare other packages Duffing - bottom
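# decompose the Duffing displacement with AdvEMDpy and compare IMFs 1 and 2 (and the
# driving-function reconstruction errors) against PyEMD 0.2.10 and emd 0.3.3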
emd_duffing = AdvEMDpy.EMD(time=t, time_series=x)
emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False)
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.3)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy')
axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10')
axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3')
axs[0].set_title('IMF 1')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy')
print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}')
axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10')
print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}')
axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3')
print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}')
axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$')
axs[1].set_title('IMF 2')
axs[1].set_ylim([-0.2, 0.4])
axs[1].set_xlim([0, 150])
axis = 0
for ax in axs.flat:
ax.label_outer()
if axis == 0:
ax.set_ylabel(r'$\gamma_1(t)$')
ax.set_yticks([-2, 0, 2])
if axis == 1:
ax.set_ylabel(r'$\gamma_2(t)$')
ax.set_yticks([-0.2, 0, 0.2])
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
ax.set_xticks(x_points)
ax.set_xticklabels(x_names)
axis += 1
plt.savefig('jss_figures/Duffing_equation_imfs.png')
plt.show()
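# Gaussian filtered Hilbert spectrum of the Duffing equation from the AdvEMDpy decomposition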
hs_outputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False)
ax = plt.subplot(111)
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40))
x, y, z = hs_outputs
y = y / (2 * np.pi)
z_min, z_max = 0, np.abs(z).max()
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
ax.pcolormesh(x, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht.png')
plt.show()
# Carbon Dioxide Concentration Example
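# load the monthly mean atmospheric CO2 record and plot the raw concentration series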
CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51)
plt.plot(CO2_data['month'], CO2_data['decimal date'])
plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35))
plt.ylabel('Parts per million')
plt.xlabel('Time (years)')
plt.savefig('jss_figures/CO2_concentration.png')
plt.show()
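# extract the CO2 concentration and time axis as numpy arrays for the package comparisons below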
signal = CO2_data['decimal date']
signal = np.asarray(signal)
time = CO2_data['month']
time = np.asarray(time)
# compare other packages Carbon Dioxide - top
pyemd = pyemd0215()
py_emd = pyemd(signal)
IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert')
print(f'PyEMD annual frequency error: {np.round(sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
fig, ax = plt.subplots()
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10))
box_0 = ax.get_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert_pyemd.png')
plt.show()
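# repeat the CO2 Hilbert spectrum using the sift implementation from emd 0.3.3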
emd_sift = emd040.sift.sift(signal)
IP, IF, IA = emd040.spectra.frequency_transform(emd_sift[:, :1], 12, 'hilbert')
print(f'emd annual frequency error: {np.round(sum(np.abs(IF - np.ones_like(IF)))[0], 3)}')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
fig, ax = plt.subplots()
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3', 45))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10))
box_0 = ax.get_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert_emd.png')
plt.show()
# compare other packages Carbon Dioxide - bottom
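# decompose the CO2 record with AdvEMDpy using 200 uniformly spaced knots and report the
# annual-frequency error of IMF 1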
knots = np.linspace(time[0], time[-1], 200)
emd_example = AdvEMDpy.EMD(time=time, time_series=signal)
imfs, hts, ifs, _, _, _, _ = \
emd_example.empirical_mode_decomposition(knots=knots, knot_time=time, verbose=False)
print(f'AdvEMDpy annual frequency error: {np.round(sum(np.abs(ifs[1, :] / (2 * np.pi) - np.ones_like(ifs[1, :]))), 3)}')
fig, axs = plt.subplots(2, 2)
plt.subplots_adjust(hspace=0.5)
axs[0, 0].plot(time, signal)
axs[0, 1].plot(time, signal)
axs[0, 1].plot(time, imfs[0, :], label='Smoothed')
axs[0, 1].legend(loc='lower right')
axs[1, 0].plot(time, imfs[1, :])
axs[1, 1].plot(time, imfs[2, :])
axis = 0
for ax in axs.flat:
if axis == 0:
        ax.set(ylabel=r'CO$_2$ concentration')
if axis == 1:
pass
if axis == 2:
        ax.set(ylabel=r'CO$_2$ concentration')
ax.set(xlabel='Time (years)')
if axis == 3:
ax.set(xlabel='Time (years)')
axis += 1
plt.gcf().subplots_adjust(bottom=0.15)
axs[0, 0].set_title(r'Original CO$_2$ Concentration')
axs[0, 1].set_title('Smoothed CO$_2$ Concentration')
axs[1, 0].set_title('IMF 1')
axs[1, 1].set_title('Residual')
plt.gcf().subplots_adjust(bottom=0.15)
plt.savefig('jss_figures/CO2_EMD.png')
plt.show()
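# Gaussian filtered Hilbert spectrum of the annual-cycle IMF (IMF 1) from the AdvEMDpy decomposition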
hs_outputs = hilbert_spectrum(time, imfs, hts, ifs, max_frequency=10, which_imfs=[1], plot=False)
x_hs, y, z = hs_outputs
y = y / (2 * np.pi)
z_min, z_max = 0, np.abs(z).max()
fig, ax = plt.subplots()
figure_size = plt.gcf().get_size_inches()
factor = 0.7
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
ax.set_title(textwrap.fill(r'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using AdvEMDpy', 40))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.plot(x_hs[0, :], np.ones_like(x_hs[0, :]), 'k--', label=textwrap.fill('Annual cycle', 10))
ax.axis([x_hs.min(), x_hs.max(), y.min(), y.max()])
box_0 = ax.get_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert.png')
plt.show()
'numpy.linspace', 'np.linspace', (['(0)', '(5 * np.pi)', '(11)'], {}), '(0, 5 * np.pi, 11)\n', (45239, 45257), True, 'import numpy as np\n'), ((45477, 45495), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (45489, 45495), True, 'import matplotlib.pyplot as plt\n'), ((45592, 45623), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.1)'}), '(hspace=0.1)\n', (45611, 45623), True, 'import matplotlib.pyplot as plt\n'), ((49286, 49337), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/DFA_different_trends.png"""'], {}), "('jss_figures/DFA_different_trends.png')\n", (49297, 49337), True, 'import matplotlib.pyplot as plt\n'), ((49338, 49348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (49346, 49348), True, 'import matplotlib.pyplot as plt\n'), ((49371, 49389), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (49383, 49389), True, 'import matplotlib.pyplot as plt\n'), ((49500, 49531), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.1)'}), '(hspace=0.1)\n', (49519, 49531), True, 'import matplotlib.pyplot as plt\n'), ((51975, 52033), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/DFA_different_trends_zoomed.png"""'], {}), "('jss_figures/DFA_different_trends_zoomed.png')\n", (51986, 52033), True, 'import matplotlib.pyplot as plt\n'), ((52034, 52044), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (52042, 52044), True, 'import matplotlib.pyplot as plt\n'), ((52058, 52135), 'emd_hilbert.hilbert_spectrum', 'hilbert_spectrum', (['time', 'imfs_51', 'hts_51', 'ifs_51'], {'max_frequency': '(12)', 'plot': '(False)'}), '(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False)\n', (52074, 52135), False, 'from emd_hilbert import Hilbert, hilbert_spectrum\n'), ((52152, 52168), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (52163, 52168), True, 'import matplotlib.pyplot as plt\n'), ((52951, 52989), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (rad.s$^{-1}$)"""'], {}), "('Frequency (rad.s$^{-1}$)')\n", (52961, 52989), True, 'import matplotlib.pyplot as plt\n'), ((52991, 53013), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (53001, 53013), True, 'import matplotlib.pyplot as plt\n'), ((53180, 53231), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/DFA_hilbert_spectrum.png"""'], {}), "('jss_figures/DFA_hilbert_spectrum.png')\n", (53191, 53231), True, 'import matplotlib.pyplot as plt\n'), ((53232, 53242), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (53240, 53242), True, 'import matplotlib.pyplot as plt\n'), ((53261, 53292), 'numpy.linspace', 'np.linspace', (['(0)', '(5 * np.pi)', '(1001)'], {}), '(0, 5 * np.pi, 1001)\n', (53272, 53292), True, 'import numpy as np\n'), ((53347, 53376), 'numpy.linspace', 'np.linspace', (['(0)', '(5 * np.pi)', '(51)'], {}), '(0, 5 * np.pi, 51)\n', (53358, 53376), True, 'import numpy as np\n'), ((53385, 53432), 'emd_mean.Fluctuation', 'Fluctuation', ([], {'time': 'time', 'time_series': 'time_series'}), '(time=time, time_series=time_series)\n', (53396, 53432), False, 'from emd_mean import Fluctuation\n'), ((53926, 53969), 'emd_utils.Utility', 'Utility', ([], {'time': 'time', 'time_series': 'time_series'}), '(time=time, time_series=time_series)\n', (53933, 53969), False, 'from emd_utils import time_extension, Utility\n'), ((54062, 54078), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', 
(54073, 54078), True, 'import matplotlib.pyplot as plt\n'), ((54249, 54320), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'time_series'], {'label': '"""Time series"""', 'zorder': '(2)', 'LineWidth': '(2)'}), "(time, time_series, label='Time series', zorder=2, LineWidth=2)\n", (54257, 54320), True, 'import matplotlib.pyplot as plt\n'), ((54321, 54406), 'matplotlib.pyplot.scatter', 'plt.scatter', (['time[maxima]', 'time_series[maxima]'], {'c': '"""r"""', 'label': '"""Maxima"""', 'zorder': '(10)'}), "(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10\n )\n", (54332, 54406), True, 'import matplotlib.pyplot as plt\n'), ((54402, 54487), 'matplotlib.pyplot.scatter', 'plt.scatter', (['time[minima]', 'time_series[minima]'], {'c': '"""b"""', 'label': '"""Minima"""', 'zorder': '(10)'}), "(time[minima], time_series[minima], c='b', label='Minima', zorder=10\n )\n", (54413, 54487), True, 'import matplotlib.pyplot as plt\n'), ((55096, 55234), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi)', "('$0$', '$\\\\pi$', '2$\\\\pi$', '3$\\\\pi$', '4$\\\\pi$', '5$\\\\pi$')"], {}), "((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (\n '$0$', '$\\\\pi$', '2$\\\\pi$', '3$\\\\pi$', '4$\\\\pi$', '5$\\\\pi$'))\n", (55106, 55234), True, 'import matplotlib.pyplot as plt\n'), ((55242, 55300), 'matplotlib.pyplot.yticks', 'plt.yticks', (['(-2, -1, 0, 1, 2)', "('-2', '-1', '0', '1', '2')"], {}), "((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))\n", (55252, 55300), True, 'import matplotlib.pyplot as plt\n'), ((55301, 55338), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.25 * np.pi)', '(5.25 * np.pi)'], {}), '(-0.25 * np.pi, 5.25 * np.pi)\n', (55309, 55338), True, 'import matplotlib.pyplot as plt\n'), ((55499, 55559), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/Schoenberg_Whitney_Conditions.png"""'], {}), "('jss_figures/Schoenberg_Whitney_Conditions.png')\n", (55510, 55559), True, 'import matplotlib.pyplot as plt\n'), ((55560, 55570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (55568, 55570), True, 'import matplotlib.pyplot as plt\n'), ((55609, 55660), 'numpy.linspace', 'np.linspace', (['((0 + a) * np.pi)', '((5 - a) * np.pi)', '(1001)'], {}), '((0 + a) * np.pi, (5 - a) * np.pi, 1001)\n', (55620, 55660), True, 'import numpy as np\n'), ((55669, 55718), 'numpy.linspace', 'np.linspace', (['((0 + a) * np.pi)', '((5 - a) * np.pi)', '(11)'], {}), '((0 + a) * np.pi, (5 - a) * np.pi, 11)\n', (55680, 55718), True, 'import numpy as np\n'), ((55773, 55826), 'emd_utils.Utility', 'emd_utils.Utility', ([], {'time': 'time', 'time_series': 'time_series'}), '(time=time, time_series=time_series)\n', (55790, 55826), False, 'import emd_utils\n'), ((56176, 56232), 'emd_mean.Fluctuation', 'emd_mean.Fluctuation', ([], {'time': 'time', 'time_series': 'time_series'}), '(time=time, time_series=time_series)\n', (56196, 56232), False, 'import emd_mean\n'), ((58501, 58537), 'numpy.linspace', 'np.linspace', (['knots[0]', 'knots[-1]', '(31)'], {}), '(knots[0], knots[-1], 31)\n', (58512, 58537), True, 'import numpy as np\n'), ((58666, 58731), 'AdvEMDpy.EMD', 'AdvEMDpy.EMD', ([], {'time': 'derivative_time', 'time_series': 'derivative_of_lsq'}), '(time=derivative_time, time_series=derivative_of_lsq)\n', (58678, 58731), False, 'import AdvEMDpy\n'), ((58938, 59004), 'emd_utils.Utility', 'emd_utils.Utility', ([], {'time': 'time[:-1]', 'time_series': 'imf_1_of_derivative'}), '(time=time[:-1], time_series=imf_1_of_derivative)\n', (58955, 59004), 
False, 'import emd_utils\n'), ((60536, 60552), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (60547, 60552), True, 'import matplotlib.pyplot as plt\n'), ((60592, 60644), 'matplotlib.pyplot.title', 'plt.title', (['"""Detrended Fluctuation Analysis Examples"""'], {}), "('Detrended Fluctuation Analysis Examples')\n", (60601, 60644), True, 'import matplotlib.pyplot as plt\n'), ((60645, 60706), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'time_series'], {'LineWidth': '(2)', 'label': '"""Time series"""'}), "(time, time_series, LineWidth=2, label='Time series')\n", (60653, 60706), True, 'import matplotlib.pyplot as plt\n'), ((60707, 60771), 'matplotlib.pyplot.scatter', 'plt.scatter', (['maxima_x', 'maxima_y'], {'c': '"""r"""', 'zorder': '(4)', 'label': '"""Maxima"""'}), "(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')\n", (60718, 60771), True, 'import matplotlib.pyplot as plt\n'), ((60772, 60836), 'matplotlib.pyplot.scatter', 'plt.scatter', (['minima_x', 'minima_y'], {'c': '"""b"""', 'zorder': '(4)', 'label': '"""Minima"""'}), "(minima_x, minima_y, c='b', zorder=4, label='Minima')\n", (60783, 60836), True, 'import matplotlib.pyplot as plt\n'), ((61316, 61361), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'minima_envelope'], {'c': '"""darkblue"""'}), "(time, minima_envelope, c='darkblue')\n", (61324, 61361), True, 'import matplotlib.pyplot as plt\n'), ((61362, 61431), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '((maxima_envelope + minima_envelope) / 2)'], {'c': '"""darkblue"""'}), "(time, (maxima_envelope + minima_envelope) / 2, c='darkblue')\n", (61370, 61431), True, 'import matplotlib.pyplot as plt\n'), ((61526, 61577), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'minima_envelope_smooth'], {'c': '"""darkred"""'}), "(time, minima_envelope_smooth, c='darkred')\n", (61534, 61577), True, 'import matplotlib.pyplot as plt\n'), ((61578, 61665), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '((maxima_envelope_smooth + minima_envelope_smooth) / 2)'], {'c': '"""darkred"""'}), "(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c=\n 'darkred')\n", (61586, 61665), True, 'import matplotlib.pyplot as plt\n'), ((61755, 61806), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'EEMD_minima_envelope'], {'c': '"""darkgreen"""'}), "(time, EEMD_minima_envelope, c='darkgreen')\n", (61763, 61806), True, 'import matplotlib.pyplot as plt\n'), ((61807, 61892), 'matplotlib.pyplot.plot', 'plt.plot', (['time', '((EEMD_maxima_envelope + EEMD_minima_envelope) / 2)'], {'c': '"""darkgreen"""'}), "(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen'\n )\n", (61815, 61892), True, 'import matplotlib.pyplot as plt\n'), ((62170, 62308), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi)', "('$0$', '$\\\\pi$', '2$\\\\pi$', '3$\\\\pi$', '4$\\\\pi$', '5$\\\\pi$')"], {}), "((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (\n '$0$', '$\\\\pi$', '2$\\\\pi$', '3$\\\\pi$', '4$\\\\pi$', '5$\\\\pi$'))\n", (62180, 62308), True, 'import matplotlib.pyplot as plt\n'), ((62377, 62435), 'matplotlib.pyplot.yticks', 'plt.yticks', (['(-2, -1, 0, 1, 2)', "('-2', '-1', '0', '1', '2')"], {}), "((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))\n", (62387, 62435), True, 'import matplotlib.pyplot as plt\n'), ((62436, 62473), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.25 * np.pi)', '(5.25 * np.pi)'], {}), '(-0.25 * np.pi, 5.25 * np.pi)\n', (62444, 62473), True, 'import matplotlib.pyplot as plt\n'), ((62634, 62695), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/detrended_fluctuation_analysis.png"""'], {}), "('jss_figures/detrended_fluctuation_analysis.png')\n", (62645, 62695), True, 'import matplotlib.pyplot as plt\n'), ((62696, 62706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (62704, 62706), True, 'import matplotlib.pyplot as plt\n'), ((62914, 62939), 'numpy.linspace', 'np.linspace', (['(0)', '(150)', '(1501)'], {}), '(0, 150, 1501)\n', (62925, 62939), True, 'import numpy as np\n'), ((62964, 62996), 'scipy.integrate.odeint', 'odeint', (['duffing_equation', 'XY0', 't'], {}), '(duffing_equation, XY0, t)\n', (62970, 62996), False, 'from scipy.integrate import odeint\n'), ((63156, 63174), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (63168, 63174), True, 'import matplotlib.pyplot as plt\n'), ((63175, 63206), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.2)'}), '(hspace=0.2)\n', (63194, 63206), True, 'import matplotlib.pyplot as plt\n'), ((63783, 63830), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/Duffing_equation.png"""'], {}), "('jss_figures/Duffing_equation.png')\n", (63794, 63830), True, 'import matplotlib.pyplot as plt\n'), ((63831, 63841), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (63839, 63841), True, 'import matplotlib.pyplot as plt\n'), ((63891, 63902), 'PyEMD.EMD', 'pyemd0215', ([], {}), '()\n', (63900, 63902), True, 'from PyEMD import EMD as pyemd0215\n'), ((63934, 63993), 'emd.spectra.frequency_transform', 'emd040.spectra.frequency_transform', (['py_emd.T', '(10)', '"""hilbert"""'], {}), "(py_emd.T, 10, 'hilbert')\n", (63968, 63993), True, 'import emd as emd040\n'), ((64018, 64062), 'emd.spectra.define_hist_bins', 'emd040.spectra.define_hist_bins', (['(0)', '(0.2)', '(100)'], {}), '(0, 0.2, 100)\n', (64049, 64062), True, 'import emd as emd040\n'), ((64069, 64116), 'emd.spectra.hilberthuang', 'emd040.spectra.hilberthuang', (['IF', 'IA', 'freq_edges'], {}), '(IF, IA, freq_edges)\n', (64096, 64116), True, 'import emd as emd040\n'), ((64123, 64152), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hht'], {'sigma': '(1)'}), '(hht, sigma=1)\n', (64138, 64152), False, 'from scipy.ndimage import gaussian_filter\n'), ((64158, 64174), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (64169, 64174), True, 'import matplotlib.pyplot as plt\n'), ((64727, 64756), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150]'], {}), '([0, 50, 100, 150])\n', (64737, 64756), True, 'import matplotlib.pyplot as plt\n'), ((64757, 64782), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 0.1, 0.2]'], {}), '([0, 0.1, 0.2])\n', (64767, 64782), True, 'import matplotlib.pyplot as plt\n'), ((64783, 64811), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (64793, 64811), True, 'import matplotlib.pyplot as plt\n'), ((64812, 64834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (64822, 64834), True, 'import matplotlib.pyplot as plt\n'), ((65001, 65057), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/Duffing_equation_ht_pyemd.png"""'], {}), "('jss_figures/Duffing_equation_ht_pyemd.png')\n", (65012, 65057), True, 'import matplotlib.pyplot as plt\n'), ((65058, 65068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (65066, 65068), True, 'import matplotlib.pyplot as plt\n'), ((65070, 65080), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (65078, 
65080), True, 'import matplotlib.pyplot as plt\n'), ((65093, 65112), 'emd.sift.sift', 'emd040.sift.sift', (['x'], {}), '(x)\n', (65109, 65112), True, 'import emd as emd040\n'), ((65126, 65185), 'emd.spectra.frequency_transform', 'emd040.spectra.frequency_transform', (['emd_sift', '(10)', '"""hilbert"""'], {}), "(emd_sift, 10, 'hilbert')\n", (65160, 65185), True, 'import emd as emd040\n'), ((65210, 65254), 'emd.spectra.define_hist_bins', 'emd040.spectra.define_hist_bins', (['(0)', '(0.2)', '(100)'], {}), '(0, 0.2, 100)\n', (65241, 65254), True, 'import emd as emd040\n'), ((65261, 65308), 'emd.spectra.hilberthuang', 'emd040.spectra.hilberthuang', (['IF', 'IA', 'freq_edges'], {}), '(IF, IA, freq_edges)\n', (65288, 65308), True, 'import emd as emd040\n'), ((65315, 65344), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hht'], {'sigma': '(1)'}), '(hht, sigma=1)\n', (65330, 65344), False, 'from scipy.ndimage import gaussian_filter\n'), ((65350, 65366), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (65361, 65366), True, 'import matplotlib.pyplot as plt\n'), ((65916, 65945), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150]'], {}), '([0, 50, 100, 150])\n', (65926, 65945), True, 'import matplotlib.pyplot as plt\n'), ((65946, 65971), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 0.1, 0.2]'], {}), '([0, 0.1, 0.2])\n', (65956, 65971), True, 'import matplotlib.pyplot as plt\n'), ((65972, 66000), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (65982, 66000), True, 'import matplotlib.pyplot as plt\n'), ((66001, 66023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (66011, 66023), True, 'import matplotlib.pyplot as plt\n'), ((66190, 66244), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/Duffing_equation_ht_emd.png"""'], {}), "('jss_figures/Duffing_equation_ht_emd.png')\n", (66201, 66244), True, 'import matplotlib.pyplot as plt\n'), ((66245, 66255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (66253, 66255), True, 'import matplotlib.pyplot as plt\n'), ((66314, 66349), 'AdvEMDpy.EMD', 'AdvEMDpy.EMD', ([], {'time': 't', 'time_series': 'x'}), '(time=t, time_series=x)\n', (66326, 66349), False, 'import AdvEMDpy\n'), ((66467, 66485), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (66479, 66485), True, 'import matplotlib.pyplot as plt\n'), ((66486, 66517), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)'}), '(hspace=0.3)\n', (66505, 66517), True, 'import matplotlib.pyplot as plt\n'), ((68054, 68106), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/Duffing_equation_imfs.png"""'], {}), "('jss_figures/Duffing_equation_imfs.png')\n", (68065, 68106), True, 'import matplotlib.pyplot as plt\n'), ((68107, 68117), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (68115, 68117), True, 'import matplotlib.pyplot as plt\n'), ((68131, 68221), 'emd_hilbert.hilbert_spectrum', 'hilbert_spectrum', (['t', 'emd_duff', 'emd_ht_duff', 'emd_if_duff'], {'max_frequency': '(1.3)', 'plot': '(False)'}), '(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3,\n plot=False)\n', (68147, 68221), False, 'from emd_hilbert import Hilbert, hilbert_spectrum\n'), ((68224, 68240), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (68235, 68240), True, 'import matplotlib.pyplot as plt\n'), ((68842, 68871), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 50, 100, 150]'], 
{}), '([0, 50, 100, 150])\n', (68852, 68871), True, 'import matplotlib.pyplot as plt\n'), ((68872, 68897), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 0.1, 0.2]'], {}), '([0, 0.1, 0.2])\n', (68882, 68897), True, 'import matplotlib.pyplot as plt\n'), ((68898, 68926), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (68908, 68926), True, 'import matplotlib.pyplot as plt\n'), ((68927, 68949), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (68937, 68949), True, 'import matplotlib.pyplot as plt\n'), ((69116, 69166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/Duffing_equation_ht.png"""'], {}), "('jss_figures/Duffing_equation_ht.png')\n", (69127, 69166), True, 'import matplotlib.pyplot as plt\n'), ((69167, 69177), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (69175, 69177), True, 'import matplotlib.pyplot as plt\n'), ((69230, 69275), 'pandas.read_csv', 'pd.read_csv', (['"""Data/co2_mm_mlo.csv"""'], {'header': '(51)'}), "('Data/co2_mm_mlo.csv', header=51)\n", (69241, 69275), True, 'import pandas as pd\n'), ((69277, 69330), 'matplotlib.pyplot.plot', 'plt.plot', (["CO2_data['month']", "CO2_data['decimal date']"], {}), "(CO2_data['month'], CO2_data['decimal date'])\n", (69285, 69330), True, 'import matplotlib.pyplot as plt\n'), ((69426, 69457), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Parts per million"""'], {}), "('Parts per million')\n", (69436, 69457), True, 'import matplotlib.pyplot as plt\n'), ((69458, 69484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (69468, 69484), True, 'import matplotlib.pyplot as plt\n'), ((69485, 69533), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/CO2_concentration.png"""'], {}), "('jss_figures/CO2_concentration.png')\n", (69496, 69533), True, 'import matplotlib.pyplot as plt\n'), ((69534, 69544), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (69542, 69544), True, 'import matplotlib.pyplot as plt\n'), ((69589, 69607), 'numpy.asarray', 'np.asarray', (['signal'], {}), '(signal)\n', (69599, 69607), True, 'import numpy as np\n'), ((69640, 69656), 'numpy.asarray', 'np.asarray', (['time'], {}), '(time)\n', (69650, 69656), True, 'import numpy as np\n'), ((69713, 69724), 'PyEMD.EMD', 'pyemd0215', ([], {}), '()\n', (69722, 69724), True, 'from PyEMD import EMD as pyemd0215\n'), ((69761, 69827), 'emd.spectra.frequency_transform', 'emd040.spectra.frequency_transform', (['py_emd[:2, :].T', '(12)', '"""hilbert"""'], {}), "(py_emd[:2, :].T, 12, 'hilbert')\n", (69795, 69827), True, 'import emd as emd040\n'), ((69954, 69996), 'emd.spectra.define_hist_bins', 'emd040.spectra.define_hist_bins', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (69985, 69996), True, 'import emd as emd040\n'), ((70003, 70050), 'emd.spectra.hilberthuang', 'emd040.spectra.hilberthuang', (['IF', 'IA', 'freq_edges'], {}), '(IF, IA, freq_edges)\n', (70030, 70050), True, 'import emd as emd040\n'), ((70057, 70086), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hht'], {'sigma': '(1)'}), '(hht, sigma=1)\n', (70072, 70086), False, 'from scipy.ndimage import gaussian_filter\n'), ((70097, 70111), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (70109, 70111), True, 'import matplotlib.pyplot as plt\n'), ((70348, 70385), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (year$^{-1}$)"""'], {}), "('Frequency (year$^{-1}$)')\n", (70358, 70385), True, 'import matplotlib.pyplot as plt\n'), 
((70386, 70412), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (70396, 70412), True, 'import matplotlib.pyplot as plt\n'), ((70770, 70818), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/CO2_Hilbert_pyemd.png"""'], {}), "('jss_figures/CO2_Hilbert_pyemd.png')\n", (70781, 70818), True, 'import matplotlib.pyplot as plt\n'), ((70819, 70829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (70827, 70829), True, 'import matplotlib.pyplot as plt\n'), ((70842, 70866), 'emd.sift.sift', 'emd040.sift.sift', (['signal'], {}), '(signal)\n', (70858, 70866), True, 'import emd as emd040\n'), ((70880, 70946), 'emd.spectra.frequency_transform', 'emd040.spectra.frequency_transform', (['emd_sift[:, :1]', '(12)', '"""hilbert"""'], {}), "(emd_sift[:, :1], 12, 'hilbert')\n", (70914, 70946), True, 'import emd as emd040\n'), ((71062, 71104), 'emd.spectra.define_hist_bins', 'emd040.spectra.define_hist_bins', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (71093, 71104), True, 'import emd as emd040\n'), ((71111, 71158), 'emd.spectra.hilberthuang', 'emd040.spectra.hilberthuang', (['IF', 'IA', 'freq_edges'], {}), '(IF, IA, freq_edges)\n', (71138, 71158), True, 'import emd as emd040\n'), ((71165, 71194), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hht'], {'sigma': '(1)'}), '(hht, sigma=1)\n', (71180, 71194), False, 'from scipy.ndimage import gaussian_filter\n'), ((71205, 71219), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (71217, 71219), True, 'import matplotlib.pyplot as plt\n'), ((71453, 71490), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (year$^{-1}$)"""'], {}), "('Frequency (year$^{-1}$)')\n", (71463, 71490), True, 'import matplotlib.pyplot as plt\n'), ((71491, 71517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (71501, 71517), True, 'import matplotlib.pyplot as plt\n'), ((71875, 71921), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/CO2_Hilbert_emd.png"""'], {}), "('jss_figures/CO2_Hilbert_emd.png')\n", (71886, 71921), True, 'import matplotlib.pyplot as plt\n'), ((71922, 71932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (71930, 71932), True, 'import matplotlib.pyplot as plt\n'), ((71992, 72027), 'numpy.linspace', 'np.linspace', (['time[0]', 'time[-1]', '(200)'], {}), '(time[0], time[-1], 200)\n', (72003, 72027), True, 'import numpy as np\n'), ((72043, 72086), 'AdvEMDpy.EMD', 'AdvEMDpy.EMD', ([], {'time': 'time', 'time_series': 'signal'}), '(time=time, time_series=signal)\n', (72055, 72086), False, 'import AdvEMDpy\n'), ((72341, 72359), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (72353, 72359), True, 'import matplotlib.pyplot as plt\n'), ((72360, 72391), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (72379, 72391), True, 'import matplotlib.pyplot as plt\n'), ((73151, 73189), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/CO2_EMD.png"""'], {}), "('jss_figures/CO2_EMD.png')\n", (73162, 73189), True, 'import matplotlib.pyplot as plt\n'), ((73190, 73200), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (73198, 73200), True, 'import matplotlib.pyplot as plt\n'), ((73214, 73302), 'emd_hilbert.hilbert_spectrum', 'hilbert_spectrum', (['time', 'imfs', 'hts', 'ifs'], {'max_frequency': '(10)', 'which_imfs': '[1]', 'plot': '(False)'}), '(time, imfs, hts, ifs, max_frequency=10, which_imfs=[1],\n plot=False)\n', (73230, 
73302), False, 'from emd_hilbert import Hilbert, hilbert_spectrum\n'), ((73388, 73402), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (73400, 73402), True, 'import matplotlib.pyplot as plt\n'), ((73718, 73755), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (year$^{-1}$)"""'], {}), "('Frequency (year$^{-1}$)')\n", (73728, 73755), True, 'import matplotlib.pyplot as plt\n'), ((73756, 73782), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (years)"""'], {}), "('Time (years)')\n", (73766, 73782), True, 'import matplotlib.pyplot as plt\n'), ((74105, 74147), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""jss_figures/CO2_Hilbert.png"""'], {}), "('jss_figures/CO2_Hilbert.png')\n", (74116, 74147), True, 'import matplotlib.pyplot as plt\n'), ((74148, 74158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (74156, 74158), True, 'import matplotlib.pyplot as plt\n'), ((772, 795), 'numpy.sin', 'np.sin', (['pseudo_alg_time'], {}), '(pseudo_alg_time)\n', (778, 795), True, 'import numpy as np\n'), ((798, 825), 'numpy.sin', 'np.sin', (['(5 * pseudo_alg_time)'], {}), '(5 * pseudo_alg_time)\n', (804, 825), True, 'import numpy as np\n'), ((1807, 1830), 'numpy.sin', 'np.sin', (['pseudo_alg_time'], {}), '(pseudo_alg_time)\n', (1813, 1830), True, 'import numpy as np\n'), ((2994, 3021), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(1.2)', '(100)'], {}), '(-0.2, 1.2, 100)\n', (3005, 3021), True, 'import numpy as np\n'), ((3056, 3083), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(1.2)', '(100)'], {}), '(-0.2, 1.2, 100)\n', (3067, 3083), True, 'import numpy as np\n'), ((3286, 3315), 'numpy.sin', 'np.sin', (['knot_demonstrate_time'], {}), '(knot_demonstrate_time)\n', (3292, 3315), True, 'import numpy as np\n'), ((3318, 3351), 'numpy.sin', 'np.sin', (['(5 * knot_demonstrate_time)'], {}), '(5 * knot_demonstrate_time)\n', (3324, 3351), True, 'import numpy as np\n'), ((4529, 4552), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (4540, 4552), True, 'import numpy as np\n'), ((4662, 4685), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (4673, 4685), True, 'import numpy as np\n'), ((4763, 4786), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (4774, 4786), True, 'import numpy as np\n'), ((5139, 5168), 'numpy.sin', 'np.sin', (['knot_demonstrate_time'], {}), '(knot_demonstrate_time)\n', (5145, 5168), True, 'import numpy as np\n'), ((5171, 5204), 'numpy.sin', 'np.sin', (['(5 * knot_demonstrate_time)'], {}), '(5 * knot_demonstrate_time)\n', (5177, 5204), True, 'import numpy as np\n'), ((6445, 6468), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (6456, 6468), True, 'import numpy as np\n'), ((6570, 6593), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (6581, 6593), True, 'import numpy as np\n'), ((6663, 6686), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (6674, 6686), True, 'import numpy as np\n'), ((7017, 7046), 'numpy.sin', 'np.sin', (['knot_demonstrate_time'], {}), '(knot_demonstrate_time)\n', (7023, 7046), True, 'import numpy as np\n'), ((7049, 7082), 'numpy.sin', 'np.sin', (['(5 * knot_demonstrate_time)'], {}), '(5 * knot_demonstrate_time)\n', (7055, 7082), True, 'import numpy as np\n'), ((8309, 8332), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (8320, 8332), True, 'import numpy as np\n'), ((8437, 8460), 'numpy.linspace', 
'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (8448, 8460), True, 'import numpy as np\n'), ((8533, 8556), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (8544, 8556), True, 'import numpy as np\n'), ((9378, 9400), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (9394, 9400), True, 'import numpy as np\n'), ((10482, 10526), 'numpy.linspace', 'np.linspace', (['(0.85 * np.pi)', '(1.15 * np.pi)', '(101)'], {}), '(0.85 * np.pi, 1.15 * np.pi, 101)\n', (10493, 10526), True, 'import numpy as np\n'), ((10630, 10674), 'numpy.linspace', 'np.linspace', (['(0.85 * np.pi)', '(1.15 * np.pi)', '(101)'], {}), '(0.85 * np.pi, 1.15 * np.pi, 101)\n', (10641, 10674), True, 'import numpy as np\n'), ((10752, 10775), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(101)'], {}), '(-3, 3, 101)\n', (10763, 10775), True, 'import numpy as np\n'), ((10835, 10858), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(101)'], {}), '(-3, 3, 101)\n', (10846, 10858), True, 'import numpy as np\n'), ((13644, 13688), 'numpy.linspace', 'np.linspace', (['(0.85 * np.pi)', '(1.15 * np.pi)', '(101)'], {}), '(0.85 * np.pi, 1.15 * np.pi, 101)\n', (13655, 13688), True, 'import numpy as np\n'), ((13792, 13836), 'numpy.linspace', 'np.linspace', (['(0.85 * np.pi)', '(1.15 * np.pi)', '(101)'], {}), '(0.85 * np.pi, 1.15 * np.pi, 101)\n', (13803, 13836), True, 'import numpy as np\n'), ((13914, 13937), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(101)'], {}), '(-3, 3, 101)\n', (13925, 13937), True, 'import numpy as np\n'), ((13997, 14020), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(101)'], {}), '(-3, 3, 101)\n', (14008, 14020), True, 'import numpy as np\n'), ((15795, 15822), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.8)', '(100)'], {}), '(-0.2, 0.8, 100)\n', (15806, 15822), True, 'import numpy as np\n'), ((15860, 15887), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.8)', '(100)'], {}), '(-0.2, 0.8, 100)\n', (15871, 15887), True, 'import numpy as np\n'), ((16263, 16290), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(1.2)', '(100)'], {}), '(-0.2, 1.2, 100)\n', (16274, 16290), True, 'import numpy as np\n'), ((16328, 16355), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(1.2)', '(100)'], {}), '(-0.2, 1.2, 100)\n', (16339, 16355), True, 'import numpy as np\n'), ((16622, 16634), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (16628, 16634), True, 'import numpy as np\n'), ((16637, 16653), 'numpy.cos', 'np.cos', (['(5 * time)'], {}), '(5 * time)\n', (16643, 16653), True, 'import numpy as np\n'), ((17029, 17056), 'numpy.ones_like', 'np.ones_like', (['max_dash_time'], {}), '(max_dash_time)\n', (17041, 17056), True, 'import numpy as np\n'), ((17160, 17187), 'numpy.ones_like', 'np.ones_like', (['min_dash_time'], {}), '(min_dash_time)\n', (17172, 17187), True, 'import numpy as np\n'), ((17517, 17552), 'numpy.ones_like', 'np.ones_like', (['max_discard_dash_time'], {}), '(max_discard_dash_time)\n', (17529, 17552), True, 'import numpy as np\n'), ((18741, 18770), 'numpy.ones_like', 'np.ones_like', (['length_distance'], {}), '(length_distance)\n', (18753, 18770), True, 'import numpy as np\n'), ((18880, 18905), 'numpy.ones_like', 'np.ones_like', (['length_time'], {}), '(length_time)\n', (18892, 18905), True, 'import numpy as np\n'), ((18937, 18962), 'numpy.ones_like', 'np.ones_like', (['length_time'], {}), '(length_time)\n', (18949, 18962), True, 'import numpy as np\n'), ((19089, 19120), 'numpy.ones_like', 'np.ones_like', (['length_distance_2'], 
{}), '(length_distance_2)\n', (19101, 19120), True, 'import numpy as np\n'), ((19237, 19264), 'numpy.ones_like', 'np.ones_like', (['length_time_2'], {}), '(length_time_2)\n', (19249, 19264), True, 'import numpy as np\n'), ((19298, 19325), 'numpy.ones_like', 'np.ones_like', (['length_time_2'], {}), '(length_time_2)\n', (19310, 19325), True, 'import numpy as np\n'), ((19365, 19377), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (19372, 19377), True, 'import numpy as np\n'), ((19412, 19424), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (19419, 19424), True, 'import numpy as np\n'), ((19561, 19583), 'numpy.ones_like', 'np.ones_like', (['end_time'], {}), '(end_time)\n', (19573, 19583), True, 'import numpy as np\n'), ((19698, 19731), 'numpy.ones_like', 'np.ones_like', (['anti_symmetric_time'], {}), '(anti_symmetric_time)\n', (19710, 19731), True, 'import numpy as np\n'), ((22012, 22024), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (22018, 22024), True, 'import numpy as np\n'), ((22027, 22043), 'numpy.cos', 'np.cos', (['(5 * time)'], {}), '(5 * time)\n', (22033, 22043), True, 'import numpy as np\n'), ((22497, 22521), 'numpy.ones_like', 'np.ones_like', (['max_dash_1'], {}), '(max_dash_1)\n', (22509, 22521), True, 'import numpy as np\n'), ((22555, 22579), 'numpy.ones_like', 'np.ones_like', (['max_dash_1'], {}), '(max_dash_1)\n', (22567, 22579), True, 'import numpy as np\n'), ((22761, 22785), 'numpy.ones_like', 'np.ones_like', (['min_dash_1'], {}), '(min_dash_1)\n', (22773, 22785), True, 'import numpy as np\n'), ((22819, 22843), 'numpy.ones_like', 'np.ones_like', (['min_dash_1'], {}), '(min_dash_1)\n', (22831, 22843), True, 'import numpy as np\n'), ((23342, 23366), 'numpy.ones_like', 'np.ones_like', (['max_dash_1'], {}), '(max_dash_1)\n', (23354, 23366), True, 'import numpy as np\n'), ((23878, 23902), 'numpy.ones_like', 'np.ones_like', (['min_dash_1'], {}), '(min_dash_1)\n', (23890, 23902), True, 'import numpy as np\n'), ((24227, 24252), 'numpy.ones_like', 'np.ones_like', (['maxima_dash'], {}), '(maxima_dash)\n', (24239, 24252), True, 'import numpy as np\n'), ((24289, 24314), 'numpy.ones_like', 'np.ones_like', (['maxima_dash'], {}), '(maxima_dash)\n', (24301, 24314), True, 'import numpy as np\n'), ((24363, 24388), 'numpy.ones_like', 'np.ones_like', (['maxima_dash'], {}), '(maxima_dash)\n', (24375, 24388), True, 'import numpy as np\n'), ((24495, 24530), 'numpy.ones_like', 'np.ones_like', (['maxima_line_dash_time'], {}), '(maxima_line_dash_time)\n', (24507, 24530), True, 'import numpy as np\n'), ((24627, 24652), 'numpy.ones_like', 'np.ones_like', (['minima_dash'], {}), '(minima_dash)\n', (24639, 24652), True, 'import numpy as np\n'), ((24689, 24714), 'numpy.ones_like', 'np.ones_like', (['minima_dash'], {}), '(minima_dash)\n', (24701, 24714), True, 'import numpy as np\n'), ((24763, 24788), 'numpy.ones_like', 'np.ones_like', (['minima_dash'], {}), '(minima_dash)\n', (24775, 24788), True, 'import numpy as np\n'), ((24896, 24931), 'numpy.ones_like', 'np.ones_like', (['minima_line_dash_time'], {}), '(minima_line_dash_time)\n', (24908, 24931), True, 'import numpy as np\n'), ((25757, 25781), 'numpy.ones_like', 'np.ones_like', (['min_dash_4'], {}), '(min_dash_4)\n', (25769, 25781), True, 'import numpy as np\n'), ((29062, 29074), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (29068, 29074), True, 'import numpy as np\n'), ((29077, 29093), 'numpy.cos', 'np.cos', (['(5 * time)'], {}), '(5 * time)\n', (29083, 29093), True, 'import numpy as np\n'), ((29372, 29407), 'numpy.abs', 'np.abs', 
(['(maxima_y[-2] - minima_y[-2])'], {}), '(maxima_y[-2] - minima_y[-2])\n', (29378, 29407), True, 'import numpy as np\n'), ((29417, 29452), 'numpy.abs', 'np.abs', (['(maxima_y[-1] - minima_y[-1])'], {}), '(maxima_y[-1] - minima_y[-1])\n', (29423, 29452), True, 'import numpy as np\n'), ((29466, 29501), 'numpy.abs', 'np.abs', (['(maxima_x[-2] - minima_x[-2])'], {}), '(maxima_x[-2] - minima_x[-2])\n', (29472, 29501), True, 'import numpy as np\n'), ((29511, 29546), 'numpy.abs', 'np.abs', (['(maxima_x[-1] - minima_x[-1])'], {}), '(maxima_x[-1] - minima_x[-1])\n', (29517, 29546), True, 'import numpy as np\n'), ((29806, 29871), 'numpy.cos', 'np.cos', (['(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))'], {}), '(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))\n', (29812, 29871), True, 'import numpy as np\n'), ((31033, 31059), 'numpy.ones_like', 'np.ones_like', (['max_2_x_time'], {}), '(max_2_x_time)\n', (31045, 31059), True, 'import numpy as np\n'), ((31241, 31267), 'numpy.ones_like', 'np.ones_like', (['min_2_x_time'], {}), '(min_2_x_time)\n', (31253, 31267), True, 'import numpy as np\n'), ((31371, 31401), 'numpy.ones_like', 'np.ones_like', (['dash_max_min_2_x'], {}), '(dash_max_min_2_x)\n', (31383, 31401), True, 'import numpy as np\n'), ((31564, 31585), 'numpy.ones_like', 'np.ones_like', (['max_2_y'], {}), '(max_2_y)\n', (31576, 31585), True, 'import numpy as np\n'), ((31748, 31769), 'numpy.ones_like', 'np.ones_like', (['min_2_y'], {}), '(min_2_y)\n', (31760, 31769), True, 'import numpy as np\n'), ((31866, 31901), 'numpy.ones_like', 'np.ones_like', (['dash_max_min_2_y_time'], {}), '(dash_max_min_2_y_time)\n', (31878, 31901), True, 'import numpy as np\n'), ((32083, 32109), 'numpy.ones_like', 'np.ones_like', (['max_1_x_time'], {}), '(max_1_x_time)\n', (32095, 32109), True, 'import numpy as np\n'), ((32291, 32317), 'numpy.ones_like', 'np.ones_like', (['min_1_x_time'], {}), '(min_1_x_time)\n', (32303, 32317), True, 'import numpy as np\n'), ((32421, 32451), 'numpy.ones_like', 'np.ones_like', (['dash_max_min_1_x'], {}), '(dash_max_min_1_x)\n', (32433, 32451), True, 'import numpy as np\n'), ((32614, 32635), 'numpy.ones_like', 'np.ones_like', (['max_1_y'], {}), '(max_1_y)\n', (32626, 32635), True, 'import numpy as np\n'), ((32798, 32819), 'numpy.ones_like', 'np.ones_like', (['min_1_y'], {}), '(min_1_y)\n', (32810, 32819), True, 'import numpy as np\n'), ((32916, 32951), 'numpy.ones_like', 'np.ones_like', (['dash_max_min_1_y_time'], {}), '(dash_max_min_1_y_time)\n', (32928, 32951), True, 'import numpy as np\n'), ((36151, 36163), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (36157, 36163), True, 'import numpy as np\n'), ((36166, 36182), 'numpy.cos', 'np.cos', (['(5 * time)'], {}), '(5 * time)\n', (36172, 36182), True, 'import numpy as np\n'), ((36283, 36311), 'numpy.zeros_like', 'np.zeros_like', (['time_extended'], {}), '(time_extended)\n', (36296, 36311), True, 'import numpy as np\n'), ((36783, 36808), 'numpy.ones', 'np.ones', (['neural_network_k'], {}), '(neural_network_k)\n', (36790, 36808), True, 'import numpy as np\n'), ((36943, 36974), 'numpy.matmul', 'np.matmul', (['weights', 'train_input'], {}), '(weights, train_input)\n', (36952, 36974), True, 'import numpy as np\n'), ((37097, 37123), 'numpy.mean', 'np.mean', (['gradients'], {'axis': '(1)'}), '(gradients, axis=1)\n', (37104, 37123), True, 'import numpy as np\n'), ((39331, 39364), 'cvxpy.norm', 'cvx.norm', (['(2 * (vx * P) + 1 - t)', '(2)'], {}), '(2 * (vx * P) + 1 - t, 2)\n', (39339, 39364), True, 'import cvxpy as 
cvx\n'), ((42240, 42269), 'numpy.linspace', 'np.linspace', (['(-2.75)', '(2.75)', '(100)'], {}), '(-2.75, 2.75, 100)\n', (42251, 42269), True, 'import numpy as np\n'), ((42346, 42435), 'numpy.linspace', 'np.linspace', (['((time[-302] + time[-301]) / 2)', '((time[-302] + time[-301]) / 2 + 0.1)', '(100)'], {}), '((time[-302] + time[-301]) / 2, (time[-302] + time[-301]) / 2 + \n 0.1, 100)\n', (42357, 42435), True, 'import numpy as np\n'), ((42483, 42572), 'numpy.linspace', 'np.linspace', (['((time[-302] + time[-301]) / 2)', '((time[-302] + time[-301]) / 2 + 0.1)', '(100)'], {}), '((time[-302] + time[-301]) / 2, (time[-302] + time[-301]) / 2 + \n 0.1, 100)\n', (42494, 42572), True, 'import numpy as np\n'), ((42619, 42748), 'numpy.linspace', 'np.linspace', (['((time_extended[-1001] + time_extended[-1002]) / 2)', '((time_extended[-1001] + time_extended[-1002]) / 2 - 0.1)', '(100)'], {}), '((time_extended[-1001] + time_extended[-1002]) / 2, (\n time_extended[-1001] + time_extended[-1002]) / 2 - 0.1, 100)\n', (42630, 42748), True, 'import numpy as np\n'), ((42808, 42937), 'numpy.linspace', 'np.linspace', (['((time_extended[-1001] + time_extended[-1002]) / 2)', '((time_extended[-1001] + time_extended[-1002]) / 2 - 0.1)', '(100)'], {}), '((time_extended[-1001] + time_extended[-1002]) / 2, (\n time_extended[-1001] + time_extended[-1002]) / 2 - 0.1, 100)\n', (42819, 42937), True, 'import numpy as np\n'), ((43064, 43093), 'numpy.linspace', 'np.linspace', (['(-2.75)', '(2.75)', '(100)'], {}), '(-2.75, 2.75, 100)\n', (43075, 43093), True, 'import numpy as np\n'), ((43160, 43189), 'numpy.linspace', 'np.linspace', (['(-2.75)', '(2.75)', '(100)'], {}), '(-2.75, 2.75, 100)\n', (43171, 43189), True, 'import numpy as np\n'), ((43290, 43379), 'numpy.linspace', 'np.linspace', (['((time[-202] + time[-201]) / 2)', '((time[-202] + time[-201]) / 2 + 0.1)', '(100)'], {}), '((time[-202] + time[-201]) / 2, (time[-202] + time[-201]) / 2 + \n 0.1, 100)\n', (43301, 43379), True, 'import numpy as np\n'), ((43430, 43519), 'numpy.linspace', 'np.linspace', (['((time[-202] + time[-201]) / 2)', '((time[-202] + time[-201]) / 2 + 0.1)', '(100)'], {}), '((time[-202] + time[-201]) / 2, (time[-202] + time[-201]) / 2 + \n 0.1, 100)\n', (43441, 43519), True, 'import numpy as np\n'), ((43569, 43698), 'numpy.linspace', 'np.linspace', (['((time_extended[-1001] + time_extended[-1000]) / 2)', '((time_extended[-1001] + time_extended[-1000]) / 2 - 0.1)', '(100)'], {}), '((time_extended[-1001] + time_extended[-1000]) / 2, (\n time_extended[-1001] + time_extended[-1000]) / 2 - 0.1, 100)\n', (43580, 43698), True, 'import numpy as np\n'), ((43761, 43890), 'numpy.linspace', 'np.linspace', (['((time_extended[-1001] + time_extended[-1000]) / 2)', '((time_extended[-1001] + time_extended[-1000]) / 2 - 0.1)', '(100)'], {}), '((time_extended[-1001] + time_extended[-1000]) / 2, (\n time_extended[-1001] + time_extended[-1000]) / 2 - 0.1, 100)\n', (43772, 43890), True, 'import numpy as np\n'), ((44020, 44049), 'numpy.linspace', 'np.linspace', (['(-2.75)', '(2.75)', '(100)'], {}), '(-2.75, 2.75, 100)\n', (44031, 44049), True, 'import numpy as np\n'), ((44623, 44639), 'numpy.cos', 'np.cos', (['(8 * time)'], {}), '(8 * time)\n', (44629, 44639), True, 'import numpy as np\n'), ((45509, 45594), 'textwrap.fill', 'textwrap.fill', (['"""Comparison of Trends Extracted with Different Knot Sequences"""', '(40)'], {}), "('Comparison of Trends Extracted with Different Knot Sequences',\n 40)\n", (45522, 45594), False, 'import textwrap\n'), ((46081, 46104), 
'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (46092, 46104), True, 'import numpy as np\n'), ((46282, 46326), 'numpy.linspace', 'np.linspace', (['(0.95 * np.pi)', '(1.55 * np.pi)', '(101)'], {}), '(0.95 * np.pi, 1.55 * np.pi, 101)\n', (46293, 46326), True, 'import numpy as np\n'), ((46367, 46411), 'numpy.linspace', 'np.linspace', (['(0.95 * np.pi)', '(1.55 * np.pi)', '(101)'], {}), '(0.95 * np.pi, 1.55 * np.pi, 101)\n', (46378, 46411), True, 'import numpy as np\n'), ((46482, 46509), 'numpy.linspace', 'np.linspace', (['(-5.5)', '(5.5)', '(101)'], {}), '(-5.5, 5.5, 101)\n', (46493, 46509), True, 'import numpy as np\n'), ((46559, 46586), 'numpy.linspace', 'np.linspace', (['(-5.5)', '(5.5)', '(101)'], {}), '(-5.5, 5.5, 101)\n', (46570, 46586), True, 'import numpy as np\n'), ((47335, 47358), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (47346, 47358), True, 'import numpy as np\n'), ((47719, 47763), 'numpy.linspace', 'np.linspace', (['(0.95 * np.pi)', '(1.55 * np.pi)', '(101)'], {}), '(0.95 * np.pi, 1.55 * np.pi, 101)\n', (47730, 47763), True, 'import numpy as np\n'), ((47804, 47848), 'numpy.linspace', 'np.linspace', (['(0.95 * np.pi)', '(1.55 * np.pi)', '(101)'], {}), '(0.95 * np.pi, 1.55 * np.pi, 101)\n', (47815, 47848), True, 'import numpy as np\n'), ((47919, 47946), 'numpy.linspace', 'np.linspace', (['(-5.5)', '(5.5)', '(101)'], {}), '(-5.5, 5.5, 101)\n', (47930, 47946), True, 'import numpy as np\n'), ((47996, 48023), 'numpy.linspace', 'np.linspace', (['(-5.5)', '(5.5)', '(101)'], {}), '(-5.5, 5.5, 101)\n', (48007, 48023), True, 'import numpy as np\n'), ((48529, 48552), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (48540, 48552), True, 'import numpy as np\n'), ((48950, 48994), 'numpy.linspace', 'np.linspace', (['(0.95 * np.pi)', '(1.55 * np.pi)', '(101)'], {}), '(0.95 * np.pi, 1.55 * np.pi, 101)\n', (48961, 48994), True, 'import numpy as np\n'), ((49035, 49079), 'numpy.linspace', 'np.linspace', (['(0.95 * np.pi)', '(1.55 * np.pi)', '(101)'], {}), '(0.95 * np.pi, 1.55 * np.pi, 101)\n', (49046, 49079), True, 'import numpy as np\n'), ((49150, 49177), 'numpy.linspace', 'np.linspace', (['(-5.5)', '(5.5)', '(101)'], {}), '(-5.5, 5.5, 101)\n', (49161, 49177), True, 'import numpy as np\n'), ((49227, 49254), 'numpy.linspace', 'np.linspace', (['(-5.5)', '(5.5)', '(101)'], {}), '(-5.5, 5.5, 101)\n', (49238, 49254), True, 'import numpy as np\n'), ((49403, 49508), 'textwrap.fill', 'textwrap.fill', (['"""Comparison of Trends Extracted with Different Knot Sequences Zoomed Region"""', '(40)'], {}), "(\n 'Comparison of Trends Extracted with Different Knot Sequences Zoomed Region'\n , 40)\n", (49416, 49508), False, 'import textwrap\n'), ((49863, 49886), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (49874, 49886), True, 'import numpy as np\n'), ((50730, 50753), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (50741, 50753), True, 'import numpy as np\n'), ((51555, 51578), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (51566, 51578), True, 'import numpy as np\n'), ((52303, 52417), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Seres with Added Noise"""', '(50)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Seres with Added Noise'\n , 50)\n", (52316, 52417), False, 'import textwrap\n'), ((52489, 52498), 
'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (52495, 52498), True, 'import numpy as np\n'), ((53307, 53319), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (53313, 53319), True, 'import numpy as np\n'), ((53322, 53338), 'numpy.cos', 'np.cos', (['(5 * time)'], {}), '(5 * time)\n', (53328, 53338), True, 'import numpy as np\n'), ((54128, 54257), 'textwrap.fill', 'textwrap.fill', (['"""Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied"""', '(50)'], {}), "(\n 'Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied'\n , 50)\n", (54141, 54257), False, 'import textwrap\n'), ((55025, 55053), 'numpy.linspace', 'np.linspace', (['(-3.0)', '(-2.0)', '(101)'], {}), '(-3.0, -2.0, 101)\n', (55036, 55053), True, 'import numpy as np\n'), ((55733, 55745), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (55739, 55745), True, 'import numpy as np\n'), ((55748, 55764), 'numpy.cos', 'np.cos', (['(5 * time)'], {}), '(5 * time)\n', (55754, 55764), True, 'import numpy as np\n'), ((62125, 62137), 'numpy.cos', 'np.cos', (['time'], {}), '(time)\n', (62131, 62137), True, 'import numpy as np\n'), ((64309, 64413), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10"""', '(40)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10'\n , 40)\n", (64322, 64413), False, 'import textwrap\n'), ((65501, 65601), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3"""', '(40)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3',\n 40)\n", (65514, 65601), False, 'import textwrap\n'), ((68251, 68351), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy"""', '(40)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40\n )\n", (68264, 68351), False, 'import textwrap\n'), ((68561, 68570), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (68567, 68570), True, 'import numpy as np\n'), ((69341, 69428), 'textwrap.fill', 'textwrap.fill', (['"""Mean Monthly Concentration of Carbon Dioxide in the Atmosphere"""', '(35)'], {}), "('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere',\n 35)\n", (69354, 69428), False, 'import textwrap\n'), ((70246, 70356), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10"""', '(45)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10'\n , 45)\n", (70259, 70356), False, 'import textwrap\n'), ((70528, 70546), 'numpy.ones_like', 'np.ones_like', (['time'], {}), '(time)\n', (70540, 70546), True, 'import numpy as np\n'), ((71354, 71461), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3"""', '(45)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3'\n , 45)\n", (71367, 71461), False, 'import textwrap\n'), ((71633, 71651), 'numpy.ones_like', 'np.ones_like', (['time'], {}), '(time)\n', (71645, 71651), True, 'import numpy as np\n'), ((73550, 73559), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (73556, 73559), True, 'import numpy as np\n'), ((73619, 73725), 'textwrap.fill', 'textwrap.fill', (['"""Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using AdvEMDpy"""', '(40)'], {}), "(\n 'Gaussian Filtered Hilbert Spectrum of 
CO$_{2}$ Concentration using AdvEMDpy'\n , 40)\n", (73632, 73725), False, 'import textwrap\n'), ((73804, 73828), 'numpy.ones_like', 'np.ones_like', (['x_hs[0, :]'], {}), '(x_hs[0, :])\n', (73816, 73828), True, 'import numpy as np\n'), ((983, 992), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (990, 992), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1405), 'numpy.sin', 'np.sin', (['pseudo_alg_time'], {}), '(pseudo_alg_time)\n', (1388, 1405), True, 'import numpy as np\n'), ((1694, 1717), 'numpy.sin', 'np.sin', (['pseudo_alg_time'], {}), '(pseudo_alg_time)\n', (1700, 1717), True, 'import numpy as np\n'), ((2980, 2992), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (2987, 2992), True, 'import numpy as np\n'), ((3042, 3054), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (3049, 3054), True, 'import numpy as np\n'), ((3650, 3659), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (3657, 3659), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4527), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (4522, 4527), True, 'import numpy as np\n'), ((4648, 4660), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (4655, 4660), True, 'import numpy as np\n'), ((4749, 4761), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (4756, 4761), True, 'import numpy as np\n'), ((5535, 5544), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5542, 5544), True, 'import matplotlib.pyplot as plt\n'), ((6431, 6443), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (6438, 6443), True, 'import numpy as np\n'), ((6556, 6568), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (6563, 6568), True, 'import numpy as np\n'), ((6649, 6661), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (6656, 6661), True, 'import numpy as np\n'), ((7413, 7422), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7420, 7422), True, 'import matplotlib.pyplot as plt\n'), ((8295, 8307), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (8302, 8307), True, 'import numpy as np\n'), ((8423, 8435), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (8430, 8435), True, 'import numpy as np\n'), ((8519, 8531), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (8526, 8531), True, 'import numpy as np\n'), ((8894, 8903), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8901, 8903), True, 'import matplotlib.pyplot as plt\n'), ((8935, 8944), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8942, 8944), True, 'import matplotlib.pyplot as plt\n'), ((9004, 9013), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9011, 9013), True, 'import matplotlib.pyplot as plt\n'), ((9639, 9681), 'textwrap.fill', 'textwrap.fill', (['"""Noiseless time series"""', '(12)'], {}), "('Noiseless time series', 12)\n", (9652, 9681), False, 'import textwrap\n'), ((9766, 9798), 'textwrap.fill', 'textwrap.fill', (['"""Mean filter"""', '(12)'], {}), "('Mean filter', 12)\n", (9779, 9798), False, 'import textwrap\n'), ((9885, 9919), 'textwrap.fill', 'textwrap.fill', (['"""Median filter"""', '(13)'], {}), "('Median filter', 13)\n", (9898, 9919), False, 'import textwrap\n'), ((10009, 10047), 'textwrap.fill', 'textwrap.fill', (['"""Windsorize filter"""', '(12)'], {}), "('Windsorize filter', 12)\n", (10022, 10047), False, 'import textwrap\n'), ((10161, 10213), 'textwrap.fill', 'textwrap.fill', (['"""Windsorize interpolation filter"""', '(14)'], {}), "('Windsorize interpolation filter', 14)\n", (10174, 10213), False, 'import textwrap\n'), ((10332, 10368), 'textwrap.fill', 'textwrap.fill', (['"""Quantile 
window"""', '(12)'], {}), "('Quantile window', 12)\n", (10345, 10368), False, 'import textwrap\n'), ((10533, 10545), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (10540, 10545), True, 'import numpy as np\n'), ((10582, 10616), 'textwrap.fill', 'textwrap.fill', (['"""Zoomed region"""', '(10)'], {}), "('Zoomed region', 10)\n", (10595, 10616), False, 'import textwrap\n'), ((10680, 10692), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (10687, 10692), True, 'import numpy as np\n'), ((10738, 10750), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (10745, 10750), True, 'import numpy as np\n'), ((10821, 10833), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (10828, 10833), True, 'import numpy as np\n'), ((11163, 11205), 'textwrap.fill', 'textwrap.fill', (['"""Noiseless time series"""', '(12)'], {}), "('Noiseless time series', 12)\n", (11176, 11205), False, 'import textwrap\n'), ((11290, 11322), 'textwrap.fill', 'textwrap.fill', (['"""Mean filter"""', '(12)'], {}), "('Mean filter', 12)\n", (11303, 11322), False, 'import textwrap\n'), ((11409, 11443), 'textwrap.fill', 'textwrap.fill', (['"""Median filter"""', '(13)'], {}), "('Median filter', 13)\n", (11422, 11443), False, 'import textwrap\n'), ((11533, 11571), 'textwrap.fill', 'textwrap.fill', (['"""Windsorize filter"""', '(12)'], {}), "('Windsorize filter', 12)\n", (11546, 11571), False, 'import textwrap\n'), ((11685, 11737), 'textwrap.fill', 'textwrap.fill', (['"""Windsorize interpolation filter"""', '(14)'], {}), "('Windsorize interpolation filter', 14)\n", (11698, 11737), False, 'import textwrap\n'), ((11856, 11892), 'textwrap.fill', 'textwrap.fill', (['"""Quantile window"""', '(12)'], {}), "('Quantile window', 12)\n", (11869, 11892), False, 'import textwrap\n'), ((12615, 12624), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12622, 12624), True, 'import matplotlib.pyplot as plt\n'), ((12656, 12665), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12663, 12665), True, 'import matplotlib.pyplot as plt\n'), ((12725, 12734), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (12732, 12734), True, 'import matplotlib.pyplot as plt\n'), ((13009, 13051), 'textwrap.fill', 'textwrap.fill', (['"""Noiseless time series"""', '(12)'], {}), "('Noiseless time series', 12)\n", (13022, 13051), False, 'import textwrap\n'), ((13120, 13167), 'textwrap.fill', 'textwrap.fill', (['"""Hodrick-Prescott smoothing"""', '(12)'], {}), "('Hodrick-Prescott smoothing', 12)\n", (13133, 13167), False, 'import textwrap\n'), ((13244, 13294), 'textwrap.fill', 'textwrap.fill', (['"""Henderson-Whittaker smoothing"""', '(13)'], {}), "('Henderson-Whittaker smoothing', 13)\n", (13257, 13294), False, 'import textwrap\n'), ((13438, 13482), 'textwrap.fill', 'textwrap.fill', (['"""Downsampled & decimated"""', '(11)'], {}), "('Downsampled & decimated', 11)\n", (13451, 13482), False, 'import textwrap\n'), ((13598, 13630), 'textwrap.fill', 'textwrap.fill', (['"""Downsampled"""', '(13)'], {}), "('Downsampled', 13)\n", (13611, 13630), False, 'import textwrap\n'), ((13695, 13707), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (13702, 13707), True, 'import numpy as np\n'), ((13744, 13778), 'textwrap.fill', 'textwrap.fill', (['"""Zoomed region"""', '(10)'], {}), "('Zoomed region', 10)\n", (13757, 13778), False, 'import textwrap\n'), ((13842, 13854), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (13849, 13854), True, 'import numpy as np\n'), ((13900, 13912), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (13907, 13912), True, 'import 
numpy as np\n'), ((13983, 13995), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (13990, 13995), True, 'import numpy as np\n'), ((14336, 14378), 'textwrap.fill', 'textwrap.fill', (['"""Noiseless time series"""', '(12)'], {}), "('Noiseless time series', 12)\n", (14349, 14378), False, 'import textwrap\n'), ((14447, 14494), 'textwrap.fill', 'textwrap.fill', (['"""Hodrick-Prescott smoothing"""', '(12)'], {}), "('Hodrick-Prescott smoothing', 12)\n", (14460, 14494), False, 'import textwrap\n'), ((14571, 14621), 'textwrap.fill', 'textwrap.fill', (['"""Henderson-Whittaker smoothing"""', '(13)'], {}), "('Henderson-Whittaker smoothing', 13)\n", (14584, 14621), False, 'import textwrap\n'), ((14713, 14757), 'textwrap.fill', 'textwrap.fill', (['"""Downsampled & decimated"""', '(13)'], {}), "('Downsampled & decimated', 13)\n", (14726, 14757), False, 'import textwrap\n'), ((14821, 14853), 'textwrap.fill', 'textwrap.fill', (['"""Downsampled"""', '(13)'], {}), "('Downsampled', 13)\n", (14834, 14853), False, 'import textwrap\n'), ((15781, 15793), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (15788, 15793), True, 'import numpy as np\n'), ((15846, 15858), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (15853, 15858), True, 'import numpy as np\n'), ((16249, 16261), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (16256, 16261), True, 'import numpy as np\n'), ((16314, 16326), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (16321, 16326), True, 'import numpy as np\n'), ((19755, 19764), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (19762, 19764), True, 'import matplotlib.pyplot as plt\n'), ((19964, 20001), 'textwrap.fill', 'textwrap.fill', (['"""Symmetric signal"""', '(10)'], {}), "('Symmetric signal', 10)\n", (19977, 20001), False, 'import textwrap\n'), ((20108, 20150), 'textwrap.fill', 'textwrap.fill', (['"""Anti-symmetric signal"""', '(10)'], {}), "('Anti-symmetric signal', 10)\n", (20121, 20150), False, 'import textwrap\n'), ((20823, 20860), 'textwrap.fill', 'textwrap.fill', (['"""Axes of symmetry"""', '(10)'], {}), "('Axes of symmetry', 10)\n", (20836, 20860), False, 'import textwrap\n'), ((21149, 21194), 'textwrap.fill', 'textwrap.fill', (['"""Symmetric Discard maxima"""', '(10)'], {}), "('Symmetric Discard maxima', 10)\n", (21162, 21194), False, 'import textwrap\n'), ((21263, 21307), 'textwrap.fill', 'textwrap.fill', (['"""Symmetric Anchor maxima"""', '(10)'], {}), "('Symmetric Anchor maxima', 10)\n", (21276, 21307), False, 'import textwrap\n'), ((21385, 21427), 'textwrap.fill', 'textwrap.fill', (['"""Anti-Symmetric maxima"""', '(10)'], {}), "('Anti-Symmetric maxima', 10)\n", (21398, 21427), False, 'import textwrap\n'), ((21502, 21539), 'textwrap.fill', 'textwrap.fill', (['"""Symmetric maxima"""', '(10)'], {}), "('Symmetric maxima', 10)\n", (21515, 21539), False, 'import textwrap\n'), ((26015, 26024), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26022, 26024), True, 'import matplotlib.pyplot as plt\n'), ((26056, 26065), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26063, 26065), True, 'import matplotlib.pyplot as plt\n'), ((26125, 26134), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26132, 26134), True, 'import matplotlib.pyplot as plt\n'), ((28058, 28098), 'textwrap.fill', 'textwrap.fill', (['"""Slope-based maximum"""', '(11)'], {}), "('Slope-based maximum', 11)\n", (28071, 28098), False, 'import textwrap\n'), ((28199, 28239), 'textwrap.fill', 'textwrap.fill', (['"""Slope-based minimum"""', '(11)'], {}), "('Slope-based minimum', 11)\n", 
(28212, 28239), False, 'import textwrap\n'), ((28360, 28409), 'textwrap.fill', 'textwrap.fill', (['"""Improved slope-based maximum"""', '(11)'], {}), "('Improved slope-based maximum', 11)\n", (28373, 28409), False, 'import textwrap\n'), ((28532, 28581), 'textwrap.fill', 'textwrap.fill', (['"""Improved slope-based minimum"""', '(11)'], {}), "('Improved slope-based minimum', 11)\n", (28545, 28581), False, 'import textwrap\n'), ((32975, 32984), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (32982, 32984), True, 'import matplotlib.pyplot as plt\n'), ((33188, 33222), 'textwrap.fill', 'textwrap.fill', (['"""Huang maximum"""', '(10)'], {}), "('Huang maximum', 10)\n", (33201, 33222), False, 'import textwrap\n'), ((33289, 33323), 'textwrap.fill', 'textwrap.fill', (['"""Huang minimum"""', '(10)'], {}), "('Huang minimum', 10)\n", (33302, 33323), False, 'import textwrap\n'), ((33414, 33451), 'textwrap.fill', 'textwrap.fill', (['"""Coughlin maximum"""', '(14)'], {}), "('Coughlin maximum', 14)\n", (33427, 33451), False, 'import textwrap\n'), ((33542, 33579), 'textwrap.fill', 'textwrap.fill', (['"""Coughlin minimum"""', '(14)'], {}), "('Coughlin minimum', 14)\n", (33555, 33579), False, 'import textwrap\n'), ((33667, 33703), 'textwrap.fill', 'textwrap.fill', (['"""Average maximum"""', '(14)'], {}), "('Average maximum', 14)\n", (33680, 33703), False, 'import textwrap\n'), ((33786, 33822), 'textwrap.fill', 'textwrap.fill', (['"""Average minimum"""', '(14)'], {}), "('Average minimum', 14)\n", (33799, 33822), False, 'import textwrap\n'), ((34015, 34061), 'textwrap.fill', 'textwrap.fill', (['"""Huang Characteristic Wave"""', '(14)'], {}), "('Huang Characteristic Wave', 14)\n", (34028, 34061), False, 'import textwrap\n'), ((34129, 34178), 'textwrap.fill', 'textwrap.fill', (['"""Coughlin Characteristic Wave"""', '(14)'], {}), "('Coughlin Characteristic Wave', 14)\n", (34142, 34178), False, 'import textwrap\n'), ((35679, 35705), 'numpy.cos', 'np.cos', (['(2 * np.pi * t / 50)'], {}), '(2 * np.pi * t / 50)\n', (35685, 35705), True, 'import numpy as np\n'), ((35749, 35776), 'numpy.sin', 'np.sin', (['(2 * np.pi * t / 200)'], {}), '(2 * np.pi * t / 200)\n', (35755, 35776), True, 'import numpy as np\n'), ((41520, 41529), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (41527, 41529), True, 'import matplotlib.pyplot as plt\n'), ((41731, 41771), 'textwrap.fill', 'textwrap.fill', (['"""Extrapolated signal"""', '(12)'], {}), "('Extrapolated signal', 12)\n", (41744, 41771), False, 'import textwrap\n'), ((42003, 42043), 'textwrap.fill', 'textwrap.fill', (['"""Extrapolated maxima"""', '(12)'], {}), "('Extrapolated maxima', 12)\n", (42016, 42043), False, 'import textwrap\n'), ((42140, 42180), 'textwrap.fill', 'textwrap.fill', (['"""Extrapolated minima"""', '(12)'], {}), "('Extrapolated minima', 12)\n", (42153, 42180), False, 'import textwrap\n'), ((42226, 42238), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (42233, 42238), True, 'import numpy as np\n'), ((42293, 42335), 'textwrap.fill', 'textwrap.fill', (['"""Neural network inputs"""', '(13)'], {}), "('Neural network inputs', 13)\n", (42306, 42335), False, 'import textwrap\n'), ((42453, 42465), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (42460, 42465), True, 'import numpy as np\n'), ((42589, 42601), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (42596, 42601), True, 'import numpy as np\n'), ((42778, 42790), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (42785, 42790), True, 'import numpy as np\n'), ((42966, 42978), 'numpy.ones', 
'np.ones', (['(100)'], {}), '(100)\n', (42973, 42978), True, 'import numpy as np\n'), ((43050, 43062), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43057, 43062), True, 'import numpy as np\n'), ((43146, 43158), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43153, 43158), True, 'import numpy as np\n'), ((43236, 43279), 'textwrap.fill', 'textwrap.fill', (['"""Neural network targets"""', '(13)'], {}), "('Neural network targets', 13)\n", (43249, 43279), False, 'import textwrap\n'), ((43397, 43409), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43404, 43409), True, 'import numpy as np\n'), ((43536, 43548), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43543, 43548), True, 'import numpy as np\n'), ((43728, 43740), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43735, 43740), True, 'import numpy as np\n'), ((43919, 43931), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43926, 43931), True, 'import numpy as np\n'), ((44006, 44018), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (44013, 44018), True, 'import numpy as np\n'), ((44585, 44601), 'numpy.cos', 'np.cos', (['(2 * time)'], {}), '(2 * time)\n', (44591, 44601), True, 'import numpy as np\n'), ((44604, 44620), 'numpy.cos', 'np.cos', (['(4 * time)'], {}), '(4 * time)\n', (44610, 44620), True, 'import numpy as np\n'), ((45747, 45810), 'textwrap.fill', 'textwrap.fill', (['"""Sum of IMF 1, IMF 2, & IMF 3 with 51 knots"""', '(21)'], {}), "('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)\n", (45760, 45810), False, 'import textwrap\n'), ((45997, 46020), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (46008, 46020), True, 'import numpy as np\n'), ((46067, 46079), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (46074, 46079), True, 'import numpy as np\n'), ((46334, 46346), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (46341, 46346), True, 'import numpy as np\n'), ((46420, 46432), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (46427, 46432), True, 'import numpy as np\n'), ((46468, 46480), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (46475, 46480), True, 'import numpy as np\n'), ((46545, 46557), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (46552, 46557), True, 'import numpy as np\n'), ((46909, 46966), 'textwrap.fill', 'textwrap.fill', (['"""Sum of IMF 1 and IMF 2 with 31 knots"""', '(19)'], {}), "('Sum of IMF 1 and IMF 2 with 31 knots', 19)\n", (46922, 46966), False, 'import textwrap\n'), ((47023, 47080), 'textwrap.fill', 'textwrap.fill', (['"""Sum of IMF 2 and IMF 3 with 51 knots"""', '(19)'], {}), "('Sum of IMF 2 and IMF 3 with 51 knots', 19)\n", (47036, 47080), False, 'import textwrap\n'), ((47251, 47274), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (47262, 47274), True, 'import numpy as np\n'), ((47321, 47333), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (47328, 47333), True, 'import numpy as np\n'), ((47771, 47783), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (47778, 47783), True, 'import numpy as np\n'), ((47857, 47869), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (47864, 47869), True, 'import numpy as np\n'), ((47905, 47917), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (47912, 47917), True, 'import numpy as np\n'), ((47982, 47994), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (47989, 47994), True, 'import numpy as np\n'), ((48445, 48468), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (48456, 48468), 
True, 'import numpy as np\n'), ((48515, 48527), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (48522, 48527), True, 'import numpy as np\n'), ((49002, 49014), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (49009, 49014), True, 'import numpy as np\n'), ((49088, 49100), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (49095, 49100), True, 'import numpy as np\n'), ((49136, 49148), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (49143, 49148), True, 'import numpy as np\n'), ((49213, 49225), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (49220, 49225), True, 'import numpy as np\n'), ((49655, 49718), 'textwrap.fill', 'textwrap.fill', (['"""Sum of IMF 1, IMF 2, & IMF 3 with 51 knots"""', '(21)'], {}), "('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)\n", (49668, 49718), False, 'import textwrap\n'), ((49779, 49802), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (49790, 49802), True, 'import numpy as np\n'), ((49849, 49861), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (49856, 49861), True, 'import numpy as np\n'), ((50414, 50471), 'textwrap.fill', 'textwrap.fill', (['"""Sum of IMF 1 and IMF 2 with 31 knots"""', '(19)'], {}), "('Sum of IMF 1 and IMF 2 with 31 knots', 19)\n", (50427, 50471), False, 'import textwrap\n'), ((50528, 50585), 'textwrap.fill', 'textwrap.fill', (['"""Sum of IMF 2 and IMF 3 with 51 knots"""', '(19)'], {}), "('Sum of IMF 2 and IMF 3 with 51 knots', 19)\n", (50541, 50585), False, 'import textwrap\n'), ((50646, 50669), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (50657, 50669), True, 'import numpy as np\n'), ((50716, 50728), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (50723, 50728), True, 'import numpy as np\n'), ((51471, 51494), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (51482, 51494), True, 'import numpy as np\n'), ((51541, 51553), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (51548, 51553), True, 'import numpy as np\n'), ((52183, 52192), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (52190, 52192), True, 'import matplotlib.pyplot as plt\n'), ((52224, 52233), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (52231, 52233), True, 'import matplotlib.pyplot as plt\n'), ((52569, 52593), 'numpy.ones_like', 'np.ones_like', (['x_hs[0, :]'], {}), '(x_hs[0, :])\n', (52581, 52593), True, 'import numpy as np\n'), ((52661, 52685), 'numpy.ones_like', 'np.ones_like', (['x_hs[0, :]'], {}), '(x_hs[0, :])\n', (52673, 52685), True, 'import numpy as np\n'), ((52753, 52777), 'numpy.ones_like', 'np.ones_like', (['x_hs[0, :]'], {}), '(x_hs[0, :])\n', (52765, 52777), True, 'import numpy as np\n'), ((54079, 54088), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (54086, 54088), True, 'import matplotlib.pyplot as plt\n'), ((54523, 54570), 'textwrap.fill', 'textwrap.fill', (['"""Unsmoothed maxima envelope"""', '(10)'], {}), "('Unsmoothed maxima envelope', 10)\n", (54536, 54570), False, 'import textwrap\n'), ((54626, 54671), 'textwrap.fill', 'textwrap.fill', (['"""Smoothed maxima envelope"""', '(10)'], {}), "('Smoothed maxima envelope', 10)\n", (54639, 54671), False, 'import textwrap\n'), ((54722, 54769), 'textwrap.fill', 'textwrap.fill', (['"""Unsmoothed minima envelope"""', '(10)'], {}), "('Unsmoothed minima envelope', 10)\n", (54735, 54769), False, 'import textwrap\n'), ((54819, 54864), 'textwrap.fill', 'textwrap.fill', (['"""Smoothed minima envelope"""', '(10)'], {}), "('Smoothed minima envelope', 10)\n", 
(54832, 54864), False, 'import textwrap\n'), ((54934, 54962), 'numpy.linspace', 'np.linspace', (['(-3.0)', '(-2.0)', '(101)'], {}), '(-3.0, -2.0, 101)\n', (54945, 54962), True, 'import numpy as np\n'), ((55011, 55023), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (55018, 55023), True, 'import numpy as np\n'), ((60553, 60562), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (60560, 60562), True, 'import matplotlib.pyplot as plt\n'), ((60941, 60976), 'textwrap.fill', 'textwrap.fill', (['"""Optimal maxima"""', '(10)'], {}), "('Optimal maxima', 10)\n", (60954, 60976), False, 'import textwrap\n'), ((61083, 61118), 'textwrap.fill', 'textwrap.fill', (['"""Optimal minima"""', '(10)'], {}), "('Optimal minima', 10)\n", (61096, 61118), False, 'import textwrap\n'), ((61189, 61227), 'textwrap.fill', 'textwrap.fill', (['"""Inflection points"""', '(10)'], {}), "('Inflection points', 10)\n", (61202, 61227), False, 'import textwrap\n'), ((61281, 61314), 'textwrap.fill', 'textwrap.fill', (['"""EMD envelope"""', '(10)'], {}), "('EMD envelope', 10)\n", (61294, 61314), False, 'import textwrap\n'), ((61490, 61524), 'textwrap.fill', 'textwrap.fill', (['"""SEMD envelope"""', '(10)'], {}), "('SEMD envelope', 10)\n", (61503, 61524), False, 'import textwrap\n'), ((61719, 61753), 'textwrap.fill', 'textwrap.fill', (['"""EEMD envelope"""', '(10)'], {}), "('EEMD envelope', 10)\n", (61732, 61753), False, 'import textwrap\n'), ((61953, 61999), 'textwrap.fill', 'textwrap.fill', (['"""Inflection point envelope"""', '(10)'], {}), "('Inflection point envelope', 10)\n", (61966, 61999), False, 'import textwrap\n'), ((62062, 62108), 'textwrap.fill', 'textwrap.fill', (['"""Binomial average envelope"""', '(10)'], {}), "('Binomial average envelope', 10)\n", (62075, 62108), False, 'import textwrap\n'), ((64189, 64198), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (64196, 64198), True, 'import matplotlib.pyplot as plt\n'), ((64230, 64239), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (64237, 64239), True, 'import matplotlib.pyplot as plt\n'), ((64527, 64547), 'numpy.ones_like', 'np.ones_like', (['t[:-1]'], {}), '(t[:-1])\n', (64539, 64547), True, 'import numpy as np\n'), ((64561, 64617), 'textwrap.fill', 'textwrap.fill', (['"""Hamiltonian frequency approximation"""', '(15)'], {}), "('Hamiltonian frequency approximation', 15)\n", (64574, 64617), False, 'import textwrap\n'), ((64643, 64663), 'numpy.ones_like', 'np.ones_like', (['t[:-1]'], {}), '(t[:-1])\n', (64655, 64663), True, 'import numpy as np\n'), ((64678, 64725), 'textwrap.fill', 'textwrap.fill', (['"""Driving function frequency"""', '(15)'], {}), "('Driving function frequency', 15)\n", (64691, 64725), False, 'import textwrap\n'), ((65381, 65390), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (65388, 65390), True, 'import matplotlib.pyplot as plt\n'), ((65422, 65431), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (65429, 65431), True, 'import matplotlib.pyplot as plt\n'), ((65716, 65736), 'numpy.ones_like', 'np.ones_like', (['t[:-1]'], {}), '(t[:-1])\n', (65728, 65736), True, 'import numpy as np\n'), ((65750, 65806), 'textwrap.fill', 'textwrap.fill', (['"""Hamiltonian frequency approximation"""', '(15)'], {}), "('Hamiltonian frequency approximation', 15)\n", (65763, 65806), False, 'import textwrap\n'), ((65832, 65852), 'numpy.ones_like', 'np.ones_like', (['t[:-1]'], {}), '(t[:-1])\n', (65844, 65852), True, 'import numpy as np\n'), ((65867, 65914), 'textwrap.fill', 'textwrap.fill', (['"""Driving function frequency"""', '(15)'], {}), 
"('Driving function frequency', 15)\n", (65880, 65914), False, 'import textwrap\n'), ((66532, 66541), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (66539, 66541), True, 'import matplotlib.pyplot as plt\n'), ((66573, 66582), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (66580, 66582), True, 'import matplotlib.pyplot as plt\n'), ((67415, 67443), 'numpy.cos', 'np.cos', (['(0.04 * 2 * np.pi * t)'], {}), '(0.04 * 2 * np.pi * t)\n', (67421, 67443), True, 'import numpy as np\n'), ((68431, 68440), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (68438, 68440), True, 'import matplotlib.pyplot as plt\n'), ((68472, 68481), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (68479, 68481), True, 'import matplotlib.pyplot as plt\n'), ((68642, 68662), 'numpy.ones_like', 'np.ones_like', (['t[:-1]'], {}), '(t[:-1])\n', (68654, 68662), True, 'import numpy as np\n'), ((68676, 68732), 'textwrap.fill', 'textwrap.fill', (['"""Hamiltonian frequency approximation"""', '(15)'], {}), "('Hamiltonian frequency approximation', 15)\n", (68689, 68732), False, 'import textwrap\n'), ((68758, 68778), 'numpy.ones_like', 'np.ones_like', (['t[:-1]'], {}), '(t[:-1])\n', (68770, 68778), True, 'import numpy as np\n'), ((68793, 68840), 'textwrap.fill', 'textwrap.fill', (['"""Driving function frequency"""', '(15)'], {}), "('Driving function frequency', 15)\n", (68806, 68840), False, 'import textwrap\n'), ((70126, 70135), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (70133, 70135), True, 'import matplotlib.pyplot as plt\n'), ((70167, 70176), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (70174, 70176), True, 'import matplotlib.pyplot as plt\n'), ((70561, 70594), 'textwrap.fill', 'textwrap.fill', (['"""Annual cycle"""', '(10)'], {}), "('Annual cycle', 10)\n", (70574, 70594), False, 'import textwrap\n'), ((71234, 71243), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (71241, 71243), True, 'import matplotlib.pyplot as plt\n'), ((71275, 71284), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (71282, 71284), True, 'import matplotlib.pyplot as plt\n'), ((71666, 71699), 'textwrap.fill', 'textwrap.fill', (['"""Annual cycle"""', '(10)'], {}), "('Annual cycle', 10)\n", (71679, 71699), False, 'import textwrap\n'), ((72903, 72912), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (72910, 72912), True, 'import matplotlib.pyplot as plt\n'), ((73112, 73121), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (73119, 73121), True, 'import matplotlib.pyplot as plt\n'), ((73417, 73426), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (73424, 73426), True, 'import matplotlib.pyplot as plt\n'), ((73458, 73467), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (73465, 73467), True, 'import matplotlib.pyplot as plt\n'), ((73843, 73876), 'textwrap.fill', 'textwrap.fill', (['"""Annual cycle"""', '(10)'], {}), "('Annual cycle', 10)\n", (73856, 73876), False, 'import textwrap\n'), ((4934, 4957), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (4945, 4957), True, 'import numpy as np\n'), ((6818, 6841), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (6829, 6841), True, 'import numpy as np\n'), ((8694, 8717), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(101)'], {}), '(-2, 2, 101)\n', (8705, 8717), True, 'import numpy as np\n'), ((17828, 17884), 'numpy.linspace', 'np.linspace', (['((5 - 2.6 * a) * np.pi)', '((5 - a) * np.pi)', '(101)'], {}), '((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)\n', (17839, 17884), 
True, 'import numpy as np\n'), ((35714, 35740), 'numpy.cos', 'np.cos', (['(2 * np.pi * t / 25)'], {}), '(2 * np.pi * t / 25)\n', (35720, 35740), True, 'import numpy as np\n'), ((37195, 37220), 'numpy.abs', 'np.abs', (['average_gradients'], {}), '(average_gradients)\n', (37201, 37220), True, 'import numpy as np\n'), ((45983, 45995), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (45990, 45995), True, 'import numpy as np\n'), ((47237, 47249), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (47244, 47249), True, 'import numpy as np\n'), ((48431, 48443), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (48438, 48443), True, 'import numpy as np\n'), ((49765, 49777), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (49772, 49777), True, 'import numpy as np\n'), ((50632, 50644), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (50639, 50644), True, 'import numpy as np\n'), ((51457, 51469), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (51464, 51469), True, 'import numpy as np\n'), ((52450, 52459), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (52456, 52459), True, 'import numpy as np\n'), ((54920, 54932), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (54927, 54932), True, 'import numpy as np\n'), ((68401, 68410), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (68407, 68410), True, 'import numpy as np\n'), ((73362, 73371), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (73368, 73371), True, 'import numpy as np\n'), ((4920, 4932), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (4927, 4932), True, 'import numpy as np\n'), ((6804, 6816), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (6811, 6816), True, 'import numpy as np\n'), ((8680, 8692), 'numpy.ones', 'np.ones', (['(101)'], {}), '(101)\n', (8687, 8692), True, 'import numpy as np\n'), ((17948, 18004), 'numpy.linspace', 'np.linspace', (['((5 - 2.6 * a) * np.pi)', '((5 - a) * np.pi)', '(101)'], {}), '((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)\n', (17959, 18004), True, 'import numpy as np\n'), ((37228, 37253), 'numpy.abs', 'np.abs', (['average_gradients'], {}), '(average_gradients)\n', (37234, 37253), True, 'import numpy as np\n'), ((45861, 45930), 'numpy.var', 'np.var', (['(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :]))'], {}), '(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :]))\n', (45867, 45930), True, 'import numpy as np\n'), ((47131, 47184), 'numpy.var', 'np.var', (['(time_series - (imfs_31[1, :] + imfs_31[2, :]))'], {}), '(time_series - (imfs_31[1, :] + imfs_31[2, :]))\n', (47137, 47184), True, 'import numpy as np\n'), ((48343, 48378), 'numpy.var', 'np.var', (['(time_series - imfs_51[3, :])'], {}), '(time_series - imfs_51[3, :])\n', (48349, 48378), True, 'import numpy as np\n'), ((62888, 62906), 'numpy.cos', 'np.cos', (['(omega * ts)'], {}), '(omega * ts)\n', (62894, 62906), True, 'import numpy as np\n'), ((64487, 64498), 'numpy.abs', 'np.abs', (['hht'], {}), '(hht)\n', (64493, 64498), True, 'import numpy as np\n'), ((65676, 65687), 'numpy.abs', 'np.abs', (['hht'], {}), '(hht)\n', (65682, 65687), True, 'import numpy as np\n'), ((70498, 70509), 'numpy.abs', 'np.abs', (['hht'], {}), '(hht)\n', (70504, 70509), True, 'import numpy as np\n'), ((71603, 71614), 'numpy.abs', 'np.abs', (['hht'], {}), '(hht)\n', (71609, 71614), True, 'import numpy as np\n'), ((69898, 69920), 'numpy.ones_like', 'np.ones_like', (['IF[:, 0]'], {}), '(IF[:, 0])\n', (69910, 69920), True, 'import numpy as np\n'), ((72296, 72319), 'numpy.ones_like', 'np.ones_like', (['ifs[1, :]'], {}), '(ifs[1, :])\n', 
(72308, 72319), True, 'import numpy as np\n'), ((66996, 67024), 'numpy.cos', 'np.cos', (['(0.04 * 2 * np.pi * t)'], {}), '(0.04 * 2 * np.pi * t)\n', (67002, 67024), True, 'import numpy as np\n'), ((67170, 67198), 'numpy.cos', 'np.cos', (['(0.04 * 2 * np.pi * t)'], {}), '(0.04 * 2 * np.pi * t)\n', (67176, 67198), True, 'import numpy as np\n'), ((67339, 67367), 'numpy.cos', 'np.cos', (['(0.04 * 2 * np.pi * t)'], {}), '(0.04 * 2 * np.pi * t)\n', (67345, 67367), True, 'import numpy as np\n'), ((71009, 71025), 'numpy.ones_like', 'np.ones_like', (['IF'], {}), '(IF)\n', (71021, 71025), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains unit tests of the rmgpy.reaction module.
"""
import numpy
import unittest
from external.wip import work_in_progress
from rmgpy.species import Species, TransitionState
from rmgpy.reaction import Reaction
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
from rmgpy.statmech.vibration import Vibration, HarmonicOscillator
from rmgpy.statmech.torsion import Torsion, HinderedRotor
from rmgpy.statmech.conformer import Conformer
from rmgpy.kinetics import Arrhenius
from rmgpy.thermo import Wilhoit
import rmgpy.constants as constants
################################################################################
class PseudoSpecies:
"""
    Can be used in place of a :class:`rmgpy.species.Species` for isomorphism checks.
PseudoSpecies('a') is isomorphic with PseudoSpecies('A')
but nothing else.
"""
def __init__(self, label):
self.label = label
def __repr__(self):
return "PseudoSpecies('{0}')".format(self.label)
def __str__(self):
return self.label
def isIsomorphic(self, other):
return self.label.lower() == other.label.lower()
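    # For example, PseudoSpecies('a').isIsomorphic(PseudoSpecies('A')) is True because labels
    # are compared case-insensitively, while PseudoSpecies('a').isIsomorphic(PseudoSpecies('b'))
    # is False.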
class TestReactionIsomorphism(unittest.TestCase):
"""
Contains unit tests of the isomorphism testing of the Reaction class.
"""
    def makeReaction(self, reaction_string):
        """
        Make a Reaction (containing PseudoSpecies) from a string like 'Ab=CD'.
"""
reactants, products = reaction_string.split('=')
reactants = [PseudoSpecies(i) for i in reactants]
products = [PseudoSpecies(i) for i in products]
return Reaction(reactants=reactants, products=products)
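        # For example, makeReaction('Ab=CD') gives Reaction(reactants=[PseudoSpecies('A'),
        # PseudoSpecies('b')], products=[PseudoSpecies('C'), PseudoSpecies('D')]).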
def test1to1(self):
r1 = self.makeReaction('A=B')
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB')))
def test1to2(self):
r1 = self.makeReaction('A=BC')
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c')))
def test2to2(self):
r1 = self.makeReaction('AB=CD')
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'),eitherDirection=False))
self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde')))
def test2to3(self):
r1 = self.makeReaction('AB=CDE')
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'),eitherDirection=False))
self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde')))
class TestReaction(unittest.TestCase):
"""
Contains unit tests of the Reaction class.
"""
def setUp(self):
"""
A method that is called prior to each unit test in this class.
"""
ethylene = Species(
label = 'C2H4',
conformer = Conformer(
E0 = (44.7127, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (28.0313, 'amu'),
),
NonlinearRotor(
inertia = (
[3.41526, 16.6498, 20.065],
'amu*angstrom^2',
),
symmetry = 4,
),
HarmonicOscillator(
frequencies = (
[828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54],
'cm^-1',
),
),
],
spinMultiplicity = 1,
opticalIsomers = 1,
),
)
hydrogen = Species(
label = 'H',
conformer = Conformer(
E0 = (211.794, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (1.00783, 'amu'),
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
)
ethyl = Species(
label = 'C2H5',
conformer = Conformer(
E0 = (111.603, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (29.0391, 'amu'),
),
NonlinearRotor(
inertia = (
[4.8709, 22.2353, 23.9925],
'amu*angstrom^2',
),
symmetry = 1,
),
HarmonicOscillator(
frequencies = (
[482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73],
'cm^-1',
),
),
HinderedRotor(
inertia = (1.11481, 'amu*angstrom^2'),
symmetry = 6,
barrier = (0.244029, 'kJ/mol'),
semiclassical = None,
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
)
TS = TransitionState(
label = 'TS',
conformer = Conformer(
E0 = (266.694, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (29.0391, 'amu'),
),
NonlinearRotor(
inertia = (
[6.78512, 22.1437, 22.2114],
'amu*angstrom^2',
),
symmetry = 1,
),
HarmonicOscillator(
frequencies = (
[412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88],
'cm^-1',
),
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
frequency = (-750.232, 'cm^-1'),
)
self.reaction = Reaction(
reactants = [hydrogen, ethylene],
products = [ethyl],
kinetics = Arrhenius(
A = (501366000.0, 'cm^3/(mol*s)'),
n = 1.637,
Ea = (4.32508, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2500, 'K'),
),
transitionState = TS,
)
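        # self.reaction is the H + C2H4 <=> C2H5 addition (with a transition state); it is used
        # by the rate-coefficient, TST, pickling, and repr() round-trip tests below.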
# CC(=O)O[O]
acetylperoxy = Species(
label='acetylperoxy',
thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")),
)
# C[C]=O
acetyl = Species(
label='acetyl',
thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")),
)
# [O][O]
oxygen = Species(
label='oxygen',
thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")),
)
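        # The Wilhoit thermo objects above specify the low- and high-temperature heat capacity
        # limits (Cp0, CpInf), polynomial coefficients a0-a3 with scaling temperature B, and the
        # integration constants H0 and S0 that anchor the enthalpy and entropy.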
self.reaction2 = Reaction(
reactants=[acetyl, oxygen],
products=[acetylperoxy],
kinetics = Arrhenius(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
Ea = (0.0, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
),
)
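        # self.reaction2 is the acetyl + O2 <=> acetylperoxy association; its species carry only
        # Wilhoit thermo (no conformers), so it is used for the thermodynamic, equilibrium
        # constant, and reverse rate coefficient tests below.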
def testIsIsomerization(self):
"""
Test the Reaction.isIsomerization() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertTrue(isomerization.isIsomerization())
self.assertFalse(association.isIsomerization())
self.assertFalse(dissociation.isIsomerization())
self.assertFalse(bimolecular.isIsomerization())
def testIsAssociation(self):
"""
Test the Reaction.isAssociation() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertFalse(isomerization.isAssociation())
self.assertTrue(association.isAssociation())
self.assertFalse(dissociation.isAssociation())
self.assertFalse(bimolecular.isAssociation())
def testIsDissociation(self):
"""
Test the Reaction.isDissociation() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertFalse(isomerization.isDissociation())
self.assertFalse(association.isDissociation())
self.assertTrue(dissociation.isDissociation())
self.assertFalse(bimolecular.isDissociation())
def testHasTemplate(self):
"""
Test the Reaction.hasTemplate() method.
"""
reactants = self.reaction.reactants[:]
products = self.reaction.products[:]
self.assertTrue(self.reaction.hasTemplate(reactants, products))
self.assertTrue(self.reaction.hasTemplate(products, reactants))
self.assertFalse(self.reaction2.hasTemplate(reactants, products))
self.assertFalse(self.reaction2.hasTemplate(products, reactants))
reactants.reverse()
products.reverse()
self.assertTrue(self.reaction.hasTemplate(reactants, products))
self.assertTrue(self.reaction.hasTemplate(products, reactants))
self.assertFalse(self.reaction2.hasTemplate(reactants, products))
self.assertFalse(self.reaction2.hasTemplate(products, reactants))
reactants = self.reaction2.reactants[:]
products = self.reaction2.products[:]
self.assertFalse(self.reaction.hasTemplate(reactants, products))
self.assertFalse(self.reaction.hasTemplate(products, reactants))
self.assertTrue(self.reaction2.hasTemplate(reactants, products))
self.assertTrue(self.reaction2.hasTemplate(products, reactants))
reactants.reverse()
products.reverse()
self.assertFalse(self.reaction.hasTemplate(reactants, products))
self.assertFalse(self.reaction.hasTemplate(products, reactants))
self.assertTrue(self.reaction2.hasTemplate(reactants, products))
self.assertTrue(self.reaction2.hasTemplate(products, reactants))
def testEnthalpyOfReaction(self):
"""
Test the Reaction.getEnthalpyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']]
Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2)
def testEntropyOfReaction(self):
"""
Test the Reaction.getEntropyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']]
Slist = self.reaction2.getEntropiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Slist[i], Slist0[i], 2)
def testFreeEnergyOfReaction(self):
"""
Test the Reaction.getFreeEnergyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']]
Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2)
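        # Consistency of the reference values: Glist0 = Hlist0 - T*Slist0, e.g. at 200 K,
        # -146007 - 200*(-156.793) = -114648.4 J/mol, matching the first entry above.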
def testEquilibriumConstantKa(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']]
Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4)
def testEquilibriumConstantKc(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']]
Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4)
def testEquilibriumConstantKp(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']]
Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4)
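        # Note: the three reference sets above are mutually consistent for this association
        # (dn = -1): Kp = Ka / P0 with P0 = 1e5 Pa (e.g. 8.75951e+29 -> 8.75951e+24), and
        # Kc = Ka * (R*T / P0), e.g. 8.75951e+29 * (8.314*200/1e5) ~ 1.457e+28 at 200 K.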
def testStoichiometricCoefficient(self):
"""
Test the Reaction.getStoichiometricCoefficient() method.
"""
for reactant in self.reaction.reactants:
self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), -1)
for product in self.reaction.products:
self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 1)
for reactant in self.reaction2.reactants:
self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), 0)
for product in self.reaction2.products:
self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 0)
def testRateCoefficient(self):
"""
Test the Reaction.getRateCoefficient() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
self.assertAlmostEqual(self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6)
def testGenerateReverseRateCoefficient(self):
"""
Test the Reaction.generateReverseRateCoefficient() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
P = 1e5
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
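        # Detailed balance: the generated reverse kinetics should reproduce kr(T) = kf(T) / Keq(T),
        # which is what the loop below checks.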
for T in Tlist:
kr0 = self.reaction2.getRateCoefficient(T, P) / self.reaction2.getEquilibriumConstant(T)
kr = reverseKinetics.getRateCoefficient(T)
self.assertAlmostEqual(kr0 / kr, 1.0, 0)
def testGenerateReverseRateCoefficientArrhenius(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the Arrhenius format.
"""
original_kinetics = Arrhenius(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
Ea = (0.0, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(original_kinetics.Tmin.value_si, original_kinetics.Tmax.value_si, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
@work_in_progress
def testGenerateReverseRateCoefficientArrheniusEP(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the ArrheniusEP format.
"""
from rmgpy.kinetics import ArrheniusEP
original_kinetics = ArrheniusEP(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
alpha = 0.5,
E0 = (41.84, 'kJ/mol'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(original_kinetics.Tmin, original_kinetics.Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientPDepArrhenius(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the PDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius
arrhenius0 = Arrhenius(
A = (1.0e6,"s^-1"),
n = 1.0,
Ea = (10.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
arrhenius1 = Arrhenius(
A = (1.0e12,"s^-1"),
n = 1.0,
Ea = (20.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
pressures = numpy.array([0.1, 10.0])
arrhenius = [arrhenius0, arrhenius1]
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
original_kinetics = PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiArrhenius(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the MultiArrhenius format.
"""
from rmgpy.kinetics import MultiArrhenius
pressures = numpy.array([0.1, 10.0])
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
arrhenius = [
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
]
original_kinetics = MultiArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiPDepArrhenius(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the MultiPDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius
Tmin = 350.
Tmax = 1500.
Pmin = 1e-1
Pmax = 1e1
pressures = numpy.array([1e-1,1e1])
comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)'
arrhenius = [
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (9.3e-16,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (1.4e-11,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
]
original_kinetics = MultiPDepArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientThirdBody(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the ThirdBody format.
"""
from rmgpy.kinetics import ThirdBody
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
thirdBody = ThirdBody(
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = thirdBody
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientLindemann(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the Lindemann format.
"""
from rmgpy.kinetics import Lindemann
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
lindemann = Lindemann(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = lindemann
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientTroe(self):
"""
        Test that the Reaction.generateReverseRateCoefficient() method works for the Troe format.
"""
from rmgpy.kinetics import Troe
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
alpha = 0.783
T3 = 74
T1 = 2941
T2 = 6964
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
troe = Troe(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
alpha = alpha,
T3 = (T3,"K"),
T1 = (T1,"K"),
T2 = (T2,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = troe
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testTSTCalculation(self):
"""
A test of the transition state theory k(T) calculation function,
using the reaction H + C2H4 -> C2H5.
"""
Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.01)
klist = numpy.array([self.reaction.calculateTSTRateCoefficient(T) for T in Tlist])
arrhenius = Arrhenius().fitToData(Tlist, klist, kunits='m^3/(mol*s)')
klist2 = numpy.array([arrhenius.getRateCoefficient(T) for T in Tlist])
# Check that the correct Arrhenius parameters are returned
self.assertAlmostEqual(arrhenius.A.value_si, 2265.2488, delta=1e-2)
self.assertAlmostEqual(arrhenius.n.value_si, 1.45419, delta=1e-4)
self.assertAlmostEqual(arrhenius.Ea.value_si, 6645.24, delta=1e-2)
# Check that the fit is satisfactory (defined here as always within 5%)
for i in range(len(Tlist)):
self.assertAlmostEqual(klist[i], klist2[i], delta=5e-2 * klist[i])
def testPickle(self):
"""
Test that a Reaction object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
reaction = cPickle.loads(cPickle.dumps(self.reaction,-1))
self.assertEqual(len(self.reaction.reactants), len(reaction.reactants))
self.assertEqual(len(self.reaction.products), len(reaction.products))
for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants):
self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2)
self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units)
for product0, product in zip(self.reaction.products, reaction.products):
self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2)
self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2)
self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2)
self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units)
self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6)
self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment)
self.assertEqual(self.reaction.duplicate, reaction.duplicate)
self.assertEqual(self.reaction.degeneracy, reaction.degeneracy)
def testOutput(self):
"""
Test that a Reaction object can be successfully reconstructed
from its repr() output with no loss of information.
"""
exec('reaction = %r' % (self.reaction))
self.assertEqual(len(self.reaction.reactants), len(reaction.reactants))
self.assertEqual(len(self.reaction.products), len(reaction.products))
for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants):
self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2)
self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units)
for product0, product in zip(self.reaction.products, reaction.products):
self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2)
self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2)
self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2)
self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units)
self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6)
self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment)
self.assertEqual(self.reaction.duplicate, reaction.duplicate)
self.assertEqual(self.reaction.degeneracy, reaction.degeneracy)
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
|
[
"rmgpy.statmech.torsion.HinderedRotor",
"rmgpy.kinetics.Arrhenius",
"numpy.array",
"rmgpy.thermo.Wilhoit",
"rmgpy.reaction.Reaction",
"rmgpy.kinetics.Troe",
"rmgpy.kinetics.ThirdBody",
"unittest.TextTestRunner",
"rmgpy.species.Species",
"numpy.arange",
"rmgpy.statmech.translation.IdealGasTranslation",
"cPickle.dumps",
"rmgpy.kinetics.MultiPDepArrhenius",
"rmgpy.statmech.rotation.NonlinearRotor",
"rmgpy.kinetics.MultiArrhenius",
"rmgpy.kinetics.PDepArrhenius",
"rmgpy.kinetics.ArrheniusEP",
"rmgpy.statmech.vibration.HarmonicOscillator",
"rmgpy.kinetics.Lindemann"
] |
[((1776, 1824), 'rmgpy.reaction.Reaction', 'Reaction', ([], {'reactants': 'reactants', 'products': 'products'}), '(reactants=reactants, products=products)\n', (1784, 1824), False, 'from rmgpy.reaction import Reaction\n'), ((13183, 13232), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (13195, 13232), False, 'import numpy\n'), ((13689, 13738), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (13701, 13738), False, 'import numpy\n'), ((14193, 14242), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (14205, 14242), False, 'import numpy\n'), ((14708, 14757), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (14720, 14757), False, 'import numpy\n'), ((15250, 15299), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (15262, 15299), False, 'import numpy\n'), ((15796, 15845), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (15808, 15845), False, 'import numpy\n'), ((17007, 17056), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (17019, 17056), False, 'import numpy\n'), ((17389, 17438), 'numpy.arange', 'numpy.arange', (['(200.0)', '(2001.0)', '(200.0)', 'numpy.float64'], {}), '(200.0, 2001.0, 200.0, numpy.float64)\n', (17401, 17438), False, 'import numpy\n'), ((17972, 18097), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(2650000000000.0, 'cm^3/(mol*s)')", 'n': '(0.0)', 'Ea': "(0.0, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(300, 'K')", 'Tmax': "(2000, 'K')"}), "(A=(2650000000000.0, 'cm^3/(mol*s)'), n=0.0, Ea=(0.0, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(2000, 'K'))\n", (17981, 18097), False, 'from rmgpy.kinetics import Arrhenius\n'), ((18760, 18865), 'numpy.arange', 'numpy.arange', (['original_kinetics.Tmin.value_si', 'original_kinetics.Tmax.value_si', '(200.0)', 'numpy.float64'], {}), '(original_kinetics.Tmin.value_si, original_kinetics.Tmax.\n value_si, 200.0, numpy.float64)\n', (18772, 18865), False, 'import numpy\n'), ((19378, 19506), 'rmgpy.kinetics.ArrheniusEP', 'ArrheniusEP', ([], {'A': "(2650000000000.0, 'cm^3/(mol*s)')", 'n': '(0.0)', 'alpha': '(0.5)', 'E0': "(41.84, 'kJ/mol')", 'Tmin': "(300, 'K')", 'Tmax': "(2000, 'K')"}), "(A=(2650000000000.0, 'cm^3/(mol*s)'), n=0.0, alpha=0.5, E0=(\n 41.84, 'kJ/mol'), Tmin=(300, 'K'), Tmax=(2000, 'K'))\n", (19389, 19506), False, 'from rmgpy.kinetics import ArrheniusEP\n'), ((20168, 20255), 'numpy.arange', 'numpy.arange', (['original_kinetics.Tmin', 'original_kinetics.Tmax', '(200.0)', 'numpy.float64'], {}), '(original_kinetics.Tmin, original_kinetics.Tmax, 200.0, numpy.\n float64)\n', (20180, 20255), False, 'import numpy\n'), ((20745, 20914), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1000000.0, 's^-1')", 'n': '(1.0)', 'Ea': "(10.0, 'kJ/mol')", 'T0': "(300.0, 'K')", 'Tmin': "(300.0, 'K')", 'Tmax': "(2000.0, 'K')", 'comment': '"""This data is completely made up"""'}), "(A=(1000000.0, 's^-1'), n=1.0, Ea=(10.0, 'kJ/mol'), T0=(300.0, 'K'\n ), Tmin=(300.0, 'K'), Tmax=(2000.0, 'K'), comment=\n 'This data is completely made up')\n", (20754, 20914), 
False, 'from rmgpy.kinetics import Arrhenius\n'), ((21036, 21211), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1000000000000.0, 's^-1')", 'n': '(1.0)', 'Ea': "(20.0, 'kJ/mol')", 'T0': "(300.0, 'K')", 'Tmin': "(300.0, 'K')", 'Tmax': "(2000.0, 'K')", 'comment': '"""This data is completely made up"""'}), "(A=(1000000000000.0, 's^-1'), n=1.0, Ea=(20.0, 'kJ/mol'), T0=(\n 300.0, 'K'), Tmin=(300.0, 'K'), Tmax=(2000.0, 'K'), comment=\n 'This data is completely made up')\n", (21045, 21211), False, 'from rmgpy.kinetics import Arrhenius\n'), ((21328, 21352), 'numpy.array', 'numpy.array', (['[0.1, 10.0]'], {}), '([0.1, 10.0])\n', (21339, 21352), False, 'import numpy\n'), ((21565, 21731), 'rmgpy.kinetics.PDepArrhenius', 'PDepArrhenius', ([], {'pressures': "(pressures, 'bar')", 'arrhenius': 'arrhenius', 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'Pmin': "(Pmin, 'bar')", 'Pmax': "(Pmax, 'bar')", 'comment': 'comment'}), "(pressures=(pressures, 'bar'), arrhenius=arrhenius, Tmin=(Tmin,\n 'K'), Tmax=(Tmax, 'K'), Pmin=(Pmin, 'bar'), Pmax=(Pmax, 'bar'), comment\n =comment)\n", (21578, 21731), False, 'from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius\n'), ((22354, 22400), 'numpy.arange', 'numpy.arange', (['Tmin', 'Tmax', '(200.0)', 'numpy.float64'], {}), '(Tmin, Tmax, 200.0, numpy.float64)\n', (22366, 22400), False, 'import numpy\n'), ((22898, 22922), 'numpy.array', 'numpy.array', (['[0.1, 10.0]'], {}), '([0.1, 10.0])\n', (22909, 22922), False, 'import numpy\n'), ((23733, 23825), 'rmgpy.kinetics.MultiArrhenius', 'MultiArrhenius', ([], {'arrhenius': 'arrhenius', 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(arrhenius=arrhenius, Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n comment=comment)\n", (23747, 23825), False, 'from rmgpy.kinetics import MultiArrhenius\n'), ((24411, 24457), 'numpy.arange', 'numpy.arange', (['Tmin', 'Tmax', '(200.0)', 'numpy.float64'], {}), '(Tmin, Tmax, 200.0, numpy.float64)\n', (24423, 24457), False, 'import numpy\n'), ((25061, 25085), 'numpy.array', 'numpy.array', (['[0.1, 10.0]'], {}), '([0.1, 10.0])\n', (25072, 25085), False, 'import numpy\n'), ((27357, 27493), 'rmgpy.kinetics.MultiPDepArrhenius', 'MultiPDepArrhenius', ([], {'arrhenius': 'arrhenius', 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'Pmin': "(Pmin, 'bar')", 'Pmax': "(Pmax, 'bar')", 'comment': 'comment'}), "(arrhenius=arrhenius, Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n Pmin=(Pmin, 'bar'), Pmax=(Pmax, 'bar'), comment=comment)\n", (27375, 27493), False, 'from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius\n'), ((28105, 28151), 'numpy.arange', 'numpy.arange', (['Tmin', 'Tmax', '(200.0)', 'numpy.float64'], {}), '(Tmin, Tmax, 200.0, numpy.float64)\n', (28117, 28151), False, 'import numpy\n'), ((28637, 28727), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(2.62e+33, 'cm^6/(mol^2*s)')", 'n': '(-4.76)', 'Ea': "(10.21, 'kJ/mol')", 'T0': "(1, 'K')"}), "(A=(2.62e+33, 'cm^6/(mol^2*s)'), n=-4.76, Ea=(10.21, 'kJ/mol'), T0\n =(1, 'K'))\n", (28646, 28727), False, 'from rmgpy.kinetics import Arrhenius\n'), ((29033, 29197), 'rmgpy.kinetics.ThirdBody', 'ThirdBody', ([], {'arrheniusLow': 'arrheniusLow', 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'Pmin': "(Pmin, 'bar')", 'Pmax': "(Pmax, 'bar')", 'efficiencies': 'efficiencies', 'comment': 'comment'}), "(arrheniusLow=arrheniusLow, Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n Pmin=(Pmin, 'bar'), Pmax=(Pmax, 'bar'), efficiencies=efficiencies,\n comment=comment)\n", (29042, 29197), False, 'from rmgpy.kinetics import ThirdBody\n'), ((29867, 29913), 
'numpy.arange', 'numpy.arange', (['Tmin', 'Tmax', '(200.0)', 'numpy.float64'], {}), '(Tmin, Tmax, 200.0, numpy.float64)\n', (29879, 29913), False, 'import numpy\n'), ((30400, 30489), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1.39e+16, 'cm^3/(mol*s)')", 'n': '(-0.534)', 'Ea': "(2.243, 'kJ/mol')", 'T0': "(1, 'K')"}), "(A=(1.39e+16, 'cm^3/(mol*s)'), n=-0.534, Ea=(2.243, 'kJ/mol'), T0=\n (1, 'K'))\n", (30409, 30489), False, 'from rmgpy.kinetics import Arrhenius\n'), ((30575, 30665), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(2.62e+33, 'cm^6/(mol^2*s)')", 'n': '(-4.76)', 'Ea': "(10.21, 'kJ/mol')", 'T0': "(1, 'K')"}), "(A=(2.62e+33, 'cm^6/(mol^2*s)'), n=-4.76, Ea=(10.21, 'kJ/mol'), T0\n =(1, 'K'))\n", (30584, 30665), False, 'from rmgpy.kinetics import Arrhenius\n'), ((30971, 31165), 'rmgpy.kinetics.Lindemann', 'Lindemann', ([], {'arrheniusHigh': 'arrheniusHigh', 'arrheniusLow': 'arrheniusLow', 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'Pmin': "(Pmin, 'bar')", 'Pmax': "(Pmax, 'bar')", 'efficiencies': 'efficiencies', 'comment': 'comment'}), "(arrheniusHigh=arrheniusHigh, arrheniusLow=arrheniusLow, Tmin=(\n Tmin, 'K'), Tmax=(Tmax, 'K'), Pmin=(Pmin, 'bar'), Pmax=(Pmax, 'bar'),\n efficiencies=efficiencies, comment=comment)\n", (30980, 31165), False, 'from rmgpy.kinetics import Lindemann\n'), ((31856, 31902), 'numpy.arange', 'numpy.arange', (['Tmin', 'Tmax', '(200.0)', 'numpy.float64'], {}), '(Tmin, Tmax, 200.0, numpy.float64)\n', (31868, 31902), False, 'import numpy\n'), ((32375, 32464), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1.39e+16, 'cm^3/(mol*s)')", 'n': '(-0.534)', 'Ea': "(2.243, 'kJ/mol')", 'T0': "(1, 'K')"}), "(A=(1.39e+16, 'cm^3/(mol*s)'), n=-0.534, Ea=(2.243, 'kJ/mol'), T0=\n (1, 'K'))\n", (32384, 32464), False, 'from rmgpy.kinetics import Arrhenius\n'), ((32550, 32640), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(2.62e+33, 'cm^6/(mol^2*s)')", 'n': '(-4.76)', 'Ea': "(10.21, 'kJ/mol')", 'T0': "(1, 'K')"}), "(A=(2.62e+33, 'cm^6/(mol^2*s)'), n=-4.76, Ea=(10.21, 'kJ/mol'), T0\n =(1, 'K'))\n", (32559, 32640), False, 'from rmgpy.kinetics import Arrhenius\n'), ((33015, 33262), 'rmgpy.kinetics.Troe', 'Troe', ([], {'arrheniusHigh': 'arrheniusHigh', 'arrheniusLow': 'arrheniusLow', 'alpha': 'alpha', 'T3': "(T3, 'K')", 'T1': "(T1, 'K')", 'T2': "(T2, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'Pmin': "(Pmin, 'bar')", 'Pmax': "(Pmax, 'bar')", 'efficiencies': 'efficiencies', 'comment': 'comment'}), "(arrheniusHigh=arrheniusHigh, arrheniusLow=arrheniusLow, alpha=alpha,\n T3=(T3, 'K'), T1=(T1, 'K'), T2=(T2, 'K'), Tmin=(Tmin, 'K'), Tmax=(Tmax,\n 'K'), Pmin=(Pmin, 'bar'), Pmax=(Pmax, 'bar'), efficiencies=efficiencies,\n comment=comment)\n", (33019, 33262), False, 'from rmgpy.kinetics import Troe\n'), ((33998, 34044), 'numpy.arange', 'numpy.arange', (['Tmin', 'Tmax', '(200.0)', 'numpy.float64'], {}), '(Tmin, Tmax, 200.0, numpy.float64)\n', (34010, 34044), False, 'import numpy\n'), ((23096, 23263), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(9.3e-14, 'cm^3/(molecule*s)')", 'n': '(0.0)', 'Ea': "(4740 * constants.R * 0.001, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(A=(9.3e-14, 'cm^3/(molecule*s)'), n=0.0, Ea=(4740 * constants.R *\n 0.001, 'kJ/mol'), T0=(1, 'K'), Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n comment=comment)\n", (23105, 23263), False, 'from rmgpy.kinetics import Arrhenius\n'), ((23401, 23569), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1.4e-09, 
'cm^3/(molecule*s)')", 'n': '(0.0)', 'Ea': "(11200 * constants.R * 0.001, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(A=(1.4e-09, 'cm^3/(molecule*s)'), n=0.0, Ea=(11200 * constants.R *\n 0.001, 'kJ/mol'), T0=(1, 'K'), Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n comment=comment)\n", (23410, 23569), False, 'from rmgpy.kinetics import Arrhenius\n'), ((34478, 34507), 'numpy.arange', 'numpy.arange', (['(0.4)', '(3.35)', '(0.01)'], {}), '(0.4, 3.35, 0.01)\n', (34490, 34507), False, 'import numpy\n'), ((35482, 35514), 'cPickle.dumps', 'cPickle.dumps', (['self.reaction', '(-1)'], {}), '(self.reaction, -1)\n', (35495, 35514), False, 'import cPickle\n'), ((39720, 39756), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (39743, 39756), False, 'import unittest\n'), ((7826, 7953), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(501366000.0, 'cm^3/(mol*s)')", 'n': '(1.637)', 'Ea': "(4.32508, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(300, 'K')", 'Tmax': "(2500, 'K')"}), "(A=(501366000.0, 'cm^3/(mol*s)'), n=1.637, Ea=(4.32508, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(2500, 'K'))\n", (7835, 7953), False, 'from rmgpy.kinetics import Arrhenius\n'), ((8229, 8428), 'rmgpy.thermo.Wilhoit', 'Wilhoit', ([], {'Cp0': "(4.0 * constants.R, 'J/(mol*K)')", 'CpInf': "(21.0 * constants.R, 'J/(mol*K)')", 'a0': '(-3.95)', 'a1': '(9.26)', 'a2': '(-15.6)', 'a3': '(8.55)', 'B': "(500.0, 'K')", 'H0': "(-61510.0, 'J/mol')", 'S0': "(-790.2, 'J/(mol*K)')"}), "(Cp0=(4.0 * constants.R, 'J/(mol*K)'), CpInf=(21.0 * constants.R,\n 'J/(mol*K)'), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0, 'K'), H0=\n (-61510.0, 'J/mol'), S0=(-790.2, 'J/(mol*K)'))\n", (8236, 8428), False, 'from rmgpy.thermo import Wilhoit\n'), ((8515, 8719), 'rmgpy.thermo.Wilhoit', 'Wilhoit', ([], {'Cp0': "(4.0 * constants.R, 'J/(mol*K)')", 'CpInf': "(15.5 * constants.R, 'J/(mol*K)')", 'a0': '(0.2541)', 'a1': '(-0.4712)', 'a2': '(-4.434)', 'a3': '(2.25)', 'B': "(500.0, 'K')", 'H0': "(-143900.0, 'J/mol')", 'S0': "(-524.6, 'J/(mol*K)')"}), "(Cp0=(4.0 * constants.R, 'J/(mol*K)'), CpInf=(15.5 * constants.R,\n 'J/(mol*K)'), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0, 'K'),\n H0=(-143900.0, 'J/mol'), S0=(-524.6, 'J/(mol*K)'))\n", (8522, 8719), False, 'from rmgpy.thermo import Wilhoit\n'), ((8806, 9007), 'rmgpy.thermo.Wilhoit', 'Wilhoit', ([], {'Cp0': "(3.5 * constants.R, 'J/(mol*K)')", 'CpInf': "(4.5 * constants.R, 'J/(mol*K)')", 'a0': '(-0.9324)', 'a1': '(26.18)', 'a2': '(-70.47)', 'a3': '(44.12)', 'B': "(500.0, 'K')", 'H0': "(14530.0, 'J/mol')", 'S0': "(-12.19, 'J/(mol*K)')"}), "(Cp0=(3.5 * constants.R, 'J/(mol*K)'), CpInf=(4.5 * constants.R,\n 'J/(mol*K)'), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0, 'K'),\n H0=(14530.0, 'J/mol'), S0=(-12.19, 'J/(mol*K)'))\n", (8813, 9007), False, 'from rmgpy.thermo import Wilhoit\n'), ((9150, 9275), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(2650000000000.0, 'cm^3/(mol*s)')", 'n': '(0.0)', 'Ea': "(0.0, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(300, 'K')", 'Tmax': "(2000, 'K')"}), "(A=(2650000000000.0, 'cm^3/(mol*s)'), n=0.0, Ea=(0.0, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(2000, 'K'))\n", (9159, 9275), False, 'from rmgpy.kinetics import Arrhenius\n'), ((34619, 34630), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {}), '()\n', (34628, 34630), False, 'from rmgpy.kinetics import Arrhenius\n'), ((9562, 9571), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9569, 9571), False, 'from 
rmgpy.species import Species, TransitionState\n'), ((9584, 9593), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9591, 9593), False, 'from rmgpy.species import Species, TransitionState\n'), ((9638, 9647), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9645, 9647), False, 'from rmgpy.species import Species, TransitionState\n'), ((9648, 9657), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9655, 9657), False, 'from rmgpy.species import Species, TransitionState\n'), ((9670, 9679), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9677, 9679), False, 'from rmgpy.species import Species, TransitionState\n'), ((9725, 9734), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9732, 9734), False, 'from rmgpy.species import Species, TransitionState\n'), ((9747, 9756), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9754, 9756), False, 'from rmgpy.species import Species, TransitionState\n'), ((9757, 9766), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9764, 9766), False, 'from rmgpy.species import Species, TransitionState\n'), ((9811, 9820), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9818, 9820), False, 'from rmgpy.species import Species, TransitionState\n'), ((9821, 9830), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9828, 9830), False, 'from rmgpy.species import Species, TransitionState\n'), ((9843, 9852), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9850, 9852), False, 'from rmgpy.species import Species, TransitionState\n'), ((9853, 9862), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (9860, 9862), False, 'from rmgpy.species import Species, TransitionState\n'), ((10251, 10260), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10258, 10260), False, 'from rmgpy.species import Species, TransitionState\n'), ((10273, 10282), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10280, 10282), False, 'from rmgpy.species import Species, TransitionState\n'), ((10327, 10336), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10334, 10336), False, 'from rmgpy.species import Species, TransitionState\n'), ((10337, 10346), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10344, 10346), False, 'from rmgpy.species import Species, TransitionState\n'), ((10359, 10368), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10366, 10368), False, 'from rmgpy.species import Species, TransitionState\n'), ((10414, 10423), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10421, 10423), False, 'from rmgpy.species import Species, TransitionState\n'), ((10436, 10445), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10443, 10445), False, 'from rmgpy.species import Species, TransitionState\n'), ((10446, 10455), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10453, 10455), False, 'from rmgpy.species import Species, TransitionState\n'), ((10500, 10509), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10507, 10509), False, 'from rmgpy.species import Species, TransitionState\n'), ((10510, 10519), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10517, 10519), False, 'from rmgpy.species import Species, TransitionState\n'), ((10532, 10541), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10539, 10541), False, 'from rmgpy.species import Species, TransitionState\n'), ((10542, 10551), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10549, 10551), False, 'from rmgpy.species import Species, TransitionState\n'), ((10926, 10935), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', 
(10933, 10935), False, 'from rmgpy.species import Species, TransitionState\n'), ((10948, 10957), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (10955, 10957), False, 'from rmgpy.species import Species, TransitionState\n'), ((11002, 11011), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11009, 11011), False, 'from rmgpy.species import Species, TransitionState\n'), ((11012, 11021), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11019, 11021), False, 'from rmgpy.species import Species, TransitionState\n'), ((11034, 11043), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11041, 11043), False, 'from rmgpy.species import Species, TransitionState\n'), ((11089, 11098), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11096, 11098), False, 'from rmgpy.species import Species, TransitionState\n'), ((11111, 11120), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11118, 11120), False, 'from rmgpy.species import Species, TransitionState\n'), ((11121, 11130), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11128, 11130), False, 'from rmgpy.species import Species, TransitionState\n'), ((11175, 11184), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11182, 11184), False, 'from rmgpy.species import Species, TransitionState\n'), ((11185, 11194), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11192, 11194), False, 'from rmgpy.species import Species, TransitionState\n'), ((11207, 11216), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11214, 11216), False, 'from rmgpy.species import Species, TransitionState\n'), ((11217, 11226), 'rmgpy.species.Species', 'Species', ([], {}), '()\n', (11224, 11226), False, 'from rmgpy.species import Species, TransitionState\n'), ((25291, 25458), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(9.3e-16, 'cm^3/(molecule*s)')", 'n': '(0.0)', 'Ea': "(4740 * constants.R * 0.001, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(A=(9.3e-16, 'cm^3/(molecule*s)'), n=0.0, Ea=(4740 * constants.R *\n 0.001, 'kJ/mol'), T0=(1, 'K'), Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n comment=comment)\n", (25300, 25458), False, 'from rmgpy.kinetics import Arrhenius\n'), ((25668, 25835), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(9.3e-14, 'cm^3/(molecule*s)')", 'n': '(0.0)', 'Ea': "(4740 * constants.R * 0.001, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(A=(9.3e-14, 'cm^3/(molecule*s)'), n=0.0, Ea=(4740 * constants.R *\n 0.001, 'kJ/mol'), T0=(1, 'K'), Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n comment=comment)\n", (25677, 25835), False, 'from rmgpy.kinetics import Arrhenius\n'), ((26365, 26533), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1.4e-11, 'cm^3/(molecule*s)')", 'n': '(0.0)', 'Ea': "(11200 * constants.R * 0.001, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(A=(1.4e-11, 'cm^3/(molecule*s)'), n=0.0, Ea=(11200 * constants.R *\n 0.001, 'kJ/mol'), T0=(1, 'K'), Tmin=(Tmin, 'K'), Tmax=(Tmax, 'K'),\n comment=comment)\n", (26374, 26533), False, 'from rmgpy.kinetics import Arrhenius\n'), ((26743, 26911), 'rmgpy.kinetics.Arrhenius', 'Arrhenius', ([], {'A': "(1.4e-09, 'cm^3/(molecule*s)')", 'n': '(0.0)', 'Ea': "(11200 * constants.R * 0.001, 'kJ/mol')", 'T0': "(1, 'K')", 'Tmin': "(Tmin, 'K')", 'Tmax': "(Tmax, 'K')", 'comment': 'comment'}), "(A=(1.4e-09, 'cm^3/(molecule*s)'), n=0.0, Ea=(11200 * constants.R *\n 0.001, 'kJ/mol'), T0=(1, 'K'), Tmin=(Tmin, 'K'), 
Tmax=(Tmax, 'K'),\n comment=comment)\n", (26752, 26911), False, 'from rmgpy.kinetics import Arrhenius\n'), ((4228, 4270), 'rmgpy.statmech.translation.IdealGasTranslation', 'IdealGasTranslation', ([], {'mass': "(28.0313, 'amu')"}), "(mass=(28.0313, 'amu'))\n", (4247, 4270), False, 'from rmgpy.statmech.translation import Translation, IdealGasTranslation\n'), ((4341, 4427), 'rmgpy.statmech.rotation.NonlinearRotor', 'NonlinearRotor', ([], {'inertia': "([3.41526, 16.6498, 20.065], 'amu*angstrom^2')", 'symmetry': '(4)'}), "(inertia=([3.41526, 16.6498, 20.065], 'amu*angstrom^2'),\n symmetry=4)\n", (4355, 4427), False, 'from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor\n'), ((4603, 4762), 'rmgpy.statmech.vibration.HarmonicOscillator', 'HarmonicOscillator', ([], {'frequencies': "([828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, \n 3098.46, 3111.7, 3165.79, 3193.54], 'cm^-1')"}), "(frequencies=([828.397, 970.652, 977.223, 1052.93, \n 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54],\n 'cm^-1'))\n", (4621, 4762), False, 'from rmgpy.statmech.vibration import Vibration, HarmonicOscillator\n'), ((5200, 5242), 'rmgpy.statmech.translation.IdealGasTranslation', 'IdealGasTranslation', ([], {'mass': "(1.00783, 'amu')"}), "(mass=(1.00783, 'amu'))\n", (5219, 5242), False, 'from rmgpy.statmech.translation import Translation, IdealGasTranslation\n'), ((5596, 5638), 'rmgpy.statmech.translation.IdealGasTranslation', 'IdealGasTranslation', ([], {'mass': "(29.0391, 'amu')"}), "(mass=(29.0391, 'amu'))\n", (5615, 5638), False, 'from rmgpy.statmech.translation import Translation, IdealGasTranslation\n'), ((5709, 5795), 'rmgpy.statmech.rotation.NonlinearRotor', 'NonlinearRotor', ([], {'inertia': "([4.8709, 22.2353, 23.9925], 'amu*angstrom^2')", 'symmetry': '(1)'}), "(inertia=([4.8709, 22.2353, 23.9925], 'amu*angstrom^2'),\n symmetry=1)\n", (5723, 5795), False, 'from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor\n'), ((5971, 6149), 'rmgpy.statmech.vibration.HarmonicOscillator', 'HarmonicOscillator', ([], {'frequencies': "([482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, \n 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73], 'cm^-1')"}), "(frequencies=([482.224, 791.876, 974.355, 1051.48, \n 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39,\n 3101.54, 3204.73], 'cm^-1'))\n", (5989, 6149), False, 'from rmgpy.statmech.vibration import Vibration, HarmonicOscillator\n'), ((6294, 6411), 'rmgpy.statmech.torsion.HinderedRotor', 'HinderedRotor', ([], {'inertia': "(1.11481, 'amu*angstrom^2')", 'symmetry': '(6)', 'barrier': "(0.244029, 'kJ/mol')", 'semiclassical': 'None'}), "(inertia=(1.11481, 'amu*angstrom^2'), symmetry=6, barrier=(\n 0.244029, 'kJ/mol'), semiclassical=None)\n", (6307, 6411), False, 'from rmgpy.statmech.torsion import Torsion, HinderedRotor\n'), ((6841, 6883), 'rmgpy.statmech.translation.IdealGasTranslation', 'IdealGasTranslation', ([], {'mass': "(29.0391, 'amu')"}), "(mass=(29.0391, 'amu'))\n", (6860, 6883), False, 'from rmgpy.statmech.translation import Translation, IdealGasTranslation\n'), ((6954, 7041), 'rmgpy.statmech.rotation.NonlinearRotor', 'NonlinearRotor', ([], {'inertia': "([6.78512, 22.1437, 22.2114], 'amu*angstrom^2')", 'symmetry': '(1)'}), "(inertia=([6.78512, 22.1437, 22.2114], 'amu*angstrom^2'),\n symmetry=1)\n", (6968, 7041), False, 'from rmgpy.statmech.rotation import Rotation, LinearRotor, 
NonlinearRotor, KRotor, SphericalTopRotor\n'), ((7217, 7392), 'rmgpy.statmech.vibration.HarmonicOscillator', 'HarmonicOscillator', ([], {'frequencies': "([412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, \n 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88], 'cm^-1')"}), "(frequencies=([412.75, 415.206, 821.495, 924.44, 982.714,\n 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34,\n 3201.88], 'cm^-1'))\n", (7235, 7392), False, 'from rmgpy.statmech.vibration import Vibration, HarmonicOscillator\n')]
|
import rinobot_plugin as bot
import numpy as np
def main():
    # let's get our parameters and data
filepath = bot.filepath()
data = bot.loadfile(filepath)
# now comes the custom plugin logic
shift = bot.get_arg('shift', type=float, required=True)
index = bot.index_from_args(data)
data[index] = data[index] + shift
outname = bot.no_extension() + '-shift-%s.txt' % shift
# then we set up the output
outpath = bot.output_filepath(outname)
np.savetxt(outpath, data)
if __name__ == "__main__":
main()
|
[
"rinobot_plugin.loadfile",
"rinobot_plugin.filepath",
"rinobot_plugin.no_extension",
"rinobot_plugin.index_from_args",
"numpy.savetxt",
"rinobot_plugin.get_arg",
"rinobot_plugin.output_filepath"
] |
[((115, 129), 'rinobot_plugin.filepath', 'bot.filepath', ([], {}), '()\n', (127, 129), True, 'import rinobot_plugin as bot\n'), ((141, 163), 'rinobot_plugin.loadfile', 'bot.loadfile', (['filepath'], {}), '(filepath)\n', (153, 163), True, 'import rinobot_plugin as bot\n'), ((217, 264), 'rinobot_plugin.get_arg', 'bot.get_arg', (['"""shift"""'], {'type': 'float', 'required': '(True)'}), "('shift', type=float, required=True)\n", (228, 264), True, 'import rinobot_plugin as bot\n'), ((277, 302), 'rinobot_plugin.index_from_args', 'bot.index_from_args', (['data'], {}), '(data)\n', (296, 302), True, 'import rinobot_plugin as bot\n'), ((447, 475), 'rinobot_plugin.output_filepath', 'bot.output_filepath', (['outname'], {}), '(outname)\n', (466, 475), True, 'import rinobot_plugin as bot\n'), ((480, 505), 'numpy.savetxt', 'np.savetxt', (['outpath', 'data'], {}), '(outpath, data)\n', (490, 505), True, 'import numpy as np\n'), ((356, 374), 'rinobot_plugin.no_extension', 'bot.no_extension', ([], {}), '()\n', (372, 374), True, 'import rinobot_plugin as bot\n')]
|
#!/usr/bin/env python
# encoding: utf-8
import numbers
import os
import re
import sys
from itertools import chain
import numpy as np
import scipy.sparse as sp
import six
import pickle
from .model import get_convo_nn2
from .stop_words import THAI_STOP_WORDS
from .utils import CHAR_TYPES_MAP, CHARS_MAP, create_feature_array
MODULE_PATH = os.path.dirname(__file__)
WEIGHT_PATH = os.path.join(MODULE_PATH, 'weight', 'cnn_without_ne_ab.h5')
TOKENIZER = None
def tokenize(text, custom_dict=None):
"""
Tokenize given Thai text string
Input
=====
text: str, Thai text string
custom_dict: str (or list), path to customized dictionary file
        It prevents the tokenizer from splitting words in the given dictionary incorrectly.
        The file should contain one custom word per line.
        Alternatively, you can provide a list of custom words instead.
Output
======
tokens: list, list of tokenized words
Example
=======
>> deepcut.tokenize('ตัดคำได้ดีมาก')
>> ['ตัดคำ','ได้','ดี','มาก']
"""
global TOKENIZER
if not TOKENIZER:
TOKENIZER = DeepcutTokenizer()
return TOKENIZER.tokenize(text, custom_dict=custom_dict)
def _custom_dict(word, text, word_end):
word_length = len(word)
initial_loc = 0
while True:
try:
start_char = re.search(word, text).start()
first_char = start_char + initial_loc
last_char = first_char + word_length - 1
initial_loc += start_char + word_length
text = text[start_char + word_length:]
word_end[first_char:last_char] = (word_length - 1) * [0]
word_end[last_char] = 1
except:
break
return word_end
def _document_frequency(X):
"""
Count the number of non-zero values for each feature in sparse X.
"""
if sp.isspmatrix_csr(X):
return np.bincount(X.indices, minlength=X.shape[1])
return np.diff(sp.csc_matrix(X, copy=False).indptr)
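# A minimal illustration (hypothetical values) of what _document_frequency
# computes: for a 3x2 document-term matrix, the result counts how many
# documents contain each term.
#
#   X = sp.csr_matrix(np.array([[1, 0], [2, 1], [0, 3]]))
#   _document_frequency(X)   # -> array([2, 2])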
def _check_stop_list(stop):
"""
Check stop words list
ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95
"""
if stop == "thai":
return THAI_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
# assume it's a collection
return frozenset(stop)
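# Behaviour sketch for _check_stop_list:
#
#   _check_stop_list('thai')          # -> THAI_STOP_WORDS
#   _check_stop_list(['และ', 'ของ'])   # -> frozenset({'และ', 'ของ'})
#   _check_stop_list(None)            # -> None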
def load_model(file_path):
"""
Load saved pickle file of DeepcutTokenizer
Parameters
==========
file_path: str, path to saved model from ``save_model`` method in DeepcutTokenizer
"""
tokenizer = pickle.load(open(file_path, 'rb'))
tokenizer.model = get_convo_nn2()
    tokenizer.model.load_weights(WEIGHT_PATH)  # load_weights modifies the model in place; reassigning its return value would discard the model
return tokenizer
class DeepcutTokenizer(object):
"""
Class for tokenizing given Thai text documents using deepcut library
Parameters
==========
ngram_range : tuple, tuple for ngram range for vocabulary, (1, 1) for unigram
and (1, 2) for bigram
stop_words : list or set, list or set of stop words to be removed
if None, max_df can be set to value [0.7, 1.0) to automatically remove
vocabulary. If using "thai", this will use list of pre-populated stop words
max_features : int or None, if provided, only consider number of vocabulary
ordered by term frequencies
max_df : float in range [0.0, 1.0] or int, default=1.0
ignore terms that have a document frequency higher than the given threshold
min_df : float in range [0.0, 1.0] or int, default=1
ignore terms that have a document frequency lower than the given threshold
dtype : type, optional
Example
=======
raw_documents = ['ฉันอยากกินข้าวของฉัน',
'ฉันอยากกินไก่',
'อยากนอนอย่างสงบ']
tokenizer = DeepcutTokenizer(ngram_range=(1, 1))
X = tokenizer.fit_tranform(raw_documents) # document-term matrix in sparse CSR format
>> X.todense()
>> [[0, 0, 1, 0, 1, 0, 2, 1],
[0, 1, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 0, 0]]
>> tokenizer.vocabulary_
>> {'นอน': 0, 'ไก่': 1, 'กิน': 2, 'อย่าง': 3, 'อยาก': 4, 'สงบ': 5, 'ฉัน': 6, 'ข้าว': 7}
"""
def __init__(self, ngram_range=(1, 1), stop_words=None,
max_df=1.0, min_df=1, max_features=None, dtype=np.dtype('float64')):
self.model = get_convo_nn2()
self.model.load_weights(WEIGHT_PATH)
self.vocabulary_ = {}
self.ngram_range = ngram_range
self.dtype = dtype
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
self.stop_words = _check_stop_list(stop_words)
def _word_ngrams(self, tokens):
"""
Turn tokens into a tokens of n-grams
ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153
"""
# handle stop words
if self.stop_words is not None:
tokens = [w for w in tokens if w not in self.stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i: i + n]))
return tokens
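    # Small illustration of the n-gram expansion above, assuming the tokenizer
    # was created with ngram_range=(1, 2) and no stop words:
    #
    #   self._word_ngrams(['ฉัน', 'อยาก', 'กิน'])
    #   # -> ['ฉัน', 'อยาก', 'กิน', 'ฉัน อยาก', 'อยาก กิน']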
def _limit_features(self, X, vocabulary,
high=None, low=None, limit=None):
"""Remove too rare or too common features.
ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L734-L773
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
tfs = np.asarray(X.sum(axis=0)).ravel()
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(vocabulary.items()):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def transform(self, raw_documents, new_document=False):
"""
raw_documents: list, list of new documents to be transformed
        new_document: bool, if True, assume we are seeing new documents and build a new self.vocabulary_,
if False, use the previous self.vocabulary_
"""
n_doc = len(raw_documents)
tokenized_documents = []
for doc in raw_documents:
tokens = tokenize(doc) # method in this file
tokens = self._word_ngrams(tokens)
tokenized_documents.append(tokens)
if new_document:
self.vocabulary_ = {v: k for k, v in enumerate(set(chain.from_iterable(tokenized_documents)))}
values, row_indices, col_indices = [], [], []
for r, tokens in enumerate(tokenized_documents):
tokens = self._word_ngrams(tokens)
feature = {}
for token in tokens:
word_index = self.vocabulary_.get(token)
if word_index is not None:
if word_index not in feature.keys():
feature[word_index] = 1
else:
feature[word_index] += 1
for c, v in feature.items():
values.append(v)
row_indices.append(r)
col_indices.append(c)
# document-term matrix in CSR format
X = sp.csr_matrix((values, (row_indices, col_indices)),
shape=(n_doc, len(self.vocabulary_)),
dtype=self.dtype)
# truncate vocabulary by max_df and min_df
if new_document:
max_df = self.max_df
min_df = self.min_df
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, _ = self._limit_features(X, self.vocabulary_,
max_doc_count,
min_doc_count,
self.max_features)
return X
def fit_tranform(self, raw_documents):
"""
Transform given list of raw_documents to document-term matrix in
sparse CSR format (see scipy)
"""
X = self.transform(raw_documents, new_document=True)
return X
def tokenize(self, text, custom_dict=None):
n_pad = 21
if not text:
return [''] # case of empty string
if isinstance(text, str) and sys.version_info.major == 2:
text = text.decode('utf-8')
x_char, x_type = create_feature_array(text, n_pad=n_pad)
word_end = []
# Fix thread-related issue in Keras + TensorFlow + Flask async environment
# ref: https://github.com/keras-team/keras/issues/2397
y_predict = self.model.predict([x_char, x_type])
c = [i[0] for i in y_predict.tolist()]
return list(zip(list(text),c))
def save_model(self, file_path):
"""
Save tokenizer to pickle format
"""
self.model = None # set model to None to successfully save the model
with open(file_path, 'wb') as f:
pickle.dump(self, f)
|
[
"scipy.sparse.isspmatrix_csr",
"scipy.sparse.csc_matrix",
"pickle.dump",
"numpy.where",
"os.path.join",
"os.path.dirname",
"itertools.chain.from_iterable",
"numpy.cumsum",
"numpy.dtype",
"numpy.bincount",
"re.search"
] |
[((341, 366), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (356, 366), False, 'import os\n'), ((381, 440), 'os.path.join', 'os.path.join', (['MODULE_PATH', '"""weight"""', '"""cnn_without_ne_ab.h5"""'], {}), "(MODULE_PATH, 'weight', 'cnn_without_ne_ab.h5')\n", (393, 440), False, 'import os\n'), ((1845, 1865), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['X'], {}), '(X)\n', (1862, 1865), True, 'import scipy.sparse as sp\n'), ((1882, 1926), 'numpy.bincount', 'np.bincount', (['X.indices'], {'minlength': 'X.shape[1]'}), '(X.indices, minlength=X.shape[1])\n', (1893, 1926), True, 'import numpy as np\n'), ((4399, 4418), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (4407, 4418), True, 'import numpy as np\n'), ((1946, 1974), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['X'], {'copy': '(False)'}), '(X, copy=False)\n', (1959, 1974), True, 'import scipy.sparse as sp\n'), ((7020, 7035), 'numpy.cumsum', 'np.cumsum', (['mask'], {}), '(mask)\n', (7029, 7035), True, 'import numpy as np\n'), ((7362, 7376), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (7370, 7376), True, 'import numpy as np\n'), ((11115, 11135), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (11126, 11135), False, 'import pickle\n'), ((1326, 1347), 're.search', 're.search', (['word', 'text'], {}), '(word, text)\n', (1335, 1347), False, 'import re\n'), ((6932, 6946), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (6940, 6946), True, 'import numpy as np\n'), ((8247, 8287), 'itertools.chain.from_iterable', 'chain.from_iterable', (['tokenized_documents'], {}), '(tokenized_documents)\n', (8266, 8287), False, 'from itertools import chain\n')]
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cumm import tensorview as tv
from contextlib import AbstractContextManager
from spconv.cppconstants import CPU_ONLY_BUILD
from spconv.core_cc.csrc.utils.boxops import BoxOps
from spconv.core_cc.csrc.sparse.all.ops_cpu1d import Point2VoxelCPU as Point2VoxelCPU1d
from spconv.core_cc.csrc.sparse.all.ops_cpu2d import Point2VoxelCPU as Point2VoxelCPU2d
from spconv.core_cc.csrc.sparse.all.ops_cpu3d import Point2VoxelCPU as Point2VoxelCPU3d
from spconv.core_cc.csrc.sparse.all.ops_cpu4d import Point2VoxelCPU as Point2VoxelCPU4d
if not CPU_ONLY_BUILD:
from spconv.core_cc.csrc.sparse.all.ops1d import Point2Voxel as Point2VoxelGPU1d
from spconv.core_cc.csrc.sparse.all.ops2d import Point2Voxel as Point2VoxelGPU2d
from spconv.core_cc.csrc.sparse.all.ops3d import Point2Voxel as Point2VoxelGPU3d
from spconv.core_cc.csrc.sparse.all.ops4d import Point2Voxel as Point2VoxelGPU4d
class nullcontext(AbstractContextManager):
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
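# A concrete (illustrative) instance of the pattern described in the class
# docstring; `log_path` is a hypothetical name:
#
#   cm = open(log_path, 'w') if log_path else nullcontext()
#   with cm as f:
#       if f is not None:
#           f.write('...')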
def rbbox_iou(box_corners: np.ndarray, qbox_corners: np.ndarray,
standup_iou: np.ndarray, standup_thresh: float):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
N = box_corners.shape[0]
K = qbox_corners.shape[0]
overlap = np.zeros((N, K), dtype=box_corners.dtype)
BoxOps.rbbox_iou(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners),
tv.from_numpy(standup_iou), tv.from_numpy(overlap),
standup_thresh, False)
return overlap
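# Hedged call sketch for the rotated-box IoU helpers in this module.  The
# corner layout (N boxes x 4 corners x 2 coordinates) and the role of
# standup_iou as a precomputed axis-aligned IoU are assumptions, not taken
# from this file:
#
#   overlap = rbbox_iou(box_corners,           # e.g. shape (N, 4, 2)
#                       qbox_corners,          # e.g. shape (K, 4, 2)
#                       standup_iou,           # shape (N, K)
#                       standup_thresh=0.0)    # pairs below this IoU are skipped
#   # overlap has shape (N, K)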
def rbbox_intersection(box_corners: np.ndarray, qbox_corners: np.ndarray,
standup_iou: np.ndarray, standup_thresh: float):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
N = box_corners.shape[0]
K = qbox_corners.shape[0]
overlap = np.zeros((N, K), dtype=box_corners.dtype)
BoxOps.rbbox_iou(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners),
tv.from_numpy(standup_iou), tv.from_numpy(overlap),
standup_thresh, True)
return overlap
def rbbox_iou_loss(box_corners: np.ndarray, qbox_corners: np.ndarray):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
N = box_corners.shape[0]
overlap = np.zeros((N, ), dtype=box_corners.dtype)
BoxOps.rbbox_iou_aligned(tv.from_numpy(box_corners),
tv.from_numpy(qbox_corners),
tv.from_numpy(overlap), False)
return overlap
def non_max_suppression_cpu(boxes: np.ndarray,
order: np.ndarray,
thresh: float,
eps: float = 0.0):
return BoxOps.non_max_suppression_cpu(tv.from_numpy(boxes),
tv.from_numpy(order), thresh, eps)
def rotate_non_max_suppression_cpu(boxes: np.ndarray, order: np.ndarray,
standup_iou: np.ndarray, thresh: float):
if not BoxOps.has_boost():
raise NotImplementedError(
"this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild."
)
return BoxOps.rotate_non_max_suppression_cpu(tv.from_numpy(boxes),
tv.from_numpy(order),
tv.from_numpy(standup_iou),
thresh)
|
[
"numpy.zeros",
"cumm.tensorview.from_numpy",
"spconv.core_cc.csrc.utils.boxops.BoxOps.has_boost"
] |
[((2449, 2490), 'numpy.zeros', 'np.zeros', (['(N, K)'], {'dtype': 'box_corners.dtype'}), '((N, K), dtype=box_corners.dtype)\n', (2457, 2490), True, 'import numpy as np\n'), ((3105, 3146), 'numpy.zeros', 'np.zeros', (['(N, K)'], {'dtype': 'box_corners.dtype'}), '((N, K), dtype=box_corners.dtype)\n', (3113, 3146), True, 'import numpy as np\n'), ((3655, 3694), 'numpy.zeros', 'np.zeros', (['(N,)'], {'dtype': 'box_corners.dtype'}), '((N,), dtype=box_corners.dtype)\n', (3663, 3694), True, 'import numpy as np\n'), ((2209, 2227), 'spconv.core_cc.csrc.utils.boxops.BoxOps.has_boost', 'BoxOps.has_boost', ([], {}), '()\n', (2225, 2227), False, 'from spconv.core_cc.csrc.utils.boxops import BoxOps\n'), ((2513, 2539), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['box_corners'], {}), '(box_corners)\n', (2526, 2539), True, 'from cumm import tensorview as tv\n'), ((2541, 2568), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['qbox_corners'], {}), '(qbox_corners)\n', (2554, 2568), True, 'from cumm import tensorview as tv\n'), ((2591, 2617), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['standup_iou'], {}), '(standup_iou)\n', (2604, 2617), True, 'from cumm import tensorview as tv\n'), ((2619, 2641), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['overlap'], {}), '(overlap)\n', (2632, 2641), True, 'from cumm import tensorview as tv\n'), ((2865, 2883), 'spconv.core_cc.csrc.utils.boxops.BoxOps.has_boost', 'BoxOps.has_boost', ([], {}), '()\n', (2881, 2883), False, 'from spconv.core_cc.csrc.utils.boxops import BoxOps\n'), ((3169, 3195), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['box_corners'], {}), '(box_corners)\n', (3182, 3195), True, 'from cumm import tensorview as tv\n'), ((3197, 3224), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['qbox_corners'], {}), '(qbox_corners)\n', (3210, 3224), True, 'from cumm import tensorview as tv\n'), ((3247, 3273), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['standup_iou'], {}), '(standup_iou)\n', (3260, 3273), True, 'from cumm import tensorview as tv\n'), ((3275, 3297), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['overlap'], {}), '(overlap)\n', (3288, 3297), True, 'from cumm import tensorview as tv\n'), ((3445, 3463), 'spconv.core_cc.csrc.utils.boxops.BoxOps.has_boost', 'BoxOps.has_boost', ([], {}), '()\n', (3461, 3463), False, 'from spconv.core_cc.csrc.utils.boxops import BoxOps\n'), ((3726, 3752), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['box_corners'], {}), '(box_corners)\n', (3739, 3752), True, 'from cumm import tensorview as tv\n'), ((3783, 3810), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['qbox_corners'], {}), '(qbox_corners)\n', (3796, 3810), True, 'from cumm import tensorview as tv\n'), ((3841, 3863), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['overlap'], {}), '(overlap)\n', (3854, 3863), True, 'from cumm import tensorview as tv\n'), ((4119, 4139), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['boxes'], {}), '(boxes)\n', (4132, 4139), True, 'from cumm import tensorview as tv\n'), ((4183, 4203), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['order'], {}), '(order)\n', (4196, 4203), True, 'from cumm import tensorview as tv\n'), ((4380, 4398), 'spconv.core_cc.csrc.utils.boxops.BoxOps.has_boost', 'BoxOps.has_boost', ([], {}), '()\n', (4396, 4398), False, 'from spconv.core_cc.csrc.utils.boxops import BoxOps\n'), ((4596, 4616), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['boxes'], {}), '(boxes)\n', (4609, 4616), True, 'from cumm import tensorview as tv\n'), ((4667, 4687), 'cumm.tensorview.from_numpy', 
'tv.from_numpy', (['order'], {}), '(order)\n', (4680, 4687), True, 'from cumm import tensorview as tv\n'), ((4738, 4764), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['standup_iou'], {}), '(standup_iou)\n', (4751, 4764), True, 'from cumm import tensorview as tv\n')]
|
"""
This code is used to scrape ScienceDirect of publication urls and write them to
a text file in the current directory for later use.
"""
import selenium
from selenium import webdriver
import numpy as np
import pandas as pd
import bs4
from bs4 import BeautifulSoup
import time
from sklearn.utils import shuffle
def scrape_page(driver):
"""
This method finds all the publication result web elements on the webpage.
Parameters
----------
driver (Selenium webdriver object) : Instance of the webdriver class e.g.
webdriver.Chrome()
Returns
-------
    elems (list) : A list of all publication result web elements scraped from the page
"""
elems = driver.find_elements_by_class_name('ResultItem')
return elems
def clean(elems):
"""
    This method takes a list of scraped selenium web elements
    and extracts the hrefs leading to publications, along with their titles.
    Filtering includes removing all urls with keywords that are indicative of
    non-html links.
    Parameters
    ----------
    elems (list) : The list of result web elements to be filtered
    Returns
    -------
    urls (list) : The list of publication hrefs, which should match the list
        displayed on the ScienceDirect GUI
    titles (list) : The list of publication titles corresponding to the urls
"""
titles = []
urls = []
for elem in elems:
href_child = elem.find_element_by_css_selector('a[href]')
url = href_child.get_attribute('href')
title = href_child.text
titles.append(title)
urls.append(url)
return urls, titles
def build_url_list(gui_prefix,search_terms,journal_list):
"""
    This method takes the list of journals and creates a triple-nested dictionary
containing all accessible urls to each page, in each year, for each journal,
for a given search on sciencedirect.
"""
dict1 = {}
years = np.arange(1995,2020)
for journal in journal_list:
dict2 = {}
for year in years:
dict3 = {}
for i in range(60):
url = gui_prefix + search_terms + '&show=100'+ '&articleTypes=FLA%2CREV' + '&years='+ str(year)
if i != 0:
url = url + '&offset=' + str(i) +'00'
url = url + '&pub=' + journal
dict3[i] = url
dict2[year] = dict3
dict1[journal] = dict2
return dict1
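# The returned structure is indexed as dict1[journal][year][page_index]; a
# hedged usage sketch (the journal name below is purely illustrative):
#
#   url_dict = build_url_list(gui_prefix, search_terms, journal_list)
#   first_page_2005 = url_dict['Polymer'][2005][0]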
def proxify(scraped_urls,uw_prefix):
"""
This method takes a list of scraped urls and turns them into urls that
go through the UW Library proxy so that all of them are full access.
Parameters
----------
scraped_urls (list) : The list of URLs to be converted
uw_prefix (str) : The string that all URLs which go through the UW Library
Proxy start with.
Returns
-------
proxy_urls (list) : The list of converted URLs which go through UW Library
proxy
"""
proxy_urls = []
for url in scraped_urls:
sd_id = url[-17:]
newlink = uw_prefix + sd_id
if sd_id.startswith('S'):
proxy_urls.append(newlink)
return proxy_urls
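# Illustrative before/after for proxify (the PII suffix below is made up):
#
#   scraped = ['https://www.sciencedirect.com/science/article/pii/S0032386112345678']
#   proxify(scraped, uw_prefix)
#   # -> ['https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/S0032386112345678']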
def write_urls(urls,titles,file,journal,year):
"""
This method takes a list of urls and writes them to a desired text file.
Parameters
----------
    urls (list) : The list of URLs to be saved.
    titles (list) : The list of publication titles corresponding to the URLs.
    file (file object) : The opened .txt file which will be written to.
    journal (str) : The journal the publications were found in.
    year (str or int) : The year associated with the publication date.
Returns
-------
Does not return anything
"""
for link,title in zip(urls,titles):
line = link + ',' + title + ',' + journal + ',' + str(year)
file.write(line)
file.write('\n')
def find_pubTitle(driver,journal):
"""
This method finds the identifying number for a specific journal. This
    identifying number is added to the gui query URL to ensure only publications
from the desired journal are being found.
"""
pub_elems = driver.find_elements_by_css_selector('input[id*=publicationTitles]')
pub_names = []
for elem in pub_elems:
pub_name = elem.get_attribute("name")
if pub_name == journal:
return elem.get_attribute('id')[-6:] #returns the identifying number
#for that journal
df = pd.read_excel('elsevier_journals.xls')
df.Full_Category = df.Full_Category.str.lower() # lowercase topics for searching
df = df.drop_duplicates(subset = 'Journal_Title') # drop any duplicate journals
df = shuffle(df,random_state = 42)
# The set of default strings that will be used to sort which journals we want
journal_strings = ['chemistry','energy','molecular','atomic','chemical','biochem'
                   ,'organic','polymer','chemical engineering','biotech','colloid']
name = df.Full_Category.str.contains # making this an easier command to type
# new dataframe full of only journals who's topic description contained the
# desired keywords
df2 = df[name('polymer') | name('chemistry') | name('energy')
| name('molecular') | name('colloid') | name('biochem')
| name('organic') | name('biotech') | name('chemical')]
journal_list = df2.Journal_Title # Series of only the journals to be searched
gui_prefix = 'https://www.sciencedirect.com/search/advanced?qs='
search_terms = 'chemistry%20OR%20molecule%20OR%20polymer%20OR%20organic'
url_dict = build_url_list(gui_prefix,search_terms,journal_list)
driver = webdriver.Chrome()
uw_prefix = 'https://www-sciencedirect-com.offcampus.lib.washington.edu/science/article/pii/'
filename = input("Input filename with .txt extension for URL storage: ")
url_counter = 0
master_list = []
file = open(filename,'a+')
for journal in journal_list:
for year in np.arange(1995,2020):
for offset in np.arange(60):
page = url_dict[journal][year][offset]
print("journal, year, offset = ",journal,year,offset)
driver.get(page)
time.sleep(2) # need sleep to load the page properly
if offset == 0: # if on page 1, we need to grab the publisher number
try: # we may be at a page which won't have the item we are looking for
                    pubTitles = find_pubTitle(driver, journal)
                    for yr in url_dict[journal]:
                        for off in url_dict[journal][yr]:
                            url_dict[journal][yr][off] += '&pubTitles=' + pubTitles # update every url in the dict
driver.get(url_dict[journal][year][0]) # reload the first page with the new url
except:
pass # if there is an exception, it means we are on the right page
scraped_elems = scrape_page(driver) # scrape the page
scraped_urls, titles = clean(scraped_elems)
proxy_urls = proxify(scraped_urls,uw_prefix) # not even sure this is needed
write_urls(proxy_urls,titles,file,journal,year)
url_counter += len(proxy_urls)
print('Total URLs saved is: ',url_counter)
if len(scraped_elems) < 100: # after content is saved, go to the next year
break # because we know this is the last page of urls for this year
file.close()
driver.quit()
|
[
"sklearn.utils.shuffle",
"selenium.webdriver.Chrome",
"time.sleep",
"pandas.read_excel",
"numpy.arange"
] |
[((4232, 4270), 'pandas.read_excel', 'pd.read_excel', (['"""elsevier_journals.xls"""'], {}), "('elsevier_journals.xls')\n", (4245, 4270), True, 'import pandas as pd\n'), ((4437, 4465), 'sklearn.utils.shuffle', 'shuffle', (['df'], {'random_state': '(42)'}), '(df, random_state=42)\n', (4444, 4465), False, 'from sklearn.utils import shuffle\n'), ((5368, 5386), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (5384, 5386), False, 'from selenium import webdriver\n'), ((1802, 1823), 'numpy.arange', 'np.arange', (['(1995)', '(2020)'], {}), '(1995, 2020)\n', (1811, 1823), True, 'import numpy as np\n'), ((5664, 5685), 'numpy.arange', 'np.arange', (['(1995)', '(2020)'], {}), '(1995, 2020)\n', (5673, 5685), True, 'import numpy as np\n'), ((5708, 5721), 'numpy.arange', 'np.arange', (['(60)'], {}), '(60)\n', (5717, 5721), True, 'import numpy as np\n'), ((5884, 5897), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5894, 5897), False, 'import time\n')]
|
"""
Greedy Word Swap with Word Importance Ranking
===================================================
When WIR method is set to ``unk``, this is a reimplementation of the search
method from the paper: Is BERT Really Robust?
A Strong Baseline for Natural Language Attack on Text Classification and
Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and
https://github.com/jind11/TextFooler.
"""
import numpy as np
import torch
from torch.nn.functional import softmax
from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
transformation_consists_of_word_swaps_and_deletions,
)
class GreedyWordSwapWIR(SearchMethod):
"""An attack that greedily chooses from a list of possible perturbations in
order of index, after ranking indices by importance.
Args:
wir_method: method for ranking most important words
"""
def __init__(self, wir_method="unk"):
self.wir_method = wir_method
def _get_index_order(self, initial_text):
"""Returns word indices of ``initial_text`` in descending order of
importance."""
len_text = len(initial_text.words)
if self.wir_method == "unk":
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif self.wir_method == "weighted-saliency":
# first, compute word saliency
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
saliency_scores = np.array([result.score for result in leave_one_results])
softmax_saliency_scores = softmax(
torch.Tensor(saliency_scores), dim=0
).numpy()
# compute the largest change in score we can find by swapping each word
delta_ps = []
for idx in range(len_text):
transformed_text_candidates = self.get_transformations(
initial_text,
original_text=initial_text,
indices_to_modify=[idx],
)
if not transformed_text_candidates:
# no valid synonym substitutions for this word
delta_ps.append(0.0)
continue
swap_results, _ = self.get_goal_results(transformed_text_candidates)
score_change = [result.score for result in swap_results]
max_score_change = np.max(score_change)
delta_ps.append(max_score_change)
index_scores = softmax_saliency_scores * np.array(delta_ps)
elif self.wir_method == "delete":
leave_one_texts = [
initial_text.delete_word_at_index(i) for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
index_scores = np.array([result.score for result in leave_one_results])
elif self.wir_method == "random":
index_order = np.arange(len_text)
np.random.shuffle(index_order)
search_over = False
else:
raise ValueError(f"Unsupported WIR method {self.wir_method}")
if self.wir_method != "random":
index_order = (-index_scores).argsort()
return index_order, search_over
def _perform_search(self, initial_result):
attacked_text = initial_result.attacked_text
# Sort words by order of importance
index_order, search_over = self._get_index_order(attacked_text)
i = 0
cur_result = initial_result
results = None
while i < len(index_order) and not search_over:
transformed_text_candidates = self.get_transformations(
cur_result.attacked_text,
original_text=initial_result.attacked_text,
indices_to_modify=[index_order[i]],
)
i += 1
if len(transformed_text_candidates) == 0:
continue
results, search_over = self.get_goal_results(transformed_text_candidates)
results = sorted(results, key=lambda x: -x.score)
# Skip swaps which don't improve the score
if results[0].score > cur_result.score:
cur_result = results[0]
else:
continue
# If we succeeded, return the index with best similarity.
if cur_result.goal_status == GoalFunctionResultStatus.SUCCEEDED:
best_result = cur_result
# @TODO: Use vectorwise operations
max_similarity = -float("inf")
for result in results:
if result.goal_status != GoalFunctionResultStatus.SUCCEEDED:
break
candidate = result.attacked_text
try:
similarity_score = candidate.attack_attrs["similarity_score"]
except KeyError:
# If the attack was run without any similarity metrics,
# candidates won't have a similarity score. In this
# case, break and return the candidate that changed
# the original score the most.
break
if similarity_score > max_similarity:
max_similarity = similarity_score
best_result = result
return best_result
return cur_result
def check_transformation_compatibility(self, transformation):
"""Since it ranks words by their importance, GreedyWordSwapWIR is
limited to word swap and deletion transformations."""
return transformation_consists_of_word_swaps_and_deletions(transformation)
def extra_repr_keys(self):
return ["wir_method"]
|
[
"torch.Tensor",
"numpy.max",
"numpy.array",
"textattack.shared.validators.transformation_consists_of_word_swaps_and_deletions",
"numpy.arange",
"numpy.random.shuffle"
] |
[((6129, 6196), 'textattack.shared.validators.transformation_consists_of_word_swaps_and_deletions', 'transformation_consists_of_word_swaps_and_deletions', (['transformation'], {}), '(transformation)\n', (6180, 6196), False, 'from textattack.shared.validators import transformation_consists_of_word_swaps_and_deletions\n'), ((1520, 1576), 'numpy.array', 'np.array', (['[result.score for result in leave_one_results]'], {}), '([result.score for result in leave_one_results])\n', (1528, 1576), True, 'import numpy as np\n'), ((1921, 1977), 'numpy.array', 'np.array', (['[result.score for result in leave_one_results]'], {}), '([result.score for result in leave_one_results])\n', (1929, 1977), True, 'import numpy as np\n'), ((2851, 2871), 'numpy.max', 'np.max', (['score_change'], {}), '(score_change)\n', (2857, 2871), True, 'import numpy as np\n'), ((2976, 2994), 'numpy.array', 'np.array', (['delta_ps'], {}), '(delta_ps)\n', (2984, 2994), True, 'import numpy as np\n'), ((3272, 3328), 'numpy.array', 'np.array', (['[result.score for result in leave_one_results]'], {}), '([result.score for result in leave_one_results])\n', (3280, 3328), True, 'import numpy as np\n'), ((3397, 3416), 'numpy.arange', 'np.arange', (['len_text'], {}), '(len_text)\n', (3406, 3416), True, 'import numpy as np\n'), ((3429, 3459), 'numpy.random.shuffle', 'np.random.shuffle', (['index_order'], {}), '(index_order)\n', (3446, 3459), True, 'import numpy as np\n'), ((2042, 2071), 'torch.Tensor', 'torch.Tensor', (['saliency_scores'], {}), '(saliency_scores)\n', (2054, 2071), False, 'import torch\n')]
|
from gtrain import Model
import numpy as np
import tensorflow as tf
class NetForHypinv(Model):
"""
    Implementation of the crucial functions for the HypINV algorithm.
    Warning: Do not use this class directly; implement a subclass instead (see FCNetForHypinv for an example).
"""
def __init__(self, weights):
self.eval_session = None
self.grad_session = None
self.initial_x = None
self.center = None
self.weights = weights
self.out_for_eval = None #(going to be filled in build_for_eval method)
self.boundary_out_for_eval = None
self.trained_x = None
self.training_class_index = None
self.x = None # tf variable for inversion (going to be filled in build method)
self.x_for_eval = None
self.out = None
        self.boundary_out = None # list of tf tensors, one per class, for the softmax "class vs. others" output
self.loss = None
self.boundary_loss = None
self.t = None #target
self.boundary_t = None
        self.x1 = None # this attribute is used for the purposes of the modified loss function
def __del__(self):
        # close all sessions
if self.eval_session:
self.eval_session.close()
if self.grad_session:
self.grad_session.close()
def set_initial_x(self, initial_x):
# sets starting point for the search of the closest point
self.initial_x = initial_x
def set_center(self, center):
# sets center point
self.center = center / np.linalg.norm(center)
def set_x1(self, x1):
        # sets x1, the point for which we want to find the closest point x0
self.x1 = x1
def has_modified_loss(self):
        pass # should return True if the modified loss is used; override in subclasses
def set_initial_x_in_session(self, x, session=None):
# sets initial x in certain session
if session is None:
self.set_initial_x(x)
else:
            pass # override this method
def eval(self, x):
if len(x.shape) == 1:
x = x.reshape((1,len(x)))
if not self.eval_session:
self.eval_session = tf.Session()
with self.eval_session.as_default():
self.build_for_eval()
self.eval_session.run(tf.global_variables_initializer())
return self.eval_session.run(self.out_for_eval, {self.x_for_eval: x})
def boundary_eval(self, x, class_index):
        # evaluates the binary classification of class_index against the other classes
if not self.eval_session:
self.eval_session = tf.Session()
with self.eval_session.as_default():
self.build_for_eval()
self.eval_session.run(tf.global_variables_initializer())
return self.eval_session.run(self.boundary_out_for_eval[class_index], {self.x_for_eval: x})
def get_boundary_gradient(self, x, class_index):
# computes gradient of the boundary for specified class_index
if not self.grad_session:
self.grad_session = tf.Session()
with self.grad_session.as_default():
self.build_for_eval()
self.grad = list()
for i in range(len(self.weights[0][-1][0])):
self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0])
self.grad_x = self.x_for_eval
return self.grad_session.run(self.grad[class_index], {self.grad_x: x})
def build_for_eval(self):
# build model for evaluation
pass #override this method (fill self.out_for_eval)
def train_ended(self, session):
self.trained_x = session.run(self.x)
def build(self):
# build model for training
pass #override this method (fill self.x, self.out)
def set_train_class(self, class_index):
# sets class of the x1
self.training_class_index = class_index
# overided methods from gtrain.Model
def get_loss(self):
if self.training_class_index is None:
return self.loss
else:
return self.boundary_loss[self.training_class_index]
def get_hits(self):
return self.get_loss()
def get_count(self):
return self.get_loss()
def get_train_summaries(self):
return []
def get_dev_summaries(self):
return []
def get_placeholders(self):
if self.training_class_index is None:
return [self.t]
else:
return [self.boundary_t]
#________________________________________EXAMPLES_OF_NetForHypinv_CLASS_____________________________________________
class FCNetForHypinv(NetForHypinv):
"""
    Implementation of a multilayer perceptron to be used in the HypINV rule extraction algorithm
"""
def __init__(self, weights, function=tf.sigmoid, use_modified_loss=False, mu = 0.01):
"""
        :param weights: pair [weight matrices, bias vectors], one entry per layer
        :param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan
        :param use_modified_loss: whether the modified loss should be used
        :param mu: weight of the penalty terms that control the distance between x0 and x1 and
            the distance of x1 from the boundary
"""
super(FCNetForHypinv, self).__init__(weights)
self.function = function
self.layer_sizes = [len(self.weights[0][0])]
for bias in weights[1]:
self.layer_sizes.append(len(bias))
self.num_classes = self.layer_sizes[-1]
self.initial_x = np.zeros([1, self.layer_sizes[0]])
self.use_modified_loss = use_modified_loss
self.mu = mu
def build(self):
with tf.name_scope("Input"):
if self.center is not None:
self.point_weights = tf.Variable(self.center.reshape((1, len(self.center))),
dtype=tf.float64, trainable=False, name="Boundary_point")
init_factor = self.center
init_factor[init_factor!=0] = self.initial_x[init_factor!=0] / self.center[init_factor!=0]
self.factor = tf.Variable(init_factor.reshape((1, len(self.center))),
dtype=tf.float64, name="factor")
else:
self.point_weights = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))),
dtype=tf.float64, trainable=False, name="Boundary_point")
                self.factor = tf.Variable(np.ones((1, len(self.initial_x))),  # self.center is None in this branch
dtype=tf.float64, name="factor")
self.x = self.point_weights * self.factor
with tf.name_scope("Target"):
if self.use_modified_loss:
x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64)
self.t = tf.placeholder(tf.float64, shape=[None, self.num_classes], name="Target_output")
self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output")
with tf.name_scope("FC_net"):
flowing_x = self.x
for i, _ in enumerate(self.weights[0]):
with tf.name_scope("layer_{}".format(i)):
W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64)
b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64)
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b))
y = flowing_x
self.out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out = list()
for i in range(self.num_classes):
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[i] = False
x0 = self.out[:,i]
x1 = tf.reduce_max(tf.boolean_mask(self.out, mask, axis=1), axis=1)
s = x0+x1
out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out.append(out)
with tf.name_scope("Loss_functions"):
self.loss = tf.reduce_mean(
tf.nn.l2_loss(self.out-self.t),
name="loss")
with tf.name_scope("Binary_class_loss"):
self.boundary_loss = list()
if self.use_modified_loss:
for i in range(self.num_classes):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) +
self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant))
)
else:
for i in range(self.num_classes):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t))
)
def set_initial_x_in_session(self, x, session=None):
if session is None:
self.set_initial_x(x)
else:
if self.center is None:
session.run([
self.point_weights.assign(x.reshape((1, len(x)))),
self.factor.assign(np.ones((1, len(x))))
])
else:
init_factor = self.center
init_factor[init_factor!=0] = x[init_factor!=0] / self.center[init_factor!=0]
session.run(self.factor.assign(init_factor.reshape((1,len(init_factor)))))
def build_for_eval(self):
with tf.name_scope("eInput"):
self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point")
with tf.name_scope("eFC_net"):
flowing_x = self.x_for_eval
for i, _ in enumerate(self.weights[0]):
W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i))
b = tf.constant(self.weights[1][i], name="eBias_{}".format(i))
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i))
y = flowing_x
self.out_for_eval = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out_for_eval = list()
for i in range(self.num_classes):
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[i] = False
x0 = self.out_for_eval[:, i]
x1 = tf.reduce_max(tf.boolean_mask(self.out_for_eval, mask, axis=1), axis=1)
s = x0+x1
out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out_for_eval.append(out)
def has_modified_loss(self):
return self.use_modified_loss
def name(self):
return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes]))
class FCNetForHypinvBinary(FCNetForHypinv):
"""
    Implementation of a multilayer perceptron to be used in the HypINV rule extraction algorithm.
    The task is simplified to the binary classification of base_class_index against the other classes.
"""
def __init__(self, weights, base_class_index, function=tf.sigmoid, use_modified_loss=False, mu = 0.01):
"""
        :param weights: pair [weight matrices, bias vectors], one entry per layer
        :param base_class_index: the index of the class used as the base class
        :param function: tf function for propagation. For example tf.nn.sigmoid, tf.atan
        :param use_modified_loss: whether the modified loss should be used
        :param mu: weight of the penalty terms that control the distance between x0 and x1 and
            the distance of x1 from the boundary
"""
super(FCNetForHypinvBinary, self).__init__(weights)
self.base_class_index = base_class_index
self.function = function
self.layer_sizes = [len(self.weights[0][0])]
for bias in weights[1]:
self.layer_sizes.append(len(bias))
self.num_classes = self.layer_sizes[-1]
self.initial_x = np.zeros([1, self.layer_sizes[0]])
self.use_modified_loss = use_modified_loss
self.mu = mu
def build(self):
with tf.name_scope("Input"):
self.init_point = tf.Variable(self.initial_x.reshape((1, len(self.initial_x))),
dtype=tf.float64, trainable=False, name="Boundary_point")
self.factor = tf.Variable(np.ones((1, len(self.initial_x))),
dtype=tf.float64, name="factor")
self.x = self.init_point * self.factor
with tf.name_scope("Target"):
if self.use_modified_loss:
x1_constant = tf.constant(self.x1.reshape((1, len(self.x1))), dtype=tf.float64)
self.t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_output")
self.boundary_t = tf.placeholder(tf.float64, shape=[None, 2], name="Target_boundary_output")
with tf.name_scope("FC_net"):
flowing_x = self.x
for i, _ in enumerate(self.weights[0]):
with tf.name_scope("layer_{}".format(i)):
W = tf.constant(self.weights[0][i], name="Weight_{}".format(i), dtype=tf.float64)
b = tf.constant(self.weights[1][i], name="Bias_{}".format(i), dtype=tf.float64)
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b))
y = flowing_x
full_out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out = list()
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[self.base_class_index] = False
x0 = full_out[:,self.base_class_index]
x1 = tf.reduce_max(tf.boolean_mask(full_out, mask, axis=1), axis=1)
s = x0+x1
self.out = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out.append(self.out)
self.boundary_out.append(tf.stack([x1/s, x0/s], axis=1))
with tf.name_scope("Loss_functions"):
self.loss = tf.reduce_mean(
tf.nn.l2_loss(self.out-self.t),
name="loss")
with tf.name_scope("Binary_class_loss"):
self.boundary_loss = list()
if self.use_modified_loss:
for i in range(2):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i]-self.boundary_t)) +
self.mu * tf.reduce_mean(tf.nn.l2_loss(self.x - x1_constant))
)
else:
for i in range(2):
self.boundary_loss.append(
tf.reduce_mean(tf.nn.l2_loss(self.boundary_out[i] - self.boundary_t))
)
def build_for_eval(self):
with tf.name_scope("eInput"):
self.x_for_eval = tf.placeholder(tf.float32, shape=[None, len(self.weights[0][0])])#tf.Variable(tf.constant(self.initial_x), name="Boundary_point")
with tf.name_scope("eFC_net"):
flowing_x = self.x_for_eval
for i, _ in enumerate(self.weights[0]):
W = tf.constant(self.weights[0][i], name="eWeight_{}".format(i))
b = tf.constant(self.weights[1][i], name="eBias_{}".format(i))
flowing_x = self.function(tf.nn.xw_plus_b(flowing_x, W, b), name="elayer_{}".format(i))
y = flowing_x
full_out = tf.nn.softmax(y)
with tf.name_scope("Binary_class_output"):
self.boundary_out_for_eval = list()
mask = True+np.zeros(self.num_classes, dtype=np.bool)
mask[self.base_class_index] = False
x0 = full_out[:, self.base_class_index]
x1 = tf.reduce_max(tf.boolean_mask(full_out, mask, axis=1), axis=1)
s = x0+x1
self.out_for_eval = tf.stack([x0/s, x1/s], axis=1)
self.boundary_out_for_eval.append(self.out_for_eval)
self.boundary_out_for_eval.append(tf.stack([x1/s, x0/s], axis=1))
def get_boundary_gradient(self, x, class_index):
if not self.grad_session:
self.grad_session = tf.Session()
with self.grad_session.as_default():
self.build_for_eval()
self.grad = list()
for i in range(2):
self.grad.append(tf.gradients(self.boundary_out_for_eval[i], [self.x_for_eval])[0])
self.grad_x = self.x_for_eval
return self.grad_session.run(self.grad[class_index], {self.grad_x: x})
def has_modified_loss(self):
return self.use_modified_loss
def name(self):
return "Hypinv_FC_net_{}".format("-".join([str(ls) for ls in self.layer_sizes]))
|
[
"tensorflow.boolean_mask",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.nn.xw_plus_b",
"tensorflow.nn.l2_loss",
"tensorflow.global_variables_initializer",
"tensorflow.gradients",
"numpy.zeros",
"tensorflow.name_scope",
"tensorflow.nn.softmax",
"numpy.linalg.norm",
"tensorflow.stack"
] |
[((5555, 5589), 'numpy.zeros', 'np.zeros', (['[1, self.layer_sizes[0]]'], {}), '([1, self.layer_sizes[0]])\n', (5563, 5589), True, 'import numpy as np\n'), ((12110, 12144), 'numpy.zeros', 'np.zeros', (['[1, self.layer_sizes[0]]'], {}), '([1, self.layer_sizes[0]])\n', (12118, 12144), True, 'import numpy as np\n'), ((1532, 1554), 'numpy.linalg.norm', 'np.linalg.norm', (['center'], {}), '(center)\n', (1546, 1554), True, 'import numpy as np\n'), ((2134, 2146), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2144, 2146), True, 'import tensorflow as tf\n'), ((2566, 2578), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2576, 2578), True, 'import tensorflow as tf\n'), ((3025, 3037), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3035, 3037), True, 'import tensorflow as tf\n'), ((5697, 5719), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (5710, 5719), True, 'import tensorflow as tf\n'), ((6710, 6733), 'tensorflow.name_scope', 'tf.name_scope', (['"""Target"""'], {}), "('Target')\n", (6723, 6733), True, 'import tensorflow as tf\n'), ((6891, 6976), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, self.num_classes]', 'name': '"""Target_output"""'}), "(tf.float64, shape=[None, self.num_classes], name='Target_output'\n )\n", (6905, 6976), True, 'import tensorflow as tf\n'), ((7002, 7076), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, 2]', 'name': '"""Target_boundary_output"""'}), "(tf.float64, shape=[None, 2], name='Target_boundary_output')\n", (7016, 7076), True, 'import tensorflow as tf\n'), ((7090, 7113), 'tensorflow.name_scope', 'tf.name_scope', (['"""FC_net"""'], {}), "('FC_net')\n", (7103, 7113), True, 'import tensorflow as tf\n'), ((7587, 7603), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y'], {}), '(y)\n', (7600, 7603), True, 'import tensorflow as tf\n'), ((7617, 7653), 'tensorflow.name_scope', 'tf.name_scope', (['"""Binary_class_output"""'], {}), "('Binary_class_output')\n", (7630, 7653), True, 'import tensorflow as tf\n'), ((8099, 8130), 'tensorflow.name_scope', 'tf.name_scope', (['"""Loss_functions"""'], {}), "('Loss_functions')\n", (8112, 8130), True, 'import tensorflow as tf\n'), ((8262, 8296), 'tensorflow.name_scope', 'tf.name_scope', (['"""Binary_class_loss"""'], {}), "('Binary_class_loss')\n", (8275, 8296), True, 'import tensorflow as tf\n'), ((9547, 9570), 'tensorflow.name_scope', 'tf.name_scope', (['"""eInput"""'], {}), "('eInput')\n", (9560, 9570), True, 'import tensorflow as tf\n'), ((9745, 9769), 'tensorflow.name_scope', 'tf.name_scope', (['"""eFC_net"""'], {}), "('eFC_net')\n", (9758, 9769), True, 'import tensorflow as tf\n'), ((10185, 10201), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y'], {}), '(y)\n', (10198, 10201), True, 'import tensorflow as tf\n'), ((10215, 10251), 'tensorflow.name_scope', 'tf.name_scope', (['"""Binary_class_output"""'], {}), "('Binary_class_output')\n", (10228, 10251), True, 'import tensorflow as tf\n'), ((12253, 12275), 'tensorflow.name_scope', 'tf.name_scope', (['"""Input"""'], {}), "('Input')\n", (12266, 12275), True, 'import tensorflow as tf\n'), ((12671, 12694), 'tensorflow.name_scope', 'tf.name_scope', (['"""Target"""'], {}), "('Target')\n", (12684, 12694), True, 'import tensorflow as tf\n'), ((12852, 12917), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, 2]', 'name': '"""Target_output"""'}), "(tf.float64, shape=[None, 2], name='Target_output')\n", (12866, 12917), True, 'import tensorflow as tf\n'), 
((12948, 13022), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '[None, 2]', 'name': '"""Target_boundary_output"""'}), "(tf.float64, shape=[None, 2], name='Target_boundary_output')\n", (12962, 13022), True, 'import tensorflow as tf\n'), ((13036, 13059), 'tensorflow.name_scope', 'tf.name_scope', (['"""FC_net"""'], {}), "('FC_net')\n", (13049, 13059), True, 'import tensorflow as tf\n'), ((13533, 13549), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y'], {}), '(y)\n', (13546, 13549), True, 'import tensorflow as tf\n'), ((13563, 13599), 'tensorflow.name_scope', 'tf.name_scope', (['"""Binary_class_output"""'], {}), "('Binary_class_output')\n", (13576, 13599), True, 'import tensorflow as tf\n'), ((13930, 13964), 'tensorflow.stack', 'tf.stack', (['[x0 / s, x1 / s]'], {'axis': '(1)'}), '([x0 / s, x1 / s], axis=1)\n', (13938, 13964), True, 'import tensorflow as tf\n'), ((14090, 14121), 'tensorflow.name_scope', 'tf.name_scope', (['"""Loss_functions"""'], {}), "('Loss_functions')\n", (14103, 14121), True, 'import tensorflow as tf\n'), ((14253, 14287), 'tensorflow.name_scope', 'tf.name_scope', (['"""Binary_class_loss"""'], {}), "('Binary_class_loss')\n", (14266, 14287), True, 'import tensorflow as tf\n'), ((14912, 14935), 'tensorflow.name_scope', 'tf.name_scope', (['"""eInput"""'], {}), "('eInput')\n", (14925, 14935), True, 'import tensorflow as tf\n'), ((15110, 15134), 'tensorflow.name_scope', 'tf.name_scope', (['"""eFC_net"""'], {}), "('eFC_net')\n", (15123, 15134), True, 'import tensorflow as tf\n'), ((15541, 15557), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y'], {}), '(y)\n', (15554, 15557), True, 'import tensorflow as tf\n'), ((15571, 15607), 'tensorflow.name_scope', 'tf.name_scope', (['"""Binary_class_output"""'], {}), "('Binary_class_output')\n", (15584, 15607), True, 'import tensorflow as tf\n'), ((15957, 15991), 'tensorflow.stack', 'tf.stack', (['[x0 / s, x1 / s]'], {'axis': '(1)'}), '([x0 / s, x1 / s], axis=1)\n', (15965, 15991), True, 'import tensorflow as tf\n'), ((16251, 16263), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (16261, 16263), True, 'import tensorflow as tf\n'), ((2268, 2301), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2299, 2301), True, 'import tensorflow as tf\n'), ((2700, 2733), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2731, 2733), True, 'import tensorflow as tf\n'), ((8009, 8043), 'tensorflow.stack', 'tf.stack', (['[x0 / s, x1 / s]'], {'axis': '(1)'}), '([x0 / s, x1 / s], axis=1)\n', (8017, 8043), True, 'import tensorflow as tf\n'), ((8188, 8220), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.out - self.t)'], {}), '(self.out - self.t)\n', (8201, 8220), True, 'import tensorflow as tf\n'), ((10635, 10669), 'tensorflow.stack', 'tf.stack', (['[x0 / s, x1 / s]'], {'axis': '(1)'}), '([x0 / s, x1 / s], axis=1)\n', (10643, 10669), True, 'import tensorflow as tf\n'), ((13664, 13705), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {'dtype': 'np.bool'}), '(self.num_classes, dtype=np.bool)\n', (13672, 13705), True, 'import numpy as np\n'), ((13836, 13875), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['full_out', 'mask'], {'axis': '(1)'}), '(full_out, mask, axis=1)\n', (13851, 13875), True, 'import tensorflow as tf\n'), ((14045, 14079), 'tensorflow.stack', 'tf.stack', (['[x1 / s, x0 / s]'], {'axis': '(1)'}), '([x1 / s, x0 / s], axis=1)\n', (14053, 14079), True, 'import tensorflow as tf\n'), ((14179, 14211), 'tensorflow.nn.l2_loss', 
'tf.nn.l2_loss', (['(self.out - self.t)'], {}), '(self.out - self.t)\n', (14192, 14211), True, 'import tensorflow as tf\n'), ((15681, 15722), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {'dtype': 'np.bool'}), '(self.num_classes, dtype=np.bool)\n', (15689, 15722), True, 'import numpy as np\n'), ((15854, 15893), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['full_out', 'mask'], {'axis': '(1)'}), '(full_out, mask, axis=1)\n', (15869, 15893), True, 'import tensorflow as tf\n'), ((16099, 16133), 'tensorflow.stack', 'tf.stack', (['[x1 / s, x0 / s]'], {'axis': '(1)'}), '([x1 / s, x0 / s], axis=1)\n', (16107, 16133), True, 'import tensorflow as tf\n'), ((7768, 7809), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {'dtype': 'np.bool'}), '(self.num_classes, dtype=np.bool)\n', (7776, 7809), True, 'import numpy as np\n'), ((7912, 7951), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['self.out', 'mask'], {'axis': '(1)'}), '(self.out, mask, axis=1)\n', (7927, 7951), True, 'import tensorflow as tf\n'), ((10065, 10097), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['flowing_x', 'W', 'b'], {}), '(flowing_x, W, b)\n', (10080, 10097), True, 'import tensorflow as tf\n'), ((10375, 10416), 'numpy.zeros', 'np.zeros', (['self.num_classes'], {'dtype': 'np.bool'}), '(self.num_classes, dtype=np.bool)\n', (10383, 10416), True, 'import numpy as np\n'), ((10529, 10577), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['self.out_for_eval', 'mask'], {'axis': '(1)'}), '(self.out_for_eval, mask, axis=1)\n', (10544, 10577), True, 'import tensorflow as tf\n'), ((15430, 15462), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['flowing_x', 'W', 'b'], {}), '(flowing_x, W, b)\n', (15445, 15462), True, 'import tensorflow as tf\n'), ((7504, 7536), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['flowing_x', 'W', 'b'], {}), '(flowing_x, W, b)\n', (7519, 7536), True, 'import tensorflow as tf\n'), ((13450, 13482), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['flowing_x', 'W', 'b'], {}), '(flowing_x, W, b)\n', (13465, 13482), True, 'import tensorflow as tf\n'), ((3258, 3320), 'tensorflow.gradients', 'tf.gradients', (['self.boundary_out_for_eval[i]', '[self.x_for_eval]'], {}), '(self.boundary_out_for_eval[i], [self.x_for_eval])\n', (3270, 3320), True, 'import tensorflow as tf\n'), ((8830, 8883), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.boundary_out[i] - self.boundary_t)'], {}), '(self.boundary_out[i] - self.boundary_t)\n', (8843, 8883), True, 'import tensorflow as tf\n'), ((14791, 14844), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.boundary_out[i] - self.boundary_t)'], {}), '(self.boundary_out[i] - self.boundary_t)\n', (14804, 14844), True, 'import tensorflow as tf\n'), ((16458, 16520), 'tensorflow.gradients', 'tf.gradients', (['self.boundary_out_for_eval[i]', '[self.x_for_eval]'], {}), '(self.boundary_out_for_eval[i], [self.x_for_eval])\n', (16470, 16520), True, 'import tensorflow as tf\n'), ((8513, 8566), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.boundary_out[i] - self.boundary_t)'], {}), '(self.boundary_out[i] - self.boundary_t)\n', (8526, 8566), True, 'import tensorflow as tf\n'), ((14489, 14542), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.boundary_out[i] - self.boundary_t)'], {}), '(self.boundary_out[i] - self.boundary_t)\n', (14502, 14542), True, 'import tensorflow as tf\n'), ((8617, 8652), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.x - x1_constant)'], {}), '(self.x - x1_constant)\n', (8630, 8652), True, 'import tensorflow as tf\n'), ((14593, 14628), 
'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['(self.x - x1_constant)'], {}), '(self.x - x1_constant)\n', (14606, 14628), True, 'import tensorflow as tf\n')]
|
import numpy
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from src.support import support
class PhraseManager:
def __init__(self, configuration):
self.train_phrases, self.train_labels = self._read_train_phrases()
self.test_phrases, self.test_labels = self._read_test_phrases()
self.configuration = configuration
self.tokenizer = None
def get_phrases_train(self):
return self.train_phrases, self.train_labels
def get_phrases_test(self):
return self.test_phrases, self.test_labels
def get_dataset(self, level = None):
if level == support.WORD_LEVEL:
return self._word_process(self.configuration[support.WORD_MAX_LENGTH])
elif level == support.CHAR_LEVEL:
return self._char_process(self.configuration[support.CHAR_MAX_LENGTH])
else:
return self.train_phrases, self.train_labels, self.test_phrases, self.test_labels
def _word_process(self, word_max_length):
tokenizer = Tokenizer(num_words=self.configuration[support.QUANTITY_WORDS])
tokenizer.fit_on_texts(self.train_phrases)
x_train_sequence = tokenizer.texts_to_sequences(self.train_phrases)
x_test_sequence = tokenizer.texts_to_sequences(self.test_phrases)
x_train = sequence.pad_sequences(x_train_sequence, maxlen=word_max_length, padding='post', truncating='post')
x_test = sequence.pad_sequences(x_test_sequence, maxlen=word_max_length, padding='post', truncating='post')
y_train = numpy.array(self.train_labels)
y_test = numpy.array(self.test_labels)
return x_train, y_train, x_test, y_test
def _char_process(self, max_length):
embedding_w, embedding_dic = self._onehot_dic_build()
x_train = []
for i in range(len(self.train_phrases)):
doc_vec = self._doc_process(self.train_phrases[i].lower(), embedding_dic, max_length)
x_train.append(doc_vec)
x_train = numpy.asarray(x_train, dtype='int64')
y_train = numpy.array(self.train_labels, dtype='float32')
x_test = []
for i in range(len( self.test_phrases)):
doc_vec = self._doc_process( self.test_phrases[i].lower(), embedding_dic, max_length)
x_test.append(doc_vec)
x_test = numpy.asarray(x_test, dtype='int64')
y_test = numpy.array(self.test_labels, dtype='float32')
del embedding_w, embedding_dic
return x_train, y_train, x_test, y_test
def _doc_process(self, doc, embedding_dic, max_length):
min_length = min(max_length, len(doc))
doc_vec = numpy.zeros(max_length, dtype='int64')
for j in range(min_length):
if doc[j] in embedding_dic:
doc_vec[j] = embedding_dic[doc[j]]
else:
doc_vec[j] = embedding_dic['UNK']
return doc_vec
def _onehot_dic_build(self):
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
embedding_dic = {}
embedding_w = []
embedding_dic["UNK"] = 0
embedding_w.append(numpy.zeros(len(alphabet), dtype='float32'))
for i, alpha in enumerate(alphabet):
onehot = numpy.zeros(len(alphabet), dtype='float32')
embedding_dic[alpha] = i + 1
onehot[i] = 1
embedding_w.append(onehot)
embedding_w = numpy.array(embedding_w, dtype='float32')
return embedding_w, embedding_dic
def get_tokenizer(self):
if self.tokenizer is None:
self.tokenizer = Tokenizer(num_words=self.configuration[support.QUANTITY_WORDS])
self.tokenizer.fit_on_texts(self.train_phrases)
return self.tokenizer
def text_to_vector_word(self, text):
vector_sequence = self.get_tokenizer().texts_to_sequences([text])
result = sequence.pad_sequences(vector_sequence, maxlen=self.configuration[support.WORD_MAX_LENGTH], padding='post', truncating='post')
return result
def text_to_vector_word_all(self, texts):
vector_sequence = self.get_tokenizer().texts_to_sequences(texts)
result = sequence.pad_sequences(vector_sequence, maxlen=self.configuration[support.WORD_MAX_LENGTH], padding='post', truncating='post')
return result
def text_to_vector_char(self, text):
embedding_dictionary = self._get_embedding_dictionary()
max_length = self.configuration[support.CHAR_MAX_LENGTH]
min_length = min(max_length, len(text))
text_vector = numpy.zeros(max_length, dtype="int64")
for j in range(min_length):
if text[j] in embedding_dictionary:
text_vector[j] = embedding_dictionary[text[j]]
else:
text_vector[j] = embedding_dictionary["UNK"]
return text_vector
def text_to_vector_char_all(self, texts):
embedding_w, embedding_dic = self._onehot_dic_build()
result = []
for i in range(len(texts)):
doc_vec = self.text_to_vector_char(texts[i].lower())
result.append(doc_vec)
result = numpy.asarray(result, dtype="int64")
del embedding_w, embedding_dic
return result
def _get_embedding_dictionary(self):
return {'UNK': 0, 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10,
'k': 11, 'l': 12,
'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
'w': 23, 'x': 24,
'y': 25, 'z': 26, '0': 27, '1': 28, '2': 29, '3': 30, '4': 31, '5': 32, '6': 33, '7': 34,
'8': 35, '9': 36,
'-': 60, ',': 38, ';': 39, '.': 40, '!': 41, '?': 42, ':': 43, "'": 44, '"': 45, '/': 46,
'\\': 47, '|': 48,
'_': 49, '@': 50, '#': 51, '$': 52, '%': 53, '^': 54, '&': 55, '*': 56, '~': 57, '`': 58,
'+': 59, '=': 61,
'<': 62, '>': 63, '(': 64, ')': 65, '[': 66, ']': 67, '{': 68, '}': 69}
def get_classes(self):
pass
def _read_train_phrases(self):
pass
def _read_test_phrases(self):
pass
class Phrase:
def __init__(self, text, classification):
self.text = text
self.classification = classification
def __str__(self):
return "Classification: " + str(self.classification) + "\nText: " + self.text
|
[
"keras.preprocessing.text.Tokenizer",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"keras.preprocessing.sequence.pad_sequences"
] |
[((1055, 1118), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'self.configuration[support.QUANTITY_WORDS]'}), '(num_words=self.configuration[support.QUANTITY_WORDS])\n', (1064, 1118), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1338, 1442), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_train_sequence'], {'maxlen': 'word_max_length', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(x_train_sequence, maxlen=word_max_length, padding=\n 'post', truncating='post')\n", (1360, 1442), False, 'from keras.preprocessing import sequence\n'), ((1455, 1558), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['x_test_sequence'], {'maxlen': 'word_max_length', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(x_test_sequence, maxlen=word_max_length, padding=\n 'post', truncating='post')\n", (1477, 1558), False, 'from keras.preprocessing import sequence\n'), ((1572, 1602), 'numpy.array', 'numpy.array', (['self.train_labels'], {}), '(self.train_labels)\n', (1583, 1602), False, 'import numpy\n'), ((1620, 1649), 'numpy.array', 'numpy.array', (['self.test_labels'], {}), '(self.test_labels)\n', (1631, 1649), False, 'import numpy\n'), ((2025, 2062), 'numpy.asarray', 'numpy.asarray', (['x_train'], {'dtype': '"""int64"""'}), "(x_train, dtype='int64')\n", (2038, 2062), False, 'import numpy\n'), ((2081, 2128), 'numpy.array', 'numpy.array', (['self.train_labels'], {'dtype': '"""float32"""'}), "(self.train_labels, dtype='float32')\n", (2092, 2128), False, 'import numpy\n'), ((2349, 2385), 'numpy.asarray', 'numpy.asarray', (['x_test'], {'dtype': '"""int64"""'}), "(x_test, dtype='int64')\n", (2362, 2385), False, 'import numpy\n'), ((2403, 2449), 'numpy.array', 'numpy.array', (['self.test_labels'], {'dtype': '"""float32"""'}), "(self.test_labels, dtype='float32')\n", (2414, 2449), False, 'import numpy\n'), ((2663, 2701), 'numpy.zeros', 'numpy.zeros', (['max_length'], {'dtype': '"""int64"""'}), "(max_length, dtype='int64')\n", (2674, 2701), False, 'import numpy\n'), ((3445, 3486), 'numpy.array', 'numpy.array', (['embedding_w'], {'dtype': '"""float32"""'}), "(embedding_w, dtype='float32')\n", (3456, 3486), False, 'import numpy\n'), ((3911, 4042), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['vector_sequence'], {'maxlen': 'self.configuration[support.WORD_MAX_LENGTH]', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(vector_sequence, maxlen=self.configuration[support.\n WORD_MAX_LENGTH], padding='post', truncating='post')\n", (3933, 4042), False, 'from keras.preprocessing import sequence\n'), ((4197, 4328), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['vector_sequence'], {'maxlen': 'self.configuration[support.WORD_MAX_LENGTH]', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(vector_sequence, maxlen=self.configuration[support.\n WORD_MAX_LENGTH], padding='post', truncating='post')\n", (4219, 4328), False, 'from keras.preprocessing import sequence\n'), ((4587, 4625), 'numpy.zeros', 'numpy.zeros', (['max_length'], {'dtype': '"""int64"""'}), "(max_length, dtype='int64')\n", (4598, 4625), False, 'import numpy\n'), ((5163, 5199), 'numpy.asarray', 'numpy.asarray', (['result'], {'dtype': '"""int64"""'}), "(result, dtype='int64')\n", (5176, 5199), False, 'import numpy\n'), ((3623, 3686), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'self.configuration[support.QUANTITY_WORDS]'}), 
'(num_words=self.configuration[support.QUANTITY_WORDS])\n', (3632, 3686), False, 'from keras.preprocessing.text import Tokenizer\n')]
|
import gym
import gym.spaces as spaces
import sys
import socket
from _thread import *
import os
import numpy as np
import pandas as pd
import math as m
import time
import random
class NetEnv(gym.Env):
def __init__(self):
        # Robot state values that will be exchanged with the client
self.robot_state = None
self.pos = None
self.message = np.array(12345, dtype=np.float32)
        # Socket Connection
# MAC find WiFi IP - ipconfig getifaddr en0
HOST = '192.168.1.29'
# Port to listen on (non-privileged ports are > 1023)
PORT = 65432
self.ThreadCount = 0
        print('Setting up server socket...')
# Set up Socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.bind((HOST, PORT))
except socket.error as e:
print(str(e))
print('Waiting for connection[s]...')
self.s.listen()
self.start = 0
# Wait for client[s] to join socket
self.conn1, addr1 = self.s.accept()
print('Connected by: ', addr1)
start_new_thread(self.main_client_thread, (self.conn1, ))
self.conn2, addr2 = self.s.accept()
print('Connected by: ', addr2)
start_new_thread(self.cam_client_thread, (self.conn2, ))
def main_client_thread(self, conn):
data = conn.recv(1024)
print('Main client says: {}'.format(data.decode('utf-8')))
conn.sendall(str.encode('Hi'))
def cam_client_thread(self, conn):
data = conn.recv(1024)
print('Cam client says: {}'.format(data.decode('utf-8')))
conn.sendall(str.encode('Hi'))
def step(self):
self.main_client_thread(self.conn1)
self.cam_client_thread(self.conn2)
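# --- Illustrative sketch (not part of the original module) ---------------------
# Each *_client_thread handler above expects a peer that first sends a short
# message and then reads the server's 'Hi' reply. The helper below is a minimal
# client for that handshake; it is never called here, and the default host/port
# simply mirror the values hard-coded in NetEnv.__init__.
def _example_client(host='192.168.1.29', port=65432, greeting=b'hello from client'):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        sock.sendall(greeting)   # the server prints this message
        return sock.recv(1024)   # the server replies with b'Hi'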
if __name__ == '__main__':
# Construct MAIN SERVER object
env = NetEnv()
# WALK
for i in range(100000):
env.step()
print('Done')
|
[
"numpy.array",
"socket.socket"
] |
[((347, 380), 'numpy.array', 'np.array', (['(12345)'], {'dtype': 'np.float32'}), '(12345, dtype=np.float32)\n', (355, 380), True, 'import numpy as np\n'), ((621, 670), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (634, 670), False, 'import socket\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import uuid
from datetime import datetime
import logging
from math import nan
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import tests.test_app
import superset.viz as viz
from superset import app
from superset.constants import NULL_STRING
from superset.exceptions import SpatialException
from superset.utils.core import DTTM_ALIAS
from .base_tests import SupersetTestCase
from .utils import load_fixture
logger = logging.getLogger(__name__)
class BaseVizTestCase(SupersetTestCase):
def test_constructor_exception_no_datasource(self):
form_data = {}
datasource = None
with self.assertRaises(Exception):
viz.BaseViz(datasource, form_data)
def test_process_metrics(self):
# test TableViz metrics in correct order
form_data = {
"url_params": {},
"row_limit": 500,
"metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"secondary_metric": "sum__SP_POP_TOTL",
"granularity_sqla": "year",
"page_length": 0,
"all_columns": [],
"viz_type": "table",
"since": "2014-01-01",
"until": "2014-01-02",
"metrics": ["sum__SP_POP_TOTL", "SUM(SE_PRM_NENR_MA)", "SUM(SP_URB_TOTL)"],
"country_fieldtype": "cca3",
"percent_metrics": ["count"],
"slice_id": 74,
"time_grain_sqla": None,
"order_by_cols": [],
"groupby": ["country_name"],
"compare_lag": "10",
"limit": "25",
"datasource": "2__table",
"table_timestamp_format": "%Y-%m-%d %H:%M:%S",
"markup_type": "markdown",
"where": "",
"compare_suffix": "o10Y",
}
datasource = Mock()
datasource.type = "table"
test_viz = viz.BaseViz(datasource, form_data)
expect_metric_labels = [
u"sum__SP_POP_TOTL",
u"SUM(SE_PRM_NENR_MA)",
u"SUM(SP_URB_TOTL)",
u"count",
]
self.assertEqual(test_viz.metric_labels, expect_metric_labels)
self.assertEqual(test_viz.all_metrics, expect_metric_labels)
def test_get_df_returns_empty_df(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
datasource = self.get_datasource_mock()
test_viz = viz.BaseViz(datasource, form_data)
result = test_viz.get_df(query_obj)
self.assertEqual(type(result), pd.DataFrame)
self.assertTrue(result.empty)
def test_get_df_handles_dttm_col(self):
form_data = {"dummy": 123}
query_obj = {"granularity": "day"}
results = Mock()
results.query = Mock()
results.status = Mock()
results.error_message = Mock()
datasource = Mock()
datasource.type = "table"
datasource.query = Mock(return_value=results)
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
test_viz = viz.BaseViz(datasource, form_data)
test_viz.df_metrics_to_num = Mock()
test_viz.get_fillna_for_columns = Mock(return_value=0)
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01 05:00:00"]})
datasource.offset = 0
mock_dttm_col = Mock()
datasource.get_column = Mock(return_value=mock_dttm_col)
mock_dttm_col.python_date_format = "epoch_ms"
result = test_viz.get_df(query_obj)
import logging
logger.info(result)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
mock_dttm_col.python_date_format = None
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 5, 0)], name=DTTM_ALIAS)
)
datasource.offset = 1
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 6, 0)], name=DTTM_ALIAS)
)
datasource.offset = 0
results.df = pd.DataFrame(data={DTTM_ALIAS: ["1960-01-01"]})
mock_dttm_col.python_date_format = "%Y-%m-%d"
result = test_viz.get_df(query_obj)
pd.testing.assert_series_equal(
result[DTTM_ALIAS], pd.Series([datetime(1960, 1, 1, 0, 0)], name=DTTM_ALIAS)
)
def test_cache_timeout(self):
datasource = self.get_datasource_mock()
datasource.cache_timeout = 0
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(0, test_viz.cache_timeout)
datasource.cache_timeout = 156
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(156, test_viz.cache_timeout)
datasource.cache_timeout = None
datasource.database.cache_timeout = 0
self.assertEqual(0, test_viz.cache_timeout)
datasource.database.cache_timeout = 1666
self.assertEqual(1666, test_viz.cache_timeout)
datasource.database.cache_timeout = None
test_viz = viz.BaseViz(datasource, form_data={})
self.assertEqual(app.config["CACHE_DEFAULT_TIMEOUT"], test_viz.cache_timeout)
class TableVizTestCase(SupersetTestCase):
def test_get_data_applies_percentage(self):
form_data = {
"groupby": ["groupA", "groupB"],
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"count",
"avg__C",
],
"percent_metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"avg__B",
],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"SUM(value1)": [15, 20, 25, 40],
"avg__B": [10, 20, 5, 15],
"avg__C": [11, 22, 33, 44],
"count": [6, 7, 8, 9],
"groupA": ["A", "B", "C", "C"],
"groupB": ["x", "x", "y", "z"],
}
)
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data and computes percents
self.assertEqual(
[
"groupA",
"groupB",
"SUM(value1)",
"count",
"avg__C",
"%SUM(value1)",
"%avg__B",
],
list(data["columns"]),
)
expected = [
{
"groupA": "A",
"groupB": "x",
"SUM(value1)": 15,
"count": 6,
"avg__C": 11,
"%SUM(value1)": 0.15,
"%avg__B": 0.2,
},
{
"groupA": "B",
"groupB": "x",
"SUM(value1)": 20,
"count": 7,
"avg__C": 22,
"%SUM(value1)": 0.2,
"%avg__B": 0.4,
},
{
"groupA": "C",
"groupB": "y",
"SUM(value1)": 25,
"count": 8,
"avg__C": 33,
"%SUM(value1)": 0.25,
"%avg__B": 0.1,
},
{
"groupA": "C",
"groupB": "z",
"SUM(value1)": 40,
"count": 9,
"avg__C": 44,
"%SUM(value1)": 0.4,
"%avg__B": 0.3,
},
]
self.assertEqual(expected, data["records"])
def test_parse_adhoc_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SIMPLE",
"clause": "HAVING",
"subject": "SUM(value1)",
"operator": "<",
"comparator": "10",
},
{
"expressionType": "SQL",
"clause": "HAVING",
"sqlExpression": "SUM(value1) > 5",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual(
[{"op": "<", "val": "10", "col": "SUM(value1)"}],
query_obj["extras"]["having_druid"],
)
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("(SUM(value1) > 5)", query_obj["extras"]["having"])
def test_adhoc_filters_overwrite_legacy_filters(self):
form_data = {
"metrics": [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
],
"adhoc_filters": [
{
"expressionType": "SIMPLE",
"clause": "WHERE",
"subject": "value2",
"operator": ">",
"comparator": "100",
},
{
"expressionType": "SQL",
"clause": "WHERE",
"sqlExpression": "value3 in ('North America')",
},
],
"having": "SUM(value1) > 5",
}
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
[{"col": "value2", "val": "100", "op": ">"}], query_obj["filter"]
)
self.assertEqual([], query_obj["extras"]["having_druid"])
self.assertEqual("(value3 in ('North America'))", query_obj["extras"]["where"])
self.assertEqual("", query_obj["extras"]["having"])
def test_query_obj_merges_percent_metrics(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["sum__A", "count", "avg__C"],
"percent_metrics": ["sum__A", "avg__B", "max__Y"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(
["sum__A", "count", "avg__C", "avg__B", "max__Y"], query_obj["metrics"]
)
def test_query_obj_throws_columns_and_metrics(self):
datasource = self.get_datasource_mock()
form_data = {"all_columns": ["A", "B"], "metrics": ["x", "y"]}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
del form_data["metrics"]
form_data["groupby"] = ["B", "C"]
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.query_obj()
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_merges_all_columns(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {
"all_columns": ["colA", "colB", "colC"],
"order_by_cols": ['["colA", "colB"]', '["colC"]'],
}
super_query_obj.return_value = {
"columns": ["colD", "colC"],
"groupby": ["colA", "colB"],
}
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(form_data["all_columns"], query_obj["columns"])
self.assertEqual([], query_obj["groupby"])
self.assertEqual([["colA", "colB"], ["colC"]], query_obj["orderby"])
def test_query_obj_uses_sortby(self):
datasource = self.get_datasource_mock()
form_data = {
"metrics": ["colA", "colB"],
"order_desc": False,
}
def run_test(metric):
form_data["timeseries_limit_metric"] = metric
test_viz = viz.TableViz(datasource, form_data)
query_obj = test_viz.query_obj()
self.assertEqual(["colA", "colB", metric], query_obj["metrics"])
self.assertEqual([(metric, True)], query_obj["orderby"])
run_test("simple_metric")
run_test(
{
"label": "adhoc_metric",
"expressionType": "SIMPLE",
"aggregate": "SUM",
"column": {"column_name": "sort_column",},
}
)
def test_should_be_timeseries_raises_when_no_granularity(self):
datasource = self.get_datasource_mock()
form_data = {"include_time": True}
with self.assertRaises(Exception):
test_viz = viz.TableViz(datasource, form_data)
test_viz.should_be_timeseries()
def test_adhoc_metric_with_sortby(self):
metrics = [
{
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "sum_value",
"column": {"column_name": "value1", "type": "DOUBLE"},
}
]
form_data = {
"metrics": metrics,
"timeseries_limit_metric": {
"expressionType": "SIMPLE",
"aggregate": "SUM",
"label": "SUM(value1)",
"column": {"column_name": "value1", "type": "DOUBLE"},
},
"order_desc": False,
}
df = pd.DataFrame({"SUM(value1)": [15], "sum_value": [15]})
datasource = self.get_datasource_mock()
test_viz = viz.TableViz(datasource, form_data)
data = test_viz.get_data(df)
self.assertEqual(["sum_value"], data["columns"])
class DistBarVizTestCase(SupersetTestCase):
def test_groupby_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "anchovies", None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("votes", data["key"])
expected_values = [
{"x": "pepperoni", "y": 5},
{"x": "cheese", "y": 3},
{"x": NULL_STRING, "y": 2},
{"x": "anchovies", "y": 1},
]
self.assertEqual(expected_values, data["values"])
def test_groupby_nans(self):
form_data = {
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["beds"],
"columns": [],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame({"beds": [0, 1, nan, 2], "count": [30, 42, 3, 29]})
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)[0]
self.assertEqual("count", data["key"])
expected_values = [
{"x": "1.0", "y": 42},
{"x": "0.0", "y": 30},
{"x": "2.0", "y": 29},
{"x": NULL_STRING, "y": 3},
]
self.assertEqual(expected_values, data["values"])
def test_column_nulls(self):
form_data = {
"metrics": ["votes"],
"adhoc_filters": [],
"groupby": ["toppings"],
"columns": ["role"],
}
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"toppings": ["cheese", "pepperoni", "cheese", "pepperoni"],
"role": ["engineer", "engineer", None, None],
"votes": [3, 5, 1, 2],
}
)
test_viz = viz.DistributionBarViz(datasource, form_data)
data = test_viz.get_data(df)
expected = [
{
"key": NULL_STRING,
"values": [{"x": "pepperoni", "y": 2}, {"x": "cheese", "y": 1}],
},
{
"key": "engineer",
"values": [{"x": "pepperoni", "y": 5}, {"x": "cheese", "y": 3}],
},
]
self.assertEqual(expected, data)
class PairedTTestTestCase(SupersetTestCase):
def test_get_data_transforms_dataframe(self):
form_data = {
"groupby": ["groupA", "groupB", "groupC"],
"metrics": ["metric1", "metric2", "metric3"],
}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"metric1": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 4},
{"x": 200, "y": 5},
{"x": 300, "y": 6},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 7},
{"x": 200, "y": 8},
{"x": 300, "y": 9},
],
"group": ("c1", "c2", "c3"),
},
],
"metric2": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 40},
{"x": 200, "y": 50},
{"x": 300, "y": 60},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 70},
{"x": 200, "y": 80},
{"x": 300, "y": 90},
],
"group": ("c1", "c2", "c3"),
},
],
"metric3": [
{
"values": [
{"x": 100, "y": 100},
{"x": 200, "y": 200},
{"x": 300, "y": 300},
],
"group": ("a1", "a2", "a3"),
},
{
"values": [
{"x": 100, "y": 400},
{"x": 200, "y": 500},
{"x": 300, "y": 600},
],
"group": ("b1", "b2", "b3"),
},
{
"values": [
{"x": 100, "y": 700},
{"x": 200, "y": 800},
{"x": 300, "y": 900},
],
"group": ("c1", "c2", "c3"),
},
],
}
self.assertEqual(data, expected)
def test_get_data_empty_null_keys(self):
form_data = {"groupby": [], "metrics": ["", None]}
datasource = self.get_datasource_mock()
# Test data
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300]
raw[""] = [1, 2, 3]
raw[None] = [10, 20, 30]
df = pd.DataFrame(raw)
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data)
data = pairedTTestViz.get_data(df)
# Check method correctly transforms data
expected = {
"N/A": [
{
"values": [
{"x": 100, "y": 1},
{"x": 200, "y": 2},
{"x": 300, "y": 3},
],
"group": "All",
}
],
"NULL": [
{
"values": [
{"x": 100, "y": 10},
{"x": 200, "y": 20},
{"x": 300, "y": 30},
],
"group": "All",
}
],
}
self.assertEqual(data, expected)
class PartitionVizTestCase(SupersetTestCase):
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_time_series_option(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {}
test_viz = viz.PartitionViz(datasource, form_data)
super_query_obj.return_value = {}
query_obj = test_viz.query_obj()
self.assertFalse(query_obj["is_timeseries"])
test_viz.form_data["time_series_option"] = "agg_sum"
query_obj = test_viz.query_obj()
self.assertTrue(query_obj["is_timeseries"])
def test_levels_for_computes_levels(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
time_op = "agg_sum"
test_viz = viz.PartitionViz(Mock(), {})
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {DTTM_ALIAS: 1800, "metric1": 45, "metric2": 450, "metric3": 4500}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 600, "b1": 600, "c1": 600},
"metric1": {"a1": 6, "b1": 15, "c1": 24},
"metric2": {"a1": 60, "b1": 150, "c1": 240},
"metric3": {"a1": 600, "b1": 1500, "c1": 2400},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
time_op = "agg_mean"
levels = test_viz.levels_for(time_op, groups, df)
self.assertEqual(4, len(levels))
expected = {
DTTM_ALIAS: 200.0,
"metric1": 5.0,
"metric2": 50.0,
"metric3": 500.0,
}
self.assertEqual(expected, levels[0].to_dict())
expected = {
DTTM_ALIAS: {"a1": 200, "c1": 200, "b1": 200},
"metric1": {"a1": 2, "b1": 5, "c1": 8},
"metric2": {"a1": 20, "b1": 50, "c1": 80},
"metric3": {"a1": 200, "b1": 500, "c1": 800},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(["groupA", "groupB"], levels[2].index.names)
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_diff_computes_difference(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {})
time_op = "point_diff"
levels = test_viz.levels_for_diff(time_op, groups, df)
expected = {"metric1": 6, "metric2": 60, "metric3": 600}
self.assertEqual(expected, levels[0].to_dict())
expected = {
"metric1": {"a1": 2, "b1": 2, "c1": 2},
"metric2": {"a1": 20, "b1": 20, "c1": 20},
"metric3": {"a1": 200, "b1": 200, "c1": 200},
}
self.assertEqual(expected, levels[1].to_dict())
self.assertEqual(4, len(levels))
self.assertEqual(["groupA", "groupB", "groupC"], levels[3].index.names)
def test_levels_for_time_calls_process_data_and_drops_cols(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
groups = ["groupA", "groupB", "groupC"]
test_viz = viz.PartitionViz(Mock(), {"groupby": groups})
def return_args(df_drop, aggregate):
return df_drop
test_viz.process_data = Mock(side_effect=return_args)
levels = test_viz.levels_for_time(groups, df)
self.assertEqual(4, len(levels))
cols = [DTTM_ALIAS, "metric1", "metric2", "metric3"]
self.assertEqual(sorted(cols), sorted(levels[0].columns.tolist()))
cols += ["groupA"]
self.assertEqual(sorted(cols), sorted(levels[1].columns.tolist()))
cols += ["groupB"]
self.assertEqual(sorted(cols), sorted(levels[2].columns.tolist()))
cols += ["groupC"]
self.assertEqual(sorted(cols), sorted(levels[3].columns.tolist()))
self.assertEqual(4, len(test_viz.process_data.mock_calls))
def test_nest_values_returns_hierarchy(self):
raw = {}
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
levels = test_viz.levels_for("agg_sum", groups, df)
nest = test_viz.nest_values(levels)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
def test_nest_procs_returns_hierarchy(self):
raw = {}
raw[DTTM_ALIAS] = [100, 200, 300, 100, 200, 300, 100, 200, 300]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900]
df = pd.DataFrame(raw)
test_viz = viz.PartitionViz(Mock(), {})
groups = ["groupA", "groupB", "groupC"]
metrics = ["metric1", "metric2", "metric3"]
procs = {}
for i in range(0, 4):
df_drop = df.drop(groups[i:], 1)
pivot = df_drop.pivot_table(
index=DTTM_ALIAS, columns=groups[:i], values=metrics
)
procs[i] = pivot
nest = test_viz.nest_procs(procs)
self.assertEqual(3, len(nest))
for i in range(0, 3):
self.assertEqual("metric" + str(i + 1), nest[i]["name"])
self.assertEqual(None, nest[i].get("val"))
self.assertEqual(3, len(nest[0]["children"]))
self.assertEqual(3, len(nest[0]["children"][0]["children"]))
self.assertEqual(1, len(nest[0]["children"][0]["children"][0]["children"]))
self.assertEqual(
1, len(nest[0]["children"][0]["children"][0]["children"][0]["children"])
)
def test_get_data_calls_correct_method(self):
test_viz = viz.PartitionViz(Mock(), {})
df = Mock()
with self.assertRaises(ValueError):
test_viz.get_data(df)
test_viz.levels_for = Mock(return_value=1)
test_viz.nest_values = Mock(return_value=1)
test_viz.form_data["groupby"] = ["groups"]
test_viz.form_data["time_series_option"] = "not_time"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "agg_sum"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "agg_mean"
test_viz.get_data(df)
self.assertEqual("agg_mean", test_viz.levels_for.mock_calls[2][1][0])
test_viz.form_data["time_series_option"] = "point_diff"
test_viz.levels_for_diff = Mock(return_value=1)
test_viz.get_data(df)
self.assertEqual("point_diff", test_viz.levels_for_diff.mock_calls[0][1][0])
test_viz.form_data["time_series_option"] = "point_percent"
test_viz.get_data(df)
self.assertEqual("point_percent", test_viz.levels_for_diff.mock_calls[1][1][0])
test_viz.form_data["time_series_option"] = "point_factor"
test_viz.get_data(df)
self.assertEqual("point_factor", test_viz.levels_for_diff.mock_calls[2][1][0])
test_viz.levels_for_time = Mock(return_value=1)
test_viz.nest_procs = Mock(return_value=1)
test_viz.form_data["time_series_option"] = "adv_anal"
test_viz.get_data(df)
self.assertEqual(1, len(test_viz.levels_for_time.mock_calls))
self.assertEqual(1, len(test_viz.nest_procs.mock_calls))
test_viz.form_data["time_series_option"] = "time_series"
test_viz.get_data(df)
self.assertEqual("agg_sum", test_viz.levels_for.mock_calls[3][1][0])
self.assertEqual(7, len(test_viz.nest_values.mock_calls))
class RoseVisTestCase(SupersetTestCase):
def test_rose_vis_get_data(self):
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
t3 = pd.Timestamp("2004")
raw[DTTM_ALIAS] = [t1, t2, t3, t1, t2, t3, t1, t2, t3]
raw["groupA"] = ["a1", "a1", "a1", "b1", "b1", "b1", "c1", "c1", "c1"]
raw["groupB"] = ["a2", "a2", "a2", "b2", "b2", "b2", "c2", "c2", "c2"]
raw["groupC"] = ["a3", "a3", "a3", "b3", "b3", "b3", "c3", "c3", "c3"]
raw["metric1"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df = pd.DataFrame(raw)
fd = {"metrics": ["metric1"], "groupby": ["groupA"]}
test_viz = viz.RoseViz(Mock(), fd)
test_viz.metrics = fd["metrics"]
res = test_viz.get_data(df)
expected = {
946684800000000000: [
{"time": t1, "value": 1, "key": ("a1",), "name": ("a1",)},
{"time": t1, "value": 4, "key": ("b1",), "name": ("b1",)},
{"time": t1, "value": 7, "key": ("c1",), "name": ("c1",)},
],
1009843200000000000: [
{"time": t2, "value": 2, "key": ("a1",), "name": ("a1",)},
{"time": t2, "value": 5, "key": ("b1",), "name": ("b1",)},
{"time": t2, "value": 8, "key": ("c1",), "name": ("c1",)},
],
1072915200000000000: [
{"time": t3, "value": 3, "key": ("a1",), "name": ("a1",)},
{"time": t3, "value": 6, "key": ("b1",), "name": ("b1",)},
{"time": t3, "value": 9, "key": ("c1",), "name": ("c1",)},
],
}
self.assertEqual(expected, res)
class TimeSeriesTableVizTestCase(SupersetTestCase):
def test_get_data_metrics(self):
form_data = {"metrics": ["sum__A", "count"], "groupby": []}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t2]
raw["sum__A"] = [15, 20]
raw["count"] = [6, 7]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["count", "sum__A"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"sum__A": 15, "count": 6},
t2.strftime(time_format): {"sum__A": 20, "count": 7},
}
self.assertEqual(expected, data["records"])
def test_get_data_group_by(self):
form_data = {"metrics": ["sum__A"], "groupby": ["groupby1"]}
datasource = self.get_datasource_mock()
raw = {}
t1 = pd.Timestamp("2000")
t2 = pd.Timestamp("2002")
raw[DTTM_ALIAS] = [t1, t1, t1, t2, t2, t2]
raw["sum__A"] = [15, 20, 25, 30, 35, 40]
raw["groupby1"] = ["a1", "a2", "a3", "a1", "a2", "a3"]
df = pd.DataFrame(raw)
test_viz = viz.TimeTableViz(datasource, form_data)
data = test_viz.get_data(df)
# Check method correctly transforms data
self.assertEqual(set(["a1", "a2", "a3"]), set(data["columns"]))
time_format = "%Y-%m-%d %H:%M:%S"
expected = {
t1.strftime(time_format): {"a1": 15, "a2": 20, "a3": 25},
t2.strftime(time_format): {"a1": 30, "a2": 35, "a3": 40},
}
self.assertEqual(expected, data["records"])
@patch("superset.viz.BaseViz.query_obj")
def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["a"]}
super_query_obj.return_value = {}
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
form_data["metrics"] = ["x", "y"]
test_viz = viz.TimeTableViz(datasource, form_data)
with self.assertRaises(Exception):
test_viz.query_obj()
class BaseDeckGLVizTestCase(SupersetTestCase):
def test_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == [form_data.get("size")]
form_data = {}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_metrics()
assert result == []
def test_scatterviz_get_metrics(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {"type": "metric", "value": "int"}
result = test_viz_deckgl.get_metrics()
assert result == ["int"]
form_data = {}
test_viz_deckgl = viz.DeckScatterViz(datasource, form_data)
test_viz_deckgl.point_radius_fixed = {}
result = test_viz_deckgl.get_metrics()
assert result == []
def test_get_js_columns(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_d = {"a": "dummy1", "b": "dummy2", "c": "dummy3"}
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
result = test_viz_deckgl.get_js_columns(mock_d)
assert result == {"color": None}
def test_get_properties(self):
mock_d = {}
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(NotImplementedError) as context:
test_viz_deckgl.get_properties(mock_d)
self.assertTrue("" in str(context.exception))
def test_process_spatial_query_obj(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
mock_key = "spatial_key"
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(ValueError) as context:
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
self.assertTrue("Bad spatial key" in str(context.exception))
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": ["lon", "lat"],
"delimited_key": ["lonlat"],
"geohash_key": ["geo"],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
mock_gb = []
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data)
test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb)
assert expected_results.get(mock_key) == mock_gb
def test_geojson_query_obj(self):
form_data = load_fixture("deck_geojson_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.DeckGeoJson(datasource, form_data)
results = test_viz_deckgl.query_obj()
assert results["metrics"] == []
assert results["groupby"] == []
assert results["columns"] == ["test_col"]
def test_parse_coordinates(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
viz_instance = viz.BaseDeckGLViz(datasource, form_data)
coord = viz_instance.parse_coordinates("1.23, 3.21")
self.assertEqual(coord, (1.23, 3.21))
coord = viz_instance.parse_coordinates("1.23 3.21")
self.assertEqual(coord, (1.23, 3.21))
self.assertEqual(viz_instance.parse_coordinates(None), None)
self.assertEqual(viz_instance.parse_coordinates(""), None)
def test_parse_coordinates_raises(self):
form_data = load_fixture("deck_path_form_data.json")
datasource = self.get_datasource_mock()
test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data)
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("NULL")
with self.assertRaises(SpatialException):
test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj")
@patch("superset.utils.core.uuid.uuid4")
def test_filter_nulls(self, mock_uuid4):
mock_uuid4.return_value = uuid.UUID("12345678123456781234567812345678")
test_form_data = {
"latlong_key": {"type": "latlong", "lonCol": "lon", "latCol": "lat"},
"delimited_key": {"type": "delimited", "lonlatCol": "lonlat"},
"geohash_key": {"type": "geohash", "geohashCol": "geo"},
}
datasource = self.get_datasource_mock()
expected_results = {
"latlong_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lat",
"isExtra": False,
},
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lon",
"isExtra": False,
},
],
"delimited_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "lonlat",
"isExtra": False,
}
],
"geohash_key": [
{
"clause": "WHERE",
"expressionType": "SIMPLE",
"filterOptionName": "12345678-1234-5678-1234-567812345678",
"comparator": "",
"operator": "IS NOT NULL",
"subject": "geo",
"isExtra": False,
}
],
}
for mock_key in ["latlong_key", "delimited_key", "geohash_key"]:
test_viz_deckgl = viz.BaseDeckGLViz(datasource, test_form_data.copy())
test_viz_deckgl.spatial_control_keys = [mock_key]
test_viz_deckgl.add_null_filters()
adhoc_filters = test_viz_deckgl.form_data["adhoc_filters"]
assert expected_results.get(mock_key) == adhoc_filters
class TimeSeriesVizTestCase(SupersetTestCase):
def test_timeseries_unicode_data(self):
datasource = self.get_datasource_mock()
form_data = {"groupby": ["name"], "metrics": ["sum__payout"]}
raw = {}
raw["name"] = [
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid C.F.🇺🇸🇬🇧",
"Real Madrid Basket",
"Real Madrid Basket",
]
raw["__timestamp"] = [
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
"2018-02-20T00:00:00",
"2018-03-09T00:00:00",
]
raw["sum__payout"] = [2, 2, 4, 4]
df = pd.DataFrame(raw)
test_viz = viz.NVD3TimeSeriesViz(datasource, form_data)
        viz_data = test_viz.get_data(df)
expected = [
{
u"values": [
{u"y": 4, u"x": u"2018-02-20T00:00:00"},
{u"y": 4, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid Basket",),
},
{
u"values": [
{u"y": 2, u"x": u"2018-02-20T00:00:00"},
{u"y": 2, u"x": u"2018-03-09T00:00:00"},
],
u"key": (u"Real Madrid C.F.\U0001f1fa\U0001f1f8\U0001f1ec\U0001f1e7",),
},
]
self.assertEqual(expected, viz_data)
def test_process_data_resample(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
{
"__timestamp": pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 5.0, 7.0],
}
)
self.assertEqual(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "sum", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0],
)
np.testing.assert_equal(
viz.NVD3TimeSeriesViz(
datasource,
{"metrics": ["y"], "resample_method": "asfreq", "resample_rule": "1D"},
)
.process_data(df)["y"]
.tolist(),
[1.0, 2.0, np.nan, np.nan, 5.0, np.nan, 7.0],
)
def test_apply_rolling(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
index=pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
data={"y": [1.0, 2.0, 3.0, 4.0]},
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "cumsum",
"rolling_periods": 0,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 6.0, 10.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "sum",
"rolling_periods": 2,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 3.0, 5.0, 7.0],
)
self.assertEqual(
viz.BigNumberViz(
datasource,
{
"metrics": ["y"],
"rolling_type": "mean",
"rolling_periods": 10,
"min_periods": 0,
},
)
.apply_rolling(df)["y"]
.tolist(),
[1.0, 1.5, 2.0, 2.5],
)
class BigNumberVizTestCase(SupersetTestCase):
def test_get_data(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, 3.0, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
self.assertEqual(data[2], {DTTM_ALIAS: pd.Timestamp("2019-01-05"), "y": 3})
def test_get_data_with_none(self):
datasource = self.get_datasource_mock()
df = pd.DataFrame(
data={
DTTM_ALIAS: pd.to_datetime(
["2019-01-01", "2019-01-02", "2019-01-05", "2019-01-07"]
),
"y": [1.0, 2.0, None, 4.0],
}
)
data = viz.BigNumberViz(datasource, {"metrics": ["y"]}).get_data(df)
assert np.isnan(data[2]["y"])
|
[
"logging.getLogger",
"superset.viz.DistributionBarViz",
"superset.viz.BaseViz",
"superset.viz.DeckScatterViz",
"unittest.mock.patch",
"superset.viz.TableViz",
"pandas.to_datetime",
"datetime.datetime",
"superset.viz.PartitionViz",
"pandas.DataFrame",
"superset.viz.DeckGeoJson",
"unittest.mock.Mock",
"superset.viz.NVD3TimeSeriesViz",
"superset.viz.TimeTableViz",
"numpy.isnan",
"uuid.UUID",
"superset.viz.BigNumberViz",
"superset.viz.BaseDeckGLViz",
"pandas.Timestamp"
] |
[((1253, 1280), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1270, 1280), False, 'import logging\n'), ((13055, 13094), 'unittest.mock.patch', 'patch', (['"""superset.viz.BaseViz.query_obj"""'], {}), "('superset.viz.BaseViz.query_obj')\n", (13060, 13094), False, 'from unittest.mock import Mock, patch\n'), ((23258, 23297), 'unittest.mock.patch', 'patch', (['"""superset.viz.BaseViz.query_obj"""'], {}), "('superset.viz.BaseViz.query_obj')\n", (23263, 23297), False, 'from unittest.mock import Mock, patch\n'), ((36882, 36921), 'unittest.mock.patch', 'patch', (['"""superset.viz.BaseViz.query_obj"""'], {}), "('superset.viz.BaseViz.query_obj')\n", (36887, 36921), False, 'from unittest.mock import Mock, patch\n'), ((42026, 42065), 'unittest.mock.patch', 'patch', (['"""superset.utils.core.uuid.uuid4"""'], {}), "('superset.utils.core.uuid.uuid4')\n", (42031, 42065), False, 'from unittest.mock import Mock, patch\n'), ((2623, 2629), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2627, 2629), False, 'from unittest.mock import Mock, patch\n'), ((2683, 2717), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (2694, 2717), True, 'import superset.viz as viz\n'), ((3215, 3249), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (3226, 3249), True, 'import superset.viz as viz\n'), ((3526, 3532), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3530, 3532), False, 'from unittest.mock import Mock, patch\n'), ((3557, 3563), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3561, 3563), False, 'from unittest.mock import Mock, patch\n'), ((3589, 3595), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3593, 3595), False, 'from unittest.mock import Mock, patch\n'), ((3628, 3634), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3632, 3634), False, 'from unittest.mock import Mock, patch\n'), ((3656, 3662), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3660, 3662), False, 'from unittest.mock import Mock, patch\n'), ((3724, 3750), 'unittest.mock.Mock', 'Mock', ([], {'return_value': 'results'}), '(return_value=results)\n', (3728, 3750), False, 'from unittest.mock import Mock, patch\n'), ((3775, 3781), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3779, 3781), False, 'from unittest.mock import Mock, patch\n'), ((3814, 3846), 'unittest.mock.Mock', 'Mock', ([], {'return_value': 'mock_dttm_col'}), '(return_value=mock_dttm_col)\n', (3818, 3846), False, 'from unittest.mock import Mock, patch\n'), ((3867, 3901), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (3878, 3901), True, 'import superset.viz as viz\n'), ((3939, 3945), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (3943, 3945), False, 'from unittest.mock import Mock, patch\n'), ((3988, 4008), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(0)'}), '(return_value=0)\n', (3992, 4008), False, 'from unittest.mock import Mock, patch\n'), ((4031, 4087), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{DTTM_ALIAS: ['1960-01-01 05:00:00']}"}), "(data={DTTM_ALIAS: ['1960-01-01 05:00:00']})\n", (4043, 4087), True, 'import pandas as pd\n'), ((4142, 4148), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (4146, 4148), False, 'from unittest.mock import Mock, patch\n'), ((4181, 4213), 'unittest.mock.Mock', 'Mock', ([], {'return_value': 'mock_dttm_col'}), '(return_value=mock_dttm_col)\n', (4185, 4213), False, 'from unittest.mock import Mock, patch\n'), ((5001, 5048), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{DTTM_ALIAS: ['1960-01-01']}"}), "(data={DTTM_ALIAS: ['1960-01-01']})\n", (5013, 5048), True, 'import pandas as pd\n'), ((5425, 5462), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource'], {'form_data': '{}'}), '(datasource, form_data={})\n', (5436, 5462), True, 'import superset.viz as viz\n'), ((5574, 5611), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource'], {'form_data': '{}'}), '(datasource, form_data={})\n', (5585, 5611), True, 'import superset.viz as viz\n'), ((5979, 6016), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource'], {'form_data': '{}'}), '(datasource, form_data={})\n', (5990, 6016), True, 'import superset.viz as viz\n'), ((6987, 7184), 'pandas.DataFrame', 'pd.DataFrame', (["{'SUM(value1)': [15, 20, 25, 40], 'avg__B': [10, 20, 5, 15], 'avg__C': [11,\n 22, 33, 44], 'count': [6, 7, 8, 9], 'groupA': ['A', 'B', 'C', 'C'],\n 'groupB': ['x', 'x', 'y', 'z']}"], {}), "({'SUM(value1)': [15, 20, 25, 40], 'avg__B': [10, 20, 5, 15],\n 'avg__C': [11, 22, 33, 44], 'count': [6, 7, 8, 9], 'groupA': ['A', 'B',\n 'C', 'C'], 'groupB': ['x', 'x', 'y', 'z']})\n", (6999, 7184), True, 'import pandas as pd\n'), ((7330, 7365), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (7342, 7365), True, 'import superset.viz as viz\n'), ((10191, 10226), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (10203, 10226), True, 'import superset.viz as viz\n'), ((11655, 11690), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (11667, 11690), True, 'import superset.viz as viz\n'), ((12330, 12365), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (12342, 12365), True, 'import superset.viz as viz\n'), ((13509, 13544), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (13521, 13544), True, 'import superset.viz as viz\n'), ((15551, 15605), 'pandas.DataFrame', 'pd.DataFrame', (["{'SUM(value1)': [15], 'sum_value': [15]}"], {}), "({'SUM(value1)': [15], 'sum_value': [15]})\n", (15563, 15605), True, 'import pandas as pd\n'), ((15673, 15708), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (15685, 15708), True, 'import superset.viz as viz\n'), ((16107, 16204), 'pandas.DataFrame', 'pd.DataFrame', (["{'toppings': ['cheese', 'pepperoni', 'anchovies', None], 'votes': [3, 5, 1, 2]}"], {}), "({'toppings': ['cheese', 'pepperoni', 'anchovies', None],\n 'votes': [3, 5, 1, 2]})\n", (16119, 16204), True, 'import pandas as pd\n'), ((16289, 16334), 'superset.viz.DistributionBarViz', 'viz.DistributionBarViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (16311, 16334), True, 'import superset.viz as viz\n'), ((16929, 16993), 'pandas.DataFrame', 'pd.DataFrame', (["{'beds': [0, 1, nan, 2], 'count': [30, 42, 3, 29]}"], {}), "({'beds': [0, 1, nan, 2], 'count': [30, 42, 3, 29]})\n", (16941, 16993), True, 'import pandas as pd\n'), ((17013, 17058), 'superset.viz.DistributionBarViz', 'viz.DistributionBarViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (17035, 17058), True, 'import superset.viz as viz\n'), ((17651, 17798), 'pandas.DataFrame', 'pd.DataFrame', (["{'toppings': ['cheese', 'pepperoni', 'cheese', 'pepperoni'], 'role': [\n 'engineer', 'engineer', None, None], 'votes': [3, 5, 1, 2]}"], {}), "({'toppings': 
['cheese', 'pepperoni', 'cheese', 'pepperoni'],\n 'role': ['engineer', 'engineer', None, None], 'votes': [3, 5, 1, 2]})\n", (17663, 17798), True, 'import pandas as pd\n'), ((17899, 17944), 'superset.viz.DistributionBarViz', 'viz.DistributionBarViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (17921, 17944), True, 'import superset.viz as viz\n'), ((19180, 19197), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (19192, 19197), True, 'import pandas as pd\n'), ((22351, 22368), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (22363, 22368), True, 'import pandas as pd\n'), ((23454, 23493), 'superset.viz.PartitionViz', 'viz.PartitionViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (23470, 23493), True, 'import superset.viz as viz\n'), ((24357, 24374), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (24369, 24374), True, 'import pandas as pd\n'), ((26583, 26600), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (26595, 26600), True, 'import pandas as pd\n'), ((27881, 27898), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (27893, 27898), True, 'import pandas as pd\n'), ((28118, 28147), 'unittest.mock.Mock', 'Mock', ([], {'side_effect': 'return_args'}), '(side_effect=return_args)\n', (28122, 28147), False, 'from unittest.mock import Mock, patch\n'), ((29256, 29273), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (29268, 29273), True, 'import pandas as pd\n'), ((30394, 30411), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (30406, 30411), True, 'import pandas as pd\n'), ((31482, 31488), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (31486, 31488), False, 'from unittest.mock import Mock, patch\n'), ((31597, 31617), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (31601, 31617), False, 'from unittest.mock import Mock, patch\n'), ((31649, 31669), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (31653, 31669), False, 'from unittest.mock import Mock, patch\n'), ((32327, 32347), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (32331, 32347), False, 'from unittest.mock import Mock, patch\n'), ((32866, 32886), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (32870, 32886), False, 'from unittest.mock import Mock, patch\n'), ((32917, 32937), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (32921, 32937), False, 'from unittest.mock import Mock, patch\n'), ((33514, 33534), 'pandas.Timestamp', 'pd.Timestamp', (['"""2000"""'], {}), "('2000')\n", (33526, 33534), True, 'import pandas as pd\n'), ((33548, 33568), 'pandas.Timestamp', 'pd.Timestamp', (['"""2002"""'], {}), "('2002')\n", (33560, 33568), True, 'import pandas as pd\n'), ((33582, 33602), 'pandas.Timestamp', 'pd.Timestamp', (['"""2004"""'], {}), "('2004')\n", (33594, 33602), True, 'import pandas as pd\n'), ((33969, 33986), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (33981, 33986), True, 'import pandas as pd\n'), ((35300, 35320), 'pandas.Timestamp', 'pd.Timestamp', (['"""2000"""'], {}), "('2000')\n", (35312, 35320), True, 'import pandas as pd\n'), ((35334, 35354), 'pandas.Timestamp', 'pd.Timestamp', (['"""2002"""'], {}), "('2002')\n", (35346, 35354), True, 'import pandas as pd\n'), ((35466, 35483), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (35478, 35483), True, 'import pandas as pd\n'), ((35503, 35542), 
'superset.viz.TimeTableViz', 'viz.TimeTableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (35519, 35542), True, 'import superset.viz as viz\n'), ((36145, 36165), 'pandas.Timestamp', 'pd.Timestamp', (['"""2000"""'], {}), "('2000')\n", (36157, 36165), True, 'import pandas as pd\n'), ((36179, 36199), 'pandas.Timestamp', 'pd.Timestamp', (['"""2002"""'], {}), "('2002')\n", (36191, 36199), True, 'import pandas as pd\n'), ((36376, 36393), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (36388, 36393), True, 'import pandas as pd\n'), ((36413, 36452), 'superset.viz.TimeTableViz', 'viz.TimeTableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (36429, 36452), True, 'import superset.viz as viz\n'), ((37144, 37183), 'superset.viz.TimeTableViz', 'viz.TimeTableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (37160, 37183), True, 'import superset.viz as viz\n'), ((37321, 37360), 'superset.viz.TimeTableViz', 'viz.TimeTableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (37337, 37360), True, 'import superset.viz as viz\n'), ((37653, 37693), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (37670, 37693), True, 'import superset.viz as viz\n'), ((37840, 37880), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (37857, 37880), True, 'import superset.viz as viz\n'), ((38159, 38200), 'superset.viz.DeckScatterViz', 'viz.DeckScatterViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (38177, 38200), True, 'import superset.viz as viz\n'), ((38411, 38452), 'superset.viz.DeckScatterViz', 'viz.DeckScatterViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (38429, 38452), True, 'import superset.viz as viz\n'), ((38810, 38850), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (38827, 38850), True, 'import superset.viz as viz\n'), ((39140, 39180), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (39157, 39180), True, 'import superset.viz as viz\n'), ((39588, 39628), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (39605, 39628), True, 'import superset.viz as viz\n'), ((40785, 40823), 'superset.viz.DeckGeoJson', 'viz.DeckGeoJson', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (40800, 40823), True, 'import superset.viz as viz\n'), ((41172, 41212), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (41189, 41212), True, 'import superset.viz as viz\n'), ((41747, 41787), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (41764, 41787), True, 'import superset.viz as viz\n'), ((42145, 42190), 'uuid.UUID', 'uuid.UUID', (['"""12345678123456781234567812345678"""'], {}), "('12345678123456781234567812345678')\n", (42154, 42190), False, 'import uuid\n'), ((45178, 45195), 'pandas.DataFrame', 'pd.DataFrame', (['raw'], {}), '(raw)\n', (45190, 45195), True, 'import pandas as pd\n'), ((45216, 45260), 'superset.viz.NVD3TimeSeriesViz', 'viz.NVD3TimeSeriesViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (45237, 45260), True, 'import superset.viz as viz\n'), ((49374, 49396), 'numpy.isnan', 'np.isnan', (["data[2]['y']"], 
{}), "(data[2]['y'])\n", (49382, 49396), True, 'import numpy as np\n'), ((1484, 1518), 'superset.viz.BaseViz', 'viz.BaseViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (1495, 1518), True, 'import superset.viz as viz\n'), ((12770, 12805), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (12782, 12805), True, 'import superset.viz as viz\n'), ((12980, 13015), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (12992, 13015), True, 'import superset.viz as viz\n'), ((14096, 14131), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (14108, 14131), True, 'import superset.viz as viz\n'), ((14820, 14855), 'superset.viz.TableViz', 'viz.TableViz', (['datasource', 'form_data'], {}), '(datasource, form_data)\n', (14832, 14855), True, 'import superset.viz as viz\n'), ((24487, 24493), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (24491, 24493), False, 'from unittest.mock import Mock, patch\n'), ((26685, 26691), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (26689, 26691), False, 'from unittest.mock import Mock, patch\n'), ((27983, 27989), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (27987, 27989), False, 'from unittest.mock import Mock, patch\n'), ((29310, 29316), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (29314, 29316), False, 'from unittest.mock import Mock, patch\n'), ((30448, 30454), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (30452, 30454), False, 'from unittest.mock import Mock, patch\n'), ((31457, 31463), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (31461, 31463), False, 'from unittest.mock import Mock, patch\n'), ((34079, 34085), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (34083, 34085), False, 'from unittest.mock import Mock, patch\n'), ((40428, 40473), 'superset.viz.BaseDeckGLViz', 'viz.BaseDeckGLViz', (['datasource', 'test_form_data'], {}), '(datasource, test_form_data)\n', (40445, 40473), True, 'import superset.viz as viz\n'), ((46100, 46172), 'pandas.to_datetime', 'pd.to_datetime', (["['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07']"], {}), "(['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07'])\n", (46114, 46172), True, 'import pandas as pd\n'), ((47038, 47110), 'pandas.to_datetime', 'pd.to_datetime', (["['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07']"], {}), "(['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07'])\n", (47052, 47110), True, 'import pandas as pd\n'), ((48794, 48842), 'superset.viz.BigNumberViz', 'viz.BigNumberViz', (['datasource', "{'metrics': ['y']}"], {}), "(datasource, {'metrics': ['y']})\n", (48810, 48842), True, 'import superset.viz as viz\n'), ((48903, 48929), 'pandas.Timestamp', 'pd.Timestamp', (['"""2019-01-05"""'], {}), "('2019-01-05')\n", (48915, 48929), True, 'import pandas as pd\n'), ((49297, 49345), 'superset.viz.BigNumberViz', 'viz.BigNumberViz', (['datasource', "{'metrics': ['y']}"], {}), "(datasource, {'metrics': ['y']})\n", (49313, 49345), True, 'import superset.viz as viz\n'), ((4447, 4473), 'datetime.datetime', 'datetime', (['(1960)', '(1)', '(1)', '(5)', '(0)'], {}), '(1960, 1, 1, 5, 0)\n', (4455, 4473), False, 'from datetime import datetime\n'), ((4679, 4705), 'datetime.datetime', 'datetime', (['(1960)', '(1)', '(1)', '(5)', '(0)'], {}), '(1960, 1, 1, 5, 0)\n', (4687, 4705), False, 'from datetime import datetime\n'), ((4893, 4919), 'datetime.datetime', 'datetime', (['(1960)', '(1)', '(1)', '(6)', '(0)'], {}), '(1960, 
1, 1, 6, 0)\n', (4901, 4919), False, 'from datetime import datetime\n'), ((5230, 5256), 'datetime.datetime', 'datetime', (['(1960)', '(1)', '(1)', '(0)', '(0)'], {}), '(1960, 1, 1, 0, 0)\n', (5238, 5256), False, 'from datetime import datetime\n'), ((48600, 48672), 'pandas.to_datetime', 'pd.to_datetime', (["['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07']"], {}), "(['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07'])\n", (48614, 48672), True, 'import pandas as pd\n'), ((49102, 49174), 'pandas.to_datetime', 'pd.to_datetime', (["['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07']"], {}), "(['2019-01-01', '2019-01-02', '2019-01-05', '2019-01-07'])\n", (49116, 49174), True, 'import pandas as pd\n'), ((46318, 46424), 'superset.viz.NVD3TimeSeriesViz', 'viz.NVD3TimeSeriesViz', (['datasource', "{'metrics': ['y'], 'resample_method': 'sum', 'resample_rule': '1D'}"], {}), "(datasource, {'metrics': ['y'], 'resample_method':\n 'sum', 'resample_rule': '1D'})\n", (46339, 46424), True, 'import superset.viz as viz\n'), ((46631, 46740), 'superset.viz.NVD3TimeSeriesViz', 'viz.NVD3TimeSeriesViz', (['datasource', "{'metrics': ['y'], 'resample_method': 'asfreq', 'resample_rule': '1D'}"], {}), "(datasource, {'metrics': ['y'], 'resample_method':\n 'asfreq', 'resample_rule': '1D'})\n", (46652, 46740), True, 'import superset.viz as viz\n'), ((47236, 47354), 'superset.viz.BigNumberViz', 'viz.BigNumberViz', (['datasource', "{'metrics': ['y'], 'rolling_type': 'cumsum', 'rolling_periods': 0,\n 'min_periods': 0}"], {}), "(datasource, {'metrics': ['y'], 'rolling_type': 'cumsum',\n 'rolling_periods': 0, 'min_periods': 0})\n", (47252, 47354), True, 'import superset.viz as viz\n'), ((47639, 47754), 'superset.viz.BigNumberViz', 'viz.BigNumberViz', (['datasource', "{'metrics': ['y'], 'rolling_type': 'sum', 'rolling_periods': 2,\n 'min_periods': 0}"], {}), "(datasource, {'metrics': ['y'], 'rolling_type': 'sum',\n 'rolling_periods': 2, 'min_periods': 0})\n", (47655, 47754), True, 'import superset.viz as viz\n'), ((48038, 48155), 'superset.viz.BigNumberViz', 'viz.BigNumberViz', (['datasource', "{'metrics': ['y'], 'rolling_type': 'mean', 'rolling_periods': 10,\n 'min_periods': 0}"], {}), "(datasource, {'metrics': ['y'], 'rolling_type': 'mean',\n 'rolling_periods': 10, 'min_periods': 0})\n", (48054, 48155), True, 'import superset.viz as viz\n')]
|
import sys
import typing
import numpy as np
def solve(a: np.ndarray, k: int) -> typing.NoReturn:
n = len(a)
    def compute_dp(a: np.ndarray) -> np.ndarray:
        # Boolean subset-sum table: dp[i, s] is True if some subset of the first i
        # values sums to s (only sums 0 .. k - 1 are tracked).
        dp = np.zeros((n + 1, k), np.bool8)
        dp[0, 0] = True
        for i in range(n):
            dp[i + 1] = dp[i].copy()
            dp[i + 1, a[i] :] |= dp[i, : -a[i]]
        return dp
dp_l = compute_dp(a)
dp_r = compute_dp(a[::-1])[::-1]
dp_r = dp_r.astype(np.int64).cumsum(axis=1)
cnt = 0
for p in range(n):
l, r = dp_l[p], dp_r[n - p]
x = a[p]
for i in np.flatnonzero(l).tolist():
            # Move on to the next prefix sum i unless some reachable suffix sum j
            # gives k - x <= i + j <= k - 1 (r holds cumulative counts of reachable sums).
            if (
                r[k - i - 1]
                - (0 if k - x - i - 1 < 0 else r[k - x - i - 1])
                < 1
            ):
continue
cnt += 1
break
print(n - cnt)
def main() -> typing.NoReturn:
n, k = map(int, input().split())
a = np.array(sys.stdin.readline().split(), dtype=np.int64)
solve(a, k)
main()
|
[
"sys.stdin.readline",
"numpy.zeros",
"numpy.flatnonzero"
] |
[((188, 218), 'numpy.zeros', 'np.zeros', (['(n + 1, k)', 'np.bool8'], {}), '((n + 1, k), np.bool8)\n', (196, 218), True, 'import numpy as np\n'), ((605, 622), 'numpy.flatnonzero', 'np.flatnonzero', (['l'], {}), '(l)\n', (619, 622), True, 'import numpy as np\n'), ((968, 988), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (986, 988), False, 'import sys\n')]
|
import numpy as np
from typing import Tuple, Union, Optional
from autoarray.structures.arrays.two_d import array_2d_util
from autoarray.geometry import geometry_util
from autoarray import numba_util
from autoarray.mask import mask_2d_util
@numba_util.jit()
def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]:
"""
    Returns the (y,x) centre of a grid from its slimmed 1D grid of coordinates, computed as the midpoint of the
    minimum and maximum y and x values.
Parameters
----------
grid_2d_slim
The 1D grid of values which are mapped to a 2D array.
Returns
-------
(float, float)
The (y,x) central coordinates of the grid.
"""
centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0
centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0
return centre_y, centre_x
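# A minimal usage sketch of `grid_2d_centre_from` (hypothetical helper; the 2x2 input
# below is assumed purely for illustration and is not taken from the library itself):
def _example_grid_2d_centre():
    grid_2d_slim = np.array([[1.0, -2.0], [3.0, 4.0]])
    # Midpoints of the extreme values: y -> (3.0 + 1.0) / 2 = 2.0, x -> (4.0 + -2.0) / 2 = 1.0.
    return grid_2d_centre_from(grid_2d_slim=grid_2d_slim)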
@numba_util.jit()
def grid_2d_slim_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into
a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
    scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore
removed and not included in the slimmed grid.
    Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A slimmed sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
mask = np.array([[True, False, True],
                [False, False, False],
[True, False, True]])
    grid_slim = grid_2d_slim_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size)
grid_slim = np.zeros(shape=(total_sub_pixels, 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin
)
sub_index = 0
y_sub_half = pixel_scales[0] / 2
y_sub_step = pixel_scales[0] / (sub_size)
x_sub_half = pixel_scales[1] / 2
x_sub_step = pixel_scales[1] / (sub_size)
for y in range(mask_2d.shape[0]):
for x in range(mask_2d.shape[1]):
if not mask_2d[y, x]:
y_scaled = (y - centres_scaled[0]) * pixel_scales[0]
x_scaled = (x - centres_scaled[1]) * pixel_scales[1]
for y1 in range(sub_size):
for x1 in range(sub_size):
grid_slim[sub_index, 0] = -(
y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0)
)
grid_slim[sub_index, 1] = (
x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0)
)
sub_index += 1
return grid_slim
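# A minimal sketch of calling `grid_2d_slim_via_mask_from` (hypothetical helper; the
# 3x3 mask is assumed for illustration and no exact coordinate values are asserted):
def _example_grid_2d_slim_via_mask():
    mask = np.array(
        [[True, False, True], [False, False, False], [True, False, True]]
    )
    # With sub_size=1 this should return one (y, x) coordinate per unmasked pixel,
    # ordered row by row from the top-left: five rows in total for this mask.
    return grid_2d_slim_via_mask_from(
        mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)
    )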
def grid_2d_via_mask_from(
mask_2d: np.ndarray,
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are
given values (0.0, 0.0).
Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0.
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
mask_2d
A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated
sub-grid.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
mask = np.array([[True, False, True],
                [False, False, False],
[True, False, True]])
    grid_2d = grid_2d_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0))
"""
grid_2d_slim = grid_2d_slim_via_mask_from(
mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin
)
return grid_2d_native_from(
grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size
)
def grid_2d_slim_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a
finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x)
scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are
stored in the 0 index of the second dimension, x coordinates in the 1 index.
    Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
origin
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples
--------
grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_slim_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
def grid_2d_via_shape_native_from(
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided
into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes
the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array.
The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size).
y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index.
Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0].
Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second
sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth.
Parameters
----------
shape_native
The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
    origin : (float, float)
The (y,x) origin of the 2D array, which the sub-grid is shifted around.
Returns
-------
ndarray
A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask
array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples
--------
grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0))
"""
return grid_2d_via_mask_from(
mask_2d=np.full(fill_value=False, shape=shape_native),
pixel_scales=pixel_scales,
sub_size=sub_size,
origin=origin,
)
@numba_util.jit()
def grid_scaled_2d_slim_radial_projected_from(
extent: np.ndarray,
centre: Tuple[float, float],
pixel_scales: Union[float, Tuple[float, float]],
sub_size: int,
shape_slim: Optional[int] = 0,
) -> np.ndarray:
"""
Determine a projected radial grid of points from a 2D region of coordinates defined by an
extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows:
1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of
the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes).
2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the
pixel_scale in the x dimension is used).
3) Determine the number of pixels between the centre and the edge of the region using the longest path between the
two chosen above.
    4) Create a (y,x) grid of radial points where every point shares the centre's y value and the x values step
    outwards from the centre in increments of the pixel-scale.
    5) These radial coordinates can then be rotated clockwise by a position angle where required; no rotation is
    performed inside this function, which takes no angle input.
A schematric is shown below:
-------------------
| |
|<- - - - ->x | x = centre
| | <-> = longest radial path from centre to extent edge
| |
-------------------
Using the centre x above, this function finds the longest radial path to the edge of the extent window.
The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre.
This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data
structure so that it can be used in functions which require that a 2D grid structure is input.
Parameters
----------
extent
The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax]
    centre : (float, float)
The (y,x) central coordinate which the radial grid is traced outwards from.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the 2D mask array.
sub_size
The size of the sub-grid that each pixel of the 2D mask array is divided into.
shape_slim
Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is
used (due to numba None cannot be used as a default value).
Returns
-------
ndarray
A radial set of points sampling the longest distance from the centre to the edge of the extent in along the
positive x-axis.
"""
distance_to_positive_x = extent[1] - centre[1]
distance_to_positive_y = extent[3] - centre[0]
distance_to_negative_x = centre[1] - extent[0]
distance_to_negative_y = centre[0] - extent[2]
scaled_distance = max(
[
distance_to_positive_x,
distance_to_positive_y,
distance_to_negative_x,
distance_to_negative_y,
]
)
if (scaled_distance == distance_to_positive_y) or (
scaled_distance == distance_to_negative_y
):
pixel_scale = pixel_scales[0]
else:
pixel_scale = pixel_scales[1]
if shape_slim == 0:
shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1
grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2))
grid_scaled_2d_slim_radii[:, 0] += centre[0]
radii = centre[1]
for slim_index in range(shape_slim):
grid_scaled_2d_slim_radii[slim_index, 1] = radii
radii += pixel_scale / sub_size
return grid_scaled_2d_slim_radii
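# A small sketch of the radial projected grid described above (hypothetical helper;
# the extent and centre are assumed values chosen only to illustrate the call):
def _example_radial_projected_grid():
    extent = np.array([-1.0, 1.0, -1.0, 1.0])
    # The longest path from the centre to the edge of this extent is 1.0, so with
    # pixel_scales=(0.1, 0.1) and sub_size=1 the x values step outwards in 0.1
    # increments while every point keeps the centre's y value.
    return grid_scaled_2d_slim_radial_projected_from(
        extent=extent, centre=(0.0, 0.0), pixel_scales=(0.1, 0.1), sub_size=1
    )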
@numba_util.jit()
def grid_pixels_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
    Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel coordinate values. Pixel
coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner
relative to the input scaled coordinate.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their
1D grid pixel coordinate values.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixels_2d_slim = grid_pixels_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2),
                                                   pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = (
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = (
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_centres_2d_slim = grid_pixel_centres_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_pixels_2d_slim[slim_index, 0] = int(
(-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
+ centres_scaled[0]
+ 0.5
)
grid_pixels_2d_slim[slim_index, 1] = int(
(grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
+ centres_scaled[1]
+ 0.5
)
return grid_pixels_2d_slim
@numba_util.jit()
def grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are
    returned as integers such that they are the pixel from the top-left of the 2D grid going right and then downwards.
The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,).
For example:
The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0.
    The fifth pixel on the top row, whose native index is [0,4], corresponds to slimmed pixel index 4.
    The first pixel on the second row, whose native index is [1,0], has slimmed pixel index 10 if a row has 10 pixels.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
The input and output grids are both of shape (total_pixels, 2).
Parameters
----------
grid_scaled_2d_slim: np.ndarray
The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from(
grid_scaled_2d_slim=grid_scaled_2d_slim,
shape_native=shape_native,
pixel_scales=pixel_scales,
origin=origin,
)
grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0])
for slim_index in range(grid_pixels_2d_slim.shape[0]):
grid_pixel_indexes_2d_slim[slim_index] = int(
grid_pixels_2d_slim[slim_index, 0] * shape_native[1]
+ grid_pixels_2d_slim[slim_index, 1]
)
return grid_pixel_indexes_2d_slim
@numba_util.jit()
def grid_scaled_2d_slim_from(
grid_pixels_2d_slim: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values.
The input and output grids are both slimmed and therefore shape (total_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this
origin after computing their values from the 1D grid pixel indexes.
Parameters
----------
grid_pixels_2d_slim: np.ndarray
The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2).
Examples
--------
    grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]])
    grid_scaled_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape_native=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for slim_index in range(grid_scaled_2d_slim.shape[0]):
grid_scaled_2d_slim[slim_index, 0] = (
-(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5)
* pixel_scales[0]
)
grid_scaled_2d_slim[slim_index, 1] = (
grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5
) * pixel_scales[1]
return grid_scaled_2d_slim
@numba_util.jit()
def grid_pixel_centres_2d_from(
grid_scaled_2d: np.ndarray,
shape_native: Tuple[int, int],
pixel_scales: Union[float, Tuple[float, float]],
origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
"""
Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates
are returned as integers such that they map directly to the pixel they are contained within.
The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2).
The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to
    the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
origin before computing their 1D grid pixel indexes.
Parameters
----------
grid_scaled_2d: np.ndarray
The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.
Returns
-------
ndarray
A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2).
Examples
--------
grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape_native=(2,2),
pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""
grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))
centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)
for y in range(grid_scaled_2d.shape[0]):
for x in range(grid_scaled_2d.shape[1]):
grid_pixels_2d[y, x, 0] = int(
(-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5
)
grid_pixels_2d[y, x, 1] = int(
(grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5
)
return grid_pixels_2d
@numba_util.jit()
def relocated_grid_via_jit_from(grid, border_grid):
"""
Relocate the coordinates of a grid to its border if they are outside the border, where the border is
defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).
This is performed as follows:
1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
2: Compute the radial distance of every grid coordinate from the origin.
3: For every coordinate, find its nearest pixel in the border.
4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired
border pixel's radial distance.
5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the
    border (if it is inside the border, do nothing).
The method can be used on uniform or irregular grids, however for irregular grids the border of the
'image-plane' mask is used to define border pixels.
Parameters
----------
grid : Grid2D
The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
border_grid : Grid2D
The grid of border (y,x) coordinates.
"""
grid_relocated = np.zeros(grid.shape)
grid_relocated[:, :] = grid[:, :]
border_origin = np.zeros(2)
border_origin[0] = np.mean(border_grid[:, 0])
border_origin[1] = np.mean(border_grid[:, 1])
border_grid_radii = np.sqrt(
np.add(
np.square(np.subtract(border_grid[:, 0], border_origin[0])),
np.square(np.subtract(border_grid[:, 1], border_origin[1])),
)
)
border_min_radii = np.min(border_grid_radii)
grid_radii = np.sqrt(
np.add(
np.square(np.subtract(grid[:, 0], border_origin[0])),
np.square(np.subtract(grid[:, 1], border_origin[1])),
)
)
for pixel_index in range(grid.shape[0]):
if grid_radii[pixel_index] > border_min_radii:
closest_pixel_index = np.argmin(
np.square(grid[pixel_index, 0] - border_grid[:, 0])
+ np.square(grid[pixel_index, 1] - border_grid[:, 1])
)
move_factor = (
border_grid_radii[closest_pixel_index] / grid_radii[pixel_index]
)
if move_factor < 1.0:
grid_relocated[pixel_index, :] = (
move_factor * (grid[pixel_index, :] - border_origin[:])
+ border_origin[:]
)
return grid_relocated
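# Illustrative usage sketch (hypothetical values): relocate an irregular grid against a circular
# border of unit radius around the origin.
#
#     theta = np.linspace(0.0, 2.0 * np.pi, 16, endpoint=False)
#     border_grid = np.stack((np.sin(theta), np.cos(theta)), axis=-1)  # (y,x) border ring
#     grid = np.array([[0.0, 0.5], [0.0, 3.0]])                         # second point lies outside
#     relocated = relocated_grid_via_jit_from(grid=grid, border_grid=border_grid)
#     # The first coordinate is unchanged; the second is pulled back onto the unit ring.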
@numba_util.jit()
def furthest_grid_2d_slim_index_from(
grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float]
) -> int:
    """
    Return the slim index of the coordinate in `slim_indexes` that lies furthest (in Euclidean distance) from the
    input (y,x) coordinate.
    """
    distance_to_centre = 0.0
for slim_index in slim_indexes:
y = grid_2d_slim[slim_index, 0]
x = grid_2d_slim[slim_index, 1]
distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2
if distance_to_centre_new >= distance_to_centre:
distance_to_centre = distance_to_centre_new
furthest_grid_2d_slim_index = slim_index
return furthest_grid_2d_slim_index
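# Illustrative sketch (hypothetical values): of the three coordinates below, the one at slim
# index 2 lies furthest from the origin, so its index is returned.
#
#     grid = np.array([[0.0, 1.0], [0.0, 2.0], [0.0, 5.0]])
#     furthest_grid_2d_slim_index_from(grid_2d_slim=grid, slim_indexes=np.array([0, 1, 2]),
#                                      coordinate=(0.0, 0.0))  # -> 2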
def grid_2d_slim_from(
grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked
pixels to a slimmed grid of shape [total_unmasked_pixels, 2].
The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards, such
    that for a grid of shape (3,3) where all pixels are unmasked:
- pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid.
- pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid.
    - pixel [1,0] of the 2D grid will correspond to index 3 of the 1D grid.
Parameters
----------
grid_2d_native : ndarray
The native grid of (y,x) values which are mapped to the slimmed grid.
    mask
        A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
    sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A slimmed grid of (y,x) values mapped from the 2D grid, with dimensions (total_unmasked_pixels, 2).
"""
grid_1d_slim_y = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size
)
grid_1d_slim_x = array_2d_util.array_2d_slim_from(
array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size
)
return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1)
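# Illustrative usage sketch (hypothetical `grid_native` array): with a fully unmasked 3x3 mask
# and sub_size=1, the slimmed grid lists the nine (y,x) coordinates row by row.
#
#     mask = np.full(fill_value=False, shape=(3, 3))
#     grid_slim = grid_2d_slim_from(grid_2d_native=grid_native, mask=mask, sub_size=1)
#     # grid_slim.shape == (9, 2)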
def grid_2d_native_from(
grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int
) -> np.ndarray:
"""
For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values
from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the
native 2D grid where masked values are set to zero.
This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked
pixels, for example:
- If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid.
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
- If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.
Parameters
----------
grid_2d_slim
The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
        The size (sub_size x sub_size) of each unmasked pixel's sub-array.
Returns
-------
ndarray
        A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
        grid mapped from the slimmed grid.
"""
grid_2d_native_y = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)
grid_2d_native_x = array_2d_util.array_2d_native_from(
array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)
return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
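# Illustrative round-trip sketch (hypothetical `grid_slim` and `mask` arrays): grid_2d_native_from
# inverts grid_2d_slim_from on the unmasked entries, with masked entries set to zero.
#
#     grid_native = grid_2d_native_from(grid_2d_slim=grid_slim, mask_2d=mask, sub_size=1)
#     grid_slim_again = grid_2d_slim_from(grid_2d_native=grid_native, mask=mask, sub_size=1)
#     # np.allclose(grid_slim, grid_slim_again) -> True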
@numba_util.jit()
def grid_2d_slim_upscaled_from(
grid_slim: np.ndarray,
upscale_factor: int,
pixel_scales: Union[float, Tuple[float, float]],
) -> np.ndarray:
"""
From an input slimmed 2D grid, return an upscaled slimmed 2D grid where (y,x) coordinates are added at an
upscaled resolution to each grid coordinate, analogous to a sub-grid.
Parameters
----------
grid_slim
The slimmed grid of (y,x) coordinates over which a square uniform grid is overlaid.
upscale_factor
The upscaled resolution at which the new grid coordinates are computed.
pixel_scales
        The pixel scale of the uniform grid that is laid over the irregular grid of (y,x) coordinates.
"""
grid_2d_slim_upscaled = np.zeros(
shape=(grid_slim.shape[0] * upscale_factor ** 2, 2)
)
upscale_index = 0
y_upscale_half = pixel_scales[0] / 2
y_upscale_step = pixel_scales[0] / upscale_factor
x_upscale_half = pixel_scales[1] / 2
x_upscale_step = pixel_scales[1] / upscale_factor
for slim_index in range(grid_slim.shape[0]):
y_grid = grid_slim[slim_index, 0]
x_grid = grid_slim[slim_index, 1]
for y in range(upscale_factor):
for x in range(upscale_factor):
grid_2d_slim_upscaled[upscale_index, 0] = (
y_grid
+ y_upscale_half
- y * y_upscale_step
- (y_upscale_step / 2.0)
)
grid_2d_slim_upscaled[upscale_index, 1] = (
x_grid
- x_upscale_half
+ x * x_upscale_step
+ (x_upscale_step / 2.0)
)
upscale_index += 1
return grid_2d_slim_upscaled
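# Illustrative sketch (hypothetical values): upscaling a single coordinate by a factor of 2
# replaces it with a 2x2 block of sub-coordinates centred on the original point.
#
#     grid_slim = np.array([[0.0, 0.0]])
#     upscaled = grid_2d_slim_upscaled_from(grid_slim=grid_slim, upscale_factor=2,
#                                           pixel_scales=(1.0, 1.0))
#     # upscaled.shape == (4, 2); the four points are (+/-0.25, +/-0.25).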
def grid_2d_of_points_within_radius(
radius: float, centre: Tuple[float, float], grid_2d: np.ndarray
):
y_inside = []
x_inside = []
    for i in range(len(grid_2d[:, 0])):
        # Keep only coordinates whose squared distance from the centre lies inside the radius.
        if (grid_2d[i, 0] - centre[0]) ** 2 + (
            grid_2d[i, 1] - centre[1]
        ) ** 2 < radius ** 2:
            y_inside.append(grid_2d[i, 0])
            x_inside.append(grid_2d[i, 1])
    return np.asarray([y_inside, x_inside])
def compute_polygon_area(points):
    """Compute the area of a polygon from its ordered (y,x) vertices via the shoelace formula."""
x = points[:, 1]
y = points[:, 0]
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
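# Illustrative sketch: the shoelace formula above gives 1.0 for a unit square whose (y,x)
# vertices are traversed in order.
#
#     square = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
#     compute_polygon_area(points=square)  # -> 1.0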
|
[
"autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from",
"numpy.mean",
"autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from",
"numpy.roll",
"numpy.asarray",
"autoarray.numba_util.jit",
"numpy.max",
"numpy.subtract",
"numpy.stack",
"numpy.zeros",
"numpy.square",
"autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from",
"autoarray.mask.mask_2d_util.total_sub_pixels_2d_from",
"numpy.min",
"numpy.full"
] |
[((252, 268), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (266, 268), False, 'from autoarray import numba_util\n'), ((824, 840), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (838, 840), False, 'from autoarray import numba_util\n'), ((11036, 11052), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (11050, 11052), False, 'from autoarray import numba_util\n'), ((14926, 14942), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (14940, 14942), False, 'from autoarray import numba_util\n'), ((17600, 17616), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (17614, 17616), False, 'from autoarray import numba_util\n'), ((20218, 20234), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (20232, 20234), False, 'from autoarray import numba_util\n'), ((22955, 22971), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (22969, 22971), False, 'from autoarray import numba_util\n'), ((25401, 25417), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (25415, 25417), False, 'from autoarray import numba_util\n'), ((27988, 28004), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (28002, 28004), False, 'from autoarray import numba_util\n'), ((30644, 30660), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (30658, 30660), False, 'from autoarray import numba_util\n'), ((34662, 34678), 'autoarray.numba_util.jit', 'numba_util.jit', ([], {}), '()\n', (34676, 34678), False, 'from autoarray import numba_util\n'), ((3015, 3071), 'autoarray.mask.mask_2d_util.total_sub_pixels_2d_from', 'mask_2d_util.total_sub_pixels_2d_from', (['mask_2d', 'sub_size'], {}), '(mask_2d, sub_size)\n', (3052, 3071), False, 'from autoarray.mask import mask_2d_util\n'), ((3091, 3128), 'numpy.zeros', 'np.zeros', ([], {'shape': '(total_sub_pixels, 2)'}), '(shape=(total_sub_pixels, 2))\n', (3099, 3128), True, 'import numpy as np\n'), ((3153, 3274), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'mask_2d.shape', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=mask_2d.shape,\n pixel_scales=pixel_scales, origin=origin)\n', (3200, 3274), False, 'from autoarray.geometry import geometry_util\n'), ((14632, 14657), 'numpy.zeros', 'np.zeros', (['(shape_slim, 2)'], {}), '((shape_slim, 2))\n', (14640, 14657), True, 'import numpy as np\n'), ((16936, 16979), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d_slim.shape[0], 2)'], {}), '((grid_scaled_2d_slim.shape[0], 2))\n', (16944, 16979), True, 'import numpy as np\n'), ((17004, 17124), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (17051, 17124), False, 'from autoarray.geometry import geometry_util\n'), ((19548, 19591), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d_slim.shape[0], 2)'], {}), '((grid_scaled_2d_slim.shape[0], 2))\n', (19556, 19591), True, 'import numpy as np\n'), ((19616, 19736), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (19663, 19736), False, 'from 
autoarray.geometry import geometry_util\n'), ((22623, 22661), 'numpy.zeros', 'np.zeros', (['grid_pixels_2d_slim.shape[0]'], {}), '(grid_pixels_2d_slim.shape[0])\n', (22631, 22661), True, 'import numpy as np\n'), ((24778, 24821), 'numpy.zeros', 'np.zeros', (['(grid_pixels_2d_slim.shape[0], 2)'], {}), '((grid_pixels_2d_slim.shape[0], 2))\n', (24786, 24821), True, 'import numpy as np\n'), ((24846, 24966), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (24893, 24966), False, 'from autoarray.geometry import geometry_util\n'), ((27341, 27404), 'numpy.zeros', 'np.zeros', (['(grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)'], {}), '((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))\n', (27349, 27404), True, 'import numpy as np\n'), ((27429, 27549), 'autoarray.geometry.geometry_util.central_scaled_coordinate_2d_from', 'geometry_util.central_scaled_coordinate_2d_from', ([], {'shape_native': 'shape_native', 'pixel_scales': 'pixel_scales', 'origin': 'origin'}), '(shape_native=shape_native,\n pixel_scales=pixel_scales, origin=origin)\n', (27476, 27549), False, 'from autoarray.geometry import geometry_util\n'), ((29283, 29303), 'numpy.zeros', 'np.zeros', (['grid.shape'], {}), '(grid.shape)\n', (29291, 29303), True, 'import numpy as np\n'), ((29366, 29377), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (29374, 29377), True, 'import numpy as np\n'), ((29402, 29428), 'numpy.mean', 'np.mean', (['border_grid[:, 0]'], {}), '(border_grid[:, 0])\n', (29409, 29428), True, 'import numpy as np\n'), ((29453, 29479), 'numpy.mean', 'np.mean', (['border_grid[:, 1]'], {}), '(border_grid[:, 1])\n', (29460, 29479), True, 'import numpy as np\n'), ((29721, 29746), 'numpy.min', 'np.min', (['border_grid_radii'], {}), '(border_grid_radii)\n', (29727, 29746), True, 'import numpy as np\n'), ((32486, 32596), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from', 'array_2d_util.array_2d_slim_from', ([], {'array_2d_native': 'grid_2d_native[:, :, 0]', 'mask_2d': 'mask', 'sub_size': 'sub_size'}), '(array_2d_native=grid_2d_native[:, :, 0],\n mask_2d=mask, sub_size=sub_size)\n', (32518, 32596), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((32633, 32743), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_slim_from', 'array_2d_util.array_2d_slim_from', ([], {'array_2d_native': 'grid_2d_native[:, :, 1]', 'mask_2d': 'mask', 'sub_size': 'sub_size'}), '(array_2d_native=grid_2d_native[:, :, 1],\n mask_2d=mask, sub_size=sub_size)\n', (32665, 32743), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((32770, 32821), 'numpy.stack', 'np.stack', (['(grid_1d_slim_y, grid_1d_slim_x)'], {'axis': '(-1)'}), '((grid_1d_slim_y, grid_1d_slim_x), axis=-1)\n', (32778, 32821), True, 'import numpy as np\n'), ((34318, 34426), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from', 'array_2d_util.array_2d_native_from', ([], {'array_2d_slim': 'grid_2d_slim[:, 0]', 'mask_2d': 'mask_2d', 'sub_size': 'sub_size'}), '(array_2d_slim=grid_2d_slim[:, 0],\n mask_2d=mask_2d, sub_size=sub_size)\n', (34352, 34426), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((34465, 34573), 'autoarray.structures.arrays.two_d.array_2d_util.array_2d_native_from', 'array_2d_util.array_2d_native_from', ([], 
{'array_2d_slim': 'grid_2d_slim[:, 1]', 'mask_2d': 'mask_2d', 'sub_size': 'sub_size'}), '(array_2d_slim=grid_2d_slim[:, 1],\n mask_2d=mask_2d, sub_size=sub_size)\n', (34499, 34573), False, 'from autoarray.structures.arrays.two_d import array_2d_util\n'), ((34600, 34655), 'numpy.stack', 'np.stack', (['(grid_2d_native_y, grid_2d_native_x)'], {'axis': '(-1)'}), '((grid_2d_native_y, grid_2d_native_x), axis=-1)\n', (34608, 34655), True, 'import numpy as np\n'), ((35435, 35496), 'numpy.zeros', 'np.zeros', ([], {'shape': '(grid_slim.shape[0] * upscale_factor ** 2, 2)'}), '(shape=(grid_slim.shape[0] * upscale_factor ** 2, 2))\n', (35443, 35496), True, 'import numpy as np\n'), ((36925, 36955), 'numpy.asarray', 'np.asarray', (['y_inside', 'x_inside'], {}), '(y_inside, x_inside)\n', (36935, 36955), True, 'import numpy as np\n'), ((644, 670), 'numpy.max', 'np.max', (['grid_2d_slim[:, 0]'], {}), '(grid_2d_slim[:, 0])\n', (650, 670), True, 'import numpy as np\n'), ((673, 699), 'numpy.min', 'np.min', (['grid_2d_slim[:, 0]'], {}), '(grid_2d_slim[:, 0])\n', (679, 699), True, 'import numpy as np\n'), ((724, 750), 'numpy.max', 'np.max', (['grid_2d_slim[:, 1]'], {}), '(grid_2d_slim[:, 1])\n', (730, 750), True, 'import numpy as np\n'), ((753, 779), 'numpy.min', 'np.min', (['grid_2d_slim[:, 1]'], {}), '(grid_2d_slim[:, 1])\n', (759, 779), True, 'import numpy as np\n'), ((8748, 8793), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'shape_native'}), '(fill_value=False, shape=shape_native)\n', (8755, 8793), True, 'import numpy as np\n'), ((10888, 10933), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'shape_native'}), '(fill_value=False, shape=shape_native)\n', (10895, 10933), True, 'import numpy as np\n'), ((29554, 29602), 'numpy.subtract', 'np.subtract', (['border_grid[:, 0]', 'border_origin[0]'], {}), '(border_grid[:, 0], border_origin[0])\n', (29565, 29602), True, 'import numpy as np\n'), ((29628, 29676), 'numpy.subtract', 'np.subtract', (['border_grid[:, 1]', 'border_origin[1]'], {}), '(border_grid[:, 1], border_origin[1])\n', (29639, 29676), True, 'import numpy as np\n'), ((29816, 29857), 'numpy.subtract', 'np.subtract', (['grid[:, 0]', 'border_origin[0]'], {}), '(grid[:, 0], border_origin[0])\n', (29827, 29857), True, 'import numpy as np\n'), ((29883, 29924), 'numpy.subtract', 'np.subtract', (['grid[:, 1]', 'border_origin[1]'], {}), '(grid[:, 1], border_origin[1])\n', (29894, 29924), True, 'import numpy as np\n'), ((30116, 30167), 'numpy.square', 'np.square', (['(grid[pixel_index, 0] - border_grid[:, 0])'], {}), '(grid[pixel_index, 0] - border_grid[:, 0])\n', (30125, 30167), True, 'import numpy as np\n'), ((30187, 30238), 'numpy.square', 'np.square', (['(grid[pixel_index, 1] - border_grid[:, 1])'], {}), '(grid[pixel_index, 1] - border_grid[:, 1])\n', (30196, 30238), True, 'import numpy as np\n'), ((37078, 37091), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (37085, 37091), True, 'import numpy as np\n'), ((37105, 37118), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (37112, 37118), True, 'import numpy as np\n')]
|
import logging
import george
import numpy as np
from robo.priors.default_priors import DefaultPrior
from robo.models.gaussian_process import GaussianProcess
from robo.models.gaussian_process_mcmc import GaussianProcessMCMC
from robo.maximizers.random_sampling import RandomSampling
from robo.maximizers.scipy_optimizer import SciPyOptimizer
from robo.maximizers.differential_evolution import DifferentialEvolution
from robo.solver.bayesian_optimization import BayesianOptimization
from robo.acquisition_functions.information_gain import InformationGain
from robo.acquisition_functions.ei import EI
from robo.acquisition_functions.marginalization import MarginalizationGPMCMC
from robo.initial_design import init_latin_hypercube_sampling
logger = logging.getLogger(__name__)
def entropy_search(objective_function, lower, upper, num_iterations=30,
maximizer="random", model="gp_mcmc",
n_init=3, output_path=None, rng=None):
"""
    Entropy search for global black box optimization problems. This is a reimplementation of the entropy search
    algorithm by Hennig and Schuler [1].
[1] Entropy search for information-efficient global optimization.
<NAME> and <NAME>.
JMLR, (1), 2012.
Parameters
----------
objective_function: function
The objective function that is minimized. This function gets a numpy array (D,) as input and returns
the function value (scalar)
lower: np.ndarray (D,)
The lower bound of the search space
upper: np.ndarray (D,)
The upper bound of the search space
num_iterations: int
The number of iterations (initial design + BO)
maximizer: {"random", "scipy", "differential_evolution"}
Defines how the acquisition function is maximized.
model: {"gp", "gp_mcmc"}
The model for the objective function.
n_init: int
Number of points for the initial design. Make sure that it is <= num_iterations.
output_path: string
Specifies the path where the intermediate output after each iteration will be saved.
If None no output will be saved to disk.
rng: numpy.random.RandomState
Random number generator
Returns
-------
dict with all results
"""
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, "Number of initial design points has to be <= the number of iterations"
if rng is None:
rng = np.random.RandomState(np.random.randint(0, 10000))
cov_amp = 2
n_dims = lower.shape[0]
initial_ls = np.ones([n_dims])
exp_kernel = george.kernels.Matern52Kernel(initial_ls,
ndim=n_dims)
kernel = cov_amp * exp_kernel
prior = DefaultPrior(len(kernel) + 1)
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
if model == "gp":
gp = GaussianProcess(kernel, prior=prior, rng=rng,
normalize_output=False, normalize_input=True,
lower=lower, upper=upper)
elif model == "gp_mcmc":
gp = GaussianProcessMCMC(kernel, prior=prior,
n_hypers=n_hypers,
chain_length=200,
burnin_steps=100,
normalize_input=True,
normalize_output=False,
rng=rng, lower=lower, upper=upper)
else:
print("ERROR: %s is not a valid model!" % model)
return
a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)
if model == "gp":
acquisition_func = a
elif model == "gp_mcmc":
acquisition_func = MarginalizationGPMCMC(a)
if maximizer == "random":
max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
elif maximizer == "scipy":
max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
elif maximizer == "differential_evolution":
max_func = DifferentialEvolution(acquisition_func, lower, upper, rng=rng)
else:
print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
return
bo = BayesianOptimization(objective_function, lower, upper, acquisition_func, gp, max_func,
initial_design=init_latin_hypercube_sampling,
initial_points=n_init, rng=rng, output_path=output_path)
x_best, f_min = bo.run(num_iterations)
results = dict()
results["x_opt"] = x_best
results["f_opt"] = f_min
results["incumbents"] = [inc for inc in bo.incumbents]
results["incumbent_values"] = [val for val in bo.incumbents_values]
results["runtime"] = bo.runtime
results["overhead"] = bo.time_overhead
results["X"] = [x.tolist() for x in bo.X]
results["y"] = [y for y in bo.y]
return results
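# Illustrative usage sketch (hypothetical objective function): minimise a simple quadratic over
# [-5, 5]^2 with the plain GP model; the argument names follow the docstring above.
#
#     def objective(x):
#         return float(np.sum(x ** 2))
#
#     results = entropy_search(objective, lower=np.array([-5.0, -5.0]), upper=np.array([5.0, 5.0]),
#                              num_iterations=20, n_init=3, model="gp", maximizer="random")
#     print(results["x_opt"], results["f_opt"])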
|
[
"logging.getLogger",
"george.kernels.Matern52Kernel",
"numpy.ones",
"robo.maximizers.random_sampling.RandomSampling",
"robo.models.gaussian_process_mcmc.GaussianProcessMCMC",
"robo.maximizers.differential_evolution.DifferentialEvolution",
"robo.acquisition_functions.information_gain.InformationGain",
"numpy.random.randint",
"robo.models.gaussian_process.GaussianProcess",
"robo.solver.bayesian_optimization.BayesianOptimization",
"robo.acquisition_functions.marginalization.MarginalizationGPMCMC",
"numpy.all",
"robo.maximizers.scipy_optimizer.SciPyOptimizer"
] |
[((748, 775), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (765, 775), False, 'import logging\n'), ((2351, 2372), 'numpy.all', 'np.all', (['(lower < upper)'], {}), '(lower < upper)\n', (2357, 2372), True, 'import numpy as np\n'), ((2665, 2682), 'numpy.ones', 'np.ones', (['[n_dims]'], {}), '([n_dims])\n', (2672, 2682), True, 'import numpy as np\n'), ((2700, 2754), 'george.kernels.Matern52Kernel', 'george.kernels.Matern52Kernel', (['initial_ls'], {'ndim': 'n_dims'}), '(initial_ls, ndim=n_dims)\n', (2729, 2754), False, 'import george\n'), ((3679, 3749), 'robo.acquisition_functions.information_gain.InformationGain', 'InformationGain', (['gp'], {'lower': 'lower', 'upper': 'upper', 'sampling_acquisition': 'EI'}), '(gp, lower=lower, upper=upper, sampling_acquisition=EI)\n', (3694, 3749), False, 'from robo.acquisition_functions.information_gain import InformationGain\n'), ((4361, 4559), 'robo.solver.bayesian_optimization.BayesianOptimization', 'BayesianOptimization', (['objective_function', 'lower', 'upper', 'acquisition_func', 'gp', 'max_func'], {'initial_design': 'init_latin_hypercube_sampling', 'initial_points': 'n_init', 'rng': 'rng', 'output_path': 'output_path'}), '(objective_function, lower, upper, acquisition_func, gp,\n max_func, initial_design=init_latin_hypercube_sampling, initial_points=\n n_init, rng=rng, output_path=output_path)\n', (4381, 4559), False, 'from robo.solver.bayesian_optimization import BayesianOptimization\n'), ((2995, 3116), 'robo.models.gaussian_process.GaussianProcess', 'GaussianProcess', (['kernel'], {'prior': 'prior', 'rng': 'rng', 'normalize_output': '(False)', 'normalize_input': '(True)', 'lower': 'lower', 'upper': 'upper'}), '(kernel, prior=prior, rng=rng, normalize_output=False,\n normalize_input=True, lower=lower, upper=upper)\n', (3010, 3116), False, 'from robo.models.gaussian_process import GaussianProcess\n'), ((3933, 3988), 'robo.maximizers.random_sampling.RandomSampling', 'RandomSampling', (['acquisition_func', 'lower', 'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (3947, 3988), False, 'from robo.maximizers.random_sampling import RandomSampling\n'), ((2573, 2600), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2590, 2600), True, 'import numpy as np\n'), ((3213, 3398), 'robo.models.gaussian_process_mcmc.GaussianProcessMCMC', 'GaussianProcessMCMC', (['kernel'], {'prior': 'prior', 'n_hypers': 'n_hypers', 'chain_length': '(200)', 'burnin_steps': '(100)', 'normalize_input': '(True)', 'normalize_output': '(False)', 'rng': 'rng', 'lower': 'lower', 'upper': 'upper'}), '(kernel, prior=prior, n_hypers=n_hypers, chain_length=\n 200, burnin_steps=100, normalize_input=True, normalize_output=False,\n rng=rng, lower=lower, upper=upper)\n', (3232, 3398), False, 'from robo.models.gaussian_process_mcmc import GaussianProcessMCMC\n'), ((3858, 3882), 'robo.acquisition_functions.marginalization.MarginalizationGPMCMC', 'MarginalizationGPMCMC', (['a'], {}), '(a)\n', (3879, 3882), False, 'from robo.acquisition_functions.marginalization import MarginalizationGPMCMC\n'), ((4039, 4094), 'robo.maximizers.scipy_optimizer.SciPyOptimizer', 'SciPyOptimizer', (['acquisition_func', 'lower', 'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (4053, 4094), False, 'from robo.maximizers.scipy_optimizer import SciPyOptimizer\n'), ((4162, 4224), 'robo.maximizers.differential_evolution.DifferentialEvolution', 'DifferentialEvolution', (['acquisition_func', 'lower', 
'upper'], {'rng': 'rng'}), '(acquisition_func, lower, upper, rng=rng)\n', (4183, 4224), False, 'from robo.maximizers.differential_evolution import DifferentialEvolution\n')]
|
"""
Functions for loading input data.
Author: <NAME> <<EMAIL>>
"""
import os
import numpy as np
def load_img(path: str, img_nums: list, shape: tuple) -> np.array:
"""
    Loads images in the human-readable format.
    Args:
        path:
            The path to the folder with mnist images.
img_nums:
A list with the numbers of the images we want to load.
shape:
The shape of a single image.
Returns:
        The images as a numpy array of shape (M, *shape).
"""
images = np.zeros((len(img_nums), *shape), dtype=float)
for idx, i in enumerate(img_nums):
file = os.path.join(path, "image" + str(i))
with open(file, "r") as f:
data = [float(pixel) for pixel in f.readlines()[0].split(",")[:-1]]
images[idx, :, :] = np.array(data).reshape(*shape)
return images
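# Illustrative usage sketch (hypothetical path): load the first two images from a folder of
# neurify-format files named image1, image2, ...
#
#     imgs = load_img("resources/images/mnist", img_nums=[1, 2], shape=(28, 28))
#     # imgs.shape == (2, 28, 28)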
def load_mnist_human_readable(path: str, img_nums: list) -> np.array:
"""
    Loads mnist images from the neurify dataset.
Args:
path:
            The path to the folder with mnist images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx28x28 numpy array.
"""
return load_img(path, img_nums, (28, 28))
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads the Cifar10 images in human readable format.
Args:
path:
            The path to the folder with cifar10 images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx3x32x32 numpy array.
"""
return load_img(path, img_nums, (3, 32, 32))
def load_images_eran(img_csv: str = "../../resources/images/cifar10_test.csv", num_images: int = 100,
image_shape: tuple = (3, 32, 32)) -> tuple:
"""
    Loads the images from the eran csv.
    Args:
        img_csv:
            The path to the csv file with the images and targets.
        num_images:
            The number of images to load.
        image_shape:
            The shape of a single image.
    Returns:
        images, targets
    """
images_array = np.zeros((num_images, np.prod(image_shape)), dtype=np.float32)
targets_array = np.zeros(num_images, dtype=int)
with open(img_csv, "r") as file:
for j in range(num_images):
line_arr = file.readline().split(",")
targets_array[j] = int(line_arr[0])
images_array[j] = [float(pixel) for pixel in line_arr[1:]]
return images_array.reshape((num_images, *image_shape)), targets_array
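# Illustrative usage sketch (hypothetical path):
#
#     images, targets = load_images_eran(img_csv="cifar10_test.csv", num_images=10)
#     # images.shape == (10, 3, 32, 32); targets.shape == (10,)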
|
[
"numpy.array",
"numpy.prod",
"numpy.zeros"
] |
[((2132, 2163), 'numpy.zeros', 'np.zeros', (['num_images'], {'dtype': 'int'}), '(num_images, dtype=int)\n', (2140, 2163), True, 'import numpy as np\n'), ((2071, 2091), 'numpy.prod', 'np.prod', (['image_shape'], {}), '(image_shape)\n', (2078, 2091), True, 'import numpy as np\n'), ((821, 835), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (829, 835), True, 'import numpy as np\n')]
|
import numpy as np
from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd
def kepler_chi(chi, alpha, r0, vr0, mu, dt):
''' Kepler's Equation of the universal anomaly, modified
for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
(1 - alpha*r0)*chi**3*S(z) + \
r0*chi - np.sqrt(mu)*dt
def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
''' Derivative of Kepler's Equation of the universal anomaly,
modified for use in numerical solvers. '''
z = alpha*chi**2
return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
(1 - alpha*r0)*chi**2*C(z) + r0
def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
''' Second derivative of Kepler's Equation of the universal
anomaly, modified for use in numerical solvers. '''
z = alpha*chi**2
S_ = S(z)
return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
chi*(1 - z*S_)*(1 - alpha*r0)
def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
''' Solve Kepler's Equation of the universal anomaly chi using the specified
numerical method. Applies Algorithm 3.4 from Orbital Mechanics for Engineering
Students, 4 ed, Curtis.
:param r_0: `iterable` (km) initial position 3-vector
:param v_0: `iterable` (km/s) initial velocity 3-vector
:param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
:param body: `CelestialBody` (--) the celestial body to use for orbital parameters
:param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 754 single precision)
:param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
:return: (km) final position 3-vector, (km/s) final velocity 3-vector
'''
VALID_METHODS = ('laguerre', 'newton')
mu = body.mu # (km**3/s**2) gravitational parameter of the specified primary body
r0 = np.linalg.norm(r_0) # (km) initial position magnitude
v0 = np.linalg.norm(v_0) # (km/s) initial velocity magnitude
vr0 = np.dot(v_0, r_0)/r0 # (km/s) initial radial velocity magnitude
alpha = 2/r0 - v0**2/mu # (1/km) inverse of semi-major axis
chi0 = np.sqrt(mu)*np.abs(alpha)*dt
if method not in VALID_METHODS:
print(f'Method \'{method}\' is not valid, must be one of {VALID_METHODS}.\nDefaulting to laguerre method.')
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
elif method == 'newton':
chi, _, _ = newton(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, mu, dt)
else: # method == 'laguerre'
chi, _, _ = laguerre(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt)
f = calc_f(chi, r0, alpha)
g = calc_g(dt, mu, chi, alpha)
r_1 = f*r_0 + g*v_0
r1 = np.linalg.norm(r_1)
fd = calc_fd(mu, r1, r0, alpha, chi)
gd = calc_gd(chi, r1, alpha)
v_1 = fd*r_0 + gd*v_0
return r_1, v_1
def solve_kepler_E(e, Me, tol=1e-7, max_iters=100):
''' Solve Kepler's Equation in the form containing Eccentric Anomaly (E),
eccentricity (e), and Mean Anomaly of Ellipse (Me). Uses Algorithm 3.1 from Orbital
Mechanics for Engineering Students, 4 ed, Curtis. '''
# TODO: have this function make use of one of the numerical methods in numerical.py
def f(E, e, Me):
return E - e*np.sin(E) - Me
def fp(E, e):
return 1 - e*np.cos(E)
E = Me + e/2 if Me < np.pi else Me - e/2
ratio = f(E, e, Me)/fp(E, e)
iters = 0
while abs(ratio) > tol and iters < max_iters:
E -= ratio
ratio = f(E, e, Me)/fp(E, e)
iters += 1
E -= ratio
converged = np.abs(ratio) <= tol
return E, iters, converged
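# Illustrative usage sketch (hypothetical values): solve for the eccentric anomaly of an orbit
# with eccentricity 0.3 and mean anomaly pi/2.
#
#     E, iters, converged = solve_kepler_E(e=0.3, Me=np.pi / 2)
#     # When converged is True, E satisfies E - e*sin(E) = Me to within the default tolerance.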
def test():
''' Test the functionality of solve_kepler_chi
and solve_kepler_laguerre using Problem 3.20 from
Orbital Mechanics for Engineering Students, 4 ed, Curtis.
'''
# given starting information
Earth = BODIES['Earth'] # `CelestialBody` (--) Earth and all the Earth things
r_0 = np.array([20000, -105000, -19000]) # (km) initial position vector
v_0 = np.array([0.9, -3.4, -1.5]) # (km/s) initial velocity vector
dt = 2*60*60 # (s) time of interest after initial time
# given correct answer from textbook
correct_r_1 = np.array([26338, -128750, -29656]) # (km) final position vector
correct_v_1 = np.array([0.86280, -3.2116, -1.4613]) # (km/s) final velocity vector
# solve using above methods
r_n, v_n = solve_kepler_chi(r_0, v_0, dt, Earth, method='newton')
r_l, v_l = solve_kepler_chi(r_0, v_0, dt, Earth, method='laguerre')
# check correctness
# tolerance based on significant figures of given answers
newton_valid = np.allclose(r_n, correct_r_1, atol=1) and np.allclose(v_n, correct_v_1, atol=1e-4)
laguerre_valid = np.allclose(r_l, correct_r_1, atol=1) and np.allclose(v_l, correct_v_1, atol=1e-4)
return all([newton_valid, laguerre_valid])
if __name__ == '__main__':
print(test())
|
[
"numpy.abs",
"numpy.allclose",
"numpy.sqrt",
"lagrange.calc_f",
"numerical.laguerre",
"lagrange.calc_g",
"numpy.array",
"numpy.dot",
"numerical.newton",
"lagrange.calc_fd",
"lagrange.calc_gd",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"stumpff.S",
"stumpff.C"
] |
[((953, 957), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (954, 957), False, 'from stumpff import C, S\n'), ((2171, 2190), 'numpy.linalg.norm', 'np.linalg.norm', (['r_0'], {}), '(r_0)\n', (2185, 2190), True, 'import numpy as np\n'), ((2235, 2254), 'numpy.linalg.norm', 'np.linalg.norm', (['v_0'], {}), '(v_0)\n', (2249, 2254), True, 'import numpy as np\n'), ((2982, 3004), 'lagrange.calc_f', 'calc_f', (['chi', 'r0', 'alpha'], {}), '(chi, r0, alpha)\n', (2988, 3004), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3013, 3039), 'lagrange.calc_g', 'calc_g', (['dt', 'mu', 'chi', 'alpha'], {}), '(dt, mu, chi, alpha)\n', (3019, 3039), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3073, 3092), 'numpy.linalg.norm', 'np.linalg.norm', (['r_1'], {}), '(r_1)\n', (3087, 3092), True, 'import numpy as np\n'), ((3103, 3134), 'lagrange.calc_fd', 'calc_fd', (['mu', 'r1', 'r0', 'alpha', 'chi'], {}), '(mu, r1, r0, alpha, chi)\n', (3110, 3134), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((3144, 3167), 'lagrange.calc_gd', 'calc_gd', (['chi', 'r1', 'alpha'], {}), '(chi, r1, alpha)\n', (3151, 3167), False, 'from lagrange import calc_f, calc_fd, calc_g, calc_gd\n'), ((4309, 4343), 'numpy.array', 'np.array', (['[20000, -105000, -19000]'], {}), '([20000, -105000, -19000])\n', (4317, 4343), True, 'import numpy as np\n'), ((4386, 4413), 'numpy.array', 'np.array', (['[0.9, -3.4, -1.5]'], {}), '([0.9, -3.4, -1.5])\n', (4394, 4413), True, 'import numpy as np\n'), ((4568, 4602), 'numpy.array', 'np.array', (['[26338, -128750, -29656]'], {}), '([26338, -128750, -29656])\n', (4576, 4602), True, 'import numpy as np\n'), ((4651, 4687), 'numpy.array', 'np.array', (['[0.8628, -3.2116, -1.4613]'], {}), '([0.8628, -3.2116, -1.4613])\n', (4659, 4687), True, 'import numpy as np\n'), ((2302, 2318), 'numpy.dot', 'np.dot', (['v_0', 'r_0'], {}), '(v_0, r_0)\n', (2308, 2318), True, 'import numpy as np\n'), ((2645, 2730), 'numerical.laguerre', 'laguerre', (['chi0', 'kepler_chi', 'dkepler_dchi', 'd2kepler_dchi2', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt\n )\n', (2653, 2730), False, 'from numerical import newton, laguerre\n'), ((3940, 3953), 'numpy.abs', 'np.abs', (['ratio'], {}), '(ratio)\n', (3946, 3953), True, 'import numpy as np\n'), ((5002, 5039), 'numpy.allclose', 'np.allclose', (['r_n', 'correct_r_1'], {'atol': '(1)'}), '(r_n, correct_r_1, atol=1)\n', (5013, 5039), True, 'import numpy as np\n'), ((5044, 5086), 'numpy.allclose', 'np.allclose', (['v_n', 'correct_v_1'], {'atol': '(0.0001)'}), '(v_n, correct_v_1, atol=0.0001)\n', (5055, 5086), True, 'import numpy as np\n'), ((5106, 5143), 'numpy.allclose', 'np.allclose', (['r_l', 'correct_r_1'], {'atol': '(1)'}), '(r_l, correct_r_1, atol=1)\n', (5117, 5143), True, 'import numpy as np\n'), ((5148, 5190), 'numpy.allclose', 'np.allclose', (['v_l', 'correct_v_1'], {'atol': '(0.0001)'}), '(v_l, correct_v_1, atol=0.0001)\n', (5159, 5190), True, 'import numpy as np\n'), ((447, 458), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (454, 458), True, 'import numpy as np\n'), ((2443, 2454), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (2450, 2454), True, 'import numpy as np\n'), ((2455, 2468), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (2461, 2468), True, 'import numpy as np\n'), ((2775, 2837), 'numerical.newton', 'newton', (['chi0', 'kepler_chi', 'dkepler_dchi', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, alpha, r0, vr0, 
mu, dt)\n', (2781, 2837), False, 'from numerical import newton, laguerre\n'), ((2892, 2977), 'numerical.laguerre', 'laguerre', (['chi0', 'kepler_chi', 'dkepler_dchi', 'd2kepler_dchi2', 'alpha', 'r0', 'vr0', 'mu', 'dt'], {}), '(chi0, kepler_chi, dkepler_dchi, d2kepler_dchi2, alpha, r0, vr0, mu, dt\n )\n', (2900, 2977), False, 'from numerical import newton, laguerre\n'), ((742, 746), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (743, 746), False, 'from stumpff import C, S\n'), ((977, 988), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (984, 988), True, 'import numpy as np\n'), ((3680, 3689), 'numpy.cos', 'np.cos', (['E'], {}), '(E)\n', (3686, 3689), True, 'import numpy as np\n'), ((376, 380), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (377, 380), False, 'from stumpff import C, S\n'), ((418, 422), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (419, 422), False, 'from stumpff import C, S\n'), ((3625, 3634), 'numpy.sin', 'np.sin', (['E'], {}), '(E)\n', (3631, 3634), True, 'import numpy as np\n'), ((664, 675), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (671, 675), True, 'import numpy as np\n'), ((699, 703), 'stumpff.S', 'S', (['z'], {}), '(z)\n', (700, 703), False, 'from stumpff import C, S\n'), ((1007, 1011), 'stumpff.C', 'C', (['z'], {}), '(z)\n', (1008, 1011), False, 'from stumpff import C, S\n'), ((356, 367), 'numpy.sqrt', 'np.sqrt', (['mu'], {}), '(mu)\n', (363, 367), True, 'import numpy as np\n')]
|
import io
import logging
import json
import numpy
import torch
import numpy as np
from tqdm import tqdm
from clie.inputters import constant
from clie.objects import Sentence
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
logger = logging.getLogger(__name__)
def load_word_embeddings(file):
embeddings_index = {}
fin = io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
n, d = map(int, fin.readline().split())
for i, line in tqdm(enumerate(fin), total=n):
tokens = line.rstrip().split(' ')
v = numpy.array(tokens[1:], dtype=float)
embeddings_index[tokens[0]] = v
return embeddings_index
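# Illustrative usage sketch (hypothetical path): the embedding file is read as a header line
# "count dimension" followed by "token v1 v2 ..." lines (fastText .vec style).
#
#     embeddings = load_word_embeddings('wiki.en.align.vec')
#     vector = embeddings['the']  # numpy array of length d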
# ------------------------------------------------------------------------------
# Data loading
# ------------------------------------------------------------------------------
def load_data(filename, src_lang, tgt_lang, knn_file,
knn_size, max_examples=-1):
examples = []
wrong_subj_pos, wrong_obj_pos = 0, 0
with open(filename) as f:
data = json.load(f)
knn_dict = None
if knn_file:
with open(knn_file) as f:
knn_dict = json.load(f)
for idx, ex in enumerate(tqdm(data, total=len(data))):
sentence = Sentence(ex['id'])
sentence.language = src_lang
sentence.words = ex['token']
sentence.pos = ex['stanford_pos']
sentence.ner = ex['stanford_ner']
sentence.deprel = ex['stanford_deprel']
sentence.head = [int(x) for x in ex['stanford_head']]
sentence.subj_type = ex['subj_type']
sentence.obj_type = ex['obj_type']
sentence.relation = ex['relation']
if ex['subj_end'] - ex['subj_start'] < 0:
# we swap the start and end index
wrong_subj_pos += 1
sentence.subject = [ex['subj_end'], ex['subj_start']]
else:
sentence.subject = [ex['subj_start'], ex['subj_end']]
if ex['obj_end'] - ex['obj_start'] < 0:
# we swap the start and end index
wrong_obj_pos += 1
sentence.object = [ex['obj_end'], ex['obj_start']]
else:
sentence.object = [ex['obj_start'], ex['obj_end']]
# store KNN word info
if knn_dict:
sentence.tgt_lang = tgt_lang
knn_words = []
for w in ex['token']:
w = '!{}_{}'.format(src_lang, w)
if w in knn_dict:
assert len(knn_dict[w]) == knn_size
knn_words.append(knn_dict[w])
else:
knn_words.append([constant.UNK_WORD] * knn_size)
sentence.knn_words = knn_words
examples.append(sentence)
if max_examples != -1 and len(examples) > max_examples:
break
if wrong_subj_pos > 0 or wrong_obj_pos > 0:
logger.info('{} and {} wrong subject and object positions found!'.format(
wrong_subj_pos, wrong_obj_pos))
return examples
def vectorize(ex, model, iseval):
"""Torchify a single example."""
words = ['!{}_{}'.format(ex.language, w) for w in ex.words]
words = [model.word_dict[w] for w in words]
knn_word = None
if ex.knn_words:
knn_word = [[model.word_dict[w] for w in knn]
for knn in ex.knn_words]
knn_word = torch.LongTensor(knn_word)
word = torch.LongTensor(words)
pos = torch.LongTensor([model.pos_dict[p] for p in ex.pos])
ner = torch.LongTensor([model.ner_dict[n] for n in ex.ner])
deprel = torch.LongTensor([model.deprel_dict[d] for d in ex.deprel])
assert any([x == 0 for x in ex.head])
head = torch.LongTensor(ex.head)
subj_position = torch.LongTensor(ex.subj_position)
obj_position = torch.LongTensor(ex.obj_position)
type = [0] * len(ex.words)
ttype = model.type_dict[ex.subj_type]
start, end = ex.subject
type[start: end + 1] = [ttype] * (end - start + 1)
atype = model.type_dict[ex.obj_type]
start, end = ex.object
type[start: end + 1] = [atype] * (end - start + 1)
type = torch.LongTensor(type)
return {
'id': ex.id,
'language': ex.language,
'word': word,
'pos': pos,
'ner': ner,
'deprel': deprel,
'type': type,
'head': head,
'subject': ex.subj_text,
'object': ex.obj_text,
'subject_pos': subj_position,
'object_pos': obj_position,
'relation': model.label_dict[ex.relation],
'knn_word': knn_word
}
def batchify(batch):
"""Gather a batch of individual examples into one batch."""
# batch is a list of vectorized examples
batch_size = len(batch)
ids = [ex['id'] for ex in batch]
language = [ex['language'] for ex in batch]
use_knn = batch[0]['knn_word'] is not None
# NOTE. batch[0]['knn_word'] is a 2d list
knn_size = len(batch[0]['knn_word'][0]) if use_knn else 0
# --------- Prepare Code tensors ---------
max_len = max([ex['word'].size(0) for ex in batch])
# Batch Code Representations
len_rep = torch.LongTensor(batch_size).fill_(constant.PAD)
word_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
head_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
subject_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
object_pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
pos_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
ner_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
deprel_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
type_rep = torch.LongTensor(batch_size, max_len).fill_(constant.PAD)
labels = torch.LongTensor(batch_size)
subject = []
object = []
knn_rep = None
if use_knn:
knn_rep = torch.LongTensor(batch_size, max_len, knn_size).fill_(constant.PAD)
for i, ex in enumerate(batch):
len_rep[i] = ex['word'].size(0)
labels[i] = ex['relation']
word_rep[i, :len_rep[i]] = ex['word']
head_rep[i, :len_rep[i]] = ex['head']
subject_pos_rep[i, :len_rep[i]] = ex['subject_pos']
object_pos_rep[i, :len_rep[i]] = ex['object_pos']
pos_rep[i, :len_rep[i]] = ex['pos']
ner_rep[i, :len_rep[i]] = ex['ner']
deprel_rep[i, :len_rep[i]] = ex['deprel']
type_rep[i, :len_rep[i]] = ex['type']
subject.append(ex['subject'])
object.append(ex['object'])
if use_knn:
knn_rep[i, :len_rep[i]] = ex['knn_word']
return {
'ids': ids,
'language': language,
'batch_size': batch_size,
'len_rep': len_rep,
'word_rep': word_rep,
'knn_rep': knn_rep,
'head_rep': head_rep,
'subject': subject,
'object': object,
'subject_pos_rep': subject_pos_rep,
'object_pos_rep': object_pos_rep,
'labels': labels,
'pos_rep': pos_rep,
'ner_rep': ner_rep,
'deprel_rep': deprel_rep,
'type_rep': type_rep
}
class ACE05Dataset(Dataset):
def __init__(self, examples, model, evaluation=False):
self.model = model
self.examples = examples
self.evaluation = evaluation
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.model,
iseval=self.evaluation)
def lengths(self):
return [len(ex.words) for ex in self.examples]
class SortedBatchSampler(Sampler):
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l, np.random.random()) for l in self.lengths],
dtype=[('l1', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
if self.shuffle:
np.random.shuffle(batches)
return iter([i for batch in batches for i in batch])
def __len__(self):
return len(self.lengths)
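# Illustrative usage sketch (hypothetical `examples` and `model` objects): the dataset, sampler
# and batchify collate function can be combined with a standard PyTorch DataLoader.
#
#     dataset = ACE05Dataset(examples, model)
#     sampler = SortedBatchSampler(dataset.lengths(), batch_size=32, shuffle=True)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32, sampler=sampler,
#                                          collate_fn=batchify)
#     for batch in loader:
#         word_rep, labels = batch['word_rep'], batch['labels']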
|
[
"logging.getLogger",
"numpy.random.random",
"torch.LongTensor",
"clie.objects.Sentence",
"io.open",
"numpy.argsort",
"numpy.array",
"json.load",
"numpy.random.shuffle"
] |
[((266, 293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'import logging\n'), ((364, 431), 'io.open', 'io.open', (['file', '"""r"""'], {'encoding': '"""utf-8"""', 'newline': '"""\n"""', 'errors': '"""ignore"""'}), "(file, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n", (371, 431), False, 'import io\n'), ((3566, 3589), 'torch.LongTensor', 'torch.LongTensor', (['words'], {}), '(words)\n', (3582, 3589), False, 'import torch\n'), ((3600, 3653), 'torch.LongTensor', 'torch.LongTensor', (['[model.pos_dict[p] for p in ex.pos]'], {}), '([model.pos_dict[p] for p in ex.pos])\n', (3616, 3653), False, 'import torch\n'), ((3664, 3717), 'torch.LongTensor', 'torch.LongTensor', (['[model.ner_dict[n] for n in ex.ner]'], {}), '([model.ner_dict[n] for n in ex.ner])\n', (3680, 3717), False, 'import torch\n'), ((3731, 3790), 'torch.LongTensor', 'torch.LongTensor', (['[model.deprel_dict[d] for d in ex.deprel]'], {}), '([model.deprel_dict[d] for d in ex.deprel])\n', (3747, 3790), False, 'import torch\n'), ((3844, 3869), 'torch.LongTensor', 'torch.LongTensor', (['ex.head'], {}), '(ex.head)\n', (3860, 3869), False, 'import torch\n'), ((3890, 3924), 'torch.LongTensor', 'torch.LongTensor', (['ex.subj_position'], {}), '(ex.subj_position)\n', (3906, 3924), False, 'import torch\n'), ((3944, 3977), 'torch.LongTensor', 'torch.LongTensor', (['ex.obj_position'], {}), '(ex.obj_position)\n', (3960, 3977), False, 'import torch\n'), ((4269, 4291), 'torch.LongTensor', 'torch.LongTensor', (['type'], {}), '(type)\n', (4285, 4291), False, 'import torch\n'), ((5928, 5956), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (5944, 5956), False, 'import torch\n'), ((580, 616), 'numpy.array', 'numpy.array', (['tokens[1:]'], {'dtype': 'float'}), '(tokens[1:], dtype=float)\n', (591, 616), False, 'import numpy\n'), ((1067, 1079), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1076, 1079), False, 'import json\n'), ((3527, 3553), 'torch.LongTensor', 'torch.LongTensor', (['knn_word'], {}), '(knn_word)\n', (3543, 3553), False, 'import torch\n'), ((8131, 8172), 'numpy.argsort', 'np.argsort', (['lengths'], {'order': "('l1', 'rand')"}), "(lengths, order=('l1', 'rand'))\n", (8141, 8172), True, 'import numpy as np\n'), ((1289, 1307), 'clie.objects.Sentence', 'Sentence', (["ex['id']"], {}), "(ex['id'])\n", (1297, 1307), False, 'from clie.objects import Sentence\n'), ((5269, 5297), 'torch.LongTensor', 'torch.LongTensor', (['batch_size'], {}), '(batch_size)\n', (5285, 5297), False, 'import torch\n'), ((5333, 5370), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5349, 5370), False, 'import torch\n'), ((5406, 5443), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5422, 5443), False, 'import torch\n'), ((5486, 5523), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5502, 5523), False, 'import torch\n'), ((5565, 5602), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5581, 5602), False, 'import torch\n'), ((5637, 5674), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5653, 5674), False, 'import torch\n'), ((5709, 5746), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5725, 5746), False, 'import torch\n'), ((5784, 5821), 'torch.LongTensor', 
'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5800, 5821), False, 'import torch\n'), ((5857, 5894), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len'], {}), '(batch_size, max_len)\n', (5873, 5894), False, 'import torch\n'), ((8329, 8355), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (8346, 8355), True, 'import numpy as np\n'), ((1190, 1202), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1199, 1202), False, 'import json\n'), ((6044, 6091), 'torch.LongTensor', 'torch.LongTensor', (['batch_size', 'max_len', 'knn_size'], {}), '(batch_size, max_len, knn_size)\n', (6060, 6091), False, 'import torch\n'), ((8002, 8020), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8018, 8020), True, 'import numpy as np\n')]
|
# coding: UTF-8
import time
import torch
import numpy as np
from train_eval import train, init_network
from importlib import import_module
import argparse
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN')
parser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')
parser.add_argument('--word', default=False, type=bool, help='True for word, False for char')
args = parser.parse_args()
if __name__ == '__main__':
    dataset = 'THUCNews'  # dataset
    # Sogou News: embedding_SougouNews.npz, Tencent: embedding_Tencent.npz, random initialization: random
# embedding = 'random'
model_name = args.model # TextCNN
from utils import build_dataset, build_iterator, get_time_dif
x = import_module('models.' + model_name)
from config import Config
config = Config(dataset)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
    torch.backends.cudnn.deterministic = True  # ensure reproducible results on every run
start_time = time.time()
print("Loading data...")
vocab, train_data, dev_data, test_data = build_dataset(config, args.word)
train_iter = build_iterator(train_data, config)
dev_iter = build_iterator(dev_data, config)
test_iter = build_iterator(test_data, config)
time_dif = get_time_dif(start_time)
print("Time usage:", time_dif)
# train
config.n_vocab = len(vocab)
model = x.Model().to(config.device)
init_network(model)
print(model.parameters)
train(config, model, train_iter, dev_iter, test_iter)
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"utils.get_time_dif",
"importlib.import_module",
"argparse.ArgumentParser",
"config.Config",
"train_eval.init_network",
"train_eval.train",
"utils.build_iterator",
"utils.build_dataset",
"numpy.random.seed",
"time.time"
] |
[((165, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chinese Text Classification"""'}), "(description='Chinese Text Classification')\n", (188, 231), False, 'import argparse\n'), ((820, 857), 'importlib.import_module', 'import_module', (["('models.' + model_name)"], {}), "('models.' + model_name)\n", (833, 857), False, 'from importlib import import_module\n'), ((901, 916), 'config.Config', 'Config', (['dataset'], {}), '(dataset)\n', (907, 916), False, 'from config import Config\n'), ((921, 938), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (935, 938), True, 'import numpy as np\n'), ((943, 963), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (960, 963), False, 'import torch\n'), ((968, 997), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1)'], {}), '(1)\n', (994, 997), False, 'import torch\n'), ((1074, 1085), 'time.time', 'time.time', ([], {}), '()\n', (1083, 1085), False, 'import time\n'), ((1160, 1192), 'utils.build_dataset', 'build_dataset', (['config', 'args.word'], {}), '(config, args.word)\n', (1173, 1192), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1210, 1244), 'utils.build_iterator', 'build_iterator', (['train_data', 'config'], {}), '(train_data, config)\n', (1224, 1244), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1260, 1292), 'utils.build_iterator', 'build_iterator', (['dev_data', 'config'], {}), '(dev_data, config)\n', (1274, 1292), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1309, 1342), 'utils.build_iterator', 'build_iterator', (['test_data', 'config'], {}), '(test_data, config)\n', (1323, 1342), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1358, 1382), 'utils.get_time_dif', 'get_time_dif', (['start_time'], {}), '(start_time)\n', (1370, 1382), False, 'from utils import build_dataset, build_iterator, get_time_dif\n'), ((1507, 1526), 'train_eval.init_network', 'init_network', (['model'], {}), '(model)\n', (1519, 1526), False, 'from train_eval import train, init_network\n'), ((1559, 1612), 'train_eval.train', 'train', (['config', 'model', 'train_iter', 'dev_iter', 'test_iter'], {}), '(config, model, train_iter, dev_iter, test_iter)\n', (1564, 1612), False, 'from train_eval import train, init_network\n')]
|
"""Python interfaces to DGL farthest point sampler."""
from dgl._ffi.base import DGLError
import numpy as np
from .._ffi.function import _init_api
from .. import backend as F
from .. import ndarray as nd
def _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result):
r"""Farthest Point Sampler
Parameters
----------
data : tensor
A tensor of shape (N, d) where N is the number of points and d is the dimension.
batch_size : int
The number of batches in the ``data``. N should be divisible by batch_size.
sample_points : int
The number of points to sample in each batch.
dist : tensor
        Pre-allocated tensor of shape (N, ) that will hold the distance from each point to the sampled set.
start_idx : tensor of int
Pre-allocated tensor of shape (batch_size, ) for the starting sample in each batch.
result : tensor of int
Pre-allocated tensor of shape (sample_points * batch_size, ) for the sampled index.
Returns
-------
    No return value. The input variable ``result`` will be overwritten with the sampled indices.
"""
assert F.shape(data)[0] >= sample_points * batch_size
assert F.shape(data)[0] % batch_size == 0
_CAPI_FarthestPointSampler(F.zerocopy_to_dgl_ndarray(data),
batch_size, sample_points,
F.zerocopy_to_dgl_ndarray(dist),
F.zerocopy_to_dgl_ndarray(start_idx),
F.zerocopy_to_dgl_ndarray(result))
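# Illustrative helper, not part of the original module: it shows how the
# pre-allocated buffers described in the docstring above could be created
# before calling ``_farthest_point_sampler``. A PyTorch backend is assumed,
# so ``data`` is taken to be a torch tensor of shape (N, d).
def _example_farthest_point_sampling(data, batch_size, sample_points):
    import torch
    num_points = data.shape[0]
    # per-point "to-sample" distance buffer, shape (N,)
    dist = torch.zeros(num_points, dtype=data.dtype)
    # one starting index per batch, shape (batch_size,)
    start_idx = torch.zeros(batch_size, dtype=torch.long)
    # flat output buffer for all sampled indices, shape (sample_points * batch_size,)
    result = torch.zeros(sample_points * batch_size, dtype=torch.long)
    _farthest_point_sampler(data, batch_size, sample_points, dist, start_idx, result)
    return result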
def _neighbor_matching(graph_idx, num_nodes, edge_weights=None, relabel_idx=True):
"""
Description
-----------
The neighbor matching procedure of edge coarsening used in
`Metis <http://cacs.usc.edu/education/cs653/Karypis-METIS-SIAMJSC98.pdf>`__
and
`Graclus <https://www.cs.utexas.edu/users/inderjit/public_papers/multilevel_pami.pdf>`__
for homogeneous graph coarsening. This procedure keeps picking an unmarked
    vertex and matching it with one of its unmarked neighbors (the one that
    maximizes the edge weight) until no more matches can be made.
    If no edge weight is given, this procedure randomly picks a neighbor for each
vertex.
The GPU implementation is based on `A GPU Algorithm for Greedy Graph Matching
<http://www.staff.science.uu.nl/~bisse101/Articles/match12.pdf>`__
NOTE: The input graph must be bi-directed (undirected) graph. Call :obj:`dgl.to_bidirected`
if you are not sure your graph is bi-directed.
Parameters
----------
    graph_idx : HeteroGraphIndex
The input homogeneous graph.
num_nodes : int
The number of nodes in this homogeneous graph.
    edge_weights : tensor, optional
The edge weight tensor holding non-negative scalar weight for each edge.
default: :obj:`None`
relabel_idx : bool, optional
If true, relabel resulting node labels to have consecutive node ids.
default: :obj:`True`
Returns
-------
a 1-D tensor
        A vector in which each element indicates the cluster ID of the corresponding vertex.
"""
edge_weight_capi = nd.NULL["int64"]
if edge_weights is not None:
edge_weight_capi = F.zerocopy_to_dgl_ndarray(edge_weights)
node_label = F.full_1d(
num_nodes, -1, getattr(F, graph_idx.dtype), F.to_backend_ctx(graph_idx.ctx))
node_label_capi = F.zerocopy_to_dgl_ndarray_for_write(node_label)
_CAPI_NeighborMatching(graph_idx, edge_weight_capi, node_label_capi)
if F.reduce_sum(node_label < 0).item() != 0:
raise DGLError("Find unmatched node")
# reorder node id
# TODO: actually we can add `return_inverse` option for `unique`
# function in backend for efficiency.
if relabel_idx:
node_label_np = F.zerocopy_to_numpy(node_label)
_, node_label_np = np.unique(node_label_np, return_inverse=True)
return F.tensor(node_label_np)
else:
return node_label
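# Illustrative sketch, not the actual C API implementation: a pure-Python
# version of the greedy neighbor matching described in the docstring of
# ``_neighbor_matching``. ``adj`` (adjacency lists) and ``weight`` (a
# per-edge weight lookup) are assumed inputs used only for this example.
def _example_greedy_matching(num_nodes, adj, weight):
    label = [-1] * num_nodes
    cluster = 0
    for u in range(num_nodes):
        if label[u] != -1:
            continue  # vertex already matched
        candidates = [v for v in adj[u] if v != u and label[v] == -1]
        if candidates:
            # match with the unmarked neighbor of maximum edge weight
            v = max(candidates, key=lambda v: weight[(u, v)])
            label[u] = label[v] = cluster
        else:
            # no unmarked neighbor left: the vertex forms its own cluster
            label[u] = cluster
        cluster += 1
    return label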
_init_api('dgl.geometry', __name__)
|
[
"dgl._ffi.base.DGLError",
"numpy.unique"
] |
[((3538, 3569), 'dgl._ffi.base.DGLError', 'DGLError', (['"""Find unmatched node"""'], {}), "('Find unmatched node')\n", (3546, 3569), False, 'from dgl._ffi.base import DGLError\n'), ((3813, 3858), 'numpy.unique', 'np.unique', (['node_label_np'], {'return_inverse': '(True)'}), '(node_label_np, return_inverse=True)\n', (3822, 3858), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Tests of precipitation_type utilities"""
import numpy as np
import pytest
from iris.exceptions import CoordinateNotFoundError
from improver.metadata.constants import FLOAT_DTYPE
from improver.precipitation_type.utilities import make_shower_condition_cube
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
def set_up_test_cube(n_thresholds=1):
"""Set up a cube testing shower condition conversion"""
thresholds = np.arange(n_thresholds)
shape = [2, 2]
shape = [n_thresholds, *shape] if n_thresholds > 0 else shape
data = np.ones(shape, dtype=FLOAT_DTYPE)
cube = set_up_probability_cube(
data,
thresholds,
variable_name="texture_of_cloud_area_fraction",
threshold_units=1,
spatial_grid="equalarea",
)
return cube
def test_basic():
"""Test that with a valid input the cube is transformed into a shower
condition cube."""
cube = set_up_test_cube()
result = make_shower_condition_cube(cube)
threshold_coord = result.coord(var_name="threshold")
assert result.name() == "probability_of_shower_condition_above_threshold"
assert result.dtype == FLOAT_DTYPE
assert (result.data == cube.data).all()
assert threshold_coord.name() == "shower_condition"
assert threshold_coord.units == 1
def test_no_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube does not have
a threshold coordinate."""
cube = set_up_test_cube()
cube.remove_coord("texture_of_cloud_area_fraction")
expected = "Input has no threshold coordinate and cannot be used"
with pytest.raises(CoordinateNotFoundError, match=expected):
make_shower_condition_cube(cube)
def test_multi_valued_threshold_coord():
"""Test an exception is raised if the proxy diagnostic cube has a multi
valued threshold coordinate."""
cube = set_up_test_cube(n_thresholds=2)
expected = "Expected a single valued threshold coordinate.*"
with pytest.raises(ValueError, match=expected):
make_shower_condition_cube(cube)
|
[
"numpy.ones",
"improver.synthetic_data.set_up_test_cubes.set_up_probability_cube",
"pytest.raises",
"improver.precipitation_type.utilities.make_shower_condition_cube",
"numpy.arange"
] |
[((2118, 2141), 'numpy.arange', 'np.arange', (['n_thresholds'], {}), '(n_thresholds)\n', (2127, 2141), True, 'import numpy as np\n'), ((2238, 2271), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'FLOAT_DTYPE'}), '(shape, dtype=FLOAT_DTYPE)\n', (2245, 2271), True, 'import numpy as np\n'), ((2283, 2427), 'improver.synthetic_data.set_up_test_cubes.set_up_probability_cube', 'set_up_probability_cube', (['data', 'thresholds'], {'variable_name': '"""texture_of_cloud_area_fraction"""', 'threshold_units': '(1)', 'spatial_grid': '"""equalarea"""'}), "(data, thresholds, variable_name=\n 'texture_of_cloud_area_fraction', threshold_units=1, spatial_grid=\n 'equalarea')\n", (2306, 2427), False, 'from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube\n'), ((2642, 2674), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (2668, 2674), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n'), ((3297, 3351), 'pytest.raises', 'pytest.raises', (['CoordinateNotFoundError'], {'match': 'expected'}), '(CoordinateNotFoundError, match=expected)\n', (3310, 3351), False, 'import pytest\n'), ((3361, 3393), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (3387, 3393), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n'), ((3669, 3710), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'expected'}), '(ValueError, match=expected)\n', (3682, 3710), False, 'import pytest\n'), ((3720, 3752), 'improver.precipitation_type.utilities.make_shower_condition_cube', 'make_shower_condition_cube', (['cube'], {}), '(cube)\n', (3746, 3752), False, 'from improver.precipitation_type.utilities import make_shower_condition_cube\n')]
|
import cv2
import torch
import yaml
import imageio
import throttle
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from skimage.transform import resize
from scipy.spatial import ConvexHull
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from sync_batchnorm import DataParallelWithCallback
#from animate import normalize_kp
# command = [ffmpeg,
# '-y',
# '-f', 'rawvideo',
# '-vcodec','rawvideo',
# '-pix_fmt', 'bgr24',
# '-s', dimension,
# '-i', '-',
# '-c:v', 'libx264',
# '-pix_fmt', 'yuv420p',
# '-preset', 'ultrafast',
# '-f', 'flv',
# 'rtmp://10.10.10.80/live/mystream']
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
def load_checkpoints(config_path, checkpoint_path, cpu=False):
with open(config_path) as f:
config = yaml.load(f)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
if not cpu:
generator.cuda()
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
if not cpu:
kp_detector.cuda()
if cpu:
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
else:
checkpoint = torch.load(checkpoint_path)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
if not cpu:
generator = DataParallelWithCallback(generator)
kp_detector = DataParallelWithCallback(kp_detector)
generator.eval()
kp_detector.eval()
return generator, kp_detector
@throttle.wrap(1, 2)
def forward(source_image, driving_frame, kp_source, kp_driving_initial, generator, kp_detector, relative=True, adapt_scale=True, cpu=True):
kp_driving = kp_detector(driving_frame)
kp_norm = normalize_kp(
kp_source=kp_source,
kp_driving=kp_driving,
kp_driving_initial=kp_driving_initial,
use_relative_movement=relative,
use_relative_jacobian=relative,
adapt_movement_scale=adapt_scale
)
out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
return np.transpose(out["prediction"].data.cpu().numpy(), [0, 2, 3, 1])[0]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--config", required=True, help="path to config")
parser.add_argument("--source_image", required=True, help="path to source image")
parser.add_argument("--checkpoint", default="vox-cpk.pth.tar", help="path to checkpoint")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--cpu", dest="cpu", action="store_true", help="CPU mode")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
opt = parser.parse_args()
generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
source_image = imageio.imread(opt.source_image)
source_image = resize(source_image, (256, 256))[..., :3]
source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
if not opt.cpu:
source = source.cuda()
kp_source = kp_detector(source)
#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M','J','P','G'), 30, (256, 256))
kp_driving_initial = None
camera = cv2.VideoCapture(0)
ret, frame = camera.read()
while True:
ret, frame = camera.read()
resized = resize(frame, (256, 256))[..., :3]
        # note: ``resized`` is a NumPy array here, so it cannot be moved to
        # the GPU directly; the driving tensor is moved to CUDA below instead.
# y = torch.tensor(np.array(resized))
# x = y.cpu().numpy()
# image = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
# # x = y.permute(1, 2, 0)
# plt.imshow(np.array(image))
# plt.show()
        driving_resized = torch.tensor(np.array(resized)[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not opt.cpu:
            driving_resized = driving_resized.cuda()
if not kp_driving_initial:
kp_driving_initial = kp_detector(driving_resized)
fake_frame = forward(
source,
driving_resized,
kp_source,
kp_driving_initial,
generator,
kp_detector,
relative=opt.relative,
adapt_scale=opt.adapt_scale,
cpu=opt.cpu
)
cv2.imshow("frame", fake_frame)
#x = np.squeeze(driving_resized, axis=(0,))
#x = driving_resized[0].permute(1, 2, 0)
# plt_driving = driving_resized #permute(2, 3, 1)
#print(plt_driving.shape)
#plt.imshow(x)
#plt.show()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
|
[
"sync_batchnorm.DataParallelWithCallback",
"numpy.sqrt",
"modules.generator.OcclusionAwareGenerator",
"argparse.ArgumentParser",
"torch.load",
"modules.keypoint_detector.KPDetector",
"throttle.wrap",
"yaml.load",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"torch.matmul",
"imageio.imread",
"skimage.transform.resize",
"cv2.waitKey",
"torch.inverse",
"torch.device"
] |
[((2735, 2754), 'throttle.wrap', 'throttle.wrap', (['(1)', '(2)'], {}), '(1, 2)\n', (2748, 2754), False, 'import throttle\n'), ((1863, 1980), 'modules.generator.OcclusionAwareGenerator', 'OcclusionAwareGenerator', ([], {}), "(**config['model_params']['generator_params'], **\n config['model_params']['common_params'])\n", (1886, 1980), False, 'from modules.generator import OcclusionAwareGenerator\n'), ((2076, 2182), 'modules.keypoint_detector.KPDetector', 'KPDetector', ([], {}), "(**config['model_params']['kp_detector_params'], **config[\n 'model_params']['common_params'])\n", (2086, 2182), False, 'from modules.keypoint_detector import KPDetector\n'), ((3360, 3376), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3374, 3376), False, 'from argparse import ArgumentParser\n'), ((4216, 4248), 'imageio.imread', 'imageio.imread', (['opt.source_image'], {}), '(opt.source_image)\n', (4230, 4248), False, 'import imageio\n'), ((4614, 4633), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4630, 4633), False, 'import cv2\n'), ((5766, 5789), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5787, 5789), False, 'import cv2\n'), ((1833, 1845), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1842, 1845), False, 'import yaml\n'), ((2377, 2404), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2387, 2404), False, 'import torch\n'), ((2557, 2592), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['generator'], {}), '(generator)\n', (2581, 2592), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((2615, 2652), 'sync_batchnorm.DataParallelWithCallback', 'DataParallelWithCallback', (['kp_detector'], {}), '(kp_detector)\n', (2639, 2652), False, 'from sync_batchnorm import DataParallelWithCallback\n'), ((4266, 4298), 'skimage.transform.resize', 'resize', (['source_image', '(256, 256)'], {}), '(source_image, (256, 256))\n', (4272, 4298), False, 'from skimage.transform import resize\n'), ((5441, 5472), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'fake_frame'], {}), "('frame', fake_frame)\n", (5451, 5472), False, 'import cv2\n'), ((1115, 1135), 'numpy.sqrt', 'np.sqrt', (['source_area'], {}), '(source_area)\n', (1122, 1135), True, 'import numpy as np\n'), ((1138, 1159), 'numpy.sqrt', 'np.sqrt', (['driving_area'], {}), '(driving_area)\n', (1145, 1159), True, 'import numpy as np\n'), ((1649, 1699), 'torch.matmul', 'torch.matmul', (['jacobian_diff', "kp_source['jacobian']"], {}), "(jacobian_diff, kp_source['jacobian'])\n", (1661, 1699), False, 'import torch\n'), ((4723, 4748), 'skimage.transform.resize', 'resize', (['frame', '(256, 256)'], {}), '(frame, (256, 256))\n', (4729, 4748), False, 'from skimage.transform import resize\n'), ((1569, 1614), 'torch.inverse', 'torch.inverse', (["kp_driving_initial['jacobian']"], {}), "(kp_driving_initial['jacobian'])\n", (1582, 1614), False, 'import torch\n'), ((2325, 2344), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2337, 2344), False, 'import torch\n'), ((5694, 5708), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5705, 5708), False, 'import cv2\n'), ((5046, 5063), 'numpy.array', 'np.array', (['resized'], {}), '(resized)\n', (5054, 5063), True, 'import numpy as np\n')]
|
import numpy as np
from albumentations import (Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90,
ShiftScaleRotate, ElasticTransform,
GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop,
RandomBrightnessContrast, HueSaturationValue, IAASharpen,
RandomGamma, RandomBrightness, RandomBrightnessContrast,
GaussianBlur,CLAHE,
Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion,
Normalize, OneOf, NoOp)
from albumentations.pytorch import ToTensorV2 as ToTensor
from get_config import get_config
config = get_config()
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
def get_transforms_train():
transform_train = Compose([
#Basic
RandomRotate90(p=1),
HorizontalFlip(p=0.5),
#Morphology
ShiftScaleRotate(shift_limit=0, scale_limit=(-0.2,0.2), rotate_limit=(-30,30),
interpolation=1, border_mode=0, value=(0,0,0), p=0.5),
GaussNoise(var_limit=(0,50.0), mean=0, p=0.5),
GaussianBlur(blur_limit=(3,7), p=0.5),
#Color
RandomBrightnessContrast(brightness_limit=0.35, contrast_limit=0.5,
brightness_by_max=True,p=0.5),
HueSaturationValue(hue_shift_limit=30, sat_shift_limit=30,
val_shift_limit=0, p=0.5),
CoarseDropout(max_holes=2,
max_height=config['input_resolution'][0]//4, max_width=config['input_resolution'][1]//4,
min_holes=1,
min_height=config['input_resolution'][0]//16, min_width=config['input_resolution'][1]//16,
fill_value=0, mask_fill_value=0, p=0.5),
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
])
return transform_train
def get_transforms_valid():
transform_valid = Compose([
Normalize(mean=(MEAN[0], MEAN[1], MEAN[2]),
std=(STD[0], STD[1], STD[2])),
ToTensor(),
] )
return transform_valid
def denormalize(z, mean=MEAN.reshape(-1,1,1), std=STD.reshape(-1,1,1)):
return std*z + mean
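# Illustrative usage, not part of the original module: albumentations
# pipelines are called with keyword arguments and return a dict of results.
def _example_augment(rgb_image):
    """Apply the training pipeline to an HWC uint8 RGB image (assumed to be
    loaded elsewhere) and return the normalized CHW tensor."""
    augmented = get_transforms_train()(image=rgb_image)
    return augmented["image"]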
|
[
"albumentations.ShiftScaleRotate",
"albumentations.pytorch.ToTensorV2",
"albumentations.RandomBrightnessContrast",
"albumentations.GaussianBlur",
"albumentations.CoarseDropout",
"albumentations.GaussNoise",
"albumentations.HueSaturationValue",
"numpy.array",
"albumentations.Normalize",
"get_config.get_config",
"albumentations.RandomRotate90",
"albumentations.HorizontalFlip"
] |
[((737, 749), 'get_config.get_config', 'get_config', ([], {}), '()\n', (747, 749), False, 'from get_config import get_config\n'), ((758, 789), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (766, 789), True, 'import numpy as np\n'), ((797, 828), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (805, 828), True, 'import numpy as np\n'), ((913, 932), 'albumentations.RandomRotate90', 'RandomRotate90', ([], {'p': '(1)'}), '(p=1)\n', (927, 932), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((942, 963), 'albumentations.HorizontalFlip', 'HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (956, 963), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1002, 1142), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0)', 'scale_limit': '(-0.2, 0.2)', 'rotate_limit': '(-30, 30)', 'interpolation': '(1)', 'border_mode': '(0)', 'value': '(0, 0, 0)', 'p': '(0.5)'}), '(shift_limit=0, scale_limit=(-0.2, 0.2), rotate_limit=(-30,\n 30), interpolation=1, border_mode=0, value=(0, 0, 0), p=0.5)\n', (1018, 1142), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1170, 1216), 'albumentations.GaussNoise', 'GaussNoise', ([], {'var_limit': '(0, 50.0)', 'mean': '(0)', 'p': '(0.5)'}), '(var_limit=(0, 50.0), mean=0, p=0.5)\n', (1180, 1216), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1225, 1263), 'albumentations.GaussianBlur', 'GaussianBlur', ([], {'blur_limit': '(3, 7)', 'p': '(0.5)'}), '(blur_limit=(3, 7), p=0.5)\n', (1237, 1263), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1296, 1398), 
'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'brightness_limit': '(0.35)', 'contrast_limit': '(0.5)', 'brightness_by_max': '(True)', 'p': '(0.5)'}), '(brightness_limit=0.35, contrast_limit=0.5,\n brightness_by_max=True, p=0.5)\n', (1320, 1398), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1437, 1526), 'albumentations.HueSaturationValue', 'HueSaturationValue', ([], {'hue_shift_limit': '(30)', 'sat_shift_limit': '(30)', 'val_shift_limit': '(0)', 'p': '(0.5)'}), '(hue_shift_limit=30, sat_shift_limit=30, val_shift_limit=\n 0, p=0.5)\n', (1455, 1526), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1568, 1849), 'albumentations.CoarseDropout', 'CoarseDropout', ([], {'max_holes': '(2)', 'max_height': "(config['input_resolution'][0] // 4)", 'max_width': "(config['input_resolution'][1] // 4)", 'min_holes': '(1)', 'min_height': "(config['input_resolution'][0] // 16)", 'min_width': "(config['input_resolution'][1] // 16)", 'fill_value': '(0)', 'mask_fill_value': '(0)', 'p': '(0.5)'}), "(max_holes=2, max_height=config['input_resolution'][0] // 4,\n max_width=config['input_resolution'][1] // 4, min_holes=1, min_height=\n config['input_resolution'][0] // 16, min_width=config[\n 'input_resolution'][1] // 16, fill_value=0, mask_fill_value=0, p=0.5)\n", (1581, 1849), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((1937, 2010), 'albumentations.Normalize', 'Normalize', ([], {'mean': '(MEAN[0], MEAN[1], MEAN[2])', 'std': '(STD[0], STD[1], STD[2])'}), '(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2]))\n', (1946, 2010), False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((2039, 2049), 'albumentations.pytorch.ToTensorV2', 'ToTensor', ([], {}), '()\n', (2047, 2049), True, 'from albumentations.pytorch import ToTensorV2 as ToTensor\n'), ((2155, 2228), 'albumentations.Normalize', 'Normalize', ([], {'mean': '(MEAN[0], MEAN[1], MEAN[2])', 'std': '(STD[0], STD[1], STD[2])'}), '(mean=(MEAN[0], MEAN[1], MEAN[2]), std=(STD[0], STD[1], STD[2]))\n', (2164, 2228), 
False, 'from albumentations import Compose, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, ShiftScaleRotate, ElasticTransform, GridDistortion, RandomSizedCrop, RandomCrop, CenterCrop, RandomBrightnessContrast, HueSaturationValue, IAASharpen, RandomGamma, RandomBrightness, RandomBrightnessContrast, GaussianBlur, CLAHE, Cutout, CoarseDropout, GaussNoise, ChannelShuffle, ToGray, OpticalDistortion, Normalize, OneOf, NoOp\n'), ((2257, 2267), 'albumentations.pytorch.ToTensorV2', 'ToTensor', ([], {}), '()\n', (2265, 2267), True, 'from albumentations.pytorch import ToTensorV2 as ToTensor\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines coordinate frames and ties them to data axes.
"""
from __future__ import absolute_import, division, unicode_literals, print_function
import numpy as np
from astropy import units as u
from astropy import utils as astutil
from astropy import coordinates as coord
from astropy.extern import six
from . import utils as gwutils
__all__ = ['Frame2D', 'CelestialFrame', 'SpectralFrame', 'CompositeFrame',
'CoordinateFrame']
STANDARD_REFERENCE_FRAMES = [frame.upper() for frame in coord.builtin_frames.__all__]
STANDARD_REFERENCE_POSITION = ["GEOCENTER", "BARYCENTER", "HELIOCENTER",
"TOPOCENTER", "LSR", "LSRK", "LSRD",
"GALACTIC_CENTER", "LOCAL_GROUP_CENTER"]
class CoordinateFrame(object):
"""
Base class for CoordinateFrames.
Parameters
----------
naxes : int
Number of axes.
axes_type : str
One of ["SPATIAL", "SPECTRAL", "TIME"]
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
reference_position : str
Reference position - one of `STANDARD_REFERENCE_POSITION`
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, naxes, axes_type, axes_order, reference_frame=None,
reference_position=None, unit=None, axes_names=None,
name=None):
self._naxes = naxes
self._axes_order = tuple(axes_order)
if isinstance(axes_type, six.string_types):
self._axes_type = (axes_type,)
else:
self._axes_type = tuple(axes_type)
self._reference_frame = reference_frame
if unit is not None:
if astutil.isiterable(unit):
unit = tuple(unit)
else:
unit = (unit,)
if len(unit) != naxes:
raise ValueError("Number of units does not match number of axes.")
else:
self._unit = tuple([u.Unit(au) for au in unit])
if axes_names is not None:
if isinstance(axes_names, six.string_types):
axes_names = (axes_names,)
else:
axes_names = tuple(axes_names)
if len(axes_names) != naxes:
raise ValueError("Number of axes names does not match number of axes.")
else:
axes_names = tuple([""] * naxes)
self._axes_names = axes_names
if name is None:
self._name = self.__class__.__name__
else:
self._name = name
if reference_position is not None:
self._reference_position = reference_position
else:
self._reference_position = None
super(CoordinateFrame, self).__init__()
def __repr__(self):
fmt = '<{0}(name="{1}", unit={2}, axes_names={3}, axes_order={4}'.format(
self.__class__.__name__, self.name,
self.unit, self.axes_names, self.axes_order)
if self.reference_position is not None:
fmt += ', reference_position="{0}"'.format(self.reference_position)
if self.reference_frame is not None:
fmt += ", reference_frame={0}".format(self.reference_frame)
fmt += ")>"
return fmt
def __str__(self):
if self._name is not None:
return self._name
else:
return self.__class__.__name__
@property
def name(self):
""" A custom name of this frame."""
return self._name
@name.setter
def name(self, val):
""" A custom name of this frame."""
self._name = val
@property
def naxes(self):
""" The number of axes intheis frame."""
return self._naxes
@property
def unit(self):
"""The unit of this frame."""
return self._unit
@property
def axes_names(self):
""" Names of axes in the frame."""
return self._axes_names
@property
def axes_order(self):
""" A tuple of indices which map inputs to axes."""
return self._axes_order
@property
def reference_frame(self):
return self._reference_frame
@property
def reference_position(self):
try:
return self._reference_position
except AttributeError:
return None
def input_axes(self, start_frame=None):
"""
Computes which axes in `start_frame` contribute to each axis in the current frame.
Parameters
----------
start_frame : ~gwcs.coordinate_frames.CoordinateFrame
A frame in the WCS pipeline
The transform between start_frame and the current frame is used to compute the
mapping inputs: outputs.
"""
sep = self._separable(start_frame)
inputs = []
for ax in self.axes_order:
inputs.append(list(sep[ax].nonzero()[0]))
return inputs
@property
def axes_type(self):
""" Type of this frame : 'SPATIAL', 'SPECTRAL', 'TIME'. """
return self._axes_type
def coordinates(self, *args):
""" Create world coordinates object"""
raise NotImplementedError("Subclasses may implement this")
class CelestialFrame(CoordinateFrame):
"""
Celestial Frame Representation
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
A reference frame.
reference_position : str
Reference position.
unit : str or units.Unit instance or iterable of those
Units on axes.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=None, reference_frame=None,
unit=None, axes_names=None,
name=None):
naxes = 2
if reference_frame is not None:
if reference_frame.name.upper() in STANDARD_REFERENCE_FRAMES:
_axes_names = list(reference_frame.representation_component_names.values())
if 'distance' in _axes_names:
_axes_names.remove('distance')
if axes_names is None:
axes_names = _axes_names
naxes = len(_axes_names)
_unit = list(reference_frame.representation_component_units.values())
if unit is None and _unit:
unit = _unit
if axes_order is None:
axes_order = tuple(range(naxes))
if unit is None:
unit = tuple([u.degree] * naxes)
axes_type = ['SPATIAL'] * naxes
super(CelestialFrame, self).__init__(naxes=naxes, axes_type=axes_type,
axes_order=axes_order,
reference_frame=reference_frame,
unit=unit,
axes_names=axes_names,
name=name)
def coordinates(self, *args):
"""
Create a SkyCoord object.
Parameters
----------
args : float
inputs to wcs.input_frame
"""
        # Reorder axes if necessary.
try:
return coord.SkyCoord(*args, unit=self.unit, frame=self._reference_frame)
except:
raise
class SpectralFrame(CoordinateFrame):
"""
Represents Spectral Frame
Parameters
----------
axes_order : tuple or int
A dimension in the input data that corresponds to this axis.
reference_frame : astropy.coordinates.builtin_frames
Reference frame (usually used with output_frame to convert to world coordinate objects).
unit : str or units.Unit instance
Spectral unit.
axes_names : str
Spectral axis name.
name : str
Name for this frame.
"""
def __init__(self, axes_order=(0,), reference_frame=None, unit=None,
axes_names=None, name=None, reference_position=None):
super(SpectralFrame, self).__init__(naxes=1, axes_type="SPECTRAL", axes_order=axes_order,
axes_names=axes_names, reference_frame=reference_frame,
unit=unit, name=name,
reference_position=reference_position)
def coordinates(self, *args):
if np.isscalar(args):
return args * self.unit[0]
else:
return args[0] * self.unit[0]
class CompositeFrame(CoordinateFrame):
"""
Represents one or more frames.
Parameters
----------
frames : list
List of frames (TimeFrame, CelestialFrame, SpectralFrame, CoordinateFrame).
name : str
Name for this frame.
"""
def __init__(self, frames, name=None):
self._frames = frames[:]
naxes = sum([frame._naxes for frame in self._frames])
axes_type = list(range(naxes))
unit = list(range(naxes))
axes_names = list(range(naxes))
axes_order = []
for frame in frames:
axes_order.extend(frame.axes_order)
for frame in frames:
for ind, axtype, un, n in zip(frame.axes_order, frame.axes_type,
frame.unit, frame.axes_names):
axes_type[ind] = axtype
axes_names[ind] = n
unit[ind] = un
if len(np.unique(axes_order)) != len(axes_order):
raise ValueError("Incorrect numbering of axes, "
"axes_order should contain unique numbers, "
"got {}.".format(axes_order))
super(CompositeFrame, self).__init__(naxes, axes_type=axes_type,
axes_order=axes_order,
unit=unit, axes_names=axes_names,
name=name)
@property
def frames(self):
return self._frames
def __repr__(self):
return repr(self.frames)
def coordinates(self, *args):
coo = []
for frame in self.frames:
fargs = [args[i] for i in frame.axes_order]
print(frame, fargs, frame.axes_order)
coo.append(frame.coordinates(*fargs))
return coo
class Frame2D(CoordinateFrame):
"""
A 2D coordinate frame.
Parameters
----------
axes_order : tuple of int
A dimension in the input data that corresponds to this axis.
unit : list of astropy.units.Unit
Unit for each axis.
axes_names : list
Names of the axes in this frame.
name : str
Name of this frame.
"""
def __init__(self, axes_order=(0, 1), unit=(u.pix, u.pix), axes_names=('x', 'y'),
name=None):
super(Frame2D, self).__init__(2, ["SPATIAL", "SPATIAL"], axes_order, name=name,
axes_names=axes_names, unit=unit)
def coordinates(self, *args):
args = [args[i] for i in self.axes_order]
coo = tuple([arg * un for arg, un in zip(args, self.unit)])
return coo
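# Illustrative usage, not part of the original module: constructing frames
# and asking them for world-coordinate objects. The ICRS frame, micron unit
# and axis ordering below are arbitrary choices for this sketch.
def _example_frames():
    detector = Frame2D(name="detector")
    sky = CelestialFrame(reference_frame=coord.ICRS(), name="icrs")
    spec = SpectralFrame(axes_order=(2,), unit=u.micron, axes_names="wavelength")
    world = CompositeFrame([sky, spec], name="sky_and_spectral")
    # detector.coordinates(10, 20)    -> (10 pix, 20 pix)
    # world.coordinates(ra, dec, lam) -> [SkyCoord(...), Quantity in micron]
    return detector, world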
|
[
"numpy.unique",
"numpy.isscalar",
"astropy.units.Unit",
"astropy.coordinates.SkyCoord",
"astropy.utils.isiterable"
] |
[((8880, 8897), 'numpy.isscalar', 'np.isscalar', (['args'], {}), '(args)\n', (8891, 8897), True, 'import numpy as np\n'), ((2050, 2074), 'astropy.utils.isiterable', 'astutil.isiterable', (['unit'], {}), '(unit)\n', (2068, 2074), True, 'from astropy import utils as astutil\n'), ((7717, 7783), 'astropy.coordinates.SkyCoord', 'coord.SkyCoord', (['*args'], {'unit': 'self.unit', 'frame': 'self._reference_frame'}), '(*args, unit=self.unit, frame=self._reference_frame)\n', (7731, 7783), True, 'from astropy import coordinates as coord\n'), ((9919, 9940), 'numpy.unique', 'np.unique', (['axes_order'], {}), '(axes_order)\n', (9928, 9940), True, 'import numpy as np\n'), ((2332, 2342), 'astropy.units.Unit', 'u.Unit', (['au'], {}), '(au)\n', (2338, 2342), True, 'from astropy import units as u\n')]
|
'''ResNet using PSG in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] <NAME>, <NAME>, <NAME>, <NAME>
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
from numpy.lib.arraysetops import isin
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.masked_psg_seed_conv import PredictiveSeedConv2d
from masked_layers import layers
# Fixed
NUM_BITS = 32
NUM_BITS_WEIGHT = 32
NUM_BITS_GRAD = None
BIPRECISION = False
PREDICTIVE_FORWARD = False
WRITER = None
WRITER_PREFIX_COUNTER = 0
# Tunable
PREDICTIVE_BACKWARD = True
MSB_BITS = 4
MSB_BITS_WEIGHT = 4
MSB_BITS_GRAD = 8
THRESHOLD = 0.0
SPARSIFY = False
SIGN = True
def conv1x1(in_planes, out_planes, stride=1, input_signed=True, predictive_forward=True, writer_prefix=""):
"1x1 convolution with no padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
def conv3x3(in_planes, out_planes, stride=1, input_signed=False, predictive_forward=True, writer_prefix=""):
"3x3 convolution with padding"
predictive_forward = PREDICTIVE_FORWARD and predictive_forward
return PredictiveSeedConv2d(
in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD,
biprecision=BIPRECISION, input_signed=input_signed,
predictive_forward=predictive_forward, predictive_backward=PREDICTIVE_BACKWARD,
msb_bits=MSB_BITS, msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD,
threshold=THRESHOLD, sparsify=SPARSIFY, sign=SIGN,
writer=WRITER, writer_prefix=writer_prefix)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.conv1 = conv1x1(in_planes, planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(planes)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.conv2 = conv3x3(planes, planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn2 = nn.BatchNorm2d(planes)
# self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.conv3 = conv1x1(planes, self.expansion*planes, stride=1, input_signed=False, predictive_forward=False, writer_prefix=None)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
conv1x1(in_planes, self.expansion*planes, stride=stride, input_signed=False, predictive_forward=False, writer_prefix=None),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_planes=64, num_classes=10, init_method='standard'):
super(ResNet, self).__init__()
self.in_planes = in_planes
self.conv1 = conv3x3(3, self.in_planes, stride=1, input_signed=True, predictive_forward=False, writer_prefix=None)
self.bn1 = nn.BatchNorm2d(self.in_planes)
if self.in_planes == 64:
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
#self.linear = layers.Linear(512*block.expansion, num_classes)
elif self.in_planes == 16:
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.layer4 = None
self.linear = nn.Linear(64, num_classes)
self.reset_conv_parameters(init_method)
print('conv weights reset to {}'.format(init_method))
def reset_parameters(self, module, init_method="kaiming_uniform") -> None:
if init_method == "kaiming_constant_signed":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "kaiming_constant_unsigned":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
std = gain / math.sqrt(fan)
with torch.no_grad():
module.weight.data = torch.ones_like(module.weight.data) * std
elif init_method == "kaiming_normal":
nn.init.kaiming_normal_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_uniform":
nn.init.kaiming_uniform_(module.weight, mode="fan_in", nonlinearity="relu")
elif init_method == "kaiming_laplace":
fan = nn.init._calculate_correct_fan(module.weight, "fan_in")
gain = nn.init.calculate_gain("relu")
scale = gain / math.sqrt(2.0 * fan)
with torch.no_grad():
new_weight = np.random.laplace(loc=0.0, scale=scale, size=module.weight.shape)
module.weight.data = module.weight.data.new_tensor(torch.from_numpy(new_weight).clone().detach())
elif init_method == "xavier_normal":
nn.init.xavier_normal_(module.weight)
elif init_method == "xavier_constant":
fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(module.weight)
std = math.sqrt(2.0 / float(fan_in + fan_out))
with torch.no_grad():
module.weight.data = module.weight.data.sign() * std
elif init_method == "standard":
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
else:
raise ValueError(f"{init_method} is not an initialization option!")
def reset_conv_parameters(self, init_method="standard") -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
self.reset_parameters(m, init_method)
def get_bop_params(self):
bop_params = []
for m in self.modules():
if isinstance(m, nn.Conv2d):
bop_params += list(m.parameters())
return bop_params
def get_non_bop_params(self):
non_bop_params = []
for m in self.modules():
if isinstance(m, (nn.Linear, nn.BatchNorm2d,)):
non_bop_params += list(m.parameters())
return non_bop_params
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
if self.layer4 is not None:
out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def PsgSeedResNet20(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,3,3], in_planes=16, num_classes=num_classes, init_method=init_method)
def PsgSeedResNet18(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet34(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet50(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,6,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet101(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,4,23,3], num_classes=num_classes, init_method=init_method)
def PsgSeedResNet152(
num_classes=10,
init_method='standard',
predictive_backward=True,
msb_bits=4,
msb_bits_weight=4,
msb_bits_grad=8,
threshold=0.0,
sparsify=False,
sign=True
):
global PREDICTIVE_BACKWARD, MSB_BITS, MSB_BITS_WEIGHT, MSB_BITS_GRAD, THRESHOLD, SPARSIFY, SIGN
PREDICTIVE_BACKWARD = predictive_backward
MSB_BITS = msb_bits
MSB_BITS_WEIGHT = msb_bits_weight
MSB_BITS_GRAD = msb_bits_grad
THRESHOLD = threshold
SPARSIFY = sparsify
SIGN = sign
return ResNet(Bottleneck, [3,8,36,3], num_classes=num_classes, init_method=init_method)
def test():
    net = PsgSeedResNet18()  # this file defines PsgSeed* variants rather than a bare ResNet18
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
|
[
"torch.nn.BatchNorm2d",
"torch.ones_like",
"torch.nn.Sequential",
"torch.nn.init._calculate_correct_fan",
"math.sqrt",
"models.masked_psg_seed_conv.PredictiveSeedConv2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.kaiming_uniform_",
"torch.nn.init.xavier_normal_",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.from_numpy",
"numpy.random.laplace",
"torch.nn.functional.relu",
"torch.nn.Linear",
"torch.nn.init.calculate_gain",
"torch.no_grad",
"torch.randn"
] |
[((952, 1468), 'models.masked_psg_seed_conv.PredictiveSeedConv2d', 'PredictiveSeedConv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)', 'num_bits': 'NUM_BITS', 'num_bits_weight': 'NUM_BITS_WEIGHT', 'num_bits_grad': 'NUM_BITS_GRAD', 'biprecision': 'BIPRECISION', 'input_signed': 'input_signed', 'predictive_forward': 'predictive_forward', 'predictive_backward': 'PREDICTIVE_BACKWARD', 'msb_bits': 'MSB_BITS', 'msb_bits_weight': 'MSB_BITS_WEIGHT', 'msb_bits_grad': 'MSB_BITS_GRAD', 'threshold': 'THRESHOLD', 'sparsify': 'SPARSIFY', 'sign': 'SIGN', 'writer': 'WRITER', 'writer_prefix': 'writer_prefix'}), '(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False, num_bits=NUM_BITS, num_bits_weight=\n NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION,\n input_signed=input_signed, predictive_forward=predictive_forward,\n predictive_backward=PREDICTIVE_BACKWARD, msb_bits=MSB_BITS,\n msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD, threshold\n =THRESHOLD, sparsify=SPARSIFY, sign=SIGN, writer=WRITER, writer_prefix=\n writer_prefix)\n', (972, 1468), False, 'from models.masked_psg_seed_conv import PredictiveSeedConv2d\n'), ((1719, 2235), 'models.masked_psg_seed_conv.PredictiveSeedConv2d', 'PredictiveSeedConv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)', 'num_bits': 'NUM_BITS', 'num_bits_weight': 'NUM_BITS_WEIGHT', 'num_bits_grad': 'NUM_BITS_GRAD', 'biprecision': 'BIPRECISION', 'input_signed': 'input_signed', 'predictive_forward': 'predictive_forward', 'predictive_backward': 'PREDICTIVE_BACKWARD', 'msb_bits': 'MSB_BITS', 'msb_bits_weight': 'MSB_BITS_WEIGHT', 'msb_bits_grad': 'MSB_BITS_GRAD', 'threshold': 'THRESHOLD', 'sparsify': 'SPARSIFY', 'sign': 'SIGN', 'writer': 'WRITER', 'writer_prefix': 'writer_prefix'}), '(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False, num_bits=NUM_BITS, num_bits_weight=\n NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD, biprecision=BIPRECISION,\n input_signed=input_signed, predictive_forward=predictive_forward,\n predictive_backward=PREDICTIVE_BACKWARD, msb_bits=MSB_BITS,\n msb_bits_weight=MSB_BITS_WEIGHT, msb_bits_grad=MSB_BITS_GRAD, threshold\n =THRESHOLD, sparsify=SPARSIFY, sign=SIGN, writer=WRITER, writer_prefix=\n writer_prefix)\n', (1739, 2235), False, 'from models.masked_psg_seed_conv import PredictiveSeedConv2d\n'), ((2556, 2578), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2570, 2578), True, 'import torch.nn as nn\n'), ((2719, 2741), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (2733, 2741), True, 'import torch.nn as nn\n'), ((2767, 2782), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2780, 2782), True, 'import torch.nn as nn\n'), ((3360, 3371), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (3366, 3371), True, 'import torch.nn.functional as F\n'), ((3759, 3781), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (3773, 3781), True, 'import torch.nn as nn\n'), ((4029, 4051), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['planes'], {}), '(planes)\n', (4043, 4051), True, 'import torch.nn as nn\n'), ((4298, 4337), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (4312, 4337), True, 'import torch.nn as nn\n'), ((4361, 4376), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (4374, 4376), 
True, 'import torch.nn as nn\n'), ((5002, 5013), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (5008, 5013), True, 'import torch.nn.functional as F\n'), ((5374, 5404), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['self.in_planes'], {}), '(self.in_planes)\n', (5388, 5404), True, 'import torch.nn as nn\n'), ((9456, 9478), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (9469, 9478), True, 'import torch.nn as nn\n'), ((13644, 13669), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(32)', '(32)'], {}), '(1, 3, 32, 32)\n', (13655, 13669), False, 'import torch\n'), ((5876, 5921), 'torch.nn.Linear', 'nn.Linear', (['(512 * block.expansion)', 'num_classes'], {}), '(512 * block.expansion, num_classes)\n', (5885, 5921), True, 'import torch.nn as nn\n'), ((6613, 6668), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (6643, 6668), True, 'import torch.nn as nn\n'), ((6688, 6718), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (6710, 6718), True, 'import torch.nn as nn\n'), ((3149, 3188), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (3163, 3188), True, 'import torch.nn as nn\n'), ((4743, 4782), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(self.expansion * planes)'], {}), '(self.expansion * planes)\n', (4757, 4782), True, 'import torch.nn as nn\n'), ((6324, 6350), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'num_classes'], {}), '(64, num_classes)\n', (6333, 6350), True, 'import torch.nn as nn\n'), ((6744, 6758), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (6753, 6758), False, 'import math\n'), ((6776, 6791), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6789, 6791), False, 'import torch\n'), ((6937, 6992), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (6967, 6992), True, 'import torch.nn as nn\n'), ((7012, 7042), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (7034, 7042), True, 'import torch.nn as nn\n'), ((7068, 7082), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (7077, 7082), False, 'import math\n'), ((7100, 7115), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7113, 7115), False, 'import torch\n'), ((7254, 7328), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['module.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(module.weight, mode='fan_in', nonlinearity='relu')\n", (7277, 7328), True, 'import torch.nn as nn\n'), ((7154, 7189), 'torch.ones_like', 'torch.ones_like', (['module.weight.data'], {}), '(module.weight.data)\n', (7169, 7189), False, 'import torch\n'), ((7388, 7463), 'torch.nn.init.kaiming_uniform_', 'nn.init.kaiming_uniform_', (['module.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(module.weight, mode='fan_in', nonlinearity='relu')\n", (7412, 7463), True, 'import torch.nn as nn\n'), ((7529, 7584), 'torch.nn.init._calculate_correct_fan', 'nn.init._calculate_correct_fan', (['module.weight', '"""fan_in"""'], {}), "(module.weight, 'fan_in')\n", (7559, 7584), True, 'import torch.nn as nn\n'), ((7604, 7634), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (7626, 7634), True, 'import torch.nn as nn\n'), ((7662, 7682), 'math.sqrt', 'math.sqrt', (['(2.0 * fan)'], {}), '(2.0 * 
fan)\n', (7671, 7682), False, 'import math\n'), ((7700, 7715), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7713, 7715), False, 'import torch\n'), ((7746, 7811), 'numpy.random.laplace', 'np.random.laplace', ([], {'loc': '(0.0)', 'scale': 'scale', 'size': 'module.weight.shape'}), '(loc=0.0, scale=scale, size=module.weight.shape)\n', (7763, 7811), True, 'import numpy as np\n'), ((7983, 8020), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['module.weight'], {}), '(module.weight)\n', (8005, 8020), True, 'import torch.nn as nn\n'), ((8098, 8150), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['module.weight'], {}), '(module.weight)\n', (8135, 8150), True, 'import torch.nn as nn\n'), ((8227, 8242), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8240, 8242), False, 'import torch\n'), ((8407, 8419), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (8416, 8419), False, 'import math\n'), ((7879, 7907), 'torch.from_numpy', 'torch.from_numpy', (['new_weight'], {}), '(new_weight)\n', (7895, 7907), False, 'import torch\n')]
|
#!/usr/bin/env python3
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
import numpy as np
import os, time, csv
import tqdm
import umap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
import signal
import net
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meiryo', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP']
import net
class SimpleEncodeDecoder:
def __init__(self):
self.save_dir = './result/step1/'
self.result_dir = './result/plot/'
os.makedirs(self.result_dir, exist_ok=True)
checkpoint_dir = self.save_dir
self.max_epoch = 300
self.steps_per_epoch = 1000
self.batch_size = 64
lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5)
self.optimizer = tf.keras.optimizers.Adam(lr)
self.encoder = net.FeatureBlock()
self.encoder.summary()
self.decoder = net.SimpleDecoderBlock()
self.decoder.summary()
inputs = {
'image': tf.keras.Input(shape=(128,128,3)),
}
feature_out = self.encoder(inputs)
outputs = self.decoder(feature_out)
self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder')
checkpoint = tf.train.Checkpoint(optimizer=self.optimizer,
model=self.model)
last = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(last)
self.manager = tf.train.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=2)
if not last is None:
self.init_epoch = int(os.path.basename(last).split('-')[1])
print('loaded %d epoch'%self.init_epoch)
else:
self.init_epoch = 0
self.model.summary()
def eval(self):
self.data = net.FontData()
print("Plot: ", self.init_epoch + 1)
acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1))
print('acc', acc)
@tf.function
def eval_substep(self, inputs):
input_data = {
'image': inputs['input'],
}
feature = self.encoder(input_data)
outputs = self.decoder(feature)
target_id = inputs['index']
target_id1 = inputs['idx1']
target_id2 = inputs['idx2']
pred_id1 = tf.nn.softmax(outputs['id1'], -1)
pred_id2 = tf.nn.softmax(outputs['id2'], -1)
return {
'feature': feature,
'pred_id1': pred_id1,
'pred_id2': pred_id2,
'target_id': target_id,
'target_id1': target_id1,
'target_id2': target_id2,
}
def make_plot(self, test_ds, epoch):
result = []
labels = []
with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt:
correct_count = 0
failed_count = 0
with tqdm.tqdm(total=len(self.data.test_keys)) as pbar:
for inputs in test_ds:
pred = self.eval_substep(inputs)
result += [pred['feature']]
labels += [pred['target_id']]
for i in range(pred['target_id1'].shape[0]):
txt.write('---\n')
target = pred['target_id'][i].numpy()
txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1]))
predid1 = np.argmax(pred['pred_id1'][i])
predid2 = np.argmax(pred['pred_id2'][i])
predid = predid1 * 100 + predid2
if predid == 0:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
elif predid > self.data.id_count + 1:
txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
else:
txt.write('predict: id %d = %s (p=%f)\n'%(predid, self.data.glyphs[predid-1], pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2]))
if target == predid:
txt.write('Correct!\n')
correct_count += 1
else:
txt.write('Failed!\n')
failed_count += 1
pbar.update(1)
acc = correct_count / (correct_count + failed_count)
txt.write('==============\n')
txt.write('Correct = %d\n'%correct_count)
txt.write('Failed = %d\n'%failed_count)
txt.write('accuracy = %f\n'%acc)
result = np.concatenate(result)
labels = np.concatenate(labels)
print('run UMAP')
X_reduced = umap.UMAP(metric='cosine').fit_transform(result)
fig, ax = plt.subplots(figsize=(50, 50))
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=labels, cmap=plt.get_cmap('hsv'))
print('plot UMAP')
for i, label in enumerate(labels):
ax.annotate(self.data.glyphs[label-1], (X_reduced[i,0], X_reduced[i,1]))
plt.savefig(os.path.join(self.result_dir,'test_result-%d.png'%epoch), dpi=300)
plt.close('all')
return acc
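    # Note (descriptive comment, inferred from the code above): make_plot treats the
    # glyph id as a two-part code, id = 100 * argmax(id1) + argmax(id2); an id of 0 or
    # an id beyond data.id_count + 1 is reported as "nothing".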
def eval():
encoder = SimpleEncodeDecoder()
encoder.eval()
if __name__ == '__main__':
eval()
|
[
"tensorflow.train.Checkpoint",
"tensorflow.config.list_physical_devices",
"tensorflow.nn.softmax",
"umap.UMAP",
"net.FeatureBlock",
"matplotlib.pyplot.close",
"net.FontData",
"net.SimpleDecoderBlock",
"numpy.concatenate",
"tensorflow.train.CheckpointManager",
"matplotlib.use",
"numpy.argmax",
"tensorflow.keras.Input",
"tensorflow.train.latest_checkpoint",
"matplotlib.pyplot.get_cmap",
"os.makedirs",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"os.path.join",
"tensorflow.keras.optimizers.Adam",
"os.path.basename",
"tensorflow.keras.Model",
"matplotlib.pyplot.subplots"
] |
[((67, 105), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (98, 105), True, 'import tensorflow as tf\n'), ((355, 376), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (369, 376), False, 'import matplotlib\n'), ((115, 182), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (155, 182), True, 'import tensorflow as tf\n'), ((820, 863), 'os.makedirs', 'os.makedirs', (['self.result_dir'], {'exist_ok': '(True)'}), '(self.result_dir, exist_ok=True)\n', (831, 863), False, 'import os, time, csv\n'), ((1012, 1080), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['(0.001)', '(100000.0)', '(0.5)'], {}), '(0.001, 100000.0, 0.5)\n', (1058, 1080), True, 'import tensorflow as tf\n'), ((1100, 1128), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr'], {}), '(lr)\n', (1124, 1128), True, 'import tensorflow as tf\n'), ((1153, 1171), 'net.FeatureBlock', 'net.FeatureBlock', ([], {}), '()\n', (1169, 1171), False, 'import net\n'), ((1226, 1250), 'net.SimpleDecoderBlock', 'net.SimpleDecoderBlock', ([], {}), '()\n', (1248, 1250), False, 'import net\n'), ((1475, 1534), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""SimpleEncodeDecoder"""'}), "(inputs, outputs, name='SimpleEncodeDecoder')\n", (1489, 1534), True, 'import tensorflow as tf\n'), ((1556, 1619), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'self.optimizer', 'model': 'self.model'}), '(optimizer=self.optimizer, model=self.model)\n', (1575, 1619), True, 'import tensorflow as tf\n'), ((1664, 1706), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1690, 1706), True, 'import tensorflow as tf\n'), ((1763, 1842), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'checkpoint_dir', 'max_to_keep': '(2)'}), '(checkpoint, directory=checkpoint_dir, max_to_keep=2)\n', (1789, 1842), True, 'import tensorflow as tf\n'), ((2131, 2145), 'net.FontData', 'net.FontData', ([], {}), '()\n', (2143, 2145), False, 'import net\n'), ((2642, 2675), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["outputs['id1']", '(-1)'], {}), "(outputs['id1'], -1)\n", (2655, 2675), True, 'import tensorflow as tf\n'), ((2695, 2728), 'tensorflow.nn.softmax', 'tf.nn.softmax', (["outputs['id2']", '(-1)'], {}), "(outputs['id2'], -1)\n", (2708, 2728), True, 'import tensorflow as tf\n'), ((5079, 5101), 'numpy.concatenate', 'np.concatenate', (['result'], {}), '(result)\n', (5093, 5101), True, 'import numpy as np\n'), ((5119, 5141), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5133, 5141), True, 'import numpy as np\n'), ((5256, 5286), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(50, 50)'}), '(figsize=(50, 50))\n', (5268, 5286), True, 'import matplotlib.pyplot as plt\n'), ((5636, 5652), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5645, 5652), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1357), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(128, 128, 3)'}), '(shape=(128, 128, 3))\n', (1336, 1357), True, 'import tensorflow as tf\n'), ((5561, 5620), 'os.path.join', 'os.path.join', (['self.result_dir', "('test_result-%d.png' % epoch)"], {}), "(self.result_dir, 
'test_result-%d.png' % epoch)\n", (5573, 5620), False, 'import os, time, csv\n'), ((3068, 3127), 'os.path.join', 'os.path.join', (['self.result_dir', "('test_result-%d.txt' % epoch)"], {}), "(self.result_dir, 'test_result-%d.txt' % epoch)\n", (3080, 3127), False, 'import os, time, csv\n'), ((5189, 5215), 'umap.UMAP', 'umap.UMAP', ([], {'metric': '"""cosine"""'}), "(metric='cosine')\n", (5198, 5215), False, 'import umap\n'), ((5355, 5374), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""hsv"""'], {}), "('hsv')\n", (5367, 5374), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3784), 'numpy.argmax', 'np.argmax', (["pred['pred_id1'][i]"], {}), "(pred['pred_id1'][i])\n", (3763, 3784), True, 'import numpy as np\n'), ((3819, 3849), 'numpy.argmax', 'np.argmax', (["pred['pred_id2'][i]"], {}), "(pred['pred_id2'][i])\n", (3828, 3849), True, 'import numpy as np\n'), ((1923, 1945), 'os.path.basename', 'os.path.basename', (['last'], {}), '(last)\n', (1939, 1945), False, 'import os, time, csv\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import tiledb
except ImportError: # pragma: no cover
tiledb = None
from ...lib.sparse import SparseNDArray
from ...lib.sparse.core import sps
from ..expressions import datastore
from .utils import get_tiledb_ctx
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = axis_offsets[axis]
axis_length = chunk.op.input.shape[axis]
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(tiledb_ctx, uri, mode='w',
key=key, timestamp=timestamp) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = to_store.row + axis_offsets[0], to_store.col + axis_offsets[1]
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(sps.csr_matrix((0, 0), dtype=chunk.dtype),
shape=chunk.shape)
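# Note on the offsets (descriptive summary, not part of the original module): the
# dense branch writes the chunk into the global region
# ``arr[offset_i : offset_i + chunk_len_i, ...]`` built from ``axis_offsets``, while
# the sparse branch shifts the COO row/col indices by the same offsets before
# assignment.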
def register_data_store_handler():
from ...executor import register
register(datastore.TensorTileDBDataStore, _store_tiledb)
|
[
"tiledb.SparseArray",
"tiledb.DenseArray",
"numpy.empty",
"numpy.ascontiguousarray"
] |
[((1186, 1231), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ctx[chunk.op.input.key]'], {}), '(ctx[chunk.op.input.key])\n', (1206, 1231), True, 'import numpy as np\n'), ((1650, 1696), 'numpy.empty', 'np.empty', (['((0,) * chunk.ndim)'], {'dtype': 'chunk.dtype'}), '((0,) * chunk.ndim, dtype=chunk.dtype)\n', (1658, 1696), True, 'import numpy as np\n'), ((1471, 1545), 'tiledb.DenseArray', 'tiledb.DenseArray', (['tiledb_ctx', 'uri'], {'mode': '"""w"""', 'key': 'key', 'timestamp': 'timestamp'}), "(tiledb_ctx, uri, mode='w', key=key, timestamp=timestamp)\n", (1488, 1545), False, 'import tiledb\n'), ((1830, 1905), 'tiledb.SparseArray', 'tiledb.SparseArray', (['tiledb_ctx', 'uri'], {'mode': '"""w"""', 'key': 'key', 'timestamp': 'timestamp'}), "(tiledb_ctx, uri, mode='w', key=key, timestamp=timestamp)\n", (1848, 1905), False, 'import tiledb\n')]
|
"""Routines for numerical differentiation."""
from __future__ import division
import numpy as np
from numpy.linalg import norm
from scipy.sparse.linalg import LinearOperator
from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse
EPS = np.finfo(np.float64).eps
def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
"""Adjust final difference scheme to the presence of bounds.
Parameters
----------
x0 : ndarray, shape (n,)
Point at which we wish to estimate derivative.
h : ndarray, shape (n,)
Desired finite difference steps.
num_steps : int
Number of `h` steps in one direction required to implement finite
difference scheme. For example, 2 means that we need to evaluate
f(x0 + 2 * h) or f(x0 - 2 * h)
scheme : {'1-sided', '2-sided'}
Whether steps in one or both directions are required. In other
words '1-sided' applies to forward and backward schemes, '2-sided'
applies to center schemes.
lb : ndarray, shape (n,)
Lower bounds on independent variables.
ub : ndarray, shape (n,)
Upper bounds on independent variables.
Returns
-------
h_adjusted : ndarray, shape (n,)
Adjusted step sizes. Step size decreases only if a sign flip or
        switching to a one-sided scheme doesn't allow taking a full step.
use_one_sided : ndarray of bool, shape (n,)
Whether to switch to one-sided scheme. Informative only for
``scheme='2-sided'``.
"""
if scheme == '1-sided':
use_one_sided = np.ones_like(h, dtype=bool)
elif scheme == '2-sided':
h = np.abs(h)
use_one_sided = np.zeros_like(h, dtype=bool)
else:
raise ValueError("`scheme` must be '1-sided' or '2-sided'.")
if np.all((lb == -np.inf) & (ub == np.inf)):
return h, use_one_sided
h_total = h * num_steps
h_adjusted = h.copy()
lower_dist = x0 - lb
upper_dist = ub - x0
if scheme == '1-sided':
x = x0 + h_total
violated = (x < lb) | (x > ub)
fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
h_adjusted[violated & fitting] *= -1
forward = (upper_dist >= lower_dist) & ~fitting
h_adjusted[forward] = upper_dist[forward] / num_steps
backward = (upper_dist < lower_dist) & ~fitting
h_adjusted[backward] = -lower_dist[backward] / num_steps
elif scheme == '2-sided':
central = (lower_dist >= h_total) & (upper_dist >= h_total)
forward = (upper_dist >= lower_dist) & ~central
h_adjusted[forward] = np.minimum(
h[forward], 0.5 * upper_dist[forward] / num_steps)
use_one_sided[forward] = True
backward = (upper_dist < lower_dist) & ~central
h_adjusted[backward] = -np.minimum(
h[backward], 0.5 * lower_dist[backward] / num_steps)
use_one_sided[backward] = True
min_dist = np.minimum(upper_dist, lower_dist) / num_steps
adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
h_adjusted[adjusted_central] = min_dist[adjusted_central]
use_one_sided[adjusted_central] = False
return h_adjusted, use_one_sided
relative_step = {"2-point": EPS**0.5,
"3-point": EPS**(1/3),
"cs": EPS**0.5}
def _compute_absolute_step(rel_step, x0, method):
if rel_step is None:
rel_step = relative_step[method]
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
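# Worked example of the step-size rule above (illustrative only): for the
# '2-point' method and x0 = np.array([-2.0, 0.5]), the returned step is
# EPS**0.5 * [-1, 1] * [2, 1], i.e. roughly -3.0e-8 for the first variable and
# +1.5e-8 for the second; the step keeps the sign of x0 and scales with
# max(1, |x0|).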
def _prepare_bounds(bounds, x0):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, x0.shape)
if ub.ndim == 0:
ub = np.resize(ub, x0.shape)
return lb, ub
def group_columns(A, order=0):
"""Group columns of a 2-D matrix for sparse finite differencing [1]_.
Two columns are in the same group if in each row at least one of them
has zero. A greedy sequential algorithm is used to construct groups.
Parameters
----------
A : array_like or sparse matrix, shape (m, n)
Matrix of which to group columns.
order : int, iterable of int with shape (n,) or None
Permutation array which defines the order of columns enumeration.
If int or None, a random permutation is used with `order` used as
        a random seed. Default is 0, that is, use a random permutation but
guarantee repeatability.
Returns
-------
groups : ndarray of int, shape (n,)
Contains values from 0 to n_groups-1, where n_groups is the number
of found groups. Each value ``groups[i]`` is an index of a group to
which ith column assigned. The procedure was helpful only if
n_groups is significantly less than n.
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
"""
if issparse(A):
A = csc_matrix(A)
else:
A = np.atleast_2d(A)
A = (A != 0).astype(np.int32)
if A.ndim != 2:
raise ValueError("`A` must be 2-dimensional.")
m, n = A.shape
if order is None or np.isscalar(order):
rng = np.random.RandomState(order)
order = rng.permutation(n)
else:
order = np.asarray(order)
if order.shape != (n,):
raise ValueError("`order` has incorrect shape.")
A = A[:, order]
if issparse(A):
groups = group_sparse(m, n, A.indices, A.indptr)
else:
groups = group_dense(m, n, A)
groups[order] = groups.copy()
return groups
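# Illustrative sketch (not part of the original module): how `group_columns`
# behaves on a tiny sparsity pattern, kept as comments so importing the module
# stays side-effect free.
#
#     >>> A = np.array([[1, 0, 0],
#     ...               [1, 1, 0],
#     ...               [0, 1, 1]])
#     >>> group_columns(A)        # e.g. array([0, 1, 0])
#
# Columns 0 and 2 never share a non-zero row, so both can be recovered from a
# single extra function evaluation; column 1 overlaps both and gets its own
# group. The exact labels may differ because the column order is randomly
# permuted (with a fixed seed by default).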
def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
bounds=(-np.inf, np.inf), sparsity=None,
as_linear_operator=False, args=(), kwargs={}):
"""Compute finite difference approximation of the derivatives of a
vector-valued function.
If a function maps from R^n to R^m, its derivatives form m-by-n matrix
called the Jacobian, where an element (i, j) is a partial derivative of
f[i] with respect to x[j].
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to a 1-D array.
method : {'3-point', '2-point', 'cs'}, optional
Finite difference method to use:
- '2-point' - use the first order accuracy forward or backward
difference.
- '3-point' - use central difference in interior points and the
second order accuracy forward or backward difference
near the boundary.
- 'cs' - use a complex-step finite difference scheme. This assumes
that the user function is real-valued and can be
analytically continued to the complex plane. Otherwise,
produces bogus results.
rel_step : None or array_like, optional
Relative step size to use. The absolute step size is computed as
``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
fit into the bounds. For ``method='3-point'`` the sign of `h` is
ignored. If None (default) then step is selected automatically,
see Notes.
f0 : None or array_like, optional
If not None it is assumed to be equal to ``fun(x0)``, in this case
the ``fun(x0)`` is not called. Default is None.
bounds : tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation. Bounds checking is not implemented
when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
Defines a sparsity structure of the Jacobian matrix. If the Jacobian
matrix is known to have only few non-zero elements in each row, then
it's possible to estimate its several columns by a single function
evaluation [3]_. To perform such economic computations two ingredients
are required:
* structure : array_like or sparse matrix of shape (m, n). A zero
          element means that the corresponding element of the Jacobian
          is identically zero.
* groups : array_like of shape (n,). A column grouping for a given
sparsity structure, use `group_columns` to obtain it.
A single array or a sparse matrix is interpreted as a sparsity
structure, and groups are computed inside the function. A tuple is
interpreted as (structure, groups). If None (default), a standard
dense differencing will be used.
Note, that sparse differencing makes sense only for large Jacobian
matrices where each row contains few non-zero elements.
as_linear_operator : bool, optional
When True the function returns an `scipy.sparse.linalg.LinearOperator`.
Otherwise it returns a dense array or a sparse matrix depending on
`sparsity`. The linear operator provides an efficient way of computing
``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow
direct access to individual elements of the matrix. By default
`as_linear_operator` is False.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)``.
Returns
-------
J : {ndarray, sparse matrix, LinearOperator}
Finite difference approximation of the Jacobian matrix.
If `as_linear_operator` is True returns a LinearOperator
with shape (m, n). Otherwise it returns a dense array or sparse
matrix depending on how `sparsity` is defined. If `sparsity`
is None then a ndarray with shape (m, n) is returned. If
`sparsity` is not None returns a csr_matrix with shape (m, n).
For sparse matrices and linear operators it is always returned as
a 2-D structure, for ndarrays, if m=1 it is returned
as a 1-D gradient array with shape (n,).
See Also
--------
check_derivative : Check correctness of a function computing derivatives.
Notes
-----
    If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is
machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
'3-point' method. Such relative step approximately minimizes a sum of
truncation and round-off errors, see [1]_.
A finite difference scheme for '3-point' method is selected automatically.
The well-known central difference scheme is used for points sufficiently
far from the boundary, and 3-point forward or backward scheme is used for
points near the boundary. Both schemes have the second-order accuracy in
terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point
forward and backward difference schemes.
For dense differencing when m=1 Jacobian is returned with a shape (n,),
on the other hand when n=1 Jacobian is returned with a shape (m, 1).
Our motivation is the following: a) It handles a case of gradient
computation (m=1) in a conventional way. b) It clearly separates these two
    different cases. c) In all cases np.atleast_2d can be called to get a 2-D
Jacobian with correct dimensions.
References
----------
    .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", sec. 5.7.
.. [2] <NAME>, <NAME>, and <NAME>, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13 (1974), pp. 117-120.
.. [3] <NAME>, "Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import approx_derivative
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> approx_derivative(f, x0, args=(1, 2))
array([[ 1., 0.],
[-1., 0.]])
Bounds can be used to limit the region of function evaluation.
In the example below we compute left and right derivative at point 1.0.
>>> def g(x):
... return x**2 if x >= 1 else x
...
>>> x0 = 1.0
>>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
array([ 1.])
>>> approx_derivative(g, x0, bounds=(1.0, np.inf))
array([ 2.])
"""
if method not in ['2-point', '3-point', 'cs']:
raise ValueError("Unknown method '%s'. " % method)
x0 = np.atleast_1d(x0)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = _prepare_bounds(bounds, x0)
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if as_linear_operator and not (np.all(np.isinf(lb))
and np.all(np.isinf(ub))):
raise ValueError("Bounds not supported when "
"`as_linear_operator` is True.")
def fun_wrapped(x):
f = np.atleast_1d(fun(x, *args, **kwargs))
if f.ndim > 1:
raise RuntimeError("`fun` return value has "
"more than 1 dimension.")
return f
if f0 is None:
f0 = fun_wrapped(x0)
else:
f0 = np.atleast_1d(f0)
if f0.ndim > 1:
raise ValueError("`f0` passed has more than 1 dimension.")
if np.any((x0 < lb) | (x0 > ub)):
raise ValueError("`x0` violates bound constraints.")
if as_linear_operator:
if rel_step is None:
rel_step = relative_step[method]
return _linear_operator_difference(fun_wrapped, x0,
f0, rel_step, method)
else:
h = _compute_absolute_step(rel_step, x0, method)
if method == '2-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '1-sided', lb, ub)
elif method == '3-point':
h, use_one_sided = _adjust_scheme_to_bounds(
x0, h, 1, '2-sided', lb, ub)
elif method == 'cs':
use_one_sided = False
if sparsity is None:
return _dense_difference(fun_wrapped, x0, f0, h,
use_one_sided, method)
else:
if not issparse(sparsity) and len(sparsity) == 2:
structure, groups = sparsity
else:
structure = sparsity
groups = group_columns(sparsity)
if issparse(structure):
structure = csc_matrix(structure)
else:
structure = np.atleast_2d(structure)
groups = np.atleast_1d(groups)
return _sparse_difference(fun_wrapped, x0, f0, h,
use_one_sided, structure,
groups, method)
def _linear_operator_difference(fun, x0, f0, h, method):
m = f0.size
n = x0.size
if method == '2-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p
df = fun(x) - f0
return df / dx
elif method == '3-point':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = 2*h / norm(p)
x1 = x0 - (dx/2)*p
x2 = x0 + (dx/2)*p
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
return df / dx
elif method == 'cs':
def matvec(p):
if np.array_equal(p, np.zeros_like(p)):
return np.zeros(m)
dx = h / norm(p)
x = x0 + dx*p*1.j
f1 = fun(x)
df = f1.imag
return df / dx
else:
raise RuntimeError("Never be here.")
return LinearOperator((m, n), matvec)
def _dense_difference(fun, x0, f0, h, use_one_sided, method):
m = f0.size
n = x0.size
J_transposed = np.empty((n, m))
h_vecs = np.diag(h)
for i in range(h.size):
if method == '2-point':
x = x0 + h_vecs[i]
dx = x[i] - x0[i] # Recompute dx as exactly representable number.
df = fun(x) - f0
elif method == '3-point' and use_one_sided[i]:
x1 = x0 + h_vecs[i]
x2 = x0 + 2 * h_vecs[i]
dx = x2[i] - x0[i]
f1 = fun(x1)
f2 = fun(x2)
df = -3.0 * f0 + 4 * f1 - f2
elif method == '3-point' and not use_one_sided[i]:
x1 = x0 - h_vecs[i]
x2 = x0 + h_vecs[i]
dx = x2[i] - x1[i]
f1 = fun(x1)
f2 = fun(x2)
df = f2 - f1
elif method == 'cs':
f1 = fun(x0 + h_vecs[i]*1.j)
df = f1.imag
dx = h_vecs[i, i]
else:
raise RuntimeError("Never be here.")
J_transposed[i] = df / dx
if m == 1:
J_transposed = np.ravel(J_transposed)
return J_transposed.T
def _sparse_difference(fun, x0, f0, h, use_one_sided,
structure, groups, method):
m = f0.size
n = x0.size
row_indices = []
col_indices = []
fractions = []
n_groups = np.max(groups) + 1
for group in range(n_groups):
# Perturb variables which are in the same group simultaneously.
e = np.equal(group, groups)
h_vec = h * e
if method == '2-point':
x = x0 + h_vec
dx = x - x0
df = fun(x) - f0
# The result is written to columns which correspond to perturbed
# variables.
cols, = np.nonzero(e)
# Find all non-zero elements in selected columns of Jacobian.
i, j, _ = find(structure[:, cols])
# Restore column indices in the full array.
j = cols[j]
elif method == '3-point':
# Here we do conceptually the same but separate one-sided
# and two-sided schemes.
x1 = x0.copy()
x2 = x0.copy()
mask_1 = use_one_sided & e
x1[mask_1] += h_vec[mask_1]
x2[mask_1] += 2 * h_vec[mask_1]
mask_2 = ~use_one_sided & e
x1[mask_2] -= h_vec[mask_2]
x2[mask_2] += h_vec[mask_2]
dx = np.zeros(n)
dx[mask_1] = x2[mask_1] - x0[mask_1]
dx[mask_2] = x2[mask_2] - x1[mask_2]
f1 = fun(x1)
f2 = fun(x2)
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
mask = use_one_sided[j]
df = np.empty(m)
rows = i[mask]
df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]
rows = i[~mask]
df[rows] = f2[rows] - f1[rows]
elif method == 'cs':
f1 = fun(x0 + h_vec*1.j)
df = f1.imag
dx = h_vec
cols, = np.nonzero(e)
i, j, _ = find(structure[:, cols])
j = cols[j]
else:
raise ValueError("Never be here.")
# All that's left is to compute the fraction. We store i, j and
# fractions as separate arrays and later construct coo_matrix.
row_indices.append(i)
col_indices.append(j)
fractions.append(df[i] / dx[j])
row_indices = np.hstack(row_indices)
col_indices = np.hstack(col_indices)
fractions = np.hstack(fractions)
J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
return csr_matrix(J)
def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
kwargs={}):
"""Check correctness of a function computing derivatives (Jacobian or
gradient) by comparison with a finite difference approximation.
Parameters
----------
fun : callable
Function of which to estimate the derivatives. The argument x
passed to this function is ndarray of shape (n,) (never a scalar
even if n=1). It must return 1-D array_like of shape (m,) or a scalar.
jac : callable
Function which computes Jacobian matrix of `fun`. It must work with
argument x the same way as `fun`. The return value must be array_like
or sparse matrix with an appropriate shape.
x0 : array_like of shape (n,) or float
Point at which to estimate the derivatives. Float will be converted
to 1-D array.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each bound must match the size of `x0` or be a scalar, in the latter
case the bound will be the same for all variables. Use it to limit the
range of function evaluation.
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same
for `jac`.
Returns
-------
accuracy : float
The maximum among all relative errors for elements with absolute values
higher than 1 and absolute errors for elements with absolute values
        less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
then it is likely that your `jac` implementation is correct.
See Also
--------
approx_derivative : Compute finite difference approximation of derivative.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import check_derivative
>>>
>>>
>>> def f(x, c1, c2):
... return np.array([x[0] * np.sin(c1 * x[1]),
... x[0] * np.cos(c2 * x[1])])
...
>>> def jac(x, c1, c2):
... return np.array([
... [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
... [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
... ])
...
>>>
>>> x0 = np.array([1.0, 0.5 * np.pi])
>>> check_derivative(f, jac, x0, args=(1, 2))
2.4492935982947064e-16
"""
J_to_test = jac(x0, *args, **kwargs)
if issparse(J_to_test):
J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
args=args, kwargs=kwargs)
J_to_test = csr_matrix(J_to_test)
abs_err = J_to_test - J_diff
i, j, abs_err_data = find(abs_err)
J_diff_data = np.asarray(J_diff[i, j]).ravel()
return np.max(np.abs(abs_err_data) /
np.maximum(1, np.abs(J_diff_data)))
else:
J_diff = approx_derivative(fun, x0, bounds=bounds,
args=args, kwargs=kwargs)
abs_err = np.abs(J_to_test - J_diff)
return np.max(abs_err / np.maximum(1, np.abs(J_diff)))
|
[
"scipy.sparse.linalg.LinearOperator",
"numpy.hstack",
"numpy.equal",
"numpy.linalg.norm",
"numpy.random.RandomState",
"numpy.atleast_2d",
"numpy.isscalar",
"numpy.asarray",
"numpy.max",
"numpy.resize",
"numpy.empty",
"numpy.maximum",
"numpy.isinf",
"numpy.abs",
"numpy.any",
"numpy.nonzero",
"numpy.finfo",
"numpy.atleast_1d",
"numpy.ones_like",
"numpy.minimum",
"numpy.diag",
"numpy.zeros",
"numpy.ravel",
"numpy.all",
"numpy.zeros_like"
] |
[((310, 330), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (318, 330), True, 'import numpy as np\n'), ((1858, 1898), 'numpy.all', 'np.all', (['((lb == -np.inf) & (ub == np.inf))'], {}), '((lb == -np.inf) & (ub == np.inf))\n', (1864, 1898), True, 'import numpy as np\n'), ((13340, 13357), 'numpy.atleast_1d', 'np.atleast_1d', (['x0'], {}), '(x0)\n', (13353, 13357), True, 'import numpy as np\n'), ((14265, 14294), 'numpy.any', 'np.any', (['((x0 < lb) | (x0 > ub))'], {}), '((x0 < lb) | (x0 > ub))\n', (14271, 14294), True, 'import numpy as np\n'), ((16760, 16790), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(m, n)', 'matvec'], {}), '((m, n), matvec)\n', (16774, 16790), False, 'from scipy.sparse.linalg import LinearOperator\n'), ((16906, 16922), 'numpy.empty', 'np.empty', (['(n, m)'], {}), '((n, m))\n', (16914, 16922), True, 'import numpy as np\n'), ((16936, 16946), 'numpy.diag', 'np.diag', (['h'], {}), '(h)\n', (16943, 16946), True, 'import numpy as np\n'), ((20279, 20301), 'numpy.hstack', 'np.hstack', (['row_indices'], {}), '(row_indices)\n', (20288, 20301), True, 'import numpy as np\n'), ((20320, 20342), 'numpy.hstack', 'np.hstack', (['col_indices'], {}), '(col_indices)\n', (20329, 20342), True, 'import numpy as np\n'), ((20359, 20379), 'numpy.hstack', 'np.hstack', (['fractions'], {}), '(fractions)\n', (20368, 20379), True, 'import numpy as np\n'), ((1638, 1665), 'numpy.ones_like', 'np.ones_like', (['h'], {'dtype': 'bool'}), '(h, dtype=bool)\n', (1650, 1665), True, 'import numpy as np\n'), ((3669, 3695), 'numpy.asarray', 'np.asarray', (['b'], {'dtype': 'float'}), '(b, dtype=float)\n', (3679, 3695), True, 'import numpy as np\n'), ((3747, 3770), 'numpy.resize', 'np.resize', (['lb', 'x0.shape'], {}), '(lb, x0.shape)\n', (3756, 3770), True, 'import numpy as np\n'), ((3806, 3829), 'numpy.resize', 'np.resize', (['ub', 'x0.shape'], {}), '(ub, x0.shape)\n', (3815, 3829), True, 'import numpy as np\n'), ((5174, 5190), 'numpy.atleast_2d', 'np.atleast_2d', (['A'], {}), '(A)\n', (5187, 5190), True, 'import numpy as np\n'), ((5350, 5368), 'numpy.isscalar', 'np.isscalar', (['order'], {}), '(order)\n', (5361, 5368), True, 'import numpy as np\n'), ((5384, 5412), 'numpy.random.RandomState', 'np.random.RandomState', (['order'], {}), '(order)\n', (5405, 5412), True, 'import numpy as np\n'), ((5474, 5491), 'numpy.asarray', 'np.asarray', (['order'], {}), '(order)\n', (5484, 5491), True, 'import numpy as np\n'), ((14144, 14161), 'numpy.atleast_1d', 'np.atleast_1d', (['f0'], {}), '(f0)\n', (14157, 14161), True, 'import numpy as np\n'), ((17883, 17905), 'numpy.ravel', 'np.ravel', (['J_transposed'], {}), '(J_transposed)\n', (17891, 17905), True, 'import numpy as np\n'), ((18149, 18163), 'numpy.max', 'np.max', (['groups'], {}), '(groups)\n', (18155, 18163), True, 'import numpy as np\n'), ((18286, 18309), 'numpy.equal', 'np.equal', (['group', 'groups'], {}), '(group, groups)\n', (18294, 18309), True, 'import numpy as np\n'), ((23611, 23637), 'numpy.abs', 'np.abs', (['(J_to_test - J_diff)'], {}), '(J_to_test - J_diff)\n', (23617, 23637), True, 'import numpy as np\n'), ((1708, 1717), 'numpy.abs', 'np.abs', (['h'], {}), '(h)\n', (1714, 1717), True, 'import numpy as np\n'), ((1742, 1770), 'numpy.zeros_like', 'np.zeros_like', (['h'], {'dtype': 'bool'}), '(h, dtype=bool)\n', (1755, 1770), True, 'import numpy as np\n'), ((2149, 2164), 'numpy.abs', 'np.abs', (['h_total'], {}), '(h_total)\n', (2155, 2164), True, 'import numpy as np\n'), ((2168, 2202), 'numpy.maximum', 'np.maximum', 
(['lower_dist', 'upper_dist'], {}), '(lower_dist, upper_dist)\n', (2178, 2202), True, 'import numpy as np\n'), ((2673, 2734), 'numpy.minimum', 'np.minimum', (['h[forward]', '(0.5 * upper_dist[forward] / num_steps)'], {}), '(h[forward], 0.5 * upper_dist[forward] / num_steps)\n', (2683, 2734), True, 'import numpy as np\n'), ((3608, 3618), 'numpy.abs', 'np.abs', (['x0'], {}), '(x0)\n', (3614, 3618), True, 'import numpy as np\n'), ((15542, 15563), 'numpy.atleast_1d', 'np.atleast_1d', (['groups'], {}), '(groups)\n', (15555, 15563), True, 'import numpy as np\n'), ((18567, 18580), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (18577, 18580), True, 'import numpy as np\n'), ((2875, 2938), 'numpy.minimum', 'np.minimum', (['h[backward]', '(0.5 * lower_dist[backward] / num_steps)'], {}), '(h[backward], 0.5 * lower_dist[backward] / num_steps)\n', (2885, 2938), True, 'import numpy as np\n'), ((3011, 3045), 'numpy.minimum', 'np.minimum', (['upper_dist', 'lower_dist'], {}), '(upper_dist, lower_dist)\n', (3021, 3045), True, 'import numpy as np\n'), ((15495, 15519), 'numpy.atleast_2d', 'np.atleast_2d', (['structure'], {}), '(structure)\n', (15508, 15519), True, 'import numpy as np\n'), ((15920, 15936), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (15933, 15936), True, 'import numpy as np\n'), ((15962, 15973), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (15970, 15973), True, 'import numpy as np\n'), ((15995, 16002), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', (15999, 16002), False, 'from numpy.linalg import norm\n'), ((19240, 19251), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (19248, 19251), True, 'import numpy as np\n'), ((19422, 19435), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (19432, 19435), True, 'import numpy as np\n'), ((19561, 19572), 'numpy.empty', 'np.empty', (['m'], {}), '(m)\n', (19569, 19572), True, 'import numpy as np\n'), ((23327, 23351), 'numpy.asarray', 'np.asarray', (['J_diff[i, j]'], {}), '(J_diff[i, j])\n', (23337, 23351), True, 'import numpy as np\n'), ((23382, 23402), 'numpy.abs', 'np.abs', (['abs_err_data'], {}), '(abs_err_data)\n', (23388, 23402), True, 'import numpy as np\n'), ((3098, 3116), 'numpy.abs', 'np.abs', (['h_adjusted'], {}), '(h_adjusted)\n', (3104, 3116), True, 'import numpy as np\n'), ((13654, 13666), 'numpy.isinf', 'np.isinf', (['lb'], {}), '(lb)\n', (13662, 13666), True, 'import numpy as np\n'), ((13714, 13726), 'numpy.isinf', 'np.isinf', (['ub'], {}), '(ub)\n', (13722, 13726), True, 'import numpy as np\n'), ((16172, 16188), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (16185, 16188), True, 'import numpy as np\n'), ((16214, 16225), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16222, 16225), True, 'import numpy as np\n'), ((16249, 16256), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', (16253, 16256), False, 'from numpy.linalg import norm\n'), ((19870, 19883), 'numpy.nonzero', 'np.nonzero', (['e'], {}), '(e)\n', (19880, 19883), True, 'import numpy as np\n'), ((23441, 23460), 'numpy.abs', 'np.abs', (['J_diff_data'], {}), '(J_diff_data)\n', (23447, 23460), True, 'import numpy as np\n'), ((23684, 23698), 'numpy.abs', 'np.abs', (['J_diff'], {}), '(J_diff)\n', (23690, 23698), True, 'import numpy as np\n'), ((16503, 16519), 'numpy.zeros_like', 'np.zeros_like', (['p'], {}), '(p)\n', (16516, 16519), True, 'import numpy as np\n'), ((16545, 16556), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (16553, 16556), True, 'import numpy as np\n'), ((16578, 16585), 'numpy.linalg.norm', 'norm', (['p'], {}), '(p)\n', 
(16582, 16585), False, 'from numpy.linalg import norm\n')]
|
"""The present code is the Version 1.0 of the RCNN approach to perform MPS
in 3D for categorical variables. It has been developed by <NAME> and <NAME> in the
Geometallurygical Group at Queen's University as part of a PhD program.
The code is not free of bugs but running end-to-end.
Any comments and further improvements are well recevied to: <EMAIL>
April 16, 2019.
Geomet Group - Queen's University - Canada"""
# Do not display the AVX message about using GPU
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
## #########################
import numpy as np
import tensorflow as tf
import time
import External_Functions_3D as fns_nested
import gc
for ind0 in range(1):
start_time_AllTrain = time.time()
HyperPar = []
HyperPar.append(50) # SGsizex - Num 0
HyperPar.append(50) # SGsizey - Num 1
HyperPar.append(50) # SGsizez - Num 2
HyperPar.append(int(7)) # Search_x - Num 3
HyperPar.append(int(7)) # Search_y - Num 4
HyperPar.append(int(7)) # Search_z - Num 5
HyperPar.append(int(7)) # IPsizex - Num 6
HyperPar.append(int(7)) # IPsizey - Num 7
HyperPar.append(int(7)) # IPsizez - Num 8
HyperPar.append(50) # Percentage of Data Conditioning - Num 9 .. divided by 3 so 1% is 10 represents 1%
HyperPar.append(1) # MinDC - Num 10
HyperPar.append(1500) # Num Fully Connected - Num 11
HyperPar.append(3) # wdnh - Num 12
HyperPar.append(16) # convdepth - Num 13
HyperPar.append(2) # num of categories - Num 14
print("SG: ", int(HyperPar[3]),"x",int(HyperPar[4]),"x",int(HyperPar[5]), "IP: ", int(HyperPar[6]),"x",int(HyperPar[7]),"x",int(HyperPar[8]))
Ncicles = 500
Nepoch = 1
#Nbatch = 250
Nsamples = 512
TrainingImage = "TI_Collaboration_1of4_50x50x50_newRepresentation.dat"
LocModel = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocModel = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth/FeatMaps'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
LocFile = 'Models/3D_NewRepresentation/Allperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
#LocFile = 'Models/3D_NewRepresentation/New%sperc/%sx%sx%s_%sx%sx%s_4ConvNets_4HL_BN_3FC%s_ws%sx%sx%s_%sconvdepth'%(int(HyperPar[9]), int(HyperPar[3]),int(HyperPar[4]),int(HyperPar[5]), int(HyperPar[6]),int(HyperPar[7]),int(HyperPar[8]), int(HyperPar[11]), int(HyperPar[12]),int(HyperPar[12]),int(HyperPar[12]), int(HyperPar[13]))
print("[Graph]")
#fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D_NoBN(HyperPar=HyperPar, LocModel=LocModel)
fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D(HyperPar=HyperPar, LocModel=LocModel)
# To save the TI
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3,Training=False, Padding=True)
TempSimGrid.SavePlot(name=LocModel+'_TI.png', Level=1)
MaxLR, MinLR = 0.01, 0.001
StepLR = 10
PointStart = 1
for indTrain in range(Ncicles):
#HyperPar[9] = np.random.randint(41)+10
cuos = indTrain%(2*StepLR)
if cuos < StepLR:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*cuos + MinLR, decimals=7)
else:
LearningRate = np.around(((MaxLR - MinLR)/StepLR)*(StepLR - cuos) + MaxLR, decimals=7)
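        # The two branches above form a triangular cyclic learning-rate schedule:
        # with MaxLR=0.01, MinLR=0.001, StepLR=10 the rate climbs from 0.001 to 0.01
        # over ten cycles, then falls back towards 0.001 over the next ten.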
start_time_1 = time.time()
print ("Cicle: {}".format(indTrain+PointStart), "Learning Rate: ", LearningRate)
TempSimGrid = fns_nested.Grid(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=True, Padding=True)
print("[Sim]")
TempSimGrid.Simulate_4ConvNets_BN_3D(LocModel=LocModel, Cicle=(indTrain+PointStart), Plot=True)
print("[Saving Grid]")
TempSimGrid.SaveGrid(file="{}/TrainReas_{}.txt".format(LocFile, indTrain+PointStart))
print("[Train]")
TempSimGrid.Train_4ConvNets_BN_3D(Epochs=Nepoch, Num_samples=Nsamples, LocModel=LocModel, LR=LearningRate)
print("--%s seconds of whole training process-" % (np.around((time.time() - start_time_1), decimals=2)))
gc.collect()
print(" ")
print("--%s minutes of ALL training-" % ((time.time() - start_time_AllTrain)/60))
|
[
"External_Functions_3D.Grid",
"numpy.around",
"gc.collect",
"External_Functions_3D.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D",
"time.time"
] |
[((926, 937), 'time.time', 'time.time', ([], {}), '()\n', (935, 937), False, 'import time\n'), ((3406, 3510), 'External_Functions_3D.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D', 'fns_nested.CreateGraph_4ConvNets_4HL_NFeaConv_wdnhxwdnh_BN_3D', ([], {'HyperPar': 'HyperPar', 'LocModel': 'LocModel'}), '(HyperPar=\n HyperPar, LocModel=LocModel)\n', (3467, 3510), True, 'import External_Functions_3D as fns_nested\n'), ((3547, 3645), 'External_Functions_3D.Grid', 'fns_nested.Grid', ([], {'HyperPar': 'HyperPar', 'DBname': 'TrainingImage', 'Lvl': '(3)', 'Training': '(False)', 'Padding': '(True)'}), '(HyperPar=HyperPar, DBname=TrainingImage, Lvl=3, Training=\n False, Padding=True)\n', (3562, 3645), True, 'import External_Functions_3D as fns_nested\n'), ((4086, 4097), 'time.time', 'time.time', ([], {}), '()\n', (4095, 4097), False, 'import time\n'), ((4199, 4296), 'External_Functions_3D.Grid', 'fns_nested.Grid', ([], {'HyperPar': 'HyperPar', 'DBname': 'TrainingImage', 'Lvl': '(5)', 'Training': '(True)', 'Padding': '(True)'}), '(HyperPar=HyperPar, DBname=TrainingImage, Lvl=5, Training=\n True, Padding=True)\n', (4214, 4296), True, 'import External_Functions_3D as fns_nested\n'), ((4771, 4783), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4781, 4783), False, 'import gc\n'), ((3905, 3967), 'numpy.around', 'np.around', (['((MaxLR - MinLR) / StepLR * cuos + MinLR)'], {'decimals': '(7)'}), '((MaxLR - MinLR) / StepLR * cuos + MinLR, decimals=7)\n', (3914, 3967), True, 'import numpy as np\n'), ((3995, 4068), 'numpy.around', 'np.around', (['((MaxLR - MinLR) / StepLR * (StepLR - cuos) + MaxLR)'], {'decimals': '(7)'}), '((MaxLR - MinLR) / StepLR * (StepLR - cuos) + MaxLR, decimals=7)\n', (4004, 4068), True, 'import numpy as np\n'), ((4848, 4859), 'time.time', 'time.time', ([], {}), '()\n', (4857, 4859), False, 'import time\n'), ((4722, 4733), 'time.time', 'time.time', ([], {}), '()\n', (4731, 4733), False, 'import time\n')]
|
"""This file contains functions for loading and preprocessing pianoroll data.
"""
import logging
import numpy as np
import tensorflow.compat.v1 as tf
from musegan.config import SHUFFLE_BUFFER_SIZE, PREFETCH_SIZE
LOGGER = logging.getLogger(__name__)
# --- Data loader --------------------------------------------------------------
def load_data_from_npy(filename):
"""Load and return the training data from a npy file."""
return np.load(filename)
def load_data_from_npz(filename):
"""Load and return the training data from a npz file (sparse format)."""
with np.load(filename) as f:
data = np.zeros(f['shape'], np.bool_)
data[[x for x in f['nonzero']]] = True
return data
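# Illustrative note (not part of the original module): a file in the sparse
# format expected above could be produced from a boolean array `data` with
#     np.savez('train.npz', shape=data.shape, nonzero=np.array(data.nonzero()))
# where `shape` holds the dense shape and `nonzero` stacks one row of indices
# per dimension. The filename is only a placeholder.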
def load_data(data_source, data_filename):
"""Load and return the training data."""
if data_source == 'sa':
import SharedArray as sa
return sa.attach(data_filename)
if data_source == 'npy':
return load_data_from_npy(data_filename)
if data_source == 'npz':
return load_data_from_npz(data_filename)
raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
"But get " + str(data_source))
# --- Dataset Utilities -------------------------------------------------------
def random_transpose(pianoroll):
"""Randomly transpose a pianoroll with [-5, 6] semitones."""
semitone = np.random.randint(-5, 6)
if semitone > 0:
pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]
pianoroll[:, :semitone, 1:] = 0
elif semitone < 0:
pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]
pianoroll[:, semitone:, 1:] = 0
return pianoroll
def set_pianoroll_shape(pianoroll, data_shape):
"""Set the pianoroll shape and return the pianoroll."""
pianoroll.set_shape(data_shape)
return pianoroll
def set_label_shape(label):
"""Set the label shape and return the label."""
label.set_shape([1])
return label
# --- Sampler ------------------------------------------------------------------
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
"""Return some random samples of the training data."""
indices = np.random.choice(len(data), n_samples, False)
if np.issubdtype(data.dtype, np.bool_):
sample_data = data[indices] * 2. - 1.
else:
sample_data = data[indices]
if use_random_transpose:
sample_data = np.array([random_transpose(x) for x in sample_data])
if labels is None:
return sample_data
return sample_data, labels[indices]
# --- Tensorflow Dataset -------------------------------------------------------
def _gen_data(data, labels=None):
"""Data Generator."""
if labels is None:
for item in data:
if np.issubdtype(data.dtype, np.bool_):
yield item * 2. - 1.
else:
yield item
else:
for i, item in enumerate(data):
if np.issubdtype(data.dtype, np.bool_):
yield (item * 2. - 1., labels[i])
else:
yield (item, labels[i])
def get_dataset(data, labels=None, batch_size=None, data_shape=None,
use_random_transpose=False, num_threads=1):
"""Create and return a tensorflow dataset from an array."""
if labels is None:
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data), tf.float32)
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll: tf.py_func(
random_transpose, [pianoroll], tf.float32),
num_parallel_calls=num_threads)
dataset = dataset.map(lambda pianoroll: set_pianoroll_shape(
pianoroll, data_shape), num_parallel_calls=num_threads)
else:
assert len(data) == len(labels), (
"Lengths of `data` and `lables` do not match.")
dataset = tf.data.Dataset.from_generator(
lambda: _gen_data(data, labels), [tf.float32, tf.int32])
if use_random_transpose:
dataset = dataset.map(
lambda pianoroll, label: (
tf.py_func(random_transpose, [pianoroll], tf.float32),
label),
num_parallel_calls=num_threads)
dataset = dataset.map(
lambda pianoroll, label: (set_pianoroll_shape(
pianoroll, data_shape), set_label_shape(label)),
num_parallel_calls=num_threads)
dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE).repeat().batch(batch_size)
return dataset.prefetch(PREFETCH_SIZE)
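# Illustrative usage sketch (hypothetical shapes, not part of the original
# module): boolean pianorolls are rescaled from {0, 1} to {-1, 1} by
# `get_samples`.
if __name__ == "__main__":
    fake_data = np.random.rand(16, 4, 48, 84) > 0.5  # demo shape only
    samples = get_samples(4, fake_data)
    print(samples.shape)                  # (4, 4, 48, 84)
    print(samples.min(), samples.max())   # values lie in {-1.0, 1.0}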
|
[
"logging.getLogger",
"numpy.issubdtype",
"numpy.random.randint",
"numpy.zeros",
"tensorflow.compat.v1.py_func",
"SharedArray.attach",
"numpy.load"
] |
[((221, 248), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (238, 248), False, 'import logging\n'), ((437, 454), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (444, 454), True, 'import numpy as np\n'), ((1379, 1403), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(6)'], {}), '(-5, 6)\n', (1396, 1403), True, 'import numpy as np\n'), ((2255, 2290), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.bool_'], {}), '(data.dtype, np.bool_)\n', (2268, 2290), True, 'import numpy as np\n'), ((576, 593), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (583, 593), True, 'import numpy as np\n'), ((615, 645), 'numpy.zeros', 'np.zeros', (["f['shape']", 'np.bool_'], {}), "(f['shape'], np.bool_)\n", (623, 645), True, 'import numpy as np\n'), ((874, 898), 'SharedArray.attach', 'sa.attach', (['data_filename'], {}), '(data_filename)\n', (883, 898), True, 'import SharedArray as sa\n'), ((2784, 2819), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.bool_'], {}), '(data.dtype, np.bool_)\n', (2797, 2819), True, 'import numpy as np\n'), ((2968, 3003), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.bool_'], {}), '(data.dtype, np.bool_)\n', (2981, 3003), True, 'import numpy as np\n'), ((3532, 3585), 'tensorflow.compat.v1.py_func', 'tf.py_func', (['random_transpose', '[pianoroll]', 'tf.float32'], {}), '(random_transpose, [pianoroll], tf.float32)\n', (3542, 3585), True, 'import tensorflow.compat.v1 as tf\n'), ((4156, 4209), 'tensorflow.compat.v1.py_func', 'tf.py_func', (['random_transpose', '[pianoroll]', 'tf.float32'], {}), '(random_transpose, [pianoroll], tf.float32)\n', (4166, 4209), True, 'import tensorflow.compat.v1 as tf\n')]
|
"""
CTC-like decoder utilities.
"""
from itertools import groupby
import numpy as np
def ctc_best_path_decode(probs_seq, vocabulary):
"""
Best path decoding, also called argmax decoding or greedy decoding.
    The path consisting of the most probable tokens is further post-processed
    to remove consecutive repetitions and all blanks.
:param probs_seq: 2-D list of probabilities over the vocabulary for each
character. Each element is a list of float probabilities
for one character.
:type probs_seq: list
:param vocabulary: Vocabulary list.
:type vocabulary: list
:return: Decoding result string.
    :rtype: basestring
"""
# dimension verification
for probs in probs_seq:
if not len(probs) == len(vocabulary) + 1:
raise ValueError("probs_seq dimension mismatchedd with vocabulary")
# argmax to get the best index for each time step
max_index_list = list(np.array(probs_seq).argmax(axis=1))
# remove consecutive duplicate indexes
index_list = [index_group[0] for index_group in groupby(max_index_list)]
# remove blank indexes
blank_index = len(vocabulary)
index_list = [index for index in index_list if index != blank_index]
# convert index list to string
return ''.join([vocabulary[index] for index in index_list])
def ctc_decode(probs_seq, vocabulary, method):
"""
    CTC-like sequence decoding from a sequence of likelihood probabilities.
:param probs_seq: 2-D list of probabilities over the vocabulary for each
character. Each element is a list of float probabilities
for one character.
:type probs_seq: list
:param vocabulary: Vocabulary list.
:type vocabulary: list
:param method: Decoding method name, with options: "best_path".
:type method: basestring
:return: Decoding result string.
    :rtype: basestring
"""
for prob_list in probs_seq:
if not len(prob_list) == len(vocabulary) + 1:
raise ValueError("probs dimension mismatchedd with vocabulary")
if method == "best_path":
return ctc_best_path_decode(probs_seq, vocabulary)
else:
raise ValueError("Decoding method [%s] is not supported.")
|
[
"numpy.array",
"itertools.groupby"
] |
[((1104, 1127), 'itertools.groupby', 'groupby', (['max_index_list'], {}), '(max_index_list)\n', (1111, 1127), False, 'from itertools import groupby\n'), ((973, 992), 'numpy.array', 'np.array', (['probs_seq'], {}), '(probs_seq)\n', (981, 992), True, 'import numpy as np\n')]
|
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script
from pandas_profiling.config import Settings
from pandas_profiling.model.summary_helpers_image import (
extract_exif,
hash_image,
is_image_truncated,
open_image,
)
def mad(arr: np.ndarray) -> np.ndarray:
"""Median Absolute Deviation: a "Robust" version of standard deviation.
    Indicates variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
return np.median(np.abs(arr - np.median(arr)))
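# Illustrative check (editor's addition, not part of the original module): for
# the made-up sample below the median is 3 and the absolute deviations from it
# are [2, 1, 0, 1, 2], so their median -- the MAD -- is 1.
if __name__ == "__main__":
    _demo_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    assert mad(_demo_values) == 1.0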
def named_aggregate_summary(series: pd.Series, key: str) -> dict:
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def length_summary(series: pd.Series, summary: dict = None) -> dict:
if summary is None:
summary = {}
length = series.str.len()
summary.update({"length": length})
summary.update(named_aggregate_summary(length, "length"))
return summary
def file_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
# Transform
stats = series.map(lambda x: os.stat(x))
def convert_datetime(x: float) -> str:
return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S")
# Transform some more
summary = {
"file_size": stats.map(lambda x: x.st_size),
"file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime),
"file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime),
"file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime),
}
return summary
def path_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
# TODO: optimize using value counts
summary = {
"common_prefix": os.path.commonprefix(series.values.tolist())
or "No common prefix",
"stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(),
"suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(),
"name_counts": series.map(lambda x: os.path.basename(x)).value_counts(),
"parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(),
"anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(),
}
summary["n_stem_unique"] = len(summary["stem_counts"])
summary["n_suffix_unique"] = len(summary["suffix_counts"])
summary["n_name_unique"] = len(summary["name_counts"])
summary["n_parent_unique"] = len(summary["parent_counts"])
summary["n_anchor_unique"] = len(summary["anchor_counts"])
return summary
def url_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
summary = {
"scheme_counts": series.map(lambda x: x.scheme).value_counts(),
"netloc_counts": series.map(lambda x: x.netloc).value_counts(),
"path_counts": series.map(lambda x: x.path).value_counts(),
"query_counts": series.map(lambda x: x.query).value_counts(),
"fragment_counts": series.map(lambda x: x.fragment).value_counts(),
}
return summary
def count_duplicate_hashes(image_descriptions: dict) -> int:
"""
Args:
image_descriptions:
Returns:
"""
counts = pd.Series(
[x["hash"] for x in image_descriptions if "hash" in x]
).value_counts()
return counts.sum() - len(counts)
def extract_exif_series(image_exifs: list) -> dict:
"""
Args:
image_exifs:
Returns:
"""
exif_keys = []
exif_values: dict = {}
for image_exif in image_exifs:
# Extract key
exif_keys.extend(list(image_exif.keys()))
# Extract values per key
for exif_key, exif_val in image_exif.items():
if exif_key not in exif_values:
exif_values[exif_key] = []
exif_values[exif_key].append(exif_val)
series = {"exif_keys": pd.Series(exif_keys, dtype=object).value_counts().to_dict()}
for k, v in exif_values.items():
series[k] = pd.Series(v).value_counts()
return series
def extract_image_information(
path: Path, exif: bool = False, hash: bool = False
) -> dict:
"""Extracts all image information per file, as opening files is slow
Args:
path: Path to the image
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
A dict containing image information
"""
information: dict = {}
image = open_image(path)
information["opened"] = image is not None
if image is not None:
information["truncated"] = is_image_truncated(image)
if not information["truncated"]:
information["size"] = image.size
if exif:
information["exif"] = extract_exif(image)
if hash:
information["hash"] = hash_image(image)
return information
def image_summary(series: pd.Series, exif: bool = False, hash: bool = False) -> dict:
"""
Args:
series: series to summarize
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
"""
image_information = series.apply(
partial(extract_image_information, exif=exif, hash=hash)
)
summary = {
"n_truncated": sum(
[1 for x in image_information if "truncated" in x and x["truncated"]]
),
"image_dimensions": pd.Series(
[x["size"] for x in image_information if "size" in x],
name="image_dimensions",
),
}
image_widths = summary["image_dimensions"].map(lambda x: x[0])
summary.update(named_aggregate_summary(image_widths, "width"))
image_heights = summary["image_dimensions"].map(lambda x: x[1])
summary.update(named_aggregate_summary(image_heights, "height"))
image_areas = image_widths * image_heights
summary.update(named_aggregate_summary(image_areas, "area"))
if hash:
summary["n_duplicate_hash"] = count_duplicate_hashes(image_information)
if exif:
exif_series = extract_exif_series(
[x["exif"] for x in image_information if "exif" in x]
)
summary["exif_keys_counts"] = exif_series["exif_keys"]
summary["exif_data"] = exif_series
return summary
def get_character_counts(series: pd.Series) -> Counter:
"""Function to return the character counts
Args:
series: the Series to process
Returns:
A dict with character counts
"""
return Counter(series.str.cat())
def counter_to_series(counter: Counter) -> pd.Series:
if not counter:
return pd.Series([], dtype=object)
counter_as_tuples = counter.most_common()
items, counts = zip(*counter_as_tuples)
return pd.Series(counts, index=items)
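# Small illustrative example (editor's addition, not original code): character
# counts over a toy Series, then converted to a pandas Series ordered by
# frequency via counter_to_series.
if __name__ == "__main__":
    _demo_series = pd.Series(["aab", "ba"])
    _demo_counts = get_character_counts(_demo_series)  # Counter({'a': 3, 'b': 2})
    print(counter_to_series(_demo_counts))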
def unicode_summary(series: pd.Series) -> dict:
# Unicode Character Summaries (category and script name)
character_counts = get_character_counts(series)
character_counts_series = counter_to_series(character_counts)
char_to_block = {key: block(key) for key in character_counts.keys()}
char_to_category_short = {key: category(key) for key in character_counts.keys()}
char_to_script = {key: script(key) for key in character_counts.keys()}
summary = {
"n_characters": len(character_counts_series),
"character_counts": character_counts_series,
"category_alias_values": {
key: category_long(value) for key, value in char_to_category_short.items()
},
"block_alias_values": {
key: block_abbr(value) for key, value in char_to_block.items()
},
}
# Retrieve original distribution
block_alias_counts: Counter = Counter()
per_block_char_counts: dict = {
k: Counter() for k in summary["block_alias_values"].values()
}
for char, n_char in character_counts.items():
block_name = summary["block_alias_values"][char]
block_alias_counts[block_name] += n_char
per_block_char_counts[block_name][char] = n_char
summary["block_alias_counts"] = counter_to_series(block_alias_counts)
summary["block_alias_char_counts"] = {
k: counter_to_series(v) for k, v in per_block_char_counts.items()
}
script_counts: Counter = Counter()
per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()}
for char, n_char in character_counts.items():
script_name = char_to_script[char]
script_counts[script_name] += n_char
per_script_char_counts[script_name][char] = n_char
summary["script_counts"] = counter_to_series(script_counts)
summary["script_char_counts"] = {
k: counter_to_series(v) for k, v in per_script_char_counts.items()
}
category_alias_counts: Counter = Counter()
per_category_alias_char_counts: dict = {
k: Counter() for k in summary["category_alias_values"].values()
}
for char, n_char in character_counts.items():
category_alias_name = summary["category_alias_values"][char]
category_alias_counts[category_alias_name] += n_char
per_category_alias_char_counts[category_alias_name][char] += n_char
summary["category_alias_counts"] = counter_to_series(category_alias_counts)
summary["category_alias_char_counts"] = {
k: counter_to_series(v) for k, v in per_category_alias_char_counts.items()
}
# Unique counts
summary["n_category"] = len(summary["category_alias_counts"])
summary["n_scripts"] = len(summary["script_counts"])
summary["n_block_alias"] = len(summary["block_alias_counts"])
if len(summary["category_alias_counts"]) > 0:
summary["category_alias_counts"].index = summary[
"category_alias_counts"
].index.str.replace("_", " ")
return summary
def histogram_compute(
config: Settings,
finite_values: np.ndarray,
n_unique: int,
name: str = "histogram",
weights: Optional[np.ndarray] = None,
) -> dict:
stats = {}
bins = config.plot.histogram.bins
bins_arg = "auto" if bins == 0 else min(bins, n_unique)
stats[name] = np.histogram(finite_values, bins=bins_arg, weights=weights)
max_bins = config.plot.histogram.max_bins
if bins_arg == "auto" and len(stats[name][1]) > max_bins:
stats[name] = np.histogram(finite_values, bins=max_bins, weights=None)
return stats
def chi_square(
values: Optional[np.ndarray] = None, histogram: Optional[np.ndarray] = None
) -> dict:
if histogram is None:
histogram, _ = np.histogram(values, bins="auto")
return dict(chisquare(histogram)._asdict())
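# Hedged usage sketch (editor's addition): chi_square accepts either raw values
# or a precomputed histogram; the normally distributed sample below is made up.
if __name__ == "__main__":
    _demo_sample = np.random.normal(size=1000)
    print(chi_square(values=_demo_sample))  # e.g. {'statistic': ..., 'pvalue': ...}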
def word_summary(series: pd.Series) -> dict:
# TODO: preprocess (stopwords)
# TODO: configurable lowercase/punctuation etc.
word_lists = series.str.lower().str.split()
words = word_lists.explode()
words = words.str.strip(string.punctuation)
return {"word_counts": words.value_counts()}
|
[
"pandas_profiling.model.summary_helpers_image.open_image",
"numpy.mean",
"numpy.histogram",
"tangled_up_in_unicode.category_long",
"os.path.splitdrive",
"tangled_up_in_unicode.category",
"numpy.max",
"numpy.min",
"tangled_up_in_unicode.block",
"tangled_up_in_unicode.script",
"scipy.stats.stats.chisquare",
"pandas_profiling.model.summary_helpers_image.extract_exif",
"pandas_profiling.model.summary_helpers_image.hash_image",
"os.path.splitext",
"os.path.dirname",
"tangled_up_in_unicode.block_abbr",
"pandas.Series",
"numpy.median",
"datetime.datetime.fromtimestamp",
"collections.Counter",
"functools.partial",
"os.path.basename",
"pandas_profiling.model.summary_helpers_image.is_image_truncated",
"os.stat"
] |
[((5143, 5159), 'pandas_profiling.model.summary_helpers_image.open_image', 'open_image', (['path'], {}), '(path)\n', (5153, 5159), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((7511, 7541), 'pandas.Series', 'pd.Series', (['counts'], {'index': 'items'}), '(counts, index=items)\n', (7520, 7541), True, 'import pandas as pd\n'), ((8482, 8491), 'collections.Counter', 'Counter', ([], {}), '()\n', (8489, 8491), False, 'from collections import Counter\n'), ((9056, 9065), 'collections.Counter', 'Counter', ([], {}), '()\n', (9063, 9065), False, 'from collections import Counter\n'), ((9578, 9587), 'collections.Counter', 'Counter', ([], {}), '()\n', (9585, 9587), False, 'from collections import Counter\n'), ((10939, 10998), 'numpy.histogram', 'np.histogram', (['finite_values'], {'bins': 'bins_arg', 'weights': 'weights'}), '(finite_values, bins=bins_arg, weights=weights)\n', (10951, 10998), True, 'import numpy as np\n'), ((929, 943), 'numpy.max', 'np.max', (['series'], {}), '(series)\n', (935, 943), True, 'import numpy as np\n'), ((969, 984), 'numpy.mean', 'np.mean', (['series'], {}), '(series)\n', (976, 984), True, 'import numpy as np\n'), ((1012, 1029), 'numpy.median', 'np.median', (['series'], {}), '(series)\n', (1021, 1029), True, 'import numpy as np\n'), ((1054, 1068), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (1060, 1068), True, 'import numpy as np\n'), ((5270, 5295), 'pandas_profiling.model.summary_helpers_image.is_image_truncated', 'is_image_truncated', (['image'], {}), '(image)\n', (5288, 5295), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((5893, 5949), 'functools.partial', 'partial', (['extract_image_information'], {'exif': 'exif', 'hash': 'hash'}), '(extract_image_information, exif=exif, hash=hash)\n', (5900, 5949), False, 'from functools import partial\n'), ((6127, 6221), 'pandas.Series', 'pd.Series', (["[x['size'] for x in image_information if 'size' in x]"], {'name': '"""image_dimensions"""'}), "([x['size'] for x in image_information if 'size' in x], name=\n 'image_dimensions')\n", (6136, 6221), True, 'import pandas as pd\n'), ((7377, 7404), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (7386, 7404), True, 'import pandas as pd\n'), ((7808, 7818), 'tangled_up_in_unicode.block', 'block', (['key'], {}), '(key)\n', (7813, 7818), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((7891, 7904), 'tangled_up_in_unicode.category', 'category', (['key'], {}), '(key)\n', (7899, 7904), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((7969, 7980), 'tangled_up_in_unicode.script', 'script', (['key'], {}), '(key)\n', (7975, 7980), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((8541, 8550), 'collections.Counter', 'Counter', ([], {}), '()\n', (8548, 8550), False, 'from collections import Counter\n'), ((9106, 9115), 'collections.Counter', 'Counter', ([], {}), '()\n', (9113, 9115), False, 'from collections import Counter\n'), ((9646, 9655), 'collections.Counter', 'Counter', ([], {}), '()\n', (9653, 9655), False, 'from collections import Counter\n'), ((11134, 11190), 'numpy.histogram', 'np.histogram', (['finite_values'], {'bins': 'max_bins', 'weights': 'None'}), '(finite_values, bins=max_bins, weights=None)\n', (11146, 11190), True, 'import numpy 
as np\n'), ((11376, 11409), 'numpy.histogram', 'np.histogram', (['values'], {'bins': '"""auto"""'}), "(values, bins='auto')\n", (11388, 11409), True, 'import numpy as np\n'), ((1569, 1579), 'os.stat', 'os.stat', (['x'], {}), '(x)\n', (1576, 1579), False, 'import os\n'), ((3853, 3918), 'pandas.Series', 'pd.Series', (["[x['hash'] for x in image_descriptions if 'hash' in x]"], {}), "([x['hash'] for x in image_descriptions if 'hash' in x])\n", (3862, 3918), True, 'import pandas as pd\n'), ((8199, 8219), 'tangled_up_in_unicode.category_long', 'category_long', (['value'], {}), '(value)\n', (8212, 8219), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((8332, 8349), 'tangled_up_in_unicode.block_abbr', 'block_abbr', (['value'], {}), '(value)\n', (8342, 8349), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((801, 815), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (810, 815), True, 'import numpy as np\n'), ((1643, 1668), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['x'], {}), '(x)\n', (1665, 1668), False, 'from datetime import datetime\n'), ((4663, 4675), 'pandas.Series', 'pd.Series', (['v'], {}), '(v)\n', (4672, 4675), True, 'import pandas as pd\n'), ((5445, 5464), 'pandas_profiling.model.summary_helpers_image.extract_exif', 'extract_exif', (['image'], {}), '(image)\n', (5457, 5464), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((5526, 5543), 'pandas_profiling.model.summary_helpers_image.hash_image', 'hash_image', (['image'], {}), '(image)\n', (5536, 5543), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((11427, 11447), 'scipy.stats.stats.chisquare', 'chisquare', (['histogram'], {}), '(histogram)\n', (11436, 11447), False, 'from scipy.stats.stats import chisquare\n'), ((2599, 2618), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (2615, 2618), False, 'import os\n'), ((2683, 2701), 'os.path.dirname', 'os.path.dirname', (['x'], {}), '(x)\n', (2698, 2701), False, 'import os\n'), ((4541, 4575), 'pandas.Series', 'pd.Series', (['exif_keys'], {'dtype': 'object'}), '(exif_keys, dtype=object)\n', (4550, 4575), True, 'import pandas as pd\n'), ((2427, 2446), 'os.path.splitext', 'os.path.splitext', (['x'], {}), '(x)\n', (2443, 2446), False, 'import os\n'), ((2514, 2533), 'os.path.splitext', 'os.path.splitext', (['x'], {}), '(x)\n', (2530, 2533), False, 'import os\n'), ((2766, 2787), 'os.path.splitdrive', 'os.path.splitdrive', (['x'], {}), '(x)\n', (2784, 2787), False, 'import os\n')]
|
import torch
import numpy as np
import pickle
torch.manual_seed(17)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(17)
import argparse
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import os
from rational.torch import Rational, RecurrentRational, RecurrentRationalModule
from torchvision import datasets, transforms
from torch.utils.tensorboard import SummaryWriter
from mnist import VGG, LeNet5, actfvs
from matplotlib import pyplot as plt
font = {'family': 'normal',
'weight': 'bold',
'size': 22}
matplotlib.rc('font', **font)
torch.set_anomaly_enabled(True)
def test(args, model, device, test_loader, epoch):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
acc = 100. * correct / len(test_loader.dataset)
print('\nTest set: Epoch: {}, Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(epoch, test_loss,
correct,
len(test_loader.dataset),
acc))
return acc
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=17, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--dataset', type=str, default='mnist',
help='dataset to use')
parser.add_argument('--arch', type=str, required=True)
parser.add_argument('--init', type=str, default="", choices=["", "xavier", "he"])
args = parser.parse_args()
networks = dict({
"vgg": VGG,
"lenet": LeNet5,
})
network = networks[args.arch]
# activation_function_keys = [x for x in list(actfvs.keys()) if 'pau' in x]
# activation_function_keys = ['pau']
# activation_function_keys = ['recurrent_pau']
activation_function_keys = ['pau', 'recurrent_pau']
optimizer = 'sgd'
epochs = ['final']
for activation_function_key in activation_function_keys:
for epoch in epochs:
print("---" * 42)
print("Starting with dataset: {}, activation function: {}".format(args.dataset, activation_function_key))
print("---" * 42)
load_path = 'examples/runs/mnist/paper_{}_{}_{}{}_seed{}/'.format(args.dataset, args.arch, optimizer,
"_init_{}".format(args.init) if args.init != "" else "",
args.seed) + activation_function_key
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
if args.dataset == 'mnist':
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_scheduler_milestones = [30, 60, 90] # Simple CNN with 3 Conv
# lr_scheduler_milestones = [40, 80] # VGG
elif args.dataset == 'fmnist':
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('../data', train=False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_scheduler_milestones = [40, 80]
else:
raise ValueError('dataset error')
model = network(activation_func=activation_function_key).to(device)
model.load_state_dict(torch.load(os.path.join(load_path, 'model_{}.pt'.format(epoch))))
paus = list()
for name, layer in model.named_modules():
if isinstance(layer, Rational):
layer.input_retrieve_mode(max_saves=10)
paus.append(('rational', name, layer))
if isinstance(layer, RecurrentRationalModule):
layer.input_retrieve_mode(max_saves=10)
paus.append(('recurrent_rational', name, layer))
if len(paus) > 0:
os.makedirs(os.path.join(load_path, 'plots'), exist_ok=True)
# dict(model.named_parameters())["features.3.0.bias"][0]
# dict(model.named_parameters())["features.4.2.numerator"][0]
print("Starting model eval")
acc = test(args, model, device, test_loader, epoch)
print("Finished model eval -> Plot")
# fig = plt.figure(1, figsize=(6*len(paus),6))
fig_dicts = []
for i, p in enumerate(paus):
fig = p[2].show(display=False)
print(fig)
fig_dicts.append(fig)
pickle.dump(fig_dicts, open(f'{args.dataset}_{args.arch}_{activation_function_key}_(acc{acc}%).fig.pkl', "wb"))
else:
print("No Rational Activations found. Exit without plotting")
if __name__ == '__main__':
main()
|
[
"torch.manual_seed",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"os.path.join",
"torch.cuda.is_available",
"torch.set_anomaly_enabled",
"matplotlib.rc",
"numpy.random.seed",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Normalize",
"torch.device"
] |
[((47, 68), 'torch.manual_seed', 'torch.manual_seed', (['(17)'], {}), '(17)\n', (64, 68), False, 'import torch\n'), ((150, 168), 'numpy.random.seed', 'np.random.seed', (['(17)'], {}), '(17)\n', (164, 168), True, 'import numpy as np\n'), ((592, 621), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (605, 621), False, 'import matplotlib\n'), ((623, 654), 'torch.set_anomaly_enabled', 'torch.set_anomaly_enabled', (['(True)'], {}), '(True)\n', (648, 654), False, 'import torch\n'), ((1771, 1831), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (1794, 1831), False, 'import argparse\n'), ((768, 783), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (781, 783), False, 'import torch\n'), ((3773, 3801), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3790, 3801), False, 'import torch\n'), ((3919, 3944), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3933, 3944), True, 'import numpy as np\n'), ((3967, 4010), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3979, 4010), False, 'import torch\n'), ((3735, 3760), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3758, 3760), False, 'import torch\n'), ((946, 989), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (956, 989), True, 'import torch.nn.functional as F\n'), ((5948, 5980), 'os.path.join', 'os.path.join', (['load_path', '"""plots"""'], {}), "(load_path, 'plots')\n", (5960, 5980), False, 'import os\n'), ((4305, 4332), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (4322, 4332), False, 'from torchvision import datasets, transforms\n'), ((4358, 4379), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4377, 4379), False, 'from torchvision import datasets, transforms\n'), ((4405, 4447), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (4425, 4447), False, 'from torchvision import datasets, transforms\n'), ((4909, 4936), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (4926, 4936), False, 'from torchvision import datasets, transforms\n'), ((4962, 4983), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4981, 4983), False, 'from torchvision import datasets, transforms\n'), ((5009, 5051), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (5029, 5051), False, 'from torchvision import datasets, transforms\n')]
|
from data.data_loader_dad import (
NASA_Anomaly,
WADI
)
from exp.exp_basic import Exp_Basic
from models.model import Informer
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.metrics import metric
from sklearn.metrics import classification_report
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
import os
import time
import warnings
warnings.filterwarnings('ignore')
class Exp_Informer_DAD(Exp_Basic):
def __init__(self, args):
super(Exp_Informer_DAD, self).__init__(args)
def _build_model(self):
model_dict = {
'informer':Informer,
}
if self.args.model=='informer':
model = model_dict[self.args.model](
self.args.enc_in,
self.args.dec_in,
self.args.c_out,
self.args.seq_len,
self.args.label_len,
self.args.pred_len,
self.args.factor,
self.args.d_model,
self.args.n_heads,
self.args.e_layers,
self.args.d_layers,
self.args.d_ff,
self.args.dropout,
self.args.attn,
self.args.embed,
self.args.data[:-1],
self.args.activation,
self.device
)
return model.double()
def _get_data(self, flag):
args = self.args
data_dict = {
'SMAP':NASA_Anomaly,
'MSL':NASA_Anomaly,
'WADI':WADI,
}
Data = data_dict[self.args.data]
if flag == 'test':
shuffle_flag = False; drop_last = True; batch_size = args.batch_size
else:
shuffle_flag = True; drop_last = True; batch_size = args.batch_size
data_set = Data(
root_path=args.root_path,
data_path=args.data_path,
flag=flag,
size=[args.seq_len, args.label_len, args.pred_len],
features=args.features,
target=args.target
)
print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_set, data_loader
def _select_optimizer(self):
model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def _select_criterion(self):
criterion = nn.MSELoss()
return criterion
def vali(self, vali_data, vali_loader, criterion):
self.model.eval()
total_loss = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader):
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
pred = outputs.detach().cpu()
true = batch_y.detach().cpu()
loss = criterion(pred, true)
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
train_data, train_loader = self._get_data(flag = 'train')
vali_data, vali_loader = self._get_data(flag = 'val')
test_data, test_loader = self._get_data(flag = 'test')
path = './checkpoints/'+setting
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
loss = criterion(outputs, batch_y)
train_loss.append(loss.item())
if (i+1) % 100==0:
print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
speed = (time.time()-time_now)/iter_count
left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
iter_count = 0
time_now = time.time()
loss.backward()
model_optim.step()
train_loss = np.average(train_loss)
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
if early_stopping.early_stop:
print("Early stopping")
break
adjust_learning_rate(model_optim, epoch+1, self.args)
best_model_path = path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def test(self, setting):
test_data, test_loader = self._get_data(flag='test')
self.model.eval()
preds = []
trues = []
labels = []
with torch.no_grad():
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader):
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
pred = outputs.detach().cpu().numpy()#.squeeze()
true = batch_y.detach().cpu().numpy()#.squeeze()
batch_label = batch_label.long().detach().numpy()
preds.append(pred)
trues.append(true)
labels.append(batch_label)
preds = np.array(preds)
trues = np.array(trues)
labels = np.array(labels)
print('test shape:', preds.shape, trues.shape)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
labels = labels.reshape(-1, labels.shape[-1])
print('test shape:', preds.shape, trues.shape)
# result save
folder_path = './results/' + setting +'/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path+'pred.npy', preds)
np.save(folder_path+'true.npy', trues)
np.save(folder_path+'label.npy', labels)
return
|
[
"os.path.exists",
"os.makedirs",
"numpy.average",
"torch.load",
"torch.nn.MSELoss",
"utils.tools.EarlyStopping",
"numpy.array",
"utils.tools.adjust_learning_rate",
"torch.cat",
"torch.utils.data.DataLoader",
"utils.metrics.metric",
"torch.no_grad",
"torch.zeros_like",
"time.time",
"warnings.filterwarnings",
"numpy.save"
] |
[((438, 471), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (461, 471), False, 'import warnings\n'), ((2215, 2335), 'torch.utils.data.DataLoader', 'DataLoader', (['data_set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle_flag', 'num_workers': 'args.num_workers', 'drop_last': 'drop_last'}), '(data_set, batch_size=batch_size, shuffle=shuffle_flag,\n num_workers=args.num_workers, drop_last=drop_last)\n', (2225, 2335), False, 'from torch.utils.data import DataLoader\n'), ((2637, 2649), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2647, 2649), True, 'import torch.nn as nn\n'), ((3705, 3727), 'numpy.average', 'np.average', (['total_loss'], {}), '(total_loss)\n', (3715, 3727), True, 'import numpy as np\n'), ((4139, 4150), 'time.time', 'time.time', ([], {}), '()\n', (4148, 4150), False, 'import time\n'), ((4225, 4281), 'utils.tools.EarlyStopping', 'EarlyStopping', ([], {'patience': 'self.args.patience', 'verbose': '(True)'}), '(patience=self.args.patience, verbose=True)\n', (4238, 4281), False, 'from utils.tools import EarlyStopping, adjust_learning_rate\n'), ((8199, 8214), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (8207, 8214), True, 'import numpy as np\n'), ((8231, 8246), 'numpy.array', 'np.array', (['trues'], {}), '(trues)\n', (8239, 8246), True, 'import numpy as np\n'), ((8264, 8280), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8272, 8280), True, 'import numpy as np\n'), ((8773, 8793), 'utils.metrics.metric', 'metric', (['preds', 'trues'], {}), '(preds, trues)\n', (8779, 8793), False, 'from utils.metrics import metric\n'), ((8935, 8975), 'numpy.save', 'np.save', (["(folder_path + 'pred.npy')", 'preds'], {}), "(folder_path + 'pred.npy', preds)\n", (8942, 8975), True, 'import numpy as np\n'), ((8982, 9022), 'numpy.save', 'np.save', (["(folder_path + 'true.npy')", 'trues'], {}), "(folder_path + 'true.npy', trues)\n", (8989, 9022), True, 'import numpy as np\n'), ((9029, 9071), 'numpy.save', 'np.save', (["(folder_path + 'label.npy')", 'labels'], {}), "(folder_path + 'label.npy', labels)\n", (9036, 9071), True, 'import numpy as np\n'), ((4067, 4087), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4081, 4087), False, 'import os\n'), ((4101, 4118), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4112, 4118), False, 'import os\n'), ((6113, 6135), 'numpy.average', 'np.average', (['train_loss'], {}), '(train_loss)\n', (6123, 6135), True, 'import numpy as np\n'), ((6634, 6689), 'utils.tools.adjust_learning_rate', 'adjust_learning_rate', (['model_optim', '(epoch + 1)', 'self.args'], {}), '(model_optim, epoch + 1, self.args)\n', (6654, 6689), False, 'from utils.tools import EarlyStopping, adjust_learning_rate\n'), ((6788, 6815), 'torch.load', 'torch.load', (['best_model_path'], {}), '(best_model_path)\n', (6798, 6815), False, 'import torch\n'), ((7067, 7082), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7080, 7082), False, 'import torch\n'), ((8669, 8696), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (8683, 8696), False, 'import os\n'), ((8710, 8734), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (8721, 8734), False, 'import os\n'), ((8887, 8925), 'numpy.array', 'np.array', (['[mae, mse, rmse, mape, mspe]'], {}), '([mae, mse, rmse, mape, mspe])\n', (8895, 8925), True, 'import numpy as np\n'), ((3156, 3209), 'torch.zeros_like', 'torch.zeros_like', (['batch_y[:, -self.args.pred_len:, :]'], {}), '(batch_y[:, 
-self.args.pred_len:, :])\n', (3172, 3209), False, 'import torch\n'), ((5991, 6002), 'time.time', 'time.time', ([], {}), '()\n', (6000, 6002), False, 'import time\n'), ((5049, 5102), 'torch.zeros_like', 'torch.zeros_like', (['batch_y[:, -self.args.pred_len:, :]'], {}), '(batch_y[:, -self.args.pred_len:, :])\n', (5065, 5102), False, 'import torch\n'), ((7485, 7538), 'torch.zeros_like', 'torch.zeros_like', (['batch_y[:, -self.args.pred_len:, :]'], {}), '(batch_y[:, -self.args.pred_len:, :])\n', (7501, 7538), False, 'import torch\n'), ((3239, 3303), 'torch.cat', 'torch.cat', (['[batch_y[:, :self.args.label_len, :], dec_inp]'], {'dim': '(1)'}), '([batch_y[:, :self.args.label_len, :], dec_inp], dim=1)\n', (3248, 3303), False, 'import torch\n'), ((5707, 5718), 'time.time', 'time.time', ([], {}), '()\n', (5716, 5718), False, 'import time\n'), ((5136, 5200), 'torch.cat', 'torch.cat', (['[batch_y[:, :self.args.label_len, :], dec_inp]'], {'dim': '(1)'}), '([batch_y[:, :self.args.label_len, :], dec_inp], dim=1)\n', (5145, 5200), False, 'import torch\n'), ((7572, 7636), 'torch.cat', 'torch.cat', (['[batch_y[:, :self.args.label_len, :], dec_inp]'], {'dim': '(1)'}), '([batch_y[:, :self.args.label_len, :], dec_inp], dim=1)\n', (7581, 7636), False, 'import torch\n')]
|
import os
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from sklearn.model_selection import KFold, train_test_split
def load_data(path):
train = pd.read_json(os.path.join(path, "./train.json"))
test = pd.read_json(os.path.join(path, "./test.json"))
return (train, test)
def preprocess(df,
means=(-22.159262, -24.953745, 40.021883465782651),
stds=(5.33146, 4.5463958, 4.0815391476694414)):
X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75)
for band in df["band_1"]])
X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75)
for band in df["band_2"]])
    # 'na' angles fall back to the angle-channel mean (means has only 3 entries)
    angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x != 'na' else means[2])
angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32)
for angel in angl])
X_band_1 = (X_band_1 - means[0]) / stds[0]
X_band_2 = (X_band_2 - means[1]) / stds[1]
angl = (angl - means[2]) / stds[2]
images = np.concatenate([X_band_1[:, :, :, np.newaxis],
X_band_2[:, :, :, np.newaxis],
angl[:, :, :, np.newaxis]],
axis=-1)
return images
def prepare_data_cv(path):
train, test = load_data(path)
X_train, y_train = (preprocess(train),
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))
kfold_data = []
kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE)
for train_indices, val_indices in kf.split(y_train):
X_train_cv = X_train[train_indices]
y_train_cv = y_train[train_indices]
X_val = X_train[val_indices]
y_val = y_train[val_indices]
kfold_data.append((X_train_cv, y_train_cv, X_val, y_val))
X_test = preprocess(test)
return (kfold_data, X_test)
def prepare_data(path):
train, test = load_data(path)
X_train, y_train = (preprocess(train),
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))
X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train,
y_train,
random_state=0xCAFFE,
train_size=0.8)
X_test = preprocess(test)
return ([(X_train_cv, y_train_cv, X_valid, y_valid)], X_test)
|
[
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.array",
"numpy.cos",
"numpy.concatenate",
"numpy.full",
"sklearn.model_selection.KFold"
] |
[((1090, 1209), 'numpy.concatenate', 'np.concatenate', (['[X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :,\n np.newaxis]]'], {'axis': '(-1)'}), '([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis\n ], angl[:, :, :, np.newaxis]], axis=-1)\n', (1104, 1209), True, 'import numpy as np\n'), ((1533, 1585), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(831486)'}), '(n_splits=5, shuffle=True, random_state=831486)\n', (1538, 1585), False, 'from sklearn.model_selection import KFold, train_test_split\n'), ((2178, 2249), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'random_state': '(831486)', 'train_size': '(0.8)'}), '(X_train, y_train, random_state=831486, train_size=0.8)\n', (2194, 2249), False, 'from sklearn.model_selection import KFold, train_test_split\n'), ((198, 232), 'os.path.join', 'os.path.join', (['path', '"""./train.json"""'], {}), "(path, './train.json')\n", (210, 232), False, 'import os\n'), ((258, 291), 'os.path.join', 'os.path.join', (['path', '"""./test.json"""'], {}), "(path, './test.json')\n", (270, 291), False, 'import os\n'), ((767, 790), 'numpy.cos', 'np.cos', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (773, 790), True, 'import numpy as np\n'), ((840, 881), 'numpy.full', 'np.full', ([], {'shape': '(75, 75)', 'fill_value': 'angel'}), '(shape=(75, 75), fill_value=angel)\n', (847, 881), True, 'import numpy as np\n'), ((495, 509), 'numpy.array', 'np.array', (['band'], {}), '(band)\n', (503, 509), True, 'import numpy as np\n'), ((623, 637), 'numpy.array', 'np.array', (['band'], {}), '(band)\n', (631, 637), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
H = 2
N = 2
M = 3
BS = 10
def my_softmax(arr):
max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1))
arr = arr - max_elements
exp_array = np.exp(arr)
print (exp_array)
sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1))
return exp_array /sum_array
def masked_softmax(logits, mask, dim):
"""
Takes masked softmax over given dimension of logits.
Inputs:
logits: Numpy array. We want to take softmax over dimension dim.
mask: Numpy array of same shape as logits.
Has 1s where there's real data in logits, 0 where there's padding
dim: int. dimension over which to take softmax
Returns:
masked_logits: Numpy array same shape as logits.
This is the same as logits, but with 1e30 subtracted
(i.e. very large negative number) in the padding locations.
prob_dist: Numpy array same shape as logits.
The result of taking softmax over masked_logits in given dimension.
Should be 0 in padding locations.
Should sum to 1 over given dimension.
"""
exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere
print (exp_mask)
masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large
prob_dist = tf.nn.softmax(masked_logits, dim)
return masked_logits, prob_dist
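# Hedged usage sketch (illustrative only, not part of the original tests):
# applies masked_softmax to a single row of logits whose last position is
# padding, so that position should receive (effectively) zero probability.
if __name__ == "__main__":
    _demo_logits = tf.constant([[1.0, 2.0, 3.0]], dtype=tf.float64)
    _demo_mask = tf.constant([[1.0, 1.0, 0.0]], dtype=tf.float64)
    _, _demo_dist = masked_softmax(_demo_logits, _demo_mask, 1)
    with tf.Session() as _sess:
        print(_sess.run(_demo_dist))  # approx. [[0.269, 0.731, 0.0]]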
def test_build_similarity(contexts, questions):
w_sim_1 = tf.get_variable('w_sim_1',
initializer=w_1) # 2 * H
w_sim_2 = tf.get_variable('w_sim_2',
initializer=w_2) # 2 * self.hidden_size
w_sim_3 = tf.get_variable('w_sim_3',
initializer=w_3) # 2 * self.hidden_size
q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H
q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M
contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1
result = (contexts * q_tile) # BS x N x 2H x M
tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M])
result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H
result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H
tf.assert_equal(tf.shape(result), [BS, N*M, 2*H])
# w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1])
# w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1])
# w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1])
term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N
term1 = tf.reshape(term1, (-1, N))
term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M
term2 = tf.reshape(term2, (-1, M))
term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1))
term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M
S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M))
return S
def test_build_sim_mask():
context_mask = np.array([True, True]) # BS x N
question_mask = np.array([True, True, False]) # BS x M
context_mask = np.tile(context_mask, [BS, 1])
question_mask = np.tile(question_mask, [BS, 1])
context_mask = tf.get_variable('context_mask', initializer=context_mask)
question_mask = tf.get_variable('question_mask', initializer=question_mask)
context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1
question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1
question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M
sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32),
tf.cast(question_mask, dtype=tf.int32)) # BS x N x M
return sim_mask
def test_build_c2q(S, S_mask, questions):
    _, alpha = masked_softmax(S, S_mask, 2) # BS x N x M
return tf.matmul(alpha, questions)
def test_build_q2c(S, S_mask, contexts):
# S = BS x N x M
# contexts = BS x N x 2H
m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N
beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1
beta = tf.transpose(beta, (0, 2, 1))
q2c = tf.matmul(beta, contexts)
return m, beta, q2c
def test_concatenation(c2q, q2c):
q2c = tf.tile(q2c, (1, N, 1))
output = tf.concat([c2q, q2c], axis=2)
tf.assert_equal(tf.shape(output), [BS, N, 4*H])
return output
if __name__== "__main__":
w_1 = np.array([1., 2., 3., 4.])
w_2 = np.array([5., 6., 7., 8.])
w_3 = np.array([13., 12., 11., 10.])
c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H
q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]]) # BS x M x 2H
c = np.tile(c, [BS, 1, 1])
q = np.tile(q, [BS, 1, 1])
questions = tf.get_variable('questions', initializer=q)
contexts = tf.get_variable('contexts', initializer=c)
S = test_build_similarity(contexts, questions)
mask = test_build_sim_mask()
c2q = test_build_c2q(S, mask, questions)
m, beta, q2c = test_build_q2c(S, mask, contexts)
output = test_concatenation(c2q, q2c)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
S_result, mask_result, c2q_r = sess.run([S, mask, c2q])
actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1])
assert np.array_equal(actual_result, S_result), 'Arrays are not equal'
print ("Building similarity matrix is successful!")
print ("Context 2 Question attention")
m_r, beta_r, q2c_r = sess.run([m, beta, q2c])
output_r = sess.run(output)
|
[
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.transpose",
"numpy.array",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.Session",
"numpy.max",
"numpy.exp",
"tensorflow.concat",
"tensorflow.matmul",
"numpy.tile",
"tensorflow.add",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.global_variables_initializer",
"numpy.sum",
"numpy.array_equal"
] |
[((201, 212), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (207, 212), True, 'import numpy as np\n'), ((1260, 1284), 'tensorflow.add', 'tf.add', (['logits', 'exp_mask'], {}), '(logits, exp_mask)\n', (1266, 1284), True, 'import tensorflow as tf\n'), ((1347, 1380), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['masked_logits', 'dim'], {}), '(masked_logits, dim)\n', (1360, 1380), True, 'import tensorflow as tf\n'), ((1481, 1524), 'tensorflow.get_variable', 'tf.get_variable', (['"""w_sim_1"""'], {'initializer': 'w_1'}), "('w_sim_1', initializer=w_1)\n", (1496, 1524), True, 'import tensorflow as tf\n'), ((1555, 1598), 'tensorflow.get_variable', 'tf.get_variable', (['"""w_sim_2"""'], {'initializer': 'w_2'}), "('w_sim_2', initializer=w_2)\n", (1570, 1598), True, 'import tensorflow as tf\n'), ((1644, 1687), 'tensorflow.get_variable', 'tf.get_variable', (['"""w_sim_3"""'], {'initializer': 'w_3'}), "('w_sim_3', initializer=w_3)\n", (1659, 1687), True, 'import tensorflow as tf\n'), ((1816, 1850), 'tensorflow.transpose', 'tf.transpose', (['q_tile', '(1, 0, 3, 2)'], {}), '(q_tile, (1, 0, 3, 2))\n', (1828, 1850), True, 'import tensorflow as tf\n'), ((1884, 1912), 'tensorflow.expand_dims', 'tf.expand_dims', (['contexts', '(-1)'], {}), '(contexts, -1)\n', (1898, 1912), True, 'import tensorflow as tf\n'), ((2052, 2086), 'tensorflow.transpose', 'tf.transpose', (['result', '(0, 1, 3, 2)'], {}), '(result, (0, 1, 3, 2))\n', (2064, 2086), True, 'import tensorflow as tf\n'), ((2118, 2156), 'tensorflow.reshape', 'tf.reshape', (['result', '(-1, N * M, 2 * H)'], {}), '(result, (-1, N * M, 2 * H))\n', (2128, 2156), True, 'import tensorflow as tf\n'), ((2522, 2548), 'tensorflow.reshape', 'tf.reshape', (['term1', '(-1, N)'], {}), '(term1, (-1, N))\n', (2532, 2548), True, 'import tensorflow as tf\n'), ((2659, 2685), 'tensorflow.reshape', 'tf.reshape', (['term2', '(-1, M)'], {}), '(term2, (-1, M))\n', (2669, 2685), True, 'import tensorflow as tf\n'), ((2789, 2818), 'tensorflow.reshape', 'tf.reshape', (['term3', '(-1, N, M)'], {}), '(term3, (-1, N, M))\n', (2799, 2818), True, 'import tensorflow as tf\n'), ((2969, 2991), 'numpy.array', 'np.array', (['[True, True]'], {}), '([True, True])\n', (2977, 2991), True, 'import numpy as np\n'), ((3021, 3050), 'numpy.array', 'np.array', (['[True, True, False]'], {}), '([True, True, False])\n', (3029, 3050), True, 'import numpy as np\n'), ((3079, 3109), 'numpy.tile', 'np.tile', (['context_mask', '[BS, 1]'], {}), '(context_mask, [BS, 1])\n', (3086, 3109), True, 'import numpy as np\n'), ((3130, 3161), 'numpy.tile', 'np.tile', (['question_mask', '[BS, 1]'], {}), '(question_mask, [BS, 1])\n', (3137, 3161), True, 'import numpy as np\n'), ((3181, 3238), 'tensorflow.get_variable', 'tf.get_variable', (['"""context_mask"""'], {'initializer': 'context_mask'}), "('context_mask', initializer=context_mask)\n", (3196, 3238), True, 'import tensorflow as tf\n'), ((3259, 3318), 'tensorflow.get_variable', 'tf.get_variable', (['"""question_mask"""'], {'initializer': 'question_mask'}), "('question_mask', initializer=question_mask)\n", (3274, 3318), True, 'import tensorflow as tf\n'), ((3338, 3370), 'tensorflow.expand_dims', 'tf.expand_dims', (['context_mask', '(-1)'], {}), '(context_mask, -1)\n', (3352, 3370), True, 'import tensorflow as tf\n'), ((3404, 3437), 'tensorflow.expand_dims', 'tf.expand_dims', (['question_mask', '(-1)'], {}), '(question_mask, -1)\n', (3418, 3437), True, 'import tensorflow as tf\n'), ((3471, 3509), 'tensorflow.transpose', 'tf.transpose', (['question_mask', '(0, 2, 1)'], {}), 
'(question_mask, (0, 2, 1))\n', (3483, 3509), True, 'import tensorflow as tf\n'), ((3781, 3808), 'tensorflow.matmul', 'tf.matmul', (['alpha', 'questions'], {}), '(alpha, questions)\n', (3790, 3808), True, 'import tensorflow as tf\n'), ((4051, 4080), 'tensorflow.transpose', 'tf.transpose', (['beta', '(0, 2, 1)'], {}), '(beta, (0, 2, 1))\n', (4063, 4080), True, 'import tensorflow as tf\n'), ((4091, 4116), 'tensorflow.matmul', 'tf.matmul', (['beta', 'contexts'], {}), '(beta, contexts)\n', (4100, 4116), True, 'import tensorflow as tf\n'), ((4186, 4209), 'tensorflow.tile', 'tf.tile', (['q2c', '(1, N, 1)'], {}), '(q2c, (1, N, 1))\n', (4193, 4209), True, 'import tensorflow as tf\n'), ((4223, 4252), 'tensorflow.concat', 'tf.concat', (['[c2q, q2c]'], {'axis': '(2)'}), '([c2q, q2c], axis=2)\n', (4232, 4252), True, 'import tensorflow as tf\n'), ((4360, 4390), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (4368, 4390), True, 'import numpy as np\n'), ((4397, 4427), 'numpy.array', 'np.array', (['[5.0, 6.0, 7.0, 8.0]'], {}), '([5.0, 6.0, 7.0, 8.0])\n', (4405, 4427), True, 'import numpy as np\n'), ((4434, 4468), 'numpy.array', 'np.array', (['[13.0, 12.0, 11.0, 10.0]'], {}), '([13.0, 12.0, 11.0, 10.0])\n', (4442, 4468), True, 'import numpy as np\n'), ((4474, 4530), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]'], {}), '([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]])\n', (4482, 4530), True, 'import numpy as np\n'), ((4545, 4630), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0, 0.0], [5.0, 6.0, 7.0, 4.0], [8.0, 9.0, 10.0, 11.0]]]'], {}), '([[[1.0, 2.0, 3.0, 0.0], [5.0, 6.0, 7.0, 4.0], [8.0, 9.0, 10.0, 11.0]]]\n )\n', (4553, 4630), True, 'import numpy as np\n'), ((4637, 4659), 'numpy.tile', 'np.tile', (['c', '[BS, 1, 1]'], {}), '(c, [BS, 1, 1])\n', (4644, 4659), True, 'import numpy as np\n'), ((4668, 4690), 'numpy.tile', 'np.tile', (['q', '[BS, 1, 1]'], {}), '(q, [BS, 1, 1])\n', (4675, 4690), True, 'import numpy as np\n'), ((4709, 4752), 'tensorflow.get_variable', 'tf.get_variable', (['"""questions"""'], {'initializer': 'q'}), "('questions', initializer=q)\n", (4724, 4752), True, 'import tensorflow as tf\n'), ((4768, 4810), 'tensorflow.get_variable', 'tf.get_variable', (['"""contexts"""'], {'initializer': 'c'}), "('contexts', initializer=c)\n", (4783, 4810), True, 'import tensorflow as tf\n'), ((5048, 5081), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5079, 5081), True, 'import tensorflow as tf\n'), ((121, 140), 'numpy.max', 'np.max', (['arr'], {'axis': '(2)'}), '(arr, axis=2)\n', (127, 140), True, 'import numpy as np\n'), ((262, 287), 'numpy.sum', 'np.sum', (['exp_array'], {'axis': '(2)'}), '(exp_array, axis=2)\n', (268, 287), True, 'import numpy as np\n'), ((1740, 1768), 'tensorflow.expand_dims', 'tf.expand_dims', (['questions', '(0)'], {}), '(questions, 0)\n', (1754, 1768), True, 'import tensorflow as tf\n'), ((2002, 2018), 'tensorflow.shape', 'tf.shape', (['result'], {}), '(result)\n', (2010, 2018), True, 'import tensorflow as tf\n'), ((2195, 2211), 'tensorflow.shape', 'tf.shape', (['result'], {}), '(result)\n', (2203, 2211), True, 'import tensorflow as tf\n'), ((2435, 2472), 'tensorflow.reshape', 'tf.reshape', (['contexts', '(BS * N, 2 * H)'], {}), '(contexts, (BS * N, 2 * H))\n', (2445, 2472), True, 'import tensorflow as tf\n'), ((2472, 2499), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_sim_1', '(-1)'], {}), '(w_sim_1, -1)\n', (2486, 2499), True, 'import tensorflow as 
tf\n'), ((2571, 2609), 'tensorflow.reshape', 'tf.reshape', (['questions', '(BS * M, 2 * H)'], {}), '(questions, (BS * M, 2 * H))\n', (2581, 2609), True, 'import tensorflow as tf\n'), ((2609, 2636), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_sim_2', '(-1)'], {}), '(w_sim_2, -1)\n', (2623, 2636), True, 'import tensorflow as tf\n'), ((2708, 2747), 'tensorflow.reshape', 'tf.reshape', (['result', '(BS * N * M, 2 * H)'], {}), '(result, (BS * N * M, 2 * H))\n', (2718, 2747), True, 'import tensorflow as tf\n'), ((2748, 2775), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_sim_3', '(-1)'], {}), '(w_sim_3, -1)\n', (2762, 2775), True, 'import tensorflow as tf\n'), ((2879, 2908), 'tensorflow.reshape', 'tf.reshape', (['term2', '(-1, 1, M)'], {}), '(term2, (-1, 1, M))\n', (2889, 2908), True, 'import tensorflow as tf\n'), ((3548, 3585), 'tensorflow.cast', 'tf.cast', (['context_mask'], {'dtype': 'tf.int32'}), '(context_mask, dtype=tf.int32)\n', (3555, 3585), True, 'import tensorflow as tf\n'), ((3599, 3637), 'tensorflow.cast', 'tf.cast', (['question_mask'], {'dtype': 'tf.int32'}), '(question_mask, dtype=tf.int32)\n', (3606, 3637), True, 'import tensorflow as tf\n'), ((4005, 4021), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['m'], {}), '(m)\n', (4018, 4021), True, 'import tensorflow as tf\n'), ((4273, 4289), 'tensorflow.shape', 'tf.shape', (['output'], {}), '(output)\n', (4281, 4289), True, 'import tensorflow as tf\n'), ((5091, 5103), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5101, 5103), True, 'import tensorflow as tf\n'), ((5308, 5347), 'numpy.array_equal', 'np.array_equal', (['actual_result', 'S_result'], {}), '(actual_result, S_result)\n', (5322, 5347), True, 'import numpy as np\n'), ((1139, 1163), 'tensorflow.cast', 'tf.cast', (['mask', '"""float64"""'], {}), "(mask, 'float64')\n", (1146, 1163), True, 'import tensorflow as tf\n'), ((2840, 2869), 'tensorflow.reshape', 'tf.reshape', (['term1', '(-1, N, 1)'], {}), '(term1, (-1, N, 1))\n', (2850, 2869), True, 'import tensorflow as tf\n'), ((3927, 3960), 'tensorflow.cast', 'tf.cast', (['S_mask'], {'dtype': 'tf.float64'}), '(S_mask, dtype=tf.float64)\n', (3934, 3960), True, 'import tensorflow as tf\n'), ((5232, 5279), 'numpy.array', 'np.array', (['[[228, 772, 1372], [548, 1828, 3140]]'], {}), '([[228, 772, 1372], [548, 1828, 3140]])\n', (5240, 5279), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from astropy import convolution
from scipy.signal import medfilt
import astropy.units as u
from ..spectra.spectrum1d import Spectrum1D
from ..tests.spectral_examples import simulated_spectra
from ..manipulation.smoothing import (convolution_smooth, box_smooth,
gaussian_smooth, trapezoid_smooth,
median_smooth)
def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01):
"""
There are two things to compare for each set of smoothing:
1. Compare the smoothed flux from the astropy machinery vs
the smoothed flux from specutils. This is done by
comparing flux_smooth1 and flux_smooth2.
2. Next we want to compare the smoothed flux to the original
flux. This is a little more difficult as smoothing will
make a difference for median filter, but less so for
convolution based smoothing if the kernel is normalized
(area under the kernel = 1).
In this second case the rtol (relative tolerance) is used
judiciously.
"""
# Compare, element by element, the two smoothed fluxes.
assert np.allclose(flux_smooth1, flux_smooth2)
# Compare the total spectral flux of the smoothed to the original.
assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol)
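# --- Illustrative sketch (added for clarity; not part of the original test suite) ---
# Why the rtol comparison above is reasonable: a kernel normalized to unit area
# approximately conserves the summed flux, up to edge effects. The _demo_* names
# are hypothetical and exist only for this demonstration.
_demo_flux = np.ones(10)
_demo_kernel = np.array([0.25, 0.5, 0.25])  # area under the kernel = 1
_demo_smoothed = np.convolve(_demo_flux, _demo_kernel, mode='same')
# np.sum(_demo_smoothed) stays close to np.sum(_demo_flux); the small boundary
# discrepancy is exactly what the rtol margin absorbs.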
def test_smooth_custom_kernel(simulated_spectra):
"""
    Test CustomKernel smoothing with correct parameters.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create a custom kernel (some weird asymmetric-ness)
numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2])
numpy_kernel = numpy_kernel / np.sum(numpy_kernel)
custom_kernel = convolution.CustomKernel(numpy_kernel)
flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel)
# Calculate the custom smoothed
spec1_smoothed = convolution_smooth(spec1, custom_kernel)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
@pytest.mark.parametrize("width", [1, 2.3])
def test_smooth_box_good(simulated_spectra, width):
"""
    Test Box1DKernel smoothing with correct parameters.
Width values need to be a number greater than 0.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Calculate the smoothed flux using Astropy
box_kernel = convolution.Box1DKernel(width)
flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel)
# Calculate the box smoothed
spec1_smoothed = box_smooth(spec1, width)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("width", [-1, 0, 'a'])
def test_smooth_box_bad(simulated_spectra, width):
"""
    Test Box1DKernel smoothing with incorrect parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad input parameters
with pytest.raises(ValueError):
box_smooth(spec1, width)
@pytest.mark.parametrize("stddev", [1, 2.3])
def test_smooth_gaussian_good(simulated_spectra, stddev):
"""
    Test Gaussian1DKernel smoothing with correct parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Calculate the smoothed flux using Astropy
gaussian_kernel = convolution.Gaussian1DKernel(stddev)
flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel)
# Test gaussian smoothing
spec1_smoothed = gaussian_smooth(spec1, stddev)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("stddev", [-1, 0, 'a'])
def test_smooth_gaussian_bad(simulated_spectra, stddev):
"""
    Test Gaussian1DKernel smoothing with incorrect parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
    # Test bad input parameters
with pytest.raises(ValueError):
gaussian_smooth(spec1, stddev)
@pytest.mark.parametrize("stddev", [1, 2.3])
def test_smooth_trapezoid_good(simulated_spectra, stddev):
"""
    Test Trapezoid1DKernel smoothing with correct parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create the flux_smoothed which is what we want to compare to
trapezoid_kernel = convolution.Trapezoid1DKernel(stddev)
flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel)
# Test trapezoid smoothing
spec1_smoothed = trapezoid_smooth(spec1, stddev)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("stddev", [-1, 0, 'a'])
def test_smooth_trapezoid_bad(simulated_spectra, stddev):
"""
    Test Trapezoid1DKernel smoothing with incorrect parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad parameters
with pytest.raises(ValueError):
trapezoid_smooth(spec1, stddev)
@pytest.mark.parametrize("width", [1, 3, 9])
def test_smooth_median_good(simulated_spectra, width):
"""
    Test Median smoothing with correct parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create the flux_smoothed which is what we want to compare to
flux_smoothed_astropy = medfilt(flux_original, width)
# Test median smoothing
spec1_smoothed = median_smooth(spec1, width)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("width", [-1, 0, 'a'])
def test_smooth_median_bad(simulated_spectra, width):
"""
    Test Median smoothing with incorrect parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad parameters
with pytest.raises(ValueError):
median_smooth(spec1, width)
|
[
"numpy.allclose",
"astropy.convolution.CustomKernel",
"pytest.mark.parametrize",
"numpy.array",
"astropy.convolution.convolve",
"astropy.convolution.Gaussian1DKernel",
"astropy.convolution.Box1DKernel",
"scipy.signal.medfilt",
"numpy.sum",
"pytest.raises",
"astropy.convolution.Trapezoid1DKernel"
] |
[((2105, 2147), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', '[1, 2.3]'], {}), "('width', [1, 2.3])\n", (2128, 2147), False, 'import pytest\n'), ((2941, 2987), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', "[-1, 0, 'a']"], {}), "('width', [-1, 0, 'a'])\n", (2964, 2987), False, 'import pytest\n'), ((3343, 3386), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', '[1, 2.3]'], {}), "('stddev', [1, 2.3])\n", (3366, 3386), False, 'import pytest\n'), ((4227, 4274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', "[-1, 0, 'a']"], {}), "('stddev', [-1, 0, 'a'])\n", (4250, 4274), False, 'import pytest\n'), ((4661, 4704), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', '[1, 2.3]'], {}), "('stddev', [1, 2.3])\n", (4684, 4704), False, 'import pytest\n'), ((5560, 5607), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', "[-1, 0, 'a']"], {}), "('stddev', [-1, 0, 'a'])\n", (5583, 5607), False, 'import pytest\n'), ((5990, 6033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', '[1, 3, 9]'], {}), "('width', [1, 3, 9])\n", (6013, 6033), False, 'import pytest\n'), ((6780, 6826), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', "[-1, 0, 'a']"], {}), "('width', [-1, 0, 'a'])\n", (6803, 6826), False, 'import pytest\n'), ((1191, 1230), 'numpy.allclose', 'np.allclose', (['flux_smooth1', 'flux_smooth2'], {}), '(flux_smooth1, flux_smooth2)\n', (1202, 1230), True, 'import numpy as np\n'), ((1689, 1720), 'numpy.array', 'np.array', (['[0.5, 1, 2, 0.5, 0.2]'], {}), '([0.5, 1, 2, 0.5, 0.2])\n', (1697, 1720), True, 'import numpy as np\n'), ((1797, 1835), 'astropy.convolution.CustomKernel', 'convolution.CustomKernel', (['numpy_kernel'], {}), '(numpy_kernel)\n', (1821, 1835), False, 'from astropy import convolution\n'), ((1864, 1914), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'custom_kernel'], {}), '(flux_original, custom_kernel)\n', (1884, 1914), False, 'from astropy import convolution\n'), ((2502, 2532), 'astropy.convolution.Box1DKernel', 'convolution.Box1DKernel', (['width'], {}), '(width)\n', (2525, 2532), False, 'from astropy import convolution\n'), ((2561, 2608), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'box_kernel'], {}), '(flux_original, box_kernel)\n', (2581, 2608), False, 'from astropy import convolution\n'), ((3762, 3798), 'astropy.convolution.Gaussian1DKernel', 'convolution.Gaussian1DKernel', (['stddev'], {}), '(stddev)\n', (3790, 3798), False, 'from astropy import convolution\n'), ((3827, 3879), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'gaussian_kernel'], {}), '(flux_original, gaussian_kernel)\n', (3847, 3879), False, 'from astropy import convolution\n'), ((5102, 5139), 'astropy.convolution.Trapezoid1DKernel', 'convolution.Trapezoid1DKernel', (['stddev'], {}), '(stddev)\n', (5131, 5139), False, 'from astropy import convolution\n'), ((5168, 5221), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'trapezoid_kernel'], {}), '(flux_original, trapezoid_kernel)\n', (5188, 5221), False, 'from astropy import convolution\n'), ((6408, 6437), 'scipy.signal.medfilt', 'medfilt', (['flux_original', 'width'], {}), '(flux_original, width)\n', (6415, 6437), False, 'from scipy.signal import medfilt\n'), ((1755, 1775), 'numpy.sum', 'np.sum', (['numpy_kernel'], {}), '(numpy_kernel)\n', (1761, 1775), True, 'import numpy as 
np\n'), ((3280, 3305), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3293, 3305), False, 'import pytest\n'), ((4592, 4617), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4605, 4617), False, 'import pytest\n'), ((5920, 5945), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5933, 5945), False, 'import pytest\n'), ((7111, 7136), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7124, 7136), False, 'import pytest\n')]
|
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder, StandardScaler
def load_numeric_training(standardize=True):
data = pd.read_csv('../train.csv')
ID = data.pop('id')
y = data.pop('species')
y = LabelEncoder().fit(y).transform(y)
X = StandardScaler().fit(data).transform(data) if standardize else data.values
return ID.values, X, y
def load_numeric_test(standardize=True):
data = pd.read_csv('../test.csv')
ID = data.pop('id')
test = StandardScaler().fit(data).transform(data) if standardize else data.values
return ID.values, test
def resize_img(img, max_dim=96):
max_axis = np.argmax(img.size)
scale = max_dim / img.size[max_axis]
return img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))
def load_img_data(ids, max_dim=96, center=True):
X = np.empty((len(ids), max_dim, max_dim, 1))
for i, id in enumerate(ids):
img = load_img('../images/{}.jpg'.format(id), grayscale=True)
img = resize_img(img, max_dim=max_dim)
x = img_to_array(img)
h, w = x.shape[:2]
if center:
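            # Center the resized image in the (max_dim, max_dim) canvas; '>> 1' halves the margin (integer division by 2).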
h1 = (max_dim - h) >> 1
h2 = h1 + h
w1 = (max_dim - w) >> 1
w2 = w1 + w
else:
h1, h2, w1, w2 = 0, h, 0, w
X[i][h1:h2, w1:w2][:] = x
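    # Pixel values are in [0, 255]; dividing by 255 and rounding yields a binary 0/1 image (threshold at mid-gray).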
return np.around(X / 255)
def load_train_data(split=0.9, random_state=7):
ID, X_num_train, y = load_numeric_training()
X_img_train = load_img_data(ID)
sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state)
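    # The stratified split keeps per-species class proportions roughly equal in the train and validation subsets.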
train_idx, val_idx = next(sss.split(X_num_train, y))
ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx]
ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx]
return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val)
def load_test_data():
ID, X_num_test = load_numeric_test()
X_img_test = load_img_data(ID)
return ID, X_num_test, X_img_test
print('Loading train data ...')
(ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data()
# Prepare ID-to-label and ID-to-numerical-feature dictionaries
ID_y_dic, ID_num_dic = {}, {}
for i in range(len(ID_train)):
ID_y_dic[ID_train[i]] = y_tr[i]
ID_num_dic[ID_train[i]] = X_num_tr[i, :]
print('Loading test data ...')
ID_test, X_num_test, X_img_test = load_test_data()
# Convert IDs and labels to one-hot encodings
ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical(y_val)
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float32_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def write_val_data():
val_data_path = '../tfrecords/val_data_1.tfrecords'
if os.path.exists(val_data_path):
print('Warning: old file exists, removed.')
os.remove(val_data_path)
val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool)
print(val_image.shape, val_num.shape, val_label.shape)
val_writer = tf.python_io.TFRecordWriter(val_data_path)
print('Writing data into tfrecord ...')
for i in range(len(val_image)):
image, num, label = val_image[i], val_num[i], val_label[i]
feature = {'image': _bytes_feature(image.tostring()),
'num': _bytes_feature(num.tostring()),
'label': _bytes_feature(label.tostring())}
example = tf.train.Example(features=tf.train.Features(feature=feature))
val_writer.write(example.SerializeToString())
print('Done!')
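# --- Illustrative read-side sketch (an assumption, not part of the original script) ---
# Shows how a record written by write_val_data() could be parsed back using the
# TF1-style API that matches tf.python_io.TFRecordWriter. The function name is
# hypothetical; the feature keys mirror the ones used above.
def _parse_example(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        features={'image': tf.FixedLenFeature([], tf.string),
                  'num': tf.FixedLenFeature([], tf.string),
                  'label': tf.FixedLenFeature([], tf.string)})
    # tostring() on the write side pairs with decode_raw on the read side; the
    # boolean arrays were serialized one byte per element, hence the uint8 -> bool cast.
    image = tf.cast(tf.decode_raw(features['image'], tf.uint8), tf.bool)
    num = tf.decode_raw(features['num'], tf.float64)
    label = tf.cast(tf.decode_raw(features['label'], tf.uint8), tf.bool)
    return image, num, label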
def write_train_data():
imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True,
vertical_flip=True, fill_mode='nearest')
imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7)
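    # The generator's "labels" are one-hot encoded IDs, not class labels: after augmentation each
    # image can be mapped back to its numeric features and true label via ID_num_dic and ID_y_dic.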
print('Generating augmented images')
all_images = []
all_ID = []
p = True
for i in range(28 * 200):
print('Generating augmented images for epoch {}, batch {}'.format(i // 28, i % 28))
X, ID = imgen_train.next()
all_images.append(X)
all_ID.append(np.argmax(ID, axis=1))
all_images = np.concatenate(all_images).astype(np.bool)
all_ID = np.concatenate(all_ID)
all_y = np.zeros(all_ID.shape)
all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1]))
for i in range(len(all_ID)):
all_nums[i, :] = ID_num_dic[all_ID[i]]
all_y[i] = ID_y_dic[all_ID[i]]
all_y = to_categorical(all_y).astype(np.bool)
print('Data shapes:')
print('Image:', all_images.shape)
print('Label:', all_y.shape)
print('Numerical:', all_nums.shape)
train_data_path = '../tfrecords/train_data_1.tfrecords'
if os.path.exists(train_data_path):
print('Warning: old file exists, removed.')
os.remove(train_data_path)
# compression = tf.python_io.TFRecordCompressionType.GZIP
# train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression))
train_writer = tf.python_io.TFRecordWriter(train_data_path)
print('Writing data into tfrecord ...')
for i in range(len(all_images)):
if i % 891 == 0:
print('Writing {} th epoch data ...'.format(i // 891))
image, num, label = all_images[i], all_nums[i], all_y[i]
feature = {'image': _bytes_feature(image.tostring()),
'num': _bytes_feature(num.tostring()),
'label': _bytes_feature(label.tostring())}
example = tf.train.Example(features=tf.train.Features(feature=feature))
train_writer.write(example.SerializeToString())
print('Done!')
write_val_data()
|
[
"sklearn.model_selection.StratifiedShuffleSplit",
"os.path.exists",
"keras.preprocessing.image.img_to_array",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"numpy.argmax",
"keras.preprocessing.image.ImageDataGenerator",
"tensorflow.train.BytesList",
"tensorflow.train.Int64List",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"keras.utils.np_utils.to_categorical",
"tensorflow.train.Features",
"numpy.around",
"numpy.concatenate",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.FloatList",
"os.remove"
] |
[((419, 446), 'pandas.read_csv', 'pd.read_csv', (['"""../train.csv"""'], {}), "('../train.csv')\n", (430, 446), True, 'import pandas as pd\n'), ((706, 732), 'pandas.read_csv', 'pd.read_csv', (['"""../test.csv"""'], {}), "('../test.csv')\n", (717, 732), True, 'import pandas as pd\n'), ((920, 939), 'numpy.argmax', 'np.argmax', (['img.size'], {}), '(img.size)\n', (929, 939), True, 'import numpy as np\n'), ((1603, 1621), 'numpy.around', 'np.around', (['(X / 255)'], {}), '(X / 255)\n', (1612, 1621), True, 'import numpy as np\n'), ((1767, 1871), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'train_size': 'split', 'test_size': '(1 - split)', 'random_state': 'random_state'}), '(n_splits=1, train_size=split, test_size=1 - split,\n random_state=random_state)\n', (1789, 1871), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((2844, 2868), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['ID_train'], {}), '(ID_train)\n', (2858, 2868), False, 'from keras.utils.np_utils import to_categorical\n'), ((2870, 2890), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_tr'], {}), '(y_tr)\n', (2884, 2890), False, 'from keras.utils.np_utils import to_categorical\n'), ((2892, 2913), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_val'], {}), '(y_val)\n', (2906, 2913), False, 'from keras.utils.np_utils import to_categorical\n'), ((3312, 3341), 'os.path.exists', 'os.path.exists', (['val_data_path'], {}), '(val_data_path)\n', (3326, 3341), False, 'import os\n'), ((3620, 3662), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['val_data_path'], {}), '(val_data_path)\n', (3647, 3662), True, 'import tensorflow as tf\n'), ((4184, 4304), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(20)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rotation_range=20, zoom_range=0.2, horizontal_flip=True,\n vertical_flip=True, fill_mode='nearest')\n", (4202, 4304), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4800, 4822), 'numpy.concatenate', 'np.concatenate', (['all_ID'], {}), '(all_ID)\n', (4814, 4822), True, 'import numpy as np\n'), ((4835, 4857), 'numpy.zeros', 'np.zeros', (['all_ID.shape'], {}), '(all_ID.shape)\n', (4843, 4857), True, 'import numpy as np\n'), ((4873, 4919), 'numpy.zeros', 'np.zeros', (['(all_ID.shape[0], X_num_tr.shape[1])'], {}), '((all_ID.shape[0], X_num_tr.shape[1]))\n', (4881, 4919), True, 'import numpy as np\n'), ((5295, 5326), 'os.path.exists', 'os.path.exists', (['train_data_path'], {}), '(train_data_path)\n', (5309, 5326), False, 'import os\n'), ((5614, 5658), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['train_data_path'], {}), '(train_data_path)\n', (5641, 5658), True, 'import tensorflow as tf\n'), ((1320, 1337), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1332, 1337), False, 'from keras.preprocessing.image import img_to_array, load_img\n'), ((3403, 3427), 'os.remove', 'os.remove', (['val_data_path'], {}), '(val_data_path)\n', (3412, 3427), False, 'import os\n'), ((5388, 5414), 'os.remove', 'os.remove', (['train_data_path'], {}), '(train_data_path)\n', (5397, 5414), False, 'import os\n'), ((2984, 3017), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (3002, 3017), True, 'import tensorflow as tf\n'), ((3087, 
3120), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (3105, 3120), True, 'import tensorflow as tf\n'), ((3192, 3223), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (3210, 3223), True, 'import tensorflow as tf\n'), ((4703, 4724), 'numpy.argmax', 'np.argmax', (['ID'], {'axis': '(1)'}), '(ID, axis=1)\n', (4712, 4724), True, 'import numpy as np\n'), ((4744, 4770), 'numpy.concatenate', 'np.concatenate', (['all_images'], {}), '(all_images)\n', (4758, 4770), True, 'import numpy as np\n'), ((5051, 5072), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['all_y'], {}), '(all_y)\n', (5065, 5072), False, 'from keras.utils.np_utils import to_categorical\n'), ((4036, 4070), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (4053, 4070), True, 'import tensorflow as tf\n'), ((6124, 6158), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (6141, 6158), True, 'import tensorflow as tf\n'), ((507, 521), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (519, 521), False, 'from sklearn.preprocessing import LabelEncoder, StandardScaler\n'), ((550, 566), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (564, 566), False, 'from sklearn.preprocessing import LabelEncoder, StandardScaler\n'), ((768, 784), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (782, 784), False, 'from sklearn.preprocessing import LabelEncoder, StandardScaler\n')]
|
import numpy as np
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
class OCROnObjects():
def __init__(self, license_plate):
character_objects = self.identify_boundary_objects(license_plate)
self.get_regions(character_objects, license_plate)
def identify_boundary_objects(self, a_license_plate):
labelImage = measure.label(a_license_plate)
character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
regionLists = regionprops(labelImage)
return regionLists
def get_regions(self, character_objects, a_license_plate):
"""
        Used to map out the regions where the license plate characters are,
        using the principles of connected component analysis and labelling.
        Parameters:
        -----------
        a_license_plate: 2D numpy binary image of the license plate
        Returns:
        --------
        a dictionary containing:
        fullscale: 3D array containing a 2D array for each character
        columnsVal: 1D array of the starting column of each character
        coordinates: array of bounding boxes (min_row, min_col, max_row, max_col), one per character
"""
cord = []
counter=0
column_list = []
character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
for regions in character_objects:
minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox
character_height = maximumRow - minimumRow
character_width = maximumCol - minimumCol
roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol]
if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth:
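                # Regions whose size falls within the expected character range are resized to
                # 20x20 and stacked into a (n_chars, 20, 20) array.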
if counter == 0:
samples = resize(roi, (20,20))
cord.append(regions.bbox)
counter += 1
elif counter == 1:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
counter+=1
else:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
column_list.append(minimumCol)
if len(column_list) == 0:
self.candidates = {}
else:
self.candidates = {
'fullscale': samples,
'coordinates': np.array(cord),
'columnsVal': column_list
}
return self.candidates
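# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# `binary_plate` is a hypothetical 2D binary numpy array of a segmented license plate:
#   ocr = OCROnObjects(binary_plate)
#   if ocr.candidates:
#       chars = ocr.candidates['fullscale']                      # (n_chars, 20, 20)
#       left_to_right = np.argsort(ocr.candidates['columnsVal'])
#       ordered_chars = chars[left_to_right]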
|
[
"skimage.measure.regionprops",
"numpy.array",
"numpy.concatenate",
"skimage.transform.resize",
"skimage.measure.label"
] |
[((412, 442), 'skimage.measure.label', 'measure.label', (['a_license_plate'], {}), '(a_license_plate)\n', (425, 442), False, 'from skimage import measure\n'), ((692, 715), 'skimage.measure.regionprops', 'regionprops', (['labelImage'], {}), '(labelImage)\n', (703, 715), False, 'from skimage.measure import regionprops\n'), ((2943, 2957), 'numpy.array', 'np.array', (['cord'], {}), '(cord)\n', (2951, 2957), True, 'import numpy as np\n'), ((2129, 2150), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (2135, 2150), False, 'from skimage.transform import resize\n'), ((2295, 2316), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (2301, 2316), False, 'from skimage.transform import resize\n'), ((2346, 2413), 'numpy.concatenate', 'np.concatenate', (['(samples[None, :, :], roismall[None, :, :])'], {'axis': '(0)'}), '((samples[None, :, :], roismall[None, :, :]), axis=0)\n', (2360, 2413), True, 'import numpy as np\n'), ((2540, 2561), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (2546, 2561), False, 'from skimage.transform import resize\n'), ((2591, 2655), 'numpy.concatenate', 'np.concatenate', (['(samples[:, :, :], roismall[None, :, :])'], {'axis': '(0)'}), '((samples[:, :, :], roismall[None, :, :]), axis=0)\n', (2605, 2655), True, 'import numpy as np\n')]
|