Dataset Viewer

Columns: code (string, lengths 31 to 1.05M) | apis (sequence) | extract_api (string, lengths 97 to 1.91M)
---|---|---|
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.kullback_leiblers import gauss_kl
from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer
tf.keras.backend.set_floatx("float64")
############
# Utilities
############
def _zero_one_normal_prior(w_dim):
""" N(0, I) prior """
return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim))
def get_distributions_with_w_dim():
distributions = []
for d in [1, 5]:
mean = np.zeros(d)
scale_tri_l = np.eye(d)
mvn = tfp.distributions.MultivariateNormalTriL(mean, scale_tri_l)
std = np.ones(d)
mvn_diag = tfp.distributions.MultivariateNormalDiag(mean, std)
distributions.append((mvn, d))
distributions.append((mvn_diag, d))
return distributions
############
# Tests
############
@pytest.mark.parametrize("distribution, w_dim", get_distributions_with_w_dim())
def test_local_kls(distribution, w_dim):
lv = LatentVariableLayer(encoder=None, prior=distribution)
# test kl is 0 when posteriors == priors
posterior = distribution
assert lv._local_kls(posterior) == 0
# test kl > 0 when posteriors != priors
batch_size = 10
params = distribution.parameters
posterior_params = {
k: [v + 0.5 for _ in range(batch_size)]
for k, v in params.items()
if isinstance(v, np.ndarray)
}
posterior = lv.distribution_class(**posterior_params)
local_kls = lv._local_kls(posterior)
assert np.all(local_kls > 0)
assert local_kls.shape == (batch_size,)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_local_kl_gpflow_consistency(w_dim):
num_data = 400
means = np.random.randn(num_data, w_dim)
encoder = DirectlyParameterizedNormalDiag(num_data, w_dim, means)
lv = LatentVariableLayer(encoder=encoder, prior=_zero_one_normal_prior(w_dim))
posteriors = lv._inference_posteriors(
[np.random.randn(num_data, 3), np.random.randn(num_data, 2)]
)
q_mu = posteriors.parameters["loc"]
q_sqrt = posteriors.parameters["scale_diag"]
gpflow_local_kls = gauss_kl(q_mu, q_sqrt)
tfp_local_kls = tf.reduce_sum(lv._local_kls(posteriors))
np.testing.assert_allclose(tfp_local_kls, gpflow_local_kls, rtol=1e-10)
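# For reference: for a diagonal Gaussian posterior q = N(mu, diag(s^2)) and the
# N(0, I) prior used above, both gauss_kl and the TFP KL divergence reduce to the
# closed form KL(q || p) = 0.5 * sum(s^2 + mu^2 - 1 - log(s^2)).
# The helper below is an illustrative sketch only; it is not used by the tests.
def _reference_diag_gauss_kl(q_mu, q_scale_diag):
    """Closed-form KL(N(q_mu, diag(q_scale_diag**2)) || N(0, I)), summed over dims."""
    variance = q_scale_diag ** 2
    return 0.5 * np.sum(variance + q_mu ** 2 - 1.0 - np.log(variance), axis=-1)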
class ArrayMatcher:
def __init__(self, expected):
self.expected = expected
def __eq__(self, actual):
return np.allclose(actual, self.expected, equal_nan=True)
@pytest.mark.parametrize("w_dim", [1, 5])
def test_latent_variable_layer_losses(mocker, w_dim):
num_data, x_dim, y_dim = 43, 3, 1
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(encoder=encoder, prior=prior)
inputs = np.full((num_data, x_dim), np.nan)
targets = np.full((num_data, y_dim), np.nan)
observations = [inputs, targets]
encoder_inputs = np.concatenate(observations, axis=-1)
_ = lv(inputs)
encoder.assert_not_called()
assert lv.losses == [0.0]
_ = lv(inputs, observations=observations, training=True)
# assert_called_once_with uses == for comparison which fails on arrays
encoder.assert_called_once_with(ArrayMatcher(encoder_inputs), training=True)
expected_loss = [tf.reduce_mean(posteriors.kl_divergence(prior))]
np.testing.assert_equal(lv.losses, expected_loss) # also checks shapes match
@pytest.mark.parametrize("w_dim", [1, 5])
@pytest.mark.parametrize("seed2", [None, 42])
def test_latent_variable_layer_samples(mocker, test_data, w_dim, seed2):
seed = 123
inputs, targets = test_data
num_data, x_dim = inputs.shape
prior_shape = (w_dim,)
posteriors_shape = (num_data, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*prior_shape),
scale_diag=np.random.randn(*prior_shape) ** 2,
)
posteriors = tfp.distributions.MultivariateNormalDiag(
loc=np.random.randn(*posteriors_shape),
scale_diag=np.random.randn(*posteriors_shape) ** 2,
)
encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag))
lv = LatentVariableLayer(prior=prior, encoder=encoder)
tf.random.set_seed(seed)
sample_prior = lv(inputs, seed=seed2)
tf.random.set_seed(seed)
prior_expected = np.concatenate([inputs, prior.sample(num_data, seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_prior, prior_expected)
tf.random.set_seed(seed)
sample_posterior = lv(inputs, observations=[inputs, targets], training=True, seed=seed2)
tf.random.set_seed(seed)
posterior_expected = np.concatenate([inputs, posteriors.sample(seed=seed2)], axis=-1)
np.testing.assert_array_equal(sample_posterior, posterior_expected)
def test_no_tensorflow_metaclass_overwritten():
"""
LayerWithObservations is a subclass of tf.keras.layers.Layer (via TrackableLayer);
this test ensures that TrackableLayer does not have a metaclass, and hence by adding
the ABCMeta to LayerWithObservations we are not accidentally removing some required
TensorFlow magic metaclass.
"""
assert LayerWithObservations.__bases__ == (TrackableLayer,)
assert type(TrackableLayer) is type
assert type(LayerWithObservations) is abc.ABCMeta
|
[
"numpy.testing.assert_equal",
"tensorflow_probability.distributions.MultivariateNormalDiag",
"tensorflow_probability.distributions.MultivariateNormalTriL",
"numpy.testing.assert_allclose",
"numpy.concatenate",
"numpy.testing.assert_array_equal",
"numpy.eye",
"numpy.allclose",
"gpflux.encoders.DirectlyParameterizedNormalDiag",
"numpy.ones",
"tensorflow.keras.backend.set_floatx",
"numpy.random.randn",
"gpflux.layers.LatentVariableLayer",
"tensorflow.random.set_seed",
"gpflow.kullback_leiblers.gauss_kl",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.full",
"numpy.all"
] |
[((897, 935), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (924, 935), True, 'import tensorflow as tf\n'), ((2330, 2370), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_dim"""', '[1, 5]'], {}), "('w_dim', [1, 5])\n", (2353, 2370), False, 'import pytest\n'), ((3216, 3256), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_dim"""', '[1, 5]'], {}), "('w_dim', [1, 5])\n", (3239, 3256), False, 'import pytest\n'), ((4542, 4582), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""w_dim"""', '[1, 5]'], {}), "('w_dim', [1, 5])\n", (4565, 4582), False, 'import pytest\n'), ((4584, 4628), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seed2"""', '[None, 42]'], {}), "('seed2', [None, 42])\n", (4607, 4628), False, 'import pytest\n'), ((1728, 1781), 'gpflux.layers.LatentVariableLayer', 'LatentVariableLayer', ([], {'encoder': 'None', 'prior': 'distribution'}), '(encoder=None, prior=distribution)\n', (1747, 1781), False, 'from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer\n'), ((2261, 2282), 'numpy.all', 'np.all', (['(local_kls > 0)'], {}), '(local_kls > 0)\n', (2267, 2282), True, 'import numpy as np\n'), ((2447, 2479), 'numpy.random.randn', 'np.random.randn', (['num_data', 'w_dim'], {}), '(num_data, w_dim)\n', (2462, 2479), True, 'import numpy as np\n'), ((2494, 2549), 'gpflux.encoders.DirectlyParameterizedNormalDiag', 'DirectlyParameterizedNormalDiag', (['num_data', 'w_dim', 'means'], {}), '(num_data, w_dim, means)\n', (2525, 2549), False, 'from gpflux.encoders import DirectlyParameterizedNormalDiag\n'), ((2866, 2888), 'gpflow.kullback_leiblers.gauss_kl', 'gauss_kl', (['q_mu', 'q_sqrt'], {}), '(q_mu, q_sqrt)\n', (2874, 2888), False, 'from gpflow.kullback_leiblers import gauss_kl\n'), ((2955, 3026), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tfp_local_kls', 'gpflow_local_kls'], {'rtol': '(1e-10)'}), '(tfp_local_kls, gpflow_local_kls, rtol=1e-10)\n', (2981, 3026), True, 'import numpy as np\n'), ((3841, 3890), 'gpflux.layers.LatentVariableLayer', 'LatentVariableLayer', ([], {'encoder': 'encoder', 'prior': 'prior'}), '(encoder=encoder, prior=prior)\n', (3860, 3890), False, 'from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer\n'), ((3905, 3939), 'numpy.full', 'np.full', (['(num_data, x_dim)', 'np.nan'], {}), '((num_data, x_dim), np.nan)\n', (3912, 3939), True, 'import numpy as np\n'), ((3954, 3988), 'numpy.full', 'np.full', (['(num_data, y_dim)', 'np.nan'], {}), '((num_data, y_dim), np.nan)\n', (3961, 3988), True, 'import numpy as np\n'), ((4047, 4084), 'numpy.concatenate', 'np.concatenate', (['observations'], {'axis': '(-1)'}), '(observations, axis=-1)\n', (4061, 4084), True, 'import numpy as np\n'), ((4461, 4510), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['lv.losses', 'expected_loss'], {}), '(lv.losses, expected_loss)\n', (4484, 4510), True, 'import numpy as np\n'), ((5277, 5326), 'gpflux.layers.LatentVariableLayer', 'LatentVariableLayer', ([], {'prior': 'prior', 'encoder': 'encoder'}), '(prior=prior, encoder=encoder)\n', (5296, 5326), False, 'from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer\n'), ((5332, 5356), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5350, 5356), True, 'import tensorflow as tf\n'), ((5403, 5427), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5421, 5427), True, 
'import tensorflow as tf\n'), ((5523, 5582), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sample_prior', 'prior_expected'], {}), '(sample_prior, prior_expected)\n', (5552, 5582), True, 'import numpy as np\n'), ((5588, 5612), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5606, 5612), True, 'import tensorflow as tf\n'), ((5710, 5734), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (5728, 5734), True, 'import tensorflow as tf\n'), ((5829, 5896), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['sample_posterior', 'posterior_expected'], {}), '(sample_posterior, posterior_expected)\n', (5858, 5896), True, 'import numpy as np\n'), ((1236, 1247), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (1244, 1247), True, 'import numpy as np\n'), ((1270, 1279), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (1276, 1279), True, 'import numpy as np\n'), ((1294, 1353), 'tensorflow_probability.distributions.MultivariateNormalTriL', 'tfp.distributions.MultivariateNormalTriL', (['mean', 'scale_tri_l'], {}), '(mean, scale_tri_l)\n', (1334, 1353), True, 'import tensorflow_probability as tfp\n'), ((1369, 1379), 'numpy.ones', 'np.ones', (['d'], {}), '(d)\n', (1376, 1379), True, 'import numpy as np\n'), ((1399, 1450), 'tensorflow_probability.distributions.MultivariateNormalDiag', 'tfp.distributions.MultivariateNormalDiag', (['mean', 'std'], {}), '(mean, std)\n', (1439, 1450), True, 'import tensorflow_probability as tfp\n'), ((3162, 3212), 'numpy.allclose', 'np.allclose', (['actual', 'self.expected'], {'equal_nan': '(True)'}), '(actual, self.expected, equal_nan=True)\n', (3173, 3212), True, 'import numpy as np\n'), ((1094, 1109), 'numpy.zeros', 'np.zeros', (['w_dim'], {}), '(w_dim)\n', (1102, 1109), True, 'import numpy as np\n'), ((1122, 1136), 'numpy.ones', 'np.ones', (['w_dim'], {}), '(w_dim)\n', (1129, 1136), True, 'import numpy as np\n'), ((2686, 2714), 'numpy.random.randn', 'np.random.randn', (['num_data', '(3)'], {}), '(num_data, 3)\n', (2701, 2714), True, 'import numpy as np\n'), ((2716, 2744), 'numpy.random.randn', 'np.random.randn', (['num_data', '(2)'], {}), '(num_data, 2)\n', (2731, 2744), True, 'import numpy as np\n'), ((3485, 3514), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (3500, 3514), True, 'import numpy as np\n'), ((3648, 3682), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (3663, 3682), True, 'import numpy as np\n'), ((4921, 4950), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (4936, 4950), True, 'import numpy as np\n'), ((5084, 5118), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (5099, 5118), True, 'import numpy as np\n'), ((3535, 3564), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (3550, 3564), True, 'import numpy as np\n'), ((3703, 3737), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (3718, 3737), True, 'import numpy as np\n'), ((4971, 5000), 'numpy.random.randn', 'np.random.randn', (['*prior_shape'], {}), '(*prior_shape)\n', (4986, 5000), True, 'import numpy as np\n'), ((5139, 5173), 'numpy.random.randn', 'np.random.randn', (['*posteriors_shape'], {}), '(*posteriors_shape)\n', (5154, 5173), True, 'import numpy as np\n')]
|
"""
This script will modulate the blinky lights using the following algorithm:
1) uses user-provided location to obtain row of pixel data from bathy image
2) samples a 'number of LEDs' number of pixels from that row
3) shifts the sampled row data to center it at the location specified by user
4) displays resulting pixels on Blinky Tape
5) shifts next row by a given latitude, also specified by user
6) sleeps for user-specified period of time
Uses the following arguments:
-l/--location: tuple
Location of the user as a tuple (lat, lon). This represents the center of the LED strip. Defaults to (0, 0).
-u/--update-interval: int
Update interval of the script, in minutes. Defaults to 10.
-p/--port: str
Serial port of the BlinkyTape (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'.
-d/--delta_latitude: int
Vertical change in latitude at every update. May be 0, but this will result in never-changing LEDs.
-i/--image: str
Name of the PNG image that contains the color-coded bathymetric data.
The file currently named mapserv.png was obtained using the following API:
https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0
In lieu of providing command line arguments, you may alternatively edit the defaults in bathy_config.json.
NOTE: runs via:
runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/')
(C) 2021 <NAME> (https://joeycodes.dev)
MIT Licensed
"""
import optparse
import json
from blinkytape import BlinkyTape
from time import sleep
from PIL import Image
import numpy as np
import sys
MAX_ERRORS = 3
num_errors = 0
# Obtain default parameters
with open("./bathymetry_blink/bathy_config.json") as f:
config = json.load(f)
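# Illustrative bathy_config.json contents (the keys are exactly those read below as
# parser defaults; the values shown here are hypothetical examples):
#   {"port": "COM5", "location": [0, 0], "update_rate": 10,
#    "delta_latitude": 5, "num_leds": 60, "image": "./mapserv.png"}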
# Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="portname",
help="serial port (ex: /dev/ttyACM0)", default=config["port"])
parser.add_option("-l", "--location", dest="location",
help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"])
parser.add_option("-u", "--update-rate", dest="update_rate",
help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"])
parser.add_option("-d", "--delta-latitude", dest="delta_latitude",
help="Change in latitude during update (ex: 5)", default=config["delta_latitude"])
parser.add_option("-n", "--num-leds", dest="num_leds",
help="Number of LEDs in strip (ex: 60)", default=config["num_leds"])
parser.add_option("-i", "--image", dest="image_name",
help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"])
(options, args) = parser.parse_args()
if args:
print("Unknown parameters: " + args)
# grab the values provided by user (or defaults)
port = options.portname
loc = options.location
rate = options.update_rate
delta = options.delta_latitude
n_leds = options.num_leds
i_name = options.image_name
# Some visual indication that it works, for headless setups (green tape)
bt = BlinkyTape(port, n_leds)
bt.displayColor(0, 100, 0)
bt.show()
sleep(2)
while True:
try:
# first, load image
im = Image.open(i_name) # Can be many different formats.
cols, rows = im.size
a = np.asarray(im) # of shape (rows, cols, channels)
# map loc latitude to 0-based index
latitude_index = min(rows - 1, max(0, (int)(((loc[0] - -90) / (90 - -90)) * (rows - 0) + 0)))
longitude_index = min(cols - 1, max(0, (int)(((loc[1] - -180) / (180 - -180)) * (cols - 0) + 0)))
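# The two index computations above linearly map latitude in [-90, 90] to a row
# index in [0, rows - 1] and longitude in [-180, 180] to a column index in
# [0, cols - 1]. For example, with the 600x1200 image from the API above,
# loc = (0, 0) lands near row 300 (the equator) and column 600 (the prime meridian).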
# update the location of the next row of elevation data to take
loc[0] += delta
loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow
print("Lat index: " + str(latitude_index))
print("Lon index: " + str(longitude_index))
print("Next latitude: " + str(loc[0]))
# grab the applicable pixel indices
indices = [(int)(x*(cols/n_leds)) for x in range(n_leds)]
# sample that row of pixel data
output_pixels = np.take(a[latitude_index], indices, axis=0)
# rotate the row to center around the specified longitude
output_pixels = np.roll(output_pixels, longitude_index, axis=0)
# send all pixel data to bt
for pixel in output_pixels:
print("Sending r: {}, g: {}, b: {}".format(*pixel))
bt.sendPixel(*pixel)
# finally, show the image
bt.show()
# delete variables for memory management
del a
del im
# Tape resets to stored pattern after a few seconds of inactivity
sleep(rate * 60) # Wait specified number of minutes
# sleep(10) # Wait specified number of minutes
except KeyboardInterrupt:
print("Keyboard interrupt, ending program.")
sys.exit()
except RuntimeError as e:
print("Encountered runtime error: " + e.args[0])
# flush any incomplete data
bt.show()
num_errors += 1
if num_errors > MAX_ERRORS:
sys.exit("Error count exceeds that allowed.")
|
[
"PIL.Image.open",
"numpy.roll",
"numpy.asarray",
"optparse.OptionParser",
"time.sleep",
"numpy.take",
"blinkytape.BlinkyTape",
"sys.exit",
"json.load"
] |
[((1945, 1968), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (1966, 1968), False, 'import optparse\n'), ((3304, 3328), 'blinkytape.BlinkyTape', 'BlinkyTape', (['port', 'n_leds'], {}), '(port, n_leds)\n', (3314, 3328), False, 'from blinkytape import BlinkyTape\n'), ((3366, 3374), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (3371, 3374), False, 'from time import sleep\n'), ((1863, 1875), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1872, 1875), False, 'import json\n'), ((3438, 3456), 'PIL.Image.open', 'Image.open', (['i_name'], {}), '(i_name)\n', (3448, 3456), False, 'from PIL import Image\n'), ((3532, 3546), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (3542, 3546), True, 'import numpy as np\n'), ((4371, 4414), 'numpy.take', 'np.take', (['a[latitude_index]', 'indices'], {'axis': '(0)'}), '(a[latitude_index], indices, axis=0)\n', (4378, 4414), True, 'import numpy as np\n'), ((4514, 4561), 'numpy.roll', 'np.roll', (['output_pixels', 'longitude_index'], {'axis': '(0)'}), '(output_pixels, longitude_index, axis=0)\n', (4521, 4561), True, 'import numpy as np\n'), ((4971, 4987), 'time.sleep', 'sleep', (['(rate * 60)'], {}), '(rate * 60)\n', (4976, 4987), False, 'from time import sleep\n'), ((5172, 5182), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5180, 5182), False, 'import sys\n'), ((5423, 5468), 'sys.exit', 'sys.exit', (['"""Error count exceeds that allowed."""'], {}), "('Error count exceeds that allowed.')\n", (5431, 5468), False, 'import sys\n')]
|
"""
Basic usage
===========
This example presents the basic usage of brokenaxes
"""
import matplotlib.pyplot as plt
from brokenaxes import brokenaxes
import numpy as np
fig = plt.figure(figsize=(5,2))
bax = brokenaxes(xlims=((0, .1), (.4, .7)), ylims=((-1, .7), (.79, 1)), hspace=.05)
x = np.linspace(0, 1, 100)
bax.plot(x, np.sin(10 * x), label='sin')
bax.plot(x, np.cos(10 * x), label='cos')
bax.legend(loc=3)
bax.set_xlabel('time')
bax.set_ylabel('value')
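# Note: when running this snippet outside an auto-rendering example gallery, add
# plt.show() (or fig.savefig(...)) at the end to actually display the figure.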
|
[
"matplotlib.pyplot.figure",
"brokenaxes.brokenaxes",
"numpy.linspace",
"numpy.cos",
"numpy.sin"
] |
[((180, 206), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 2)'}), '(figsize=(5, 2))\n', (190, 206), True, 'import matplotlib.pyplot as plt\n'), ((212, 299), 'brokenaxes.brokenaxes', 'brokenaxes', ([], {'xlims': '((0, 0.1), (0.4, 0.7))', 'ylims': '((-1, 0.7), (0.79, 1))', 'hspace': '(0.05)'}), '(xlims=((0, 0.1), (0.4, 0.7)), ylims=((-1, 0.7), (0.79, 1)),\n hspace=0.05)\n', (222, 299), False, 'from brokenaxes import brokenaxes\n'), ((294, 316), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (305, 316), True, 'import numpy as np\n'), ((329, 343), 'numpy.sin', 'np.sin', (['(10 * x)'], {}), '(10 * x)\n', (335, 343), True, 'import numpy as np\n'), ((370, 384), 'numpy.cos', 'np.cos', (['(10 * x)'], {}), '(10 * x)\n', (376, 384), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np
#from ... import config
#if not config.is_building_docs():
# from qiskit_metal import is_true
class TransmonInterdigitated(QComponent):
"""
The base "TransmonInterdigitated" inherits the "QComponent" class.
This creates a transmon pocket with two large pads connected by a Josephson
junction. Both pads have four interdigitated "fingers" which increase the
capacitance of the structure. There are three coupling capacitor pads with qpins
defined; these can be connected to other structures in a design using CPWs.
Default Options:
* pad_width: '1000um' -- width of the large rectangular pads on either side
of the junction
* pad_height: '300um' -- height of the large rectangular pads on either side
of the junction
* finger_width: '50um' -- width of the "finger" on either side of the junction
* finger_height: '100um' -- height of the "finger" on either side of the junction
* finger_space: '50um' -- height of the Josephson Junction (equivalently, the space
between the two fingers)
* pad_pos_x: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* pad_pos_y: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* comb_width: '50um' -- the width of the four interdigitated combs connected to
either pad
* comb_space_vert: '50um' -- the space between the edge of a comb and the edge of
the opposite rectangular pad
* comb_space_hor: '50um' -- the space between adjacent interdigitated comb structures
* jj_width: '20um' -- the width of the Josephson Junction located between the two
fingers of the device
* cc_space: '50um' -- the space between the lower rectangular pad and the coupling
capacitor below it
* cc_width: '100um' -- the width of the coupling capacitor located below the bottom
rectangular pad
* cc_height: '100um' -- the height of the coupling capacitor located below the bottom
rectangular pad
* cc_topleft_space: '50um' -- the space between the upper rectangular pad and the top
left coupling capacitor
* cc_topleft_width: '100um' -- the width of the top left coupling capacitor pad
* cc_topleft_height: '100um' -- the height of the top left coupling capacitor pad
* cc_topright_space: '50um' -- the space between the upper rectangular pad and the
top right coupling capacitor
* cc_topright_width: '100um' -- the width of the top right coupling capacitor pad
* cc_topright_height: '100um' -- the height of the top right coupling capacitor pad
* position_x: '0um' -- the x-coordinate defining the center of the transmon pocket
on the chip
* position_y: '0um' -- the y-coordinate defining the center of the transmon pocket
on the chip
* rotation: '0.0' -- the angle at which the entire structure is rotated
* rotation_top_pad: '180' -- internal coordinate defining the angle of rotation
between top and bottom pads
* layer: '1' -- all objects are drawn assuming they are part of the same layer on
the chip
"""
# Default drawing options
default_options = Dict(pad_width='1000um',
pad_height='300um',
finger_width='50um',
finger_height='100um',
finger_space='50um',
pad_pos_x='0um',
pad_pos_y='0um',
comb_width='50um',
comb_space_vert='50um',
comb_space_hor='50um',
jj_width='20um',
cc_space='50um',
cc_width='100um',
cc_height='100um',
cc_topleft_space='50um',
cc_topleft_width='100um',
cc_topleft_height='100um',
cc_topright_space='50um',
cc_topright_width='100um',
cc_topright_height='100um',
position_x='0um',
position_y='0um',
rotation='0.0',
rotation_top_pad='180',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# draw the lower pad as a rectangle
pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x,
p.pad_pos_y)
# draw the lower finger as a rectangle
finger_lower = draw.rectangle(
p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y +
0.49999 * (p.pad_height) + 0.49999 * (p.finger_height))
# draw the Josephson Junction
rect_jj = draw.rectangle(
p.jj_width, p.finger_space, p.pad_pos_x,
0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space))
# draw the first comb to the right of the lower finger as a rectangle
comb1_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the right of the lower finger by translating the first comb
comb2_lower = draw.translate(comb1_lower,
2.0 * (p.comb_space_hor + p.comb_width),
0.0)
# draw the first comb to the left of the lower finger
comb3_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(-0.5 * p.finger_width - 2.0 * p.comb_space_hor -
1.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the left of the lower finger
comb4_lower = draw.translate(comb3_lower,
-2.0 * (p.comb_space_hor + p.comb_width),
0.0)
coupling_capacitor = draw.rectangle(
p.cc_width, p.cc_height, p.pad_pos_x,
p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height)
cc_topleft = draw.rectangle(
p.cc_topleft_width, p.cc_topleft_height,
p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height)
cc_topright = draw.translate(
cc_topleft,
p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width,
0.0)
# merge the bottom elements
bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower,
comb3_lower, comb4_lower)
# create the top portion of the comb by translating and rotating
# the bottom portion of the comb
top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space)
top = draw.rotate(top, p.rotation_top_pad)
# merge everything into a single design
design = draw.union(bottom, top, rect_jj, coupling_capacitor,
cc_topleft, cc_topright)
# draw the transmon pocket bounding box
pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height)
# the origin is originally set to the middle of the lower pad.
# Let's move it to the center of the JJ.
design = draw.translate(
design, 0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# now translate the final structure according to the user input
design = draw.rotate(design, p.rotation, origin=(0, 0))
design = draw.translate(design, p.position_x, p.position_y)
pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
pocket = draw.translate(pocket, p.position_x, p.position_y)
geom = {'design': design}
geom_pocket = {'pocket': pocket}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)
###################################################################
# Add Qpin connections for coupling capacitors
# define a function that both rotates and translates the
# qpin coordinates
def qpin_rotate_translate(x):
""" This function rotates the coordinates of the three qpins
according to the user inputs for "position_x", "position_y"
and "rotation".
"""
y = list(x)
z = [0.0, 0.0]
z[0] = y[0] * cos(p.rotation * 3.14159 / 180) - y[1] * sin(
p.rotation * 3.14159 / 180)
z[1] = y[0] * sin(p.rotation * 3.14159 / 180) + y[1] * cos(
p.rotation * 3.14159 / 180)
z[0] = z[0] + p.position_x
z[1] = z[1] + p.position_y
x = (z[0], z[1])
return x
# Add Qpin connections for the bottom coupling capacitor
qp1a = (0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# rotate and translate the qpin coordinates
qp1a = qpin_rotate_translate(qp1a)
qp1b = qpin_rotate_translate(qp1b)
self.add_pin('pin1',
points=np.array([qp1a, qp1b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top left coupling capacitor
qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp2b = (p.pad_pos_x - 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp2a = qpin_rotate_translate(qp2a)
qp2b = qpin_rotate_translate(qp2b)
self.add_pin('pin2',
points=np.array([qp2a, qp2b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top right coupling capacitor
qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp3b = (p.pad_pos_x + 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp3a = qpin_rotate_translate(qp3a)
qp3b = qpin_rotate_translate(qp3b)
self.add_pin('pin3',
points=np.array([qp3a, qp3b]),
width=0.01,
input_as_norm=True)
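# Illustrative usage sketch (comments only, not part of this module). The design
# object below follows the usual qiskit-metal workflow; treat the exact names and
# option values as assumptions:
#   from qiskit_metal import designs
#   design = designs.DesignPlanar()
#   q1 = TransmonInterdigitated(design, 'Q1',
#                               options=dict(position_x='2mm', rotation='90'))
#   # The component draws itself via make(); its three coupling pads expose
#   # qpins 'pin1', 'pin2' and 'pin3' for routing CPWs to other components.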
|
[
"qiskit_metal.draw.union",
"math.cos",
"qiskit_metal.Dict",
"numpy.array",
"qiskit_metal.draw.rectangle",
"qiskit_metal.draw.translate",
"qiskit_metal.draw.rotate",
"math.sin"
] |
[((4010, 4566), 'qiskit_metal.Dict', 'Dict', ([], {'pad_width': '"""1000um"""', 'pad_height': '"""300um"""', 'finger_width': '"""50um"""', 'finger_height': '"""100um"""', 'finger_space': '"""50um"""', 'pad_pos_x': '"""0um"""', 'pad_pos_y': '"""0um"""', 'comb_width': '"""50um"""', 'comb_space_vert': '"""50um"""', 'comb_space_hor': '"""50um"""', 'jj_width': '"""20um"""', 'cc_space': '"""50um"""', 'cc_width': '"""100um"""', 'cc_height': '"""100um"""', 'cc_topleft_space': '"""50um"""', 'cc_topleft_width': '"""100um"""', 'cc_topleft_height': '"""100um"""', 'cc_topright_space': '"""50um"""', 'cc_topright_width': '"""100um"""', 'cc_topright_height': '"""100um"""', 'position_x': '"""0um"""', 'position_y': '"""0um"""', 'rotation': '"""0.0"""', 'rotation_top_pad': '"""180"""', 'layer': '"""1"""'}), "(pad_width='1000um', pad_height='300um', finger_width='50um',\n finger_height='100um', finger_space='50um', pad_pos_x='0um', pad_pos_y=\n '0um', comb_width='50um', comb_space_vert='50um', comb_space_hor='50um',\n jj_width='20um', cc_space='50um', cc_width='100um', cc_height='100um',\n cc_topleft_space='50um', cc_topleft_width='100um', cc_topleft_height=\n '100um', cc_topright_space='50um', cc_topright_width='100um',\n cc_topright_height='100um', position_x='0um', position_y='0um',\n rotation='0.0', rotation_top_pad='180', layer='1')\n", (4014, 4566), False, 'from qiskit_metal import draw, Dict\n'), ((5306, 5334), 'qiskit_metal.Dict', 'Dict', ([], {'short_name': '"""component"""'}), "(short_name='component')\n", (5310, 5334), False, 'from qiskit_metal import draw, Dict\n'), ((5576, 5643), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.pad_width', 'p.pad_height', 'p.pad_pos_x', 'p.pad_pos_y'], {}), '(p.pad_width, p.pad_height, p.pad_pos_x, p.pad_pos_y)\n', (5590, 5643), False, 'from qiskit_metal import draw, Dict\n'), ((5750, 5881), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.finger_width', 'p.finger_height', 'p.pad_pos_x', '(p.pad_pos_y + 0.49999 * p.pad_height + 0.49999 * p.finger_height)'], {}), '(p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y + \n 0.49999 * p.pad_height + 0.49999 * p.finger_height)\n', (5764, 5881), False, 'from qiskit_metal import draw, Dict\n'), ((5963, 6083), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.jj_width', 'p.finger_space', 'p.pad_pos_x', '(0.5 * p.pad_height + p.finger_height + 0.5 * p.finger_space)'], {}), '(p.jj_width, p.finger_space, p.pad_pos_x, 0.5 * p.pad_height +\n p.finger_height + 0.5 * p.finger_space)\n', (5977, 6083), False, 'from qiskit_metal import draw, Dict\n'), ((6210, 6460), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.comb_width', '(2 * p.finger_height + p.finger_space - p.comb_space_vert)', '(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width)', '(0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.pad_height + 0.5 * p.\n finger_height))'], {}), '(p.comb_width, 2 * p.finger_height + p.finger_space - p.\n comb_space_vert, 0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.\n comb_width, 0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.\n pad_height + 0.5 * p.finger_height))\n', (6224, 6460), False, 'from qiskit_metal import draw, Dict\n'), ((6635, 6708), 'qiskit_metal.draw.translate', 'draw.translate', (['comb1_lower', '(2.0 * (p.comb_space_hor + p.comb_width))', '(0.0)'], {}), '(comb1_lower, 2.0 * (p.comb_space_hor + p.comb_width), 0.0)\n', (6649, 6708), False, 'from qiskit_metal import draw, Dict\n'), ((6868, 7124), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.comb_width', '(2 * 
p.finger_height + p.finger_space - p.comb_space_vert)', '(-0.5 * p.finger_width - 2.0 * p.comb_space_hor - 1.5 * p.comb_width)', '(0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.pad_height + 0.5 * p.\n finger_height))'], {}), '(p.comb_width, 2 * p.finger_height + p.finger_space - p.\n comb_space_vert, -0.5 * p.finger_width - 2.0 * p.comb_space_hor - 1.5 *\n p.comb_width, 0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * p.\n pad_height + 0.5 * p.finger_height))\n', (6882, 7124), False, 'from qiskit_metal import draw, Dict\n'), ((7282, 7356), 'qiskit_metal.draw.translate', 'draw.translate', (['comb3_lower', '(-2.0 * (p.comb_space_hor + p.comb_width))', '(0.0)'], {}), '(comb3_lower, -2.0 * (p.comb_space_hor + p.comb_width), 0.0)\n', (7296, 7356), False, 'from qiskit_metal import draw, Dict\n'), ((7461, 7585), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.cc_width', 'p.cc_height', 'p.pad_pos_x', '(p.pad_pos_y - 0.5 * p.pad_height - p.cc_space - 0.5 * p.cc_height)'], {}), '(p.cc_width, p.cc_height, p.pad_pos_x, p.pad_pos_y - 0.5 * p.\n pad_height - p.cc_space - 0.5 * p.cc_height)\n', (7475, 7585), False, 'from qiskit_metal import draw, Dict\n'), ((7630, 7883), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['p.cc_topleft_width', 'p.cc_topleft_height', '(p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width)', '(p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +\n p.cc_topleft_space + 0.5 * p.cc_topleft_height)'], {}), '(p.cc_topleft_width, p.cc_topleft_height, p.pad_pos_x - 0.5 *\n p.pad_width + 0.5 * p.cc_topleft_width, p.pad_pos_y + 1.5 * p.\n pad_height + 2.0 * p.finger_height + p.finger_space + p.\n cc_topleft_space + 0.5 * p.cc_topleft_height)\n', (7644, 7883), False, 'from qiskit_metal import draw, Dict\n'), ((7942, 8046), 'qiskit_metal.draw.translate', 'draw.translate', (['cc_topleft', '(p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width)', '(0.0)'], {}), '(cc_topleft, p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p\n .cc_topright_width, 0.0)\n', (7956, 8046), False, 'from qiskit_metal import draw, Dict\n'), ((8133, 8224), 'qiskit_metal.draw.union', 'draw.union', (['pad_lower', 'finger_lower', 'comb1_lower', 'comb2_lower', 'comb3_lower', 'comb4_lower'], {}), '(pad_lower, finger_lower, comb1_lower, comb2_lower, comb3_lower,\n comb4_lower)\n', (8143, 8224), False, 'from qiskit_metal import draw, Dict\n'), ((8378, 8436), 'qiskit_metal.draw.translate', 'draw.translate', (['bottom', '(0.0)', '(p.pad_height + p.finger_space)'], {}), '(bottom, 0.0, p.pad_height + p.finger_space)\n', (8392, 8436), False, 'from qiskit_metal import draw, Dict\n'), ((8451, 8487), 'qiskit_metal.draw.rotate', 'draw.rotate', (['top', 'p.rotation_top_pad'], {}), '(top, p.rotation_top_pad)\n', (8462, 8487), False, 'from qiskit_metal import draw, Dict\n'), ((8554, 8631), 'qiskit_metal.draw.union', 'draw.union', (['bottom', 'top', 'rect_jj', 'coupling_capacitor', 'cc_topleft', 'cc_topright'], {}), '(bottom, top, rect_jj, coupling_capacitor, cc_topleft, cc_topright)\n', (8564, 8631), False, 'from qiskit_metal import draw, Dict\n'), ((8726, 8779), 'qiskit_metal.draw.rectangle', 'draw.rectangle', (['(1.5 * p.pad_width)', '(5.0 * p.pad_height)'], {}), '(1.5 * p.pad_width, 5.0 * p.pad_height)\n', (8740, 8779), False, 'from qiskit_metal import draw, Dict\n'), ((8918, 9012), 'qiskit_metal.draw.translate', 'draw.translate', (['design', '(0.0)', '(-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)'], {}), '(design, 0.0, -0.5 * p.pad_height - p.finger_height - 
0.5 * p\n .finger_space)\n', (8932, 9012), False, 'from qiskit_metal import draw, Dict\n'), ((9123, 9169), 'qiskit_metal.draw.rotate', 'draw.rotate', (['design', 'p.rotation'], {'origin': '(0, 0)'}), '(design, p.rotation, origin=(0, 0))\n', (9134, 9169), False, 'from qiskit_metal import draw, Dict\n'), ((9187, 9237), 'qiskit_metal.draw.translate', 'draw.translate', (['design', 'p.position_x', 'p.position_y'], {}), '(design, p.position_x, p.position_y)\n', (9201, 9237), False, 'from qiskit_metal import draw, Dict\n'), ((9256, 9302), 'qiskit_metal.draw.rotate', 'draw.rotate', (['pocket', 'p.rotation'], {'origin': '(0, 0)'}), '(pocket, p.rotation, origin=(0, 0))\n', (9267, 9302), False, 'from qiskit_metal import draw, Dict\n'), ((9320, 9370), 'qiskit_metal.draw.translate', 'draw.translate', (['pocket', 'p.position_x', 'p.position_y'], {}), '(pocket, p.position_x, p.position_y)\n', (9334, 9370), False, 'from qiskit_metal import draw, Dict\n'), ((10971, 10993), 'numpy.array', 'np.array', (['[qp1a, qp1b]'], {}), '([qp1a, qp1b])\n', (10979, 10993), True, 'import numpy as np\n'), ((11888, 11910), 'numpy.array', 'np.array', (['[qp2a, qp2b]'], {}), '([qp2a, qp2b])\n', (11896, 11910), True, 'import numpy as np\n'), ((12806, 12828), 'numpy.array', 'np.array', (['[qp3a, qp3b]'], {}), '([qp3a, qp3b])\n', (12814, 12828), True, 'import numpy as np\n'), ((10127, 10158), 'math.cos', 'cos', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10130, 10158), False, 'from math import sin, cos\n'), ((10168, 10199), 'math.sin', 'sin', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10171, 10199), False, 'from math import sin, cos\n'), ((10243, 10274), 'math.sin', 'sin', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10246, 10274), False, 'from math import sin, cos\n'), ((10284, 10315), 'math.cos', 'cos', (['(p.rotation * 3.14159 / 180)'], {}), '(p.rotation * 3.14159 / 180)\n', (10287, 10315), False, 'from math import sin, cos\n')]
|
import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
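# Illustrative sketch (comments only): this internal helper is normally reached
# through DataFrame.apply rather than called directly, e.g.
#   df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})   # assumes `import pandas as pd`
#   op = frame_apply(df, np.sum, axis=0)            # axis=0 -> FrameRowApply
#   result = op.get_result()                        # same as df.apply(np.sum)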
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._data.apply("apply", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, copy=False
)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([]))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_apply
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_apply_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.append(v.name)
successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super().apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super().apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
|
[
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.reduction.compute_reduction",
"pandas.Series",
"pandas.core.dtypes.common.is_sequence",
"numpy.asarray",
"inspect.getfullargspec",
"numpy.errstate",
"numpy.apply_along_axis",
"numpy.empty_like",
"pandas.core.dtypes.common.is_dict_like"
] |
[((5381, 5409), 'numpy.empty_like', 'np.empty_like', (['target.values'], {}), '(target.values)\n', (5394, 5409), True, 'import numpy as np\n'), ((2193, 2213), 'pandas.core.dtypes.common.is_list_like', 'is_list_like', (['self.f'], {}), '(self.f)\n', (2205, 2213), False, 'from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_sequence\n'), ((2217, 2237), 'pandas.core.dtypes.common.is_dict_like', 'is_dict_like', (['self.f'], {}), '(self.f)\n', (2229, 2237), False, 'from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_sequence\n'), ((2764, 2792), 'inspect.getfullargspec', 'inspect.getfullargspec', (['func'], {}), '(func)\n', (2786, 2792), False, 'import inspect\n'), ((4742, 4809), 'pandas._libs.reduction.compute_reduction', 'libreduction.compute_reduction', (['self.values', 'self.f'], {'axis': 'self.axis'}), '(self.values, self.f, axis=self.axis)\n', (4772, 4809), True, 'from pandas._libs import reduction as libreduction\n'), ((9164, 9187), 'pandas.core.dtypes.common.is_sequence', 'is_sequence', (['results[0]'], {}), '(results[0])\n', (9175, 9187), False, 'from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_sequence\n'), ((5023, 5074), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.f', 'self.axis', 'self.values'], {}), '(self.f, self.axis, self.values)\n', (5042, 5074), True, 'import numpy as np\n'), ((5610, 5625), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (5620, 5625), True, 'import numpy as np\n'), ((7224, 7318), 'pandas._libs.reduction.compute_reduction', 'libreduction.compute_reduction', (['values', 'self.f'], {'axis': 'self.axis', 'dummy': 'dummy', 'labels': 'labels'}), '(values, self.f, axis=self.axis, dummy=dummy,\n labels=labels)\n', (7254, 7318), True, 'from pandas._libs import reduction as libreduction\n'), ((11205, 11220), 'pandas.Series', 'Series', (['results'], {}), '(results)\n', (11211, 11220), False, 'from pandas import Series\n'), ((3000, 3025), 'numpy.errstate', 'np.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (3011, 3025), True, 'import numpy as np\n'), ((4222, 4232), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (4228, 4232), False, 'from pandas import Series\n'), ((4450, 4460), 'pandas.Series', 'Series', (['[]'], {}), '([])\n', (4456, 4460), False, 'from pandas import Series\n')]
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Test for the piezo tensor class
"""
__author__ = "<NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "4/1/16"
import os
import unittest
import numpy as np
from pymatgen.analysis.piezo import PiezoTensor
from pymatgen.util.testing import PymatgenTest
class PiezoTest(PymatgenTest):
def setUp(self):
self.piezo_struc = self.get_structure("BaNiO3")
self.voigt_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.vasp_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839],
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.full_tensor_array = [
[[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
[[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
]
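# The Voigt matrices above and full_tensor_array describe the same rank-3 piezo
# tensor: Voigt column j of row i maps to tensor entries [i][k][l] with
# j = 0..5 <-> (k, l) = (0,0), (1,1), (2,2), (1,2)/(2,1), (0,2)/(2,0), (0,1)/(1,0).
# For example, voigt_matrix[0][4] = 0.03839 corresponds to
# full_tensor_array[0][0][2] and full_tensor_array[0][2][0].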
def test_new(self):
pt = PiezoTensor(self.full_tensor_array)
self.assertArrayAlmostEqual(pt, self.full_tensor_array)
bad_dim_array = np.zeros((3, 3))
self.assertRaises(ValueError, PiezoTensor, bad_dim_array)
def test_from_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_voigt(self.voigt_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
def test_from_vasp_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_vasp_voigt(self.vasp_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
if __name__ == "__main__":
unittest.main()
|
[
"pymatgen.analysis.piezo.PiezoTensor.from_vasp_voigt",
"pymatgen.analysis.piezo.PiezoTensor",
"numpy.array",
"numpy.zeros",
"pymatgen.analysis.piezo.PiezoTensor.from_voigt",
"unittest.main"
] |
[((2195, 2210), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2208, 2210), False, 'import unittest\n'), ((554, 684), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0], [0.0, 0.0, 0.0, 0.03839, 0.0, 0.0], [\n 6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0], [0.0, 0.0, 0.0, 0.03839, 0.0,\n 0.0], [6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]])\n', (562, 684), True, 'import numpy as np\n'), ((794, 929), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839], [0.0, 0.0, 0.0, 0.0, 0.03839, 0.0, 0.0\n ], [6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839], [0.0, 0.0, 0.0, 0.0, 0.03839,\n 0.0, 0.0], [6.89822, 6.89822, 27.4628, 0.0, 0.0, 0.0]])\n', (802, 929), True, 'import numpy as np\n'), ((1318, 1353), 'pymatgen.analysis.piezo.PiezoTensor', 'PiezoTensor', (['self.full_tensor_array'], {}), '(self.full_tensor_array)\n', (1329, 1353), False, 'from pymatgen.analysis.piezo import PiezoTensor\n'), ((1442, 1458), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1450, 1458), True, 'import numpy as np\n'), ((1577, 1593), 'numpy.zeros', 'np.zeros', (['(3, 7)'], {}), '((3, 7))\n', (1585, 1593), True, 'import numpy as np\n'), ((1607, 1648), 'pymatgen.analysis.piezo.PiezoTensor.from_voigt', 'PiezoTensor.from_voigt', (['self.voigt_matrix'], {}), '(self.voigt_matrix)\n', (1629, 1648), False, 'from pymatgen.analysis.piezo import PiezoTensor\n'), ((1896, 1912), 'numpy.zeros', 'np.zeros', (['(3, 7)'], {}), '((3, 7))\n', (1904, 1912), True, 'import numpy as np\n'), ((1926, 1971), 'pymatgen.analysis.piezo.PiezoTensor.from_vasp_voigt', 'PiezoTensor.from_vasp_voigt', (['self.vasp_matrix'], {}), '(self.vasp_matrix)\n', (1953, 1971), False, 'from pymatgen.analysis.piezo import PiezoTensor\n')]
|
import argparse
import json
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
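Because both this custom Keras metric and sklearn's f1_score are imported in the same script, the batch-wise metric can be sanity-checked on a tiny made-up batch. This is not part of the script above; it assumes a Keras backend where K.constant and K.eval work outside a model (true for the TensorFlow backends), and all numbers are invented:

import numpy as np
from keras import backend as K
from sklearn.metrics import f1_score

y_true = np.array([1.0, 0.0, 1.0, 1.0])
y_pred = np.array([0.9, 0.2, 0.8, 0.4])   # probabilities from a sigmoid output
batch_f1 = K.eval(f1(K.constant(y_true), K.constant(y_pred)))   # uses the f1 defined above
print(batch_f1)                                              # ~0.8
print(f1_score(y_true.astype(int), (y_pred > 0.5).astype(int)))  # 0.8 on the same thresholded labels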
def get_embeddings(sentences_list,layer_json):
'''
    :param sentences_list: the path of the sentences.txt file
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :return: dictionary mapping each sentence of sentences_list to its embedding
'''
    sentences = dict()  # key: the index of each line in sentences_list.txt, value: the sentence
    embeddings = dict()  # key: the index of each sentence, value: its embedding
    sentence_emb = dict()  # key: sentence, value: its embedding
with open(sentences_list,'r') as file:
for index,line in enumerate(file):
sentences[index] = line.strip()
with open(layer_json, 'r',encoding='utf-8') as f:
for line in f:
embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features'])
for key,value in sentences.items():
sentence_emb[value] = embeddings[key]
return sentence_emb
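get_embeddings assumes layer_json is a JSON-lines file in the style of BERT's feature-extraction output: one JSON object per sentence with a linex_index and a features vector. A hypothetical line (numbers invented, truncated to three values instead of the 768 the rest of the script expects) and how this function reads it:

import json
import numpy as np

sample_line = '{"linex_index": 0, "features": [0.12, -0.45, 0.07]}'  # made-up example line
record = json.loads(sample_line)
embedding = np.asarray(record["features"])
print(record["linex_index"], embedding.shape)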
def train_classifier(sentences_list,layer_json,dataset_csv,filename):
'''
    :param sentences_list: the path of the sentences.txt file
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :param dataset_csv: the path of the dataset
    :param filename: the path of the pickle file where the model will be stored
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length]) # np.append(features,length,axis=1)
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)
log.fit(X_train, y_train)
#save the model
_ = joblib.dump(log, filename, compress=9)
predictions = log.predict(X_val)
print("###########################################")
print("Results using embeddings from the",layer_json,"file")
print(classification_report(y_val, predictions))
print("F1 score using Logistic Regression:",f1_score(y_val, predictions))
print("###########################################")
#train a DNN
f1_results = list()
for i in range(3):
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
# compile network
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
# fit network
model.fit(X_train, y_train, epochs=100, batch_size=64)
loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
print('\nTest F1: %f' % (f_1 * 100))
f1_results.append(f_1)
model = None
print("###########################################")
print("Results using embeddings from the", layer_json, "file")
# evaluate
print(np.mean(f1_results))
print("###########################################")
def parameter_tuning_LR(sentences_list,layer_json,dataset_csv):
'''
    :param sentences_list: the path of the sentences.txt file
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:return:
'''
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list,layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
C = [0.1,1,2,5,10]
solver = ['newton-cg','saga','sag']
best_params = dict()
best_score = 0.0
for c in C:
for s in solver:
start = time.time()
log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c)
log.fit(X_train, y_train)
predictions = log.predict(X_val)
print("###########################################")
print("LR with C =",c,'and solver = ',s)
print("Results using embeddings from the", layer_json, "file")
print(classification_report(y_val, predictions))
f1 = f1_score(y_val, predictions)
if f1 > best_score:
best_score = f1
best_params['c'] = c
best_params['solver'] = s
print("F1 score using Logistic Regression:",f1)
print("###########################################")
end = time.time()
running_time = end - start
print("Running time:"+str(running_time))
def visualize_DNN(file_to_save):
'''
    Save the DNN architecture to a png file (better to use the Visulize_DNN.ipynd notebook for this).
    :param file_to_save: the png file to which the DNN architecture will be saved
:return: None
'''
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
plot_model(model, to_file=file_to_save, show_shapes=True)
def save_model(sentences_list,layer_json,dataset_csv,pkl):
dataset = pd.read_csv(dataset_csv)
bert_dict = get_embeddings(sentences_list, layer_json)
length = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.iterrows():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.append(bert_dict[sentence])
else:
sentence_emb.append(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.append(bert_dict[previous])
else:
previous_emb.append(np.zeros(768))
if nexts in bert_dict:
next_list.append(bert_dict[nexts])
else:
next_list.append(np.zeros(768))
if section in bert_dict:
section_list.append(bert_dict[section])
else:
section_list.append(np.zeros(768))
length.append(row[1][4])
label.append(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
length = np.asarray(length)
print(length.shape)
label = np.asarray(label)
print(errors)
features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
features = np.column_stack([features, length])
print(features.shape)
log = LogisticRegression(random_state=0, solver='saga', max_iter=1000, C=1)
log.fit(features, label)
_ = joblib.dump(log, pkl, compress=9)
if __name__ == '__main__':
#save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','summarizer1.pkl')
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--sentences", required=True, help="sentences list")
ap.add_argument("-o", "--output", required=True, help="output")
ap.add_argument("-ts", "--train set", required=True, help="path to train set")
ap.add_argument("-sp", "--summarizer path", required=True, help="path to save summarizer")
args = vars(ap.parse_args())
layer = train_classifier(args['sentences'], args['output'], args['train set'],args['summarizer path'])
#layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl')
#layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl')
#layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl')
#layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl')
#tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv')
#layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
|
[
"pandas.read_csv",
"sklearn.metrics.classification_report",
"keras.utils.vis_utils.plot_model",
"numpy.column_stack",
"keras.layers.Dense",
"numpy.mean",
"argparse.ArgumentParser",
"keras.backend.clip",
"numpy.asarray",
"numpy.concatenate",
"keras.backend.epsilon",
"json.loads",
"sklearn.model_selection.train_test_split",
"keras.models.Sequential",
"sklearn.externals.joblib.dump",
"time.time",
"keras.layers.Dropout",
"sklearn.metrics.f1_score",
"sklearn.linear_model.LogisticRegression",
"numpy.zeros"
] |
[((2973, 2997), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (2984, 2997), True, 'import pandas as pd\n'), ((4112, 4136), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (4122, 4136), True, 'import numpy as np\n'), ((4182, 4203), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (4192, 4203), True, 'import numpy as np\n'), ((4249, 4273), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (4259, 4273), True, 'import numpy as np\n'), ((4322, 4346), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (4332, 4346), True, 'import numpy as np\n'), ((4390, 4408), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (4400, 4408), True, 'import numpy as np\n'), ((4445, 4462), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (4455, 4462), True, 'import numpy as np\n'), ((4496, 4571), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (4510, 4571), True, 'import numpy as np\n'), ((4586, 4621), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (4601, 4621), True, 'import numpy as np\n'), ((4723, 4789), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'label'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(features, label, test_size=0.33, random_state=42)\n', (4739, 4789), False, 'from sklearn.model_selection import train_test_split\n'), ((4801, 4877), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""newton-cg"""', 'max_iter': '(1000)', 'C': '(0.1)'}), "(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)\n", (4819, 4877), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4937, 4975), 'sklearn.externals.joblib.dump', 'joblib.dump', (['log', 'filename'], {'compress': '(9)'}), '(log, filename, compress=9)\n', (4948, 4975), False, 'from sklearn.externals import joblib\n'), ((6715, 6739), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (6726, 6739), True, 'import pandas as pd\n'), ((7854, 7878), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (7864, 7878), True, 'import numpy as np\n'), ((7924, 7945), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (7934, 7945), True, 'import numpy as np\n'), ((7991, 8015), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (8001, 8015), True, 'import numpy as np\n'), ((8064, 8088), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (8074, 8088), True, 'import numpy as np\n'), ((8132, 8150), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (8142, 8150), True, 'import numpy as np\n'), ((8187, 8204), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (8197, 8204), True, 'import numpy as np\n'), ((8238, 8313), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (8252, 8313), True, 'import numpy as np\n'), ((8328, 8363), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (8343, 8363), True, 'import numpy as np\n'), ((8428, 8494), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'label'], 
{'test_size': '(0.33)', 'random_state': '(42)'}), '(features, label, test_size=0.33, random_state=42)\n', (8444, 8494), False, 'from sklearn.model_selection import train_test_split\n'), ((9783, 9795), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9793, 9795), False, 'from keras.models import Sequential\n'), ((10175, 10232), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': 'file_to_save', 'show_shapes': '(True)'}), '(model, to_file=file_to_save, show_shapes=True)\n', (10185, 10232), False, 'from keras.utils.vis_utils import plot_model\n'), ((10309, 10333), 'pandas.read_csv', 'pd.read_csv', (['dataset_csv'], {}), '(dataset_csv)\n', (10320, 10333), True, 'import pandas as pd\n'), ((11448, 11472), 'numpy.asarray', 'np.asarray', (['sentence_emb'], {}), '(sentence_emb)\n', (11458, 11472), True, 'import numpy as np\n'), ((11518, 11539), 'numpy.asarray', 'np.asarray', (['next_list'], {}), '(next_list)\n', (11528, 11539), True, 'import numpy as np\n'), ((11585, 11609), 'numpy.asarray', 'np.asarray', (['previous_emb'], {}), '(previous_emb)\n', (11595, 11609), True, 'import numpy as np\n'), ((11658, 11682), 'numpy.asarray', 'np.asarray', (['section_list'], {}), '(section_list)\n', (11668, 11682), True, 'import numpy as np\n'), ((11726, 11744), 'numpy.asarray', 'np.asarray', (['length'], {}), '(length)\n', (11736, 11744), True, 'import numpy as np\n'), ((11781, 11798), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (11791, 11798), True, 'import numpy as np\n'), ((11832, 11907), 'numpy.concatenate', 'np.concatenate', (['[sentence_emb, previous_emb, next_emb, section_emb]'], {'axis': '(1)'}), '([sentence_emb, previous_emb, next_emb, section_emb], axis=1)\n', (11846, 11907), True, 'import numpy as np\n'), ((11923, 11958), 'numpy.column_stack', 'np.column_stack', (['[features, length]'], {}), '([features, length])\n', (11938, 11958), True, 'import numpy as np\n'), ((11996, 12065), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': '"""saga"""', 'max_iter': '(1000)', 'C': '(1)'}), "(random_state=0, solver='saga', max_iter=1000, C=1)\n", (12014, 12065), False, 'from sklearn.linear_model import LogisticRegression\n'), ((12104, 12137), 'sklearn.externals.joblib.dump', 'joblib.dump', (['log', 'pkl'], {'compress': '(9)'}), '(log, pkl, compress=9)\n', (12115, 12137), False, 'from sklearn.externals import joblib\n'), ((12286, 12311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12309, 12311), False, 'import argparse\n'), ((5146, 5187), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (5167, 5187), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((5237, 5265), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (5245, 5265), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((5406, 5418), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5416, 5418), False, 'from keras.models import Sequential\n'), ((6327, 6346), 'numpy.mean', 'np.mean', (['f1_results'], {}), '(f1_results)\n', (6334, 6346), True, 'import numpy as np\n'), ((9810, 9854), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (9815, 9854), False, 'from keras.layers import Dense, Dropout\n'), ((9870, 9915), 'keras.layers.Dense', 'Dense', (['(128)'], 
{'activation': '"""relu"""', 'trainable': '(True)'}), "(128, activation='relu', trainable=True)\n", (9875, 9915), False, 'from keras.layers import Dense, Dropout\n'), ((9931, 9943), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (9938, 9943), False, 'from keras.layers import Dense, Dropout\n'), ((9960, 10004), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (9965, 10004), False, 'from keras.layers import Dense, Dropout\n'), ((10020, 10033), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (10027, 10033), False, 'from keras.layers import Dense, Dropout\n'), ((10049, 10093), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (10054, 10093), False, 'from keras.layers import Dense, Dropout\n'), ((10109, 10122), 'keras.layers.Dropout', 'Dropout', (['(0.35)'], {}), '(0.35)\n', (10116, 10122), False, 'from keras.layers import Dense, Dropout\n'), ((10138, 10168), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (10143, 10168), False, 'from keras.layers import Dense, Dropout\n'), ((5437, 5481), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5442, 5481), False, 'from keras.layers import Dense, Dropout\n'), ((5501, 5546), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(128, activation='relu', trainable=True)\n", (5506, 5546), False, 'from keras.layers import Dense, Dropout\n'), ((5566, 5578), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (5573, 5578), False, 'from keras.layers import Dense, Dropout\n'), ((5599, 5643), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5604, 5643), False, 'from keras.layers import Dense, Dropout\n'), ((5663, 5676), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5670, 5676), False, 'from keras.layers import Dense, Dropout\n'), ((5696, 5740), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'trainable': '(True)'}), "(64, activation='relu', trainable=True)\n", (5701, 5740), False, 'from keras.layers import Dense, Dropout\n'), ((5760, 5773), 'keras.layers.Dropout', 'Dropout', (['(0.35)'], {}), '(0.35)\n', (5767, 5773), False, 'from keras.layers import Dense, Dropout\n'), ((5793, 5823), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (5798, 5823), False, 'from keras.layers import Dense, Dropout\n'), ((8666, 8677), 'time.time', 'time.time', ([], {}), '()\n', (8675, 8677), False, 'import time\n'), ((8696, 8760), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'solver': 's', 'max_iter': '(1000)', 'C': 'c'}), '(random_state=0, solver=s, max_iter=1000, C=c)\n', (8714, 8760), False, 'from sklearn.linear_model import LogisticRegression\n'), ((9116, 9144), 'sklearn.metrics.f1_score', 'f1_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (9124, 9144), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((9431, 9442), 'time.time', 'time.time', ([], {}), '()\n', (9440, 9442), False, 'import time\n'), ((751, 780), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), 
'(y_true * y_pred, 0, 1)\n', (757, 780), True, 'from keras import backend as K\n'), ((826, 846), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (832, 846), True, 'from keras import backend as K\n'), ((905, 916), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (914, 916), True, 'from keras import backend as K\n'), ((1236, 1265), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1242, 1265), True, 'from keras import backend as K\n'), ((1312, 1332), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (1318, 1332), True, 'from keras import backend as K\n'), ((1395, 1406), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1404, 1406), True, 'from keras import backend as K\n'), ((1562, 1573), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1571, 1573), True, 'from keras import backend as K\n'), ((3526, 3539), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3534, 3539), True, 'import numpy as np\n'), ((3727, 3740), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3735, 3740), True, 'import numpy as np\n'), ((3864, 3877), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (3872, 3877), True, 'import numpy as np\n'), ((4011, 4024), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (4019, 4024), True, 'import numpy as np\n'), ((7268, 7281), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7276, 7281), True, 'import numpy as np\n'), ((7469, 7482), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7477, 7482), True, 'import numpy as np\n'), ((7606, 7619), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7614, 7619), True, 'import numpy as np\n'), ((7753, 7766), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (7761, 7766), True, 'import numpy as np\n'), ((9056, 9097), 'sklearn.metrics.classification_report', 'classification_report', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (9077, 9097), False, 'from sklearn.metrics import classification_report, f1_score\n'), ((10862, 10875), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (10870, 10875), True, 'import numpy as np\n'), ((11063, 11076), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11071, 11076), True, 'import numpy as np\n'), ((11200, 11213), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11208, 11213), True, 'import numpy as np\n'), ((11347, 11360), 'numpy.zeros', 'np.zeros', (['(768)'], {}), '(768)\n', (11355, 11360), True, 'import numpy as np\n'), ((2388, 2404), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2398, 2404), False, 'import json\n'), ((2434, 2450), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2444, 2450), False, 'import json\n')]
|
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm
__author__ = "<NAME>"
def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
''' Estimate the collision condition. '''
(veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
if -1e-6 < delta_angle_2 < 1e-6:
delta_angle_2 = 1e-6
delta_v1_list = []
delta_v2_list = []
    # Estimate the collision condition (delta-v) according to the principal impact direction.
for veh_striking in veh_striking_list:
if veh_striking[0] == 1:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 2:
veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V1_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
elif veh_striking[0] == 3:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgs[0] - veh_striking[3])
veh_RDS = np.abs(V2_v * np.cos(delta_angle) - V1_v)
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(veh_ca + delta_angle_2))
if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
veh_e = 2 / veh_RDS
else:
veh_e = 0.5 / veh_RDS
elif veh_striking[0] == 4:
veh_ca = np.arctan(veh_cgf[1] / veh_cgs[1])
veh_a1 = np.abs(veh_cgf[0] - veh_striking[3])
veh_a2 = np.abs(np.sqrt(veh_cgf[1] ** 2 + veh_cgs[1] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
veh_RDS = V2_v * np.sin(delta_angle_2)
veh_e = 1.5 / veh_RDS
# Obtain delta-v based on the plane 2-DOF rigid-body collision model with momentum conservation.
veh_y1 = veh_k[0] ** 2 / (veh_a1 ** 2 + veh_k[0] ** 2)
veh_y2 = veh_k[1] ** 2 / (veh_a2 ** 2 + veh_k[1] ** 2)
delta_v1 = (1 + veh_e) * veh_m[1] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v2 = (1 + veh_e) * veh_m[0] * veh_y1 * veh_y2 * veh_RDS / (veh_m[0] * veh_y1 + veh_m[1] * veh_y2)
delta_v1_list.append(delta_v1)
delta_v2_list.append(delta_v2)
delta_v1_ = max(delta_v1_list)
delta_v2_ = max(delta_v2_list)
index = delta_v1_list.index(max(delta_v1_list))
return delta_v1_, delta_v2_, index
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.arctan"
] |
[((723, 742), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (729, 742), True, 'import numpy as np\n'), ((1050, 1084), 'numpy.arctan', 'np.arctan', (['(veh_cgf[0] / veh_cgs[0])'], {}), '(veh_cgf[0] / veh_cgs[0])\n', (1059, 1084), True, 'import numpy as np\n'), ((1106, 1142), 'numpy.abs', 'np.abs', (['(veh_cgs[1] - veh_striking[3])'], {}), '(veh_cgs[1] - veh_striking[3])\n', (1112, 1142), True, 'import numpy as np\n'), ((1579, 1613), 'numpy.arctan', 'np.arctan', (['(veh_cgf[0] / veh_cgs[0])'], {}), '(veh_cgf[0] / veh_cgs[0])\n', (1588, 1613), True, 'import numpy as np\n'), ((1635, 1671), 'numpy.abs', 'np.abs', (['(veh_cgf[1] - veh_striking[3])'], {}), '(veh_cgf[1] - veh_striking[3])\n', (1641, 1671), True, 'import numpy as np\n'), ((1235, 1277), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)'], {}), '(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)\n', (1242, 1277), True, 'import numpy as np\n'), ((1280, 1310), 'numpy.cos', 'np.cos', (['(veh_ca + delta_angle_2)'], {}), '(veh_ca + delta_angle_2)\n', (1286, 1310), True, 'import numpy as np\n'), ((1818, 1839), 'numpy.sin', 'np.sin', (['delta_angle_2'], {}), '(delta_angle_2)\n', (1824, 1839), True, 'import numpy as np\n'), ((1931, 1965), 'numpy.arctan', 'np.arctan', (['(veh_cgf[1] / veh_cgs[1])'], {}), '(veh_cgf[1] / veh_cgs[1])\n', (1940, 1965), True, 'import numpy as np\n'), ((1987, 2023), 'numpy.abs', 'np.abs', (['(veh_cgs[0] - veh_striking[3])'], {}), '(veh_cgs[0] - veh_striking[3])\n', (1993, 2023), True, 'import numpy as np\n'), ((1179, 1198), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (1185, 1198), True, 'import numpy as np\n'), ((1700, 1742), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)'], {}), '(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)\n', (1707, 1742), True, 'import numpy as np\n'), ((1745, 1787), 'numpy.cos', 'np.cos', (['(delta_angle_2 - veh_ca + np.pi / 2)'], {}), '(delta_angle_2 - veh_ca + np.pi / 2)\n', (1751, 1787), True, 'import numpy as np\n'), ((2460, 2494), 'numpy.arctan', 'np.arctan', (['(veh_cgf[1] / veh_cgs[1])'], {}), '(veh_cgf[1] / veh_cgs[1])\n', (2469, 2494), True, 'import numpy as np\n'), ((2516, 2552), 'numpy.abs', 'np.abs', (['(veh_cgf[0] - veh_striking[3])'], {}), '(veh_cgf[0] - veh_striking[3])\n', (2522, 2552), True, 'import numpy as np\n'), ((2116, 2158), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)'], {}), '(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)\n', (2123, 2158), True, 'import numpy as np\n'), ((2161, 2191), 'numpy.cos', 'np.cos', (['(veh_ca + delta_angle_2)'], {}), '(veh_ca + delta_angle_2)\n', (2167, 2191), True, 'import numpy as np\n'), ((2699, 2720), 'numpy.sin', 'np.sin', (['delta_angle_2'], {}), '(delta_angle_2)\n', (2705, 2720), True, 'import numpy as np\n'), ((2060, 2079), 'numpy.cos', 'np.cos', (['delta_angle'], {}), '(delta_angle)\n', (2066, 2079), True, 'import numpy as np\n'), ((2581, 2623), 'numpy.sqrt', 'np.sqrt', (['(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)'], {}), '(veh_cgf[1] ** 2 + veh_cgs[1] ** 2)\n', (2588, 2623), True, 'import numpy as np\n'), ((2626, 2668), 'numpy.cos', 'np.cos', (['(delta_angle_2 - veh_ca + np.pi / 2)'], {}), '(delta_angle_2 - veh_ca + np.pi / 2)\n', (2632, 2668), True, 'import numpy as np\n')]
|
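A hypothetical call to the Collision_cond function above; every number here is invented purely to show the expected argument layout. veh_param packs the six per-vehicle parameter pairs (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) unpacked at the top of the function, and each striking entry is (case id 1-4, panel index, panel index, offset) as used in the loop:

import numpy as np

# Illustrative-only numbers; not taken from the paper or any real vehicle.
veh_param = (
    (4.8, 4.6),         # veh_l
    (1.8, 1.8),         # veh_w
    (1.3, 1.2),         # veh_cgf
    (1.5, 1.4),         # veh_cgs
    (1.2, 1.2),         # veh_k
    (1500.0, 1600.0),   # veh_m
)
veh_striking_list = [(1, 0, 1, 0.4)]   # one hypothetical striking configuration

dv1, dv2, idx = Collision_cond(veh_striking_list, V1_v=15.0, V2_v=10.0,
                               delta_angle=np.pi / 6, veh_param=veh_param)
print(dv1, dv2, idx)   # delta-v for each vehicle and the index of the governing configuration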
"\"\"\"Test the search module\"\"\"\n\nfrom collections.abc import Iterable, Sized\nfrom io import S(...TRUNCATED) | ["sklearn.utils._testing.assert_warns_message","sklearn.model_selection.GridSearchCV","sklearn.model(...TRUNCATED) | "[((4079, 4125), 'numpy.array', 'np.array', (['[[-1, -1], [-2, -1], [1, 1], [2, 1]]'], {}), '([[-1, (...TRUNCATED) |
"# -*- encoding:utf-8 -*-\n# @Time : 2021/1/3 15:15\n# @Author : gfjiang\nimport os.path as osp\(...TRUNCATED) | ["math.sqrt","cvtools.imwrite","mmdet.ops.nms","matplotlib.pyplot.imshow","os.path.exists","cvtools.(...TRUNCATED) | "[((643, 671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(32, 32)'}), '(figsize=(3(...TRUNCATED) |